text | meta
---|---|
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2018 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
*/
#ifndef __I915_OA_KBLGT3_H__
#define __I915_OA_KBLGT3_H__
extern void i915_perf_load_test_config_kblgt3(struct drm_i915_private *dev_priv);
#endif
| {
"language": "C"
} |
/*
* Copyright (C) 2009 Maciej Cencora <m.cencora@gmail.com>
*
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "radeon_common.h"
#include "radeon_texture.h"
#include "main/enums.h"
#include "main/image.h"
#include "main/teximage.h"
#include "main/texstate.h"
#include "drivers/common/meta.h"
#include "radeon_mipmap_tree.h"
static GLboolean
do_copy_texsubimage(struct gl_context *ctx,
struct radeon_tex_obj *tobj,
radeon_texture_image *timg,
GLint dstx, GLint dsty,
struct radeon_renderbuffer *rrb,
GLint x, GLint y,
GLsizei width, GLsizei height)
{
radeonContextPtr radeon = RADEON_CONTEXT(ctx);
const GLuint face = timg->base.Base.Face;
const GLuint level = timg->base.Base.Level;
unsigned src_bpp;
unsigned dst_bpp;
mesa_format src_mesaformat;
mesa_format dst_mesaformat;
unsigned flip_y;
if (!radeon->vtbl.blit) {
return GL_FALSE;
}
// This is a software renderbuffer; fall back to swrast
if (!rrb) {
return GL_FALSE;
}
if (_mesa_get_format_bits(timg->base.Base.TexFormat, GL_DEPTH_BITS) > 0) {
/* copying depth values */
flip_y = ctx->ReadBuffer->Attachment[BUFFER_DEPTH].Type == GL_NONE;
} else {
/* copying color */
flip_y = ctx->ReadBuffer->Attachment[BUFFER_COLOR0].Type == GL_NONE;
}
if (!timg->mt) {
radeon_validate_texture_miptree(ctx, &tobj->base.Sampler, &tobj->base);
}
assert(rrb->bo);
assert(timg->mt);
assert(timg->mt->bo);
assert(timg->base.Base.Width >= dstx + width);
assert(timg->base.Base.Height >= dsty + height);
intptr_t src_offset = rrb->draw_offset;
intptr_t dst_offset = radeon_miptree_image_offset(timg->mt, face, level);
if (0) {
fprintf(stderr, "%s: copying to face %d, level %d\n",
__func__, face, level);
fprintf(stderr, "to: x %d, y %d, offset %d\n", dstx, dsty, (uint32_t) dst_offset);
fprintf(stderr, "from (%dx%d) width %d, height %d, offset %d, pitch %d\n",
x, y, rrb->base.Base.Width, rrb->base.Base.Height, (uint32_t) src_offset, rrb->pitch/rrb->cpp);
fprintf(stderr, "src size %d, dst size %d\n", rrb->bo->size, timg->mt->bo->size);
}
src_mesaformat = rrb->base.Base.Format;
dst_mesaformat = timg->base.Base.TexFormat;
src_bpp = _mesa_get_format_bytes(src_mesaformat);
dst_bpp = _mesa_get_format_bytes(dst_mesaformat);
if (!radeon->vtbl.check_blit(dst_mesaformat, rrb->pitch / rrb->cpp)) {
/* depth formats tend to be special */
if (_mesa_get_format_bits(dst_mesaformat, GL_DEPTH_BITS) > 0)
return GL_FALSE;
if (src_bpp != dst_bpp)
return GL_FALSE;
switch (dst_bpp) {
case 2:
src_mesaformat = MESA_FORMAT_B5G6R5_UNORM;
dst_mesaformat = MESA_FORMAT_B5G6R5_UNORM;
break;
case 4:
src_mesaformat = MESA_FORMAT_B8G8R8A8_UNORM;
dst_mesaformat = MESA_FORMAT_B8G8R8A8_UNORM;
break;
case 1:
src_mesaformat = MESA_FORMAT_A_UNORM8;
dst_mesaformat = MESA_FORMAT_A_UNORM8;
break;
default:
return GL_FALSE;
}
}
/* blit from src buffer to texture */
return radeon->vtbl.blit(ctx, rrb->bo, src_offset, src_mesaformat, rrb->pitch/rrb->cpp,
rrb->base.Base.Width, rrb->base.Base.Height, x, y,
timg->mt->bo, dst_offset, dst_mesaformat,
timg->mt->levels[level].rowstride / dst_bpp,
timg->base.Base.Width, timg->base.Base.Height,
dstx, dsty, width, height, flip_y);
}
void
radeonCopyTexSubImage(struct gl_context *ctx, GLuint dims,
struct gl_texture_image *texImage,
GLint xoffset, GLint yoffset, GLint slice,
struct gl_renderbuffer *rb,
GLint x, GLint y,
GLsizei width, GLsizei height)
{
radeonContextPtr radeon = RADEON_CONTEXT(ctx);
radeon_prepare_render(radeon);
if (slice != 0 || !do_copy_texsubimage(ctx,
radeon_tex_obj(texImage->TexObject),
(radeon_texture_image *)texImage,
xoffset, yoffset,
radeon_renderbuffer(rb), x, y, width, height)) {
radeon_print(RADEON_FALLBACKS, RADEON_NORMAL,
"Falling back to sw for glCopyTexSubImage2D\n");
_mesa_meta_CopyTexSubImage(ctx, dims, texImage,
xoffset, yoffset, slice,
rb, x, y, width, height);
}
}
| {
"language": "C"
} |
/*
* Create default crypto algorithm instances.
*
* Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <crypto/internal/aead.h>
#include <linux/ctype.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "internal.h"
struct cryptomgr_param {
struct rtattr *tb[CRYPTO_MAX_ATTRS + 2];
struct {
struct rtattr attr;
struct crypto_attr_type data;
} type;
union {
struct rtattr attr;
struct {
struct rtattr attr;
struct crypto_attr_alg data;
} alg;
struct {
struct rtattr attr;
struct crypto_attr_u32 data;
} nu32;
} attrs[CRYPTO_MAX_ATTRS];
char larval[CRYPTO_MAX_ALG_NAME];
char template[CRYPTO_MAX_ALG_NAME];
u32 otype;
u32 omask;
};
struct crypto_test_param {
char driver[CRYPTO_MAX_ALG_NAME];
char alg[CRYPTO_MAX_ALG_NAME];
u32 type;
};
static int cryptomgr_probe(void *data)
{
struct cryptomgr_param *param = data;
struct crypto_template *tmpl;
struct crypto_instance *inst;
int err;
tmpl = crypto_lookup_template(param->template);
if (!tmpl)
goto err;
do {
if (tmpl->create) {
err = tmpl->create(tmpl, param->tb);
continue;
}
inst = tmpl->alloc(param->tb);
if (IS_ERR(inst))
err = PTR_ERR(inst);
else if ((err = crypto_register_instance(tmpl, inst)))
tmpl->free(inst);
} while (err == -EAGAIN && !signal_pending(current));
crypto_tmpl_put(tmpl);
if (err)
goto err;
out:
kfree(param);
module_put_and_exit(0);
err:
crypto_larval_error(param->larval, param->otype, param->omask);
goto out;
}
static int cryptomgr_schedule_probe(struct crypto_larval *larval)
{
struct task_struct *thread;
struct cryptomgr_param *param;
const char *name = larval->alg.cra_name;
const char *p;
unsigned int len;
int i;
if (!try_module_get(THIS_MODULE))
goto err;
param = kzalloc(sizeof(*param), GFP_KERNEL);
if (!param)
goto err_put_module;
for (p = name; isalnum(*p) || *p == '-' || *p == '_'; p++)
;
len = p - name;
if (!len || *p != '(')
goto err_free_param;
memcpy(param->template, name, len);
i = 0;
for (;;) {
int notnum = 0;
name = ++p;
len = 0;
for (; isalnum(*p) || *p == '-' || *p == '_'; p++)
notnum |= !isdigit(*p);
if (*p == '(') {
int recursion = 0;
for (;;) {
if (!*++p)
goto err_free_param;
if (*p == '(')
recursion++;
else if (*p == ')' && !recursion--)
break;
}
notnum = 1;
p++;
}
len = p - name;
if (!len)
goto err_free_param;
if (notnum) {
param->attrs[i].alg.attr.rta_len =
sizeof(param->attrs[i].alg);
param->attrs[i].alg.attr.rta_type = CRYPTOA_ALG;
memcpy(param->attrs[i].alg.data.name, name, len);
} else {
param->attrs[i].nu32.attr.rta_len =
sizeof(param->attrs[i].nu32);
param->attrs[i].nu32.attr.rta_type = CRYPTOA_U32;
param->attrs[i].nu32.data.num =
simple_strtol(name, NULL, 0);
}
param->tb[i + 1] = &param->attrs[i].attr;
i++;
if (i >= CRYPTO_MAX_ATTRS)
goto err_free_param;
if (*p == ')')
break;
if (*p != ',')
goto err_free_param;
}
if (!i)
goto err_free_param;
param->tb[i + 1] = NULL;
param->type.attr.rta_len = sizeof(param->type);
param->type.attr.rta_type = CRYPTOA_TYPE;
param->type.data.type = larval->alg.cra_flags & ~CRYPTO_ALG_TESTED;
param->type.data.mask = larval->mask & ~CRYPTO_ALG_TESTED;
param->tb[0] = &param->type.attr;
param->otype = larval->alg.cra_flags;
param->omask = larval->mask;
memcpy(param->larval, larval->alg.cra_name, CRYPTO_MAX_ALG_NAME);
thread = kthread_run(cryptomgr_probe, param, "cryptomgr_probe");
if (IS_ERR(thread))
goto err_free_param;
return NOTIFY_STOP;
err_free_param:
kfree(param);
err_put_module:
module_put(THIS_MODULE);
err:
return NOTIFY_OK;
}
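/*
 * Illustrative sketch only (not part of this file): cryptomgr_schedule_probe()
 * above tokenizes an algorithm name such as "hmac(sha256)" or
 * "authenc(hmac(sha1),cbc(aes))" into a template name plus its comma-separated,
 * possibly nested arguments before handing them to the template as rtattrs.
 * The standalone user-space function below mirrors just that tokenizing loop;
 * the function name and the printf output are assumptions made for the example.
 */
#include <ctype.h>
#include <stdio.h>

static void example_split_alg_name(const char *name)
{
	const char *p = name;
	size_t len;

	/* the template name runs up to the first '(' */
	while (isalnum((unsigned char)*p) || *p == '-' || *p == '_')
		p++;
	len = (size_t)(p - name);
	if (!len || *p != '(')
		return;
	printf("template: %.*s\n", (int)len, name);

	/* each argument ends at a ',' or ')' that is not inside nested parens */
	while (*p && *p != ')') {
		const char *arg = ++p;
		int depth = 0;

		while (*p && (depth > 0 || (*p != ',' && *p != ')'))) {
			if (*p == '(')
				depth++;
			else if (*p == ')')
				depth--;
			p++;
		}
		printf("arg: %.*s\n", (int)(p - arg), arg);
	}
}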
static int cryptomgr_test(void *data)
{
struct crypto_test_param *param = data;
u32 type = param->type;
int err = 0;
#ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
goto skiptest;
#endif
if (type & CRYPTO_ALG_TESTED)
goto skiptest;
err = alg_test(param->driver, param->alg, type, CRYPTO_ALG_TESTED);
skiptest:
crypto_alg_tested(param->driver, err);
kfree(param);
module_put_and_exit(0);
}
static int cryptomgr_schedule_test(struct crypto_alg *alg)
{
struct task_struct *thread;
struct crypto_test_param *param;
u32 type;
if (!try_module_get(THIS_MODULE))
goto err;
param = kzalloc(sizeof(*param), GFP_KERNEL);
if (!param)
goto err_put_module;
memcpy(param->driver, alg->cra_driver_name, sizeof(param->driver));
memcpy(param->alg, alg->cra_name, sizeof(param->alg));
type = alg->cra_flags;
/* This piece of crap needs to disappear into per-type test hooks. */
if ((!((type ^ CRYPTO_ALG_TYPE_BLKCIPHER) &
CRYPTO_ALG_TYPE_BLKCIPHER_MASK) && !(type & CRYPTO_ALG_GENIV) &&
((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
alg->cra_ablkcipher.ivsize)) ||
(!((type ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK) &&
alg->cra_type == &crypto_nivaead_type && alg->cra_aead.ivsize))
type |= CRYPTO_ALG_TESTED;
param->type = type;
thread = kthread_run(cryptomgr_test, param, "cryptomgr_test");
if (IS_ERR(thread))
goto err_free_param;
return NOTIFY_STOP;
err_free_param:
kfree(param);
err_put_module:
module_put(THIS_MODULE);
err:
return NOTIFY_OK;
}
static int cryptomgr_notify(struct notifier_block *this, unsigned long msg,
void *data)
{
switch (msg) {
case CRYPTO_MSG_ALG_REQUEST:
return cryptomgr_schedule_probe(data);
case CRYPTO_MSG_ALG_REGISTER:
return cryptomgr_schedule_test(data);
}
return NOTIFY_DONE;
}
static struct notifier_block cryptomgr_notifier = {
.notifier_call = cryptomgr_notify,
};
static int __init cryptomgr_init(void)
{
return crypto_register_notifier(&cryptomgr_notifier);
}
static void __exit cryptomgr_exit(void)
{
int err = crypto_unregister_notifier(&cryptomgr_notifier);
BUG_ON(err);
}
subsys_initcall(cryptomgr_init);
module_exit(cryptomgr_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto Algorithm Manager");
| {
"language": "C"
} |
/*
* This file is part of the UCB release of Plan 9. It is subject to the license
* terms in the LICENSE file found in the top-level directory of this
* distribution and at http://akaros.cs.berkeley.edu/files/Plan9License. No
* part of the UCB release of Plan 9, including this file, may be copied,
* modified, propagated, or distributed except according to the terms contained
* in the LICENSE file.
*/
#include <u.h>
#include <libc.h>
#include <thread.h>
#include "threadimpl.h"
#define PIPEMNT "/mnt/temp"
void
procexec(Channel *pidc, char *prog, char *args[])
{
int n;
Proc *p;
Thread *t;
_threaddebug(DBGEXEC, "procexec %s", prog);
/* must be only thread in proc */
p = _threadgetproc();
t = p->thread;
if(p->threads.head != t || p->threads.head->nextt != nil){
werrstr("not only thread in proc");
Bad:
if(pidc)
sendul(pidc, ~0);
return;
}
/*
* We want procexec to behave like exec; if exec succeeds,
* never return, and if it fails, return with errstr set.
* Unfortunately, the exec happens in another proc since
* we have to wait for the exec'ed process to finish.
* To provide the semantics, we open a pipe with the
* write end close-on-exec and hand it to the proc that
* is doing the exec. If the exec succeeds, the pipe will
* close so that our read below fails. If the exec fails,
* then the proc doing the exec sends the errstr down the
* pipe to us.
*/
if(bind("#|", PIPEMNT, MREPL) < 0)
goto Bad;
if((p->exec.fd[0] = open(PIPEMNT "/data", OREAD)) < 0){
unmount(nil, PIPEMNT);
goto Bad;
}
if((p->exec.fd[1] = open(PIPEMNT "/data1", OWRITE|OCEXEC)) < 0){
close(p->exec.fd[0]);
unmount(nil, PIPEMNT);
goto Bad;
}
unmount(nil, PIPEMNT);
/* exec in parallel via the scheduler */
assert(p->needexec==0);
p->exec.prog = prog;
p->exec.args = args;
p->needexec = 1;
_sched();
close(p->exec.fd[1]);
if((n = read(p->exec.fd[0], p->exitstr, ERRMAX-1)) > 0){ /* exec failed */
p->exitstr[n] = '\0';
errstr(p->exitstr, ERRMAX);
close(p->exec.fd[0]);
goto Bad;
}
close(p->exec.fd[0]);
if(pidc)
sendul(pidc, t->ret);
/* wait for exec'ed program, then exit */
_schedexecwait();
}
void
procexecl(Channel *pidc, char *f, ...)
{
/*
* The cost of realloc is trivial compared the cost of an exec,
* and realloc doesn't necessarily allocate more space anyway;
* often realloc just returns its argument doing no further work.
* Finally, the number of args is usually small.
*
* There is always at least one element in the argument vector
* passed to procexec(), and argv[argc] == nil.
*/
va_list a;
char **args = nil;
char *arg;
int argc = 0;
va_start(a, f);
do {
arg = va_arg(a, char *);
argc++;
args = realloc(args, argc * sizeof(char *));
args[argc-1] = arg;
} while(arg != nil);
va_end(a);
procexec(pidc, f, args);
}
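/*
 * Illustrative usage (an assumption, not taken from this file): the variadic
 * list starts with argv[0] and ends with nil, e.g.
 *
 *	procexecl(nil, "/bin/rc", "rc", "-c", "date", nil);
 *
 * which is equivalent to building the nil-terminated argv array by hand and
 * calling procexec(nil, "/bin/rc", argv).
 */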
| {
"language": "C"
} |
/*
*******************************************************************************
* Copyright (C) 2010-2014, International Business Machines
* Corporation and others. All Rights Reserved.
*******************************************************************************
* file name: ucharstriebuilder.h
* encoding: US-ASCII
* tab size: 8 (not used)
* indentation:4
*
* created on: 2010nov14
* created by: Markus W. Scherer
*/
#ifndef __UCHARSTRIEBUILDER_H__
#define __UCHARSTRIEBUILDER_H__
#include "unicode/utypes.h"
#include "unicode/stringtriebuilder.h"
#include "unicode/ucharstrie.h"
#include "unicode/unistr.h"
/**
* \file
* \brief C++ API: Builder for icu::UCharsTrie
*/
U_NAMESPACE_BEGIN
class UCharsTrieElement;
/**
* Builder class for UCharsTrie.
*
* This class is not intended for public subclassing.
* @stable ICU 4.8
*/
class U_COMMON_API UCharsTrieBuilder : public StringTrieBuilder {
public:
/**
* Constructs an empty builder.
* @param errorCode Standard ICU error code.
* @stable ICU 4.8
*/
UCharsTrieBuilder(UErrorCode &errorCode);
/**
* Destructor.
* @stable ICU 4.8
*/
virtual ~UCharsTrieBuilder();
/**
* Adds a (string, value) pair.
* The string must be unique.
* The string contents will be copied; the builder does not keep
* a reference to the input UnicodeString or its buffer.
* @param s The input string.
* @param value The value associated with this string.
* @param errorCode Standard ICU error code. Its input value must
* pass the U_SUCCESS() test, or else the function returns
* immediately. Check for U_FAILURE() on output or use with
* function chaining. (See User Guide for details.)
* @return *this
* @stable ICU 4.8
*/
UCharsTrieBuilder &add(const UnicodeString &s, int32_t value, UErrorCode &errorCode);
/**
* Builds a UCharsTrie for the add()ed data.
* Once built, no further data can be add()ed until clear() is called.
*
* A UCharsTrie cannot be empty. At least one (string, value) pair
* must have been add()ed.
*
* This method passes ownership of the builder's internal result array to the new trie object.
* Another call to any build() variant will re-serialize the trie.
* After clear() has been called, a new array will be used as well.
* @param buildOption Build option, see UStringTrieBuildOption.
* @param errorCode Standard ICU error code. Its input value must
* pass the U_SUCCESS() test, or else the function returns
* immediately. Check for U_FAILURE() on output or use with
* function chaining. (See User Guide for details.)
* @return A new UCharsTrie for the add()ed data.
* @stable ICU 4.8
*/
UCharsTrie *build(UStringTrieBuildOption buildOption, UErrorCode &errorCode);
/**
* Builds a UCharsTrie for the add()ed data and UChar-serializes it.
* Once built, no further data can be add()ed until clear() is called.
*
* A UCharsTrie cannot be empty. At least one (string, value) pair
* must have been add()ed.
*
* Multiple calls to buildUnicodeString() set the UnicodeStrings to the
* builder's same UChar array, without rebuilding.
* If buildUnicodeString() is called after build(), the trie will be
* re-serialized into a new array.
* If build() is called after buildUnicodeString(), the trie object will become
* the owner of the previously returned array.
* After clear() has been called, a new array will be used as well.
* @param buildOption Build option, see UStringTrieBuildOption.
* @param result A UnicodeString which will be set to the UChar-serialized
* UCharsTrie for the add()ed data.
* @param errorCode Standard ICU error code. Its input value must
* pass the U_SUCCESS() test, or else the function returns
* immediately. Check for U_FAILURE() on output or use with
* function chaining. (See User Guide for details.)
* @return result
* @stable ICU 4.8
*/
UnicodeString &buildUnicodeString(UStringTrieBuildOption buildOption, UnicodeString &result,
UErrorCode &errorCode);
/**
* Removes all (string, value) pairs.
* New data can then be add()ed and a new trie can be built.
* @return *this
* @stable ICU 4.8
*/
UCharsTrieBuilder &clear() {
strings.remove();
elementsLength=0;
ucharsLength=0;
return *this;
}
private:
UCharsTrieBuilder(const UCharsTrieBuilder &other); // no copy constructor
UCharsTrieBuilder &operator=(const UCharsTrieBuilder &other); // no assignment operator
void buildUChars(UStringTrieBuildOption buildOption, UErrorCode &errorCode);
virtual int32_t getElementStringLength(int32_t i) const;
virtual UChar getElementUnit(int32_t i, int32_t unitIndex) const;
virtual int32_t getElementValue(int32_t i) const;
virtual int32_t getLimitOfLinearMatch(int32_t first, int32_t last, int32_t unitIndex) const;
virtual int32_t countElementUnits(int32_t start, int32_t limit, int32_t unitIndex) const;
virtual int32_t skipElementsBySomeUnits(int32_t i, int32_t unitIndex, int32_t count) const;
virtual int32_t indexOfElementWithNextUnit(int32_t i, int32_t unitIndex, UChar unit) const;
virtual UBool matchNodesCanHaveValues() const { return TRUE; }
virtual int32_t getMaxBranchLinearSubNodeLength() const { return UCharsTrie::kMaxBranchLinearSubNodeLength; }
virtual int32_t getMinLinearMatch() const { return UCharsTrie::kMinLinearMatch; }
virtual int32_t getMaxLinearMatchLength() const { return UCharsTrie::kMaxLinearMatchLength; }
class UCTLinearMatchNode : public LinearMatchNode {
public:
UCTLinearMatchNode(const UChar *units, int32_t len, Node *nextNode);
virtual UBool operator==(const Node &other) const;
virtual void write(StringTrieBuilder &builder);
private:
const UChar *s;
};
virtual Node *createLinearMatchNode(int32_t i, int32_t unitIndex, int32_t length,
Node *nextNode) const;
UBool ensureCapacity(int32_t length);
virtual int32_t write(int32_t unit);
int32_t write(const UChar *s, int32_t length);
virtual int32_t writeElementUnits(int32_t i, int32_t unitIndex, int32_t length);
virtual int32_t writeValueAndFinal(int32_t i, UBool isFinal);
virtual int32_t writeValueAndType(UBool hasValue, int32_t value, int32_t node);
virtual int32_t writeDeltaTo(int32_t jumpTarget);
UnicodeString strings;
UCharsTrieElement *elements;
int32_t elementsCapacity;
int32_t elementsLength;
// UChar serialization of the trie.
// Grows from the back: ucharsLength measures from the end of the buffer!
UChar *uchars;
int32_t ucharsCapacity;
int32_t ucharsLength;
};
U_NAMESPACE_END
#endif // __UCHARSTRIEBUILDER_H__
| {
"language": "C"
} |
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#define DEFAULT_CHANNEL memory
#include "debug.h"
#include "kernel/errno.h"
#include "kernel/signal.h"
#include "emu/memory.h"
#include "jit/jit.h"
#include "kernel/vdso.h"
#include "kernel/task.h"
#include "fs/fd.h"
// increment the change count
static void mem_changed(struct mem *mem);
void mem_init(struct mem *mem) {
mem->pgdir = calloc(MEM_PGDIR_SIZE, sizeof(struct pt_entry *));
mem->pgdir_used = 0;
mem->changes = 0;
#if ENGINE_JIT
mem->jit = jit_new(mem);
#endif
wrlock_init(&mem->lock);
}
void mem_destroy(struct mem *mem) {
write_wrlock(&mem->lock);
pt_unmap_always(mem, 0, MEM_PAGES);
#if ENGINE_JIT
jit_free(mem->jit);
#endif
for (int i = 0; i < MEM_PGDIR_SIZE; i++) {
if (mem->pgdir[i] != NULL)
free(mem->pgdir[i]);
}
free(mem->pgdir);
write_wrunlock(&mem->lock);
wrlock_destroy(&mem->lock);
}
#define PGDIR_TOP(page) ((page) >> 10)
#define PGDIR_BOTTOM(page) ((page) & (MEM_PGDIR_SIZE - 1))
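// Illustrative example (assuming MEM_PGDIR_SIZE is 1024, as the 10-bit shift
// and mask above imply): page 0x12345 splits into top index 0x48
// (0x12345 >> 10) and bottom index 0x345 (0x12345 & 0x3ff), i.e. the entry
// lives at mem->pgdir[0x48][0x345].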
static struct pt_entry *mem_pt_new(struct mem *mem, page_t page) {
struct pt_entry *pgdir = mem->pgdir[PGDIR_TOP(page)];
if (pgdir == NULL) {
pgdir = mem->pgdir[PGDIR_TOP(page)] = calloc(MEM_PGDIR_SIZE, sizeof(struct pt_entry));
mem->pgdir_used++;
}
return &pgdir[PGDIR_BOTTOM(page)];
}
struct pt_entry *mem_pt(struct mem *mem, page_t page) {
struct pt_entry *pgdir = mem->pgdir[PGDIR_TOP(page)];
if (pgdir == NULL)
return NULL;
struct pt_entry *entry = &pgdir[PGDIR_BOTTOM(page)];
if (entry->data == NULL)
return NULL;
return entry;
}
static void mem_pt_del(struct mem *mem, page_t page) {
struct pt_entry *entry = mem_pt(mem, page);
if (entry != NULL)
entry->data = NULL;
}
void mem_next_page(struct mem *mem, page_t *page) {
(*page)++;
if (*page >= MEM_PAGES)
return;
while (*page < MEM_PAGES && mem->pgdir[PGDIR_TOP(*page)] == NULL)
*page = (*page - PGDIR_BOTTOM(*page)) + MEM_PGDIR_SIZE;
}
page_t pt_find_hole(struct mem *mem, pages_t size) {
page_t hole_end = 0; // this can never be used before being initialized, but gcc doesn't realize that
bool in_hole = false;
for (page_t page = 0xf7ffd; page > 0x40000; page--) {
// I don't know how this works but it does
if (!in_hole && mem_pt(mem, page) == NULL) {
in_hole = true;
hole_end = page + 1;
}
if (mem_pt(mem, page) != NULL)
in_hole = false;
else if (hole_end - page == size)
return page;
}
return BAD_PAGE;
}
bool pt_is_hole(struct mem *mem, page_t start, pages_t pages) {
for (page_t page = start; page < start + pages; page++) {
if (mem_pt(mem, page) != NULL)
return false;
}
return true;
}
int pt_map(struct mem *mem, page_t start, pages_t pages, void *memory, size_t offset, unsigned flags) {
if (memory == MAP_FAILED)
return errno_map();
// If this fails, the munmap in pt_unmap would probably fail.
assert((uintptr_t) memory % real_page_size == 0 || memory == vdso_data);
struct data *data = malloc(sizeof(struct data));
if (data == NULL)
return _ENOMEM;
*data = (struct data) {
.data = memory,
.size = pages * PAGE_SIZE + offset,
#if LEAK_DEBUG
.pid = current ? current->pid : 0,
.dest = start << PAGE_BITS,
#endif
};
for (page_t page = start; page < start + pages; page++) {
if (mem_pt(mem, page) != NULL)
pt_unmap(mem, page, 1);
data->refcount++;
struct pt_entry *pt = mem_pt_new(mem, page);
pt->data = data;
pt->offset = ((page - start) << PAGE_BITS) + offset;
pt->flags = flags;
}
return 0;
}
int pt_unmap(struct mem *mem, page_t start, pages_t pages) {
for (page_t page = start; page < start + pages; page++)
if (mem_pt(mem, page) == NULL)
return -1;
return pt_unmap_always(mem, start, pages);
}
int pt_unmap_always(struct mem *mem, page_t start, pages_t pages) {
for (page_t page = start; page < start + pages; mem_next_page(mem, &page)) {
struct pt_entry *pt = mem_pt(mem, page);
if (pt == NULL)
continue;
#if ENGINE_JIT
jit_invalidate_page(mem->jit, page);
#endif
struct data *data = pt->data;
mem_pt_del(mem, page);
if (--data->refcount == 0) {
// vdso wasn't allocated with mmap, it's just in our data segment
if (data->data != vdso_data) {
int err = munmap(data->data, data->size);
if (err != 0)
die("munmap(%p, %lu) failed: %s", data->data, data->size, strerror(errno));
}
if (data->fd != NULL) {
fd_close(data->fd);
}
free(data);
}
}
mem_changed(mem);
return 0;
}
int pt_map_nothing(struct mem *mem, page_t start, pages_t pages, unsigned flags) {
if (pages == 0) return 0;
void *memory = mmap(NULL, pages * PAGE_SIZE,
PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
return pt_map(mem, start, pages, memory, 0, flags | P_ANONYMOUS);
}
int pt_set_flags(struct mem *mem, page_t start, pages_t pages, int flags) {
for (page_t page = start; page < start + pages; page++)
if (mem_pt(mem, page) == NULL)
return _ENOMEM;
for (page_t page = start; page < start + pages; page++) {
struct pt_entry *entry = mem_pt(mem, page);
int old_flags = entry->flags;
entry->flags = flags;
// check if protection is increasing
if ((flags & ~old_flags) & (P_READ|P_WRITE)) {
void *data = (char *) entry->data->data + entry->offset;
// force to be page aligned
data = (void *) ((uintptr_t) data & ~(real_page_size - 1));
int prot = PROT_READ;
if (flags & P_WRITE) prot |= PROT_WRITE;
if (mprotect(data, real_page_size, prot) < 0)
return errno_map();
}
}
mem_changed(mem);
return 0;
}
int pt_copy_on_write(struct mem *src, struct mem *dst, page_t start, page_t pages) {
for (page_t page = start; page < start + pages; mem_next_page(src, &page)) {
struct pt_entry *entry = mem_pt(src, page);
if (entry == NULL)
continue;
if (pt_unmap_always(dst, page, 1) < 0)
return -1;
if (!(entry->flags & P_SHARED))
entry->flags |= P_COW;
entry->flags &= ~P_COMPILED;
entry->data->refcount++;
struct pt_entry *dst_entry = mem_pt_new(dst, page);
dst_entry->data = entry->data;
dst_entry->offset = entry->offset;
dst_entry->flags = entry->flags;
}
mem_changed(src);
mem_changed(dst);
return 0;
}
static void mem_changed(struct mem *mem) {
mem->changes++;
}
void *mem_ptr(struct mem *mem, addr_t addr, int type) {
page_t page = PAGE(addr);
struct pt_entry *entry = mem_pt(mem, page);
if (entry == NULL) {
// page does not exist
// look to see if the next VM region is willing to grow down
page_t p = page + 1;
while (p < MEM_PAGES && mem_pt(mem, p) == NULL)
p++;
if (p >= MEM_PAGES)
return NULL;
if (!(mem_pt(mem, p)->flags & P_GROWSDOWN))
return NULL;
// Changing memory maps must be done with the write lock. But this is
// called with the read lock, e.g. by tlb_handle_miss.
// This locking stuff is copy/pasted for all the code in this function
// which changes memory maps.
// TODO: factor the lock/unlock code here into a new function. Do this
// next time you touch this function.
read_wrunlock(&mem->lock);
write_wrlock(&mem->lock);
pt_map_nothing(mem, page, 1, P_WRITE | P_GROWSDOWN);
write_wrunlock(&mem->lock);
read_wrlock(&mem->lock);
entry = mem_pt(mem, page);
}
if (entry != NULL && type == MEM_WRITE) {
// if page is unwritable, well tough luck
if (!(entry->flags & P_WRITE))
return NULL;
// if page is cow, ~~milk~~ copy it
if (entry->flags & P_COW) {
void *data = (char *) entry->data->data + entry->offset;
void *copy = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
memcpy(copy, data, PAGE_SIZE);
// copy/paste from above
read_wrunlock(&mem->lock);
write_wrlock(&mem->lock);
pt_map(mem, page, 1, copy, 0, entry->flags &~ P_COW);
write_wrunlock(&mem->lock);
read_wrlock(&mem->lock);
}
#if ENGINE_JIT
// get rid of any compiled blocks in this page
jit_invalidate_page(mem->jit, page);
#endif
}
if (entry == NULL)
return NULL;
return entry->data->data + entry->offset + PGOFFSET(addr);
}
int mem_segv_reason(struct mem *mem, addr_t addr) {
struct pt_entry *pt = mem_pt(mem, PAGE(addr));
if (pt == NULL)
return SEGV_MAPERR_;
return SEGV_ACCERR_;
}
size_t real_page_size;
__attribute__((constructor)) static void get_real_page_size() {
real_page_size = sysconf(_SC_PAGESIZE);
}
void mem_coredump(struct mem *mem, const char *file) {
int fd = open(file, O_CREAT | O_RDWR | O_TRUNC, 0666);
if (fd < 0) {
perror("open");
return;
}
if (ftruncate(fd, 0xffffffff) < 0) {
perror("ftruncate");
return;
}
int pages = 0;
for (page_t page = 0; page < MEM_PAGES; page++) {
struct pt_entry *entry = mem_pt(mem, page);
if (entry == NULL)
continue;
pages++;
if (lseek(fd, page << PAGE_BITS, SEEK_SET) < 0) {
perror("lseek");
return;
}
if (write(fd, entry->data->data, PAGE_SIZE) < 0) {
perror("write");
return;
}
}
printk("dumped %d pages\n", pages);
close(fd);
}
| {
"language": "C"
} |
#ifndef __PERF_MAP_H
#define __PERF_MAP_H
#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <stdio.h>
#include <stdbool.h>
#include "types.h"
enum map_type {
MAP__FUNCTION = 0,
MAP__VARIABLE,
};
#define MAP__NR_TYPES (MAP__VARIABLE + 1)
extern const char *map_type__name[MAP__NR_TYPES];
struct dso;
struct ref_reloc_sym;
struct map_groups;
struct machine;
struct map {
union {
struct rb_node rb_node;
struct list_head node;
};
u64 start;
u64 end;
u8 /* enum map_type */ type;
bool referenced;
u32 priv;
u64 pgoff;
/* ip -> dso rip */
u64 (*map_ip)(struct map *, u64);
/* dso rip -> ip */
u64 (*unmap_ip)(struct map *, u64);
struct dso *dso;
struct map_groups *groups;
};
struct kmap {
struct ref_reloc_sym *ref_reloc_sym;
struct map_groups *kmaps;
};
struct map_groups {
struct rb_root maps[MAP__NR_TYPES];
struct list_head removed_maps[MAP__NR_TYPES];
struct machine *machine;
};
/* Native host kernel uses -1 as pid index in machine */
#define HOST_KERNEL_ID (-1)
#define DEFAULT_GUEST_KERNEL_ID (0)
struct machine {
struct rb_node rb_node;
pid_t pid;
char *root_dir;
struct list_head user_dsos;
struct list_head kernel_dsos;
struct map_groups kmaps;
struct map *vmlinux_maps[MAP__NR_TYPES];
};
static inline
struct map *machine__kernel_map(struct machine *self, enum map_type type)
{
return self->vmlinux_maps[type];
}
static inline struct kmap *map__kmap(struct map *self)
{
return (struct kmap *)(self + 1);
}
static inline u64 map__map_ip(struct map *map, u64 ip)
{
return ip - map->start + map->pgoff;
}
static inline u64 map__unmap_ip(struct map *map, u64 ip)
{
return ip + map->start - map->pgoff;
}
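/*
 * Example (hypothetical numbers): for a map with start = 0x400000 and
 * pgoff = 0x1000, map__map_ip() translates ip 0x401234 to dso rip
 * 0x1234 + 0x1000 = 0x2234, and map__unmap_ip() reverses that.
 */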
static inline u64 identity__map_ip(struct map *map __used, u64 ip)
{
return ip;
}
/* rip/ip <-> addr suitable for passing to `objdump --start-address=` */
u64 map__rip_2objdump(struct map *map, u64 rip);
u64 map__objdump_2ip(struct map *map, u64 addr);
struct symbol;
typedef int (*symbol_filter_t)(struct map *map, struct symbol *sym);
void map__init(struct map *self, enum map_type type,
u64 start, u64 end, u64 pgoff, struct dso *dso);
struct map *map__new(struct list_head *dsos__list, u64 start, u64 len,
u64 pgoff, u32 pid, char *filename,
enum map_type type);
void map__delete(struct map *self);
struct map *map__clone(struct map *self);
int map__overlap(struct map *l, struct map *r);
size_t map__fprintf(struct map *self, FILE *fp);
int map__load(struct map *self, symbol_filter_t filter);
struct symbol *map__find_symbol(struct map *self,
u64 addr, symbol_filter_t filter);
struct symbol *map__find_symbol_by_name(struct map *self, const char *name,
symbol_filter_t filter);
void map__fixup_start(struct map *self);
void map__fixup_end(struct map *self);
void map__reloc_vmlinux(struct map *self);
size_t __map_groups__fprintf_maps(struct map_groups *self,
enum map_type type, int verbose, FILE *fp);
void maps__insert(struct rb_root *maps, struct map *map);
void maps__remove(struct rb_root *self, struct map *map);
struct map *maps__find(struct rb_root *maps, u64 addr);
void map_groups__init(struct map_groups *self);
void map_groups__exit(struct map_groups *self);
int map_groups__clone(struct map_groups *self,
struct map_groups *parent, enum map_type type);
size_t map_groups__fprintf(struct map_groups *self, int verbose, FILE *fp);
size_t map_groups__fprintf_maps(struct map_groups *self, int verbose, FILE *fp);
typedef void (*machine__process_t)(struct machine *self, void *data);
void machines__process(struct rb_root *self, machine__process_t process, void *data);
struct machine *machines__add(struct rb_root *self, pid_t pid,
const char *root_dir);
struct machine *machines__find_host(struct rb_root *self);
struct machine *machines__find(struct rb_root *self, pid_t pid);
struct machine *machines__findnew(struct rb_root *self, pid_t pid);
char *machine__mmap_name(struct machine *self, char *bf, size_t size);
int machine__init(struct machine *self, const char *root_dir, pid_t pid);
void machine__exit(struct machine *self);
void machine__delete(struct machine *self);
/*
* Default guest kernel is defined by parameter --guestkallsyms
* and --guestmodules
*/
static inline bool machine__is_default_guest(struct machine *self)
{
return self ? self->pid == DEFAULT_GUEST_KERNEL_ID : false;
}
static inline bool machine__is_host(struct machine *self)
{
return self ? self->pid == HOST_KERNEL_ID : false;
}
static inline void map_groups__insert(struct map_groups *self, struct map *map)
{
maps__insert(&self->maps[map->type], map);
map->groups = self;
}
static inline void map_groups__remove(struct map_groups *self, struct map *map)
{
maps__remove(&self->maps[map->type], map);
}
static inline struct map *map_groups__find(struct map_groups *self,
enum map_type type, u64 addr)
{
return maps__find(&self->maps[type], addr);
}
struct symbol *map_groups__find_symbol(struct map_groups *self,
enum map_type type, u64 addr,
struct map **mapp,
symbol_filter_t filter);
struct symbol *map_groups__find_symbol_by_name(struct map_groups *self,
enum map_type type,
const char *name,
struct map **mapp,
symbol_filter_t filter);
static inline
struct symbol *machine__find_kernel_symbol(struct machine *self,
enum map_type type, u64 addr,
struct map **mapp,
symbol_filter_t filter)
{
return map_groups__find_symbol(&self->kmaps, type, addr, mapp, filter);
}
static inline
struct symbol *machine__find_kernel_function(struct machine *self, u64 addr,
struct map **mapp,
symbol_filter_t filter)
{
return machine__find_kernel_symbol(self, MAP__FUNCTION, addr, mapp, filter);
}
static inline
struct symbol *map_groups__find_function_by_name(struct map_groups *self,
const char *name, struct map **mapp,
symbol_filter_t filter)
{
return map_groups__find_symbol_by_name(self, MAP__FUNCTION, name, mapp, filter);
}
int map_groups__fixup_overlappings(struct map_groups *self, struct map *map,
int verbose, FILE *fp);
struct map *map_groups__find_by_name(struct map_groups *self,
enum map_type type, const char *name);
struct map *machine__new_module(struct machine *self, u64 start, const char *filename);
void map_groups__flush(struct map_groups *self);
#endif /* __PERF_MAP_H */
| {
"language": "C"
} |
// SPDX-License-Identifier: GPL-2.0-only
/*
* mm/balloon_compaction.c
*
* Common interface for making balloon pages movable by compaction.
*
* Copyright (C) 2012, Red Hat, Inc. Rafael Aquini <aquini@redhat.com>
*/
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/balloon_compaction.h>
static void balloon_page_enqueue_one(struct balloon_dev_info *b_dev_info,
struct page *page)
{
/*
* Block others from accessing the 'page' when we get around to
* establishing additional references. We should be the only one
* holding a reference to the 'page' at this point. If we are not, then
* memory corruption is possible and we should stop execution.
*/
BUG_ON(!trylock_page(page));
balloon_page_insert(b_dev_info, page);
unlock_page(page);
__count_vm_event(BALLOON_INFLATE);
}
/**
* balloon_page_list_enqueue() - inserts a list of pages into the balloon page
* list.
* @b_dev_info: balloon device descriptor where we will insert a new page to
* @pages: pages to enqueue - allocated using balloon_page_alloc.
*
* Driver must call this function to properly enqueue balloon pages before
* definitively removing them from the guest system.
*
* Return: number of pages that were enqueued.
*/
size_t balloon_page_list_enqueue(struct balloon_dev_info *b_dev_info,
struct list_head *pages)
{
struct page *page, *tmp;
unsigned long flags;
size_t n_pages = 0;
spin_lock_irqsave(&b_dev_info->pages_lock, flags);
list_for_each_entry_safe(page, tmp, pages, lru) {
list_del(&page->lru);
balloon_page_enqueue_one(b_dev_info, page);
n_pages++;
}
spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
return n_pages;
}
EXPORT_SYMBOL_GPL(balloon_page_list_enqueue);
/**
* balloon_page_list_dequeue() - removes pages from balloon's page list and
* returns a list of the pages.
* @b_dev_info: balloon device descriptor where we will grab a page from.
* @pages: pointer to the list of pages that would be returned to the caller.
* @n_req_pages: number of requested pages.
*
* Driver must call this function to properly de-allocate previously enlisted
* balloon pages before definitively releasing them back to the guest system.
* This function tries to remove @n_req_pages from the ballooned pages and
* return them to the caller in the @pages list.
*
* Note that this function may fail to dequeue some pages even if the balloon
* isn't empty - since the page list can be temporarily empty due to compaction
* of isolated pages.
*
* Return: number of pages that were added to the @pages list.
*/
size_t balloon_page_list_dequeue(struct balloon_dev_info *b_dev_info,
struct list_head *pages, size_t n_req_pages)
{
struct page *page, *tmp;
unsigned long flags;
size_t n_pages = 0;
spin_lock_irqsave(&b_dev_info->pages_lock, flags);
list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) {
if (n_pages == n_req_pages)
break;
/*
* Block others from accessing the 'page' while we get around to
* establishing additional references and preparing the 'page'
* to be released by the balloon driver.
*/
if (!trylock_page(page))
continue;
if (IS_ENABLED(CONFIG_BALLOON_COMPACTION) &&
PageIsolated(page)) {
/* raced with isolation */
unlock_page(page);
continue;
}
balloon_page_delete(page);
__count_vm_event(BALLOON_DEFLATE);
list_add(&page->lru, pages);
unlock_page(page);
n_pages++;
}
spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
return n_pages;
}
EXPORT_SYMBOL_GPL(balloon_page_list_dequeue);
/*
* balloon_page_alloc - allocates a new page for insertion into the balloon
* page list.
*
* Driver must call this function to properly allocate a new balloon page.
* Driver must call balloon_page_enqueue before definitively removing the page
* from the guest system.
*
* Return: struct page for the allocated page or NULL on allocation failure.
*/
struct page *balloon_page_alloc(void)
{
struct page *page = alloc_page(balloon_mapping_gfp_mask() |
__GFP_NOMEMALLOC | __GFP_NORETRY |
__GFP_NOWARN);
return page;
}
EXPORT_SYMBOL_GPL(balloon_page_alloc);
/*
* balloon_page_enqueue - inserts a new page into the balloon page list.
*
* @b_dev_info: balloon device descriptor where we will insert a new page
* @page: new page to enqueue - allocated using balloon_page_alloc.
*
* Drivers must call this function to properly enqueue a new allocated balloon
* page before definitively removing the page from the guest system.
*
* Drivers must not call balloon_page_enqueue on pages that have been pushed to
* a list with balloon_page_push before removing them with balloon_page_pop. To
* enqueue a list of pages, use balloon_page_list_enqueue instead.
*/
void balloon_page_enqueue(struct balloon_dev_info *b_dev_info,
struct page *page)
{
unsigned long flags;
spin_lock_irqsave(&b_dev_info->pages_lock, flags);
balloon_page_enqueue_one(b_dev_info, page);
spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
}
EXPORT_SYMBOL_GPL(balloon_page_enqueue);
/*
* balloon_page_dequeue - removes a page from balloon's page list and returns
* its address to allow the driver to release the page.
* @b_dev_info: balloon device descriptor where we will grab a page from.
*
* Driver must call this function to properly dequeue a previously enqueued page
* before definitively releasing it back to the guest system.
*
* Caller must perform its own accounting to ensure that this
* function is called only if some pages are actually enqueued.
*
* Note that this function may fail to dequeue some pages even if there are
* some enqueued pages - since the page list can be temporarily empty due to
* the compaction of isolated pages.
*
* TODO: remove the caller accounting requirements, and allow caller to wait
* until all pages can be dequeued.
*
* Return: struct page for the dequeued page, or NULL if no page was dequeued.
*/
struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
{
unsigned long flags;
LIST_HEAD(pages);
int n_pages;
n_pages = balloon_page_list_dequeue(b_dev_info, &pages, 1);
if (n_pages != 1) {
/*
* If we are unable to dequeue a balloon page because the page
* list is empty and there are no isolated pages, then something
* went out of track and some balloon pages are lost.
* BUG() here, otherwise the balloon driver may get stuck in
* an infinite loop while attempting to release all its pages.
*/
spin_lock_irqsave(&b_dev_info->pages_lock, flags);
if (unlikely(list_empty(&b_dev_info->pages) &&
!b_dev_info->isolated_pages))
BUG();
spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
return NULL;
}
return list_first_entry(&pages, struct page, lru);
}
EXPORT_SYMBOL_GPL(balloon_page_dequeue);
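/*
 * Illustrative sketch (an assumption, not part of this file): how a balloon
 * driver might combine balloon_page_alloc(), balloon_page_push() and the
 * list enqueue/dequeue helpers above to inflate and deflate. The function
 * names are hypothetical, and the hypervisor notification step, which is
 * driver-specific, is reduced to a comment.
 */
static size_t example_balloon_inflate(struct balloon_dev_info *b_dev_info,
				      size_t nr_pages)
{
	LIST_HEAD(pages);
	size_t i;

	for (i = 0; i < nr_pages; i++) {
		struct page *page = balloon_page_alloc();

		if (!page)
			break;
		/* collect the pages locally first ... */
		balloon_page_push(&pages, page);
	}
	/* ... tell the host about them here, then account them in one batch */
	return balloon_page_list_enqueue(b_dev_info, &pages);
}

static size_t example_balloon_deflate(struct balloon_dev_info *b_dev_info,
				      size_t nr_pages)
{
	LIST_HEAD(pages);
	struct page *page, *tmp;
	size_t n = balloon_page_list_dequeue(b_dev_info, &pages, nr_pages);

	/* tell the host the pages are being returned, then free them */
	list_for_each_entry_safe(page, tmp, &pages, lru) {
		list_del(&page->lru);
		__free_page(page);
	}
	return n;
}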
#ifdef CONFIG_BALLOON_COMPACTION
bool balloon_page_isolate(struct page *page, isolate_mode_t mode)
{
struct balloon_dev_info *b_dev_info = balloon_page_device(page);
unsigned long flags;
spin_lock_irqsave(&b_dev_info->pages_lock, flags);
list_del(&page->lru);
b_dev_info->isolated_pages++;
spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
return true;
}
void balloon_page_putback(struct page *page)
{
struct balloon_dev_info *b_dev_info = balloon_page_device(page);
unsigned long flags;
spin_lock_irqsave(&b_dev_info->pages_lock, flags);
list_add(&page->lru, &b_dev_info->pages);
b_dev_info->isolated_pages--;
spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
}
/* move_to_new_page() counterpart for a ballooned page */
int balloon_page_migrate(struct address_space *mapping,
struct page *newpage, struct page *page,
enum migrate_mode mode)
{
struct balloon_dev_info *balloon = balloon_page_device(page);
/*
* We can not easily support the no copy case here so ignore it as it
* is unlikely to be used with balloon pages. See include/linux/hmm.h
* for a user of the MIGRATE_SYNC_NO_COPY mode.
*/
if (mode == MIGRATE_SYNC_NO_COPY)
return -EINVAL;
VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
return balloon->migratepage(balloon, newpage, page, mode);
}
const struct address_space_operations balloon_aops = {
.migratepage = balloon_page_migrate,
.isolate_page = balloon_page_isolate,
.putback_page = balloon_page_putback,
};
EXPORT_SYMBOL_GPL(balloon_aops);
#endif /* CONFIG_BALLOON_COMPACTION */
| {
"language": "C"
} |
#define _BSD_SOURCE 1
#define _DEFAULT_SOURCE 1
#include "internal.h"
#include "log.h"
#include "convert_utf/ConvertUTF.h"
#include <sys/types.h>
#include <dirent.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#ifdef _WIN32
#define realpath(N,R) _fullpath((R),(N),_MAX_PATH)
#include <direct.h>
#ifndef PATH_MAX
#define PATH_MAX _MAX_PATH
#endif
#else
#include <limits.h>
#endif
#define VERBOSE 0
#if defined(_MSC_VER)
#define snprintf _snprintf
#define vsnprintf _vsnprintf
#define strcasecmp _stricmp
#define strncasecmp _strnicmp
#endif
long int unshield_get_path_max(Unshield* unshield)
{
#ifdef PATH_MAX
return PATH_MAX;
#else
long int path_max = pathconf(unshield->filename_pattern, _PC_PATH_MAX);
if (path_max <= 0)
path_max = 4096;
return path_max;
#endif
}
char *unshield_get_base_directory_name(Unshield *unshield) {
long int path_max = unshield_get_path_max(unshield);
char *p = strrchr(unshield->filename_pattern, '/');
char *dirname = malloc(path_max);
if (p) {
strncpy(dirname, unshield->filename_pattern, path_max);
if ((unsigned int) (p - unshield->filename_pattern) > path_max) {
dirname[path_max - 1] = 0;
} else
dirname[(p - unshield->filename_pattern)] = 0;
} else
strcpy(dirname, ".");
return dirname;
}
static char* get_filename(Unshield* unshield, int index, const char* suffix) {
if (unshield && unshield->filename_pattern)
{
long path_max = unshield_get_path_max(unshield);
char* filename = malloc(path_max);
if (filename == NULL) {
unshield_error("Unable to allocate memory.\n");
goto exit;
}
if (snprintf(filename, path_max, unshield->filename_pattern, index, suffix) >= path_max) {
unshield_error("Pathname exceeds system limits.\n");
goto exit;
}
exit:
return filename;
}
return NULL;
}
FILE* unshield_fopen_for_reading(Unshield* unshield, int index, const char* suffix)
{
if (unshield && unshield->filename_pattern)
{
FILE* result = NULL;
char* filename = get_filename(unshield, index, suffix);
char* dirname = unshield_get_base_directory_name(unshield);
const char *q;
struct dirent *dent = NULL;
DIR *sourcedir = NULL;
long int path_max = unshield_get_path_max(unshield);
q=strrchr(filename,'/');
if (q)
q++;
else
q=filename;
sourcedir = opendir(dirname);
/* Search for the file case-insensitively */
if (sourcedir)
{
for (dent=readdir(sourcedir);dent;dent=readdir(sourcedir))
{
if (!(strcasecmp(q, dent->d_name)))
{
/*unshield_trace("Found match %s\n",dent->d_name);*/
break;
}
}
if (dent == NULL)
{
unshield_trace("File %s not found even case insensitive\n",filename);
goto exit;
}
else
if(snprintf(filename, path_max, "%s/%s", dirname, dent->d_name)>=path_max)
{
unshield_error("Pathname exceeds system limits.\n");
goto exit;
}
}
else
unshield_trace("Could not open directory %s error %s\n", dirname, strerror(errno));
#if VERBOSE
unshield_trace("Opening file '%s'", filename);
#endif
result = fopen(filename, "rb");
exit:
if (sourcedir)
closedir(sourcedir);
free(filename);
free(dirname);
return result;
}
return NULL;
}
long unshield_fsize(FILE* file)
{
long result;
long previous = ftell(file);
fseek(file, 0L, SEEK_END);
result = ftell(file);
fseek(file, previous, SEEK_SET);
return result;
}
bool unshield_read_common_header(uint8_t** buffer, CommonHeader* common)
{
uint8_t* p = *buffer;
common->signature = READ_UINT32(p); p += 4;
if (CAB_SIGNATURE != common->signature)
{
unshield_error("Invalid file signature");
if (MSCF_SIGNATURE == common->signature)
unshield_warning("Found Microsoft Cabinet header. Use cabextract (https://www.cabextract.org.uk/) to unpack this file.");
return false;
}
common->version = READ_UINT32(p); p += 4;
common->volume_info = READ_UINT32(p); p += 4;
common->cab_descriptor_offset = READ_UINT32(p); p += 4;
common->cab_descriptor_size = READ_UINT32(p); p += 4;
#if VERBOSE
unshield_trace("Common header: %08x %08x %08x %08x",
common->version,
common->volume_info,
common->cab_descriptor_offset,
common->cab_descriptor_size);
#endif
*buffer = p;
return true;
}
/**
Get pointer at cab descriptor + offset
*/
uint8_t* unshield_header_get_buffer(Header* header, uint32_t offset)
{
if (offset)
return
header->data +
header->common.cab_descriptor_offset +
offset;
else
return NULL;
}
static int unshield_strlen_utf16(const uint16_t* utf16)
{
const uint16_t* current = utf16;
while (*current++)
;
return current - utf16;
}
static StringBuffer* unshield_add_string_buffer(Header* header)
{
StringBuffer* result = NEW1(StringBuffer);
result->next = header->string_buffer;
return header->string_buffer = result;
}
static const char* unshield_utf16_to_utf8(Header* header, const uint16_t* utf16)
{
StringBuffer* string_buffer = unshield_add_string_buffer(header);
int length = unshield_strlen_utf16(utf16);
int buffer_size = 3 * length + 1;
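/* Worst case: a single UTF-16 code unit expands to at most 3 UTF-8 bytes
   (BMP characters), and a surrogate pair (2 units) to 4 bytes, so 3 bytes
   per unit plus a terminator is always enough. */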
char* target = string_buffer->string = NEW(char, buffer_size);
ConversionResult result = ConvertUTF16toUTF8(
(const UTF16**)&utf16, utf16 + length + 1,
(UTF8**)&target, (UTF8*)(target + buffer_size), lenientConversion);
if (result != conversionOK)
{
/* fail fast */
abort();
}
return string_buffer->string;
}
const char* unshield_get_utf8_string(Header* header, const void* buffer)
{
if (header->major_version >= 17 && buffer != NULL)
{
return unshield_utf16_to_utf8(header, (const uint16_t*)buffer);
}
else
{
return (const char*)buffer;
}
}
/**
Get string at cab descriptor offset + string offset
*/
const char* unshield_header_get_string(Header* header, uint32_t offset)
{
return unshield_get_utf8_string(header, unshield_header_get_buffer(header, offset));
}
| {
"language": "C"
} |
#include <linux/fs.h>
#include <linux/types.h>
#include "ctree.h"
#include "disk-io.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "export.h"
#define BTRFS_FID_SIZE_NON_CONNECTABLE (offsetof(struct btrfs_fid, \
parent_objectid) / 4)
#define BTRFS_FID_SIZE_CONNECTABLE (offsetof(struct btrfs_fid, \
parent_root_objectid) / 4)
#define BTRFS_FID_SIZE_CONNECTABLE_ROOT (sizeof(struct btrfs_fid) / 4)
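/*
 * The sizes above are expressed in 32-bit words (hence the division by 4),
 * because the generic exportfs code measures file handle lengths, including
 * *max_len in btrfs_encode_fh() below, in units of u32.
 */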
static int btrfs_encode_fh(struct inode *inode, u32 *fh, int *max_len,
struct inode *parent)
{
struct btrfs_fid *fid = (struct btrfs_fid *)fh;
int len = *max_len;
int type;
if (parent && (len < BTRFS_FID_SIZE_CONNECTABLE)) {
*max_len = BTRFS_FID_SIZE_CONNECTABLE;
return FILEID_INVALID;
} else if (len < BTRFS_FID_SIZE_NON_CONNECTABLE) {
*max_len = BTRFS_FID_SIZE_NON_CONNECTABLE;
return FILEID_INVALID;
}
len = BTRFS_FID_SIZE_NON_CONNECTABLE;
type = FILEID_BTRFS_WITHOUT_PARENT;
fid->objectid = btrfs_ino(inode);
fid->root_objectid = BTRFS_I(inode)->root->objectid;
fid->gen = inode->i_generation;
if (parent) {
u64 parent_root_id;
fid->parent_objectid = BTRFS_I(parent)->location.objectid;
fid->parent_gen = parent->i_generation;
parent_root_id = BTRFS_I(parent)->root->objectid;
if (parent_root_id != fid->root_objectid) {
fid->parent_root_objectid = parent_root_id;
len = BTRFS_FID_SIZE_CONNECTABLE_ROOT;
type = FILEID_BTRFS_WITH_PARENT_ROOT;
} else {
len = BTRFS_FID_SIZE_CONNECTABLE;
type = FILEID_BTRFS_WITH_PARENT;
}
}
*max_len = len;
return type;
}
static struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid,
u64 root_objectid, u32 generation,
int check_generation)
{
struct btrfs_fs_info *fs_info = btrfs_sb(sb);
struct btrfs_root *root;
struct inode *inode;
struct btrfs_key key;
int index;
int err = 0;
if (objectid < BTRFS_FIRST_FREE_OBJECTID)
return ERR_PTR(-ESTALE);
key.objectid = root_objectid;
key.type = BTRFS_ROOT_ITEM_KEY;
key.offset = (u64)-1;
index = srcu_read_lock(&fs_info->subvol_srcu);
root = btrfs_read_fs_root_no_name(fs_info, &key);
if (IS_ERR(root)) {
err = PTR_ERR(root);
goto fail;
}
key.objectid = objectid;
key.type = BTRFS_INODE_ITEM_KEY;
key.offset = 0;
inode = btrfs_iget(sb, &key, root, NULL);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
goto fail;
}
srcu_read_unlock(&fs_info->subvol_srcu, index);
if (check_generation && generation != inode->i_generation) {
iput(inode);
return ERR_PTR(-ESTALE);
}
return d_obtain_alias(inode);
fail:
srcu_read_unlock(&fs_info->subvol_srcu, index);
return ERR_PTR(err);
}
static struct dentry *btrfs_fh_to_parent(struct super_block *sb, struct fid *fh,
int fh_len, int fh_type)
{
struct btrfs_fid *fid = (struct btrfs_fid *) fh;
u64 objectid, root_objectid;
u32 generation;
if (fh_type == FILEID_BTRFS_WITH_PARENT) {
if (fh_len < BTRFS_FID_SIZE_CONNECTABLE)
return NULL;
root_objectid = fid->root_objectid;
} else if (fh_type == FILEID_BTRFS_WITH_PARENT_ROOT) {
if (fh_len < BTRFS_FID_SIZE_CONNECTABLE_ROOT)
return NULL;
root_objectid = fid->parent_root_objectid;
} else
return NULL;
objectid = fid->parent_objectid;
generation = fid->parent_gen;
return btrfs_get_dentry(sb, objectid, root_objectid, generation, 1);
}
static struct dentry *btrfs_fh_to_dentry(struct super_block *sb, struct fid *fh,
int fh_len, int fh_type)
{
struct btrfs_fid *fid = (struct btrfs_fid *) fh;
u64 objectid, root_objectid;
u32 generation;
if ((fh_type != FILEID_BTRFS_WITH_PARENT ||
fh_len < BTRFS_FID_SIZE_CONNECTABLE) &&
(fh_type != FILEID_BTRFS_WITH_PARENT_ROOT ||
fh_len < BTRFS_FID_SIZE_CONNECTABLE_ROOT) &&
(fh_type != FILEID_BTRFS_WITHOUT_PARENT ||
fh_len < BTRFS_FID_SIZE_NON_CONNECTABLE))
return NULL;
objectid = fid->objectid;
root_objectid = fid->root_objectid;
generation = fid->gen;
return btrfs_get_dentry(sb, objectid, root_objectid, generation, 1);
}
static struct dentry *btrfs_get_parent(struct dentry *child)
{
struct inode *dir = d_inode(child);
struct btrfs_root *root = BTRFS_I(dir)->root;
struct btrfs_path *path;
struct extent_buffer *leaf;
struct btrfs_root_ref *ref;
struct btrfs_key key;
struct btrfs_key found_key;
int ret;
path = btrfs_alloc_path();
if (!path)
return ERR_PTR(-ENOMEM);
if (btrfs_ino(dir) == BTRFS_FIRST_FREE_OBJECTID) {
key.objectid = root->root_key.objectid;
key.type = BTRFS_ROOT_BACKREF_KEY;
key.offset = (u64)-1;
root = root->fs_info->tree_root;
} else {
key.objectid = btrfs_ino(dir);
key.type = BTRFS_INODE_REF_KEY;
key.offset = (u64)-1;
}
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
goto fail;
BUG_ON(ret == 0); /* Key with offset of -1 found */
if (path->slots[0] == 0) {
ret = -ENOENT;
goto fail;
}
path->slots[0]--;
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
if (found_key.objectid != key.objectid || found_key.type != key.type) {
ret = -ENOENT;
goto fail;
}
if (found_key.type == BTRFS_ROOT_BACKREF_KEY) {
ref = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_root_ref);
key.objectid = btrfs_root_ref_dirid(leaf, ref);
} else {
key.objectid = found_key.offset;
}
btrfs_free_path(path);
if (found_key.type == BTRFS_ROOT_BACKREF_KEY) {
return btrfs_get_dentry(root->fs_info->sb, key.objectid,
found_key.offset, 0, 0);
}
key.type = BTRFS_INODE_ITEM_KEY;
key.offset = 0;
return d_obtain_alias(btrfs_iget(root->fs_info->sb, &key, root, NULL));
fail:
btrfs_free_path(path);
return ERR_PTR(ret);
}
static int btrfs_get_name(struct dentry *parent, char *name,
struct dentry *child)
{
struct inode *inode = d_inode(child);
struct inode *dir = d_inode(parent);
struct btrfs_path *path;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct btrfs_inode_ref *iref;
struct btrfs_root_ref *rref;
struct extent_buffer *leaf;
unsigned long name_ptr;
struct btrfs_key key;
int name_len;
int ret;
u64 ino;
if (!dir || !inode)
return -EINVAL;
if (!S_ISDIR(dir->i_mode))
return -EINVAL;
ino = btrfs_ino(inode);
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
path->leave_spinning = 1;
if (ino == BTRFS_FIRST_FREE_OBJECTID) {
key.objectid = BTRFS_I(inode)->root->root_key.objectid;
key.type = BTRFS_ROOT_BACKREF_KEY;
key.offset = (u64)-1;
root = root->fs_info->tree_root;
} else {
key.objectid = ino;
key.offset = btrfs_ino(dir);
key.type = BTRFS_INODE_REF_KEY;
}
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0) {
btrfs_free_path(path);
return ret;
} else if (ret > 0) {
if (ino == BTRFS_FIRST_FREE_OBJECTID) {
path->slots[0]--;
} else {
btrfs_free_path(path);
return -ENOENT;
}
}
leaf = path->nodes[0];
if (ino == BTRFS_FIRST_FREE_OBJECTID) {
rref = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_root_ref);
name_ptr = (unsigned long)(rref + 1);
name_len = btrfs_root_ref_name_len(leaf, rref);
} else {
iref = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_inode_ref);
name_ptr = (unsigned long)(iref + 1);
name_len = btrfs_inode_ref_name_len(leaf, iref);
}
read_extent_buffer(leaf, name, name_ptr, name_len);
btrfs_free_path(path);
/*
* have to add the null termination to make sure that reconnect_path
* gets the right len for strlen
*/
name[name_len] = '\0';
return 0;
}
const struct export_operations btrfs_export_ops = {
.encode_fh = btrfs_encode_fh,
.fh_to_dentry = btrfs_fh_to_dentry,
.fh_to_parent = btrfs_fh_to_parent,
.get_parent = btrfs_get_parent,
.get_name = btrfs_get_name,
};
| {
"language": "C"
} |
/**************************************************************************
*
* Copyright 2009 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#ifndef ID_CONTEXT_H
#define ID_CONTEXT_H
#include "pipe/p_state.h"
#include "pipe/p_context.h"
struct identity_context {
struct pipe_context base; /**< base class */
struct pipe_context *pipe;
};
struct pipe_context *
identity_context_create(struct pipe_screen *screen, struct pipe_context *pipe);
static INLINE struct identity_context *
identity_context(struct pipe_context *pipe)
{
return (struct identity_context *)pipe;
}
#endif /* ID_CONTEXT_H */
| {
"language": "C"
} |
/********************************************************************\
* SchedXaction.c -- Scheduled Transaction implementation. *
* Copyright (C) 2001,2007 Joshua Sled <jsled@asynchronous.org> *
* *
* This program is free software; you can redistribute it and/or *
* modify it under the terms of the GNU General Public License as *
* published by the Free Software Foundation; either version 2 of *
* the License, or (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License*
* along with this program; if not, contact: *
* *
* Free Software Foundation Voice: +1-617-542-5942 *
* 51 Franklin Street, Fifth Floor Fax: +1-617-542-2652 *
* Boston, MA 02110-1301, USA gnu@gnu.org *
* *
\********************************************************************/
#include <config.h>
#include <glib.h>
#include <glib/gi18n.h>
#include <string.h>
#include <stdint.h>
#include "qof.h"
#include "Account.h"
#include "SX-book.h"
#include "SX-ttinfo.h"
#include "SchedXaction.h"
#include "Transaction.h"
#include "gnc-engine.h"
#include "engine-helpers.h"
#include "qofinstance-p.h"
#undef G_LOG_DOMAIN
#define G_LOG_DOMAIN "gnc.engine.sx"
enum
{
PROP_0,
PROP_NAME, /* Table */
PROP_ENABLED, /* Table */
PROP_START_DATE, /* Table */
PROP_END_DATE, /* Table */
PROP_LAST_OCCURANCE_DATE, /* Table */
PROP_NUM_OCCURANCE, /* Table */
PROP_REM_OCCURANCE, /* Table */
PROP_AUTO_CREATE, /* Table */
PROP_AUTO_CREATE_NOTIFY, /* Table */
PROP_ADVANCE_CREATION_DAYS, /* Table */
PROP_ADVANCE_REMINDER_DAYS, /* Table */
PROP_INSTANCE_COUNT, /* Table */
PROP_TEMPLATE_ACCOUNT /* Table */
};
/* GObject initialization */
G_DEFINE_TYPE(SchedXaction, gnc_schedxaction, QOF_TYPE_INSTANCE);
static void
gnc_schedxaction_init(SchedXaction* sx)
{
sx->schedule = NULL;
g_date_clear( &sx->last_date, 1 );
g_date_clear( &sx->start_date, 1 );
g_date_clear( &sx->end_date, 1 );
sx->enabled = 1;
sx->num_occurances_total = 0;
sx->autoCreateOption = FALSE;
sx->autoCreateNotify = FALSE;
sx->advanceCreateDays = 0;
sx->advanceRemindDays = 0;
sx->instance_num = 0;
sx->deferredList = NULL;
}
static void
gnc_schedxaction_dispose(GObject *sxp)
{
G_OBJECT_CLASS(gnc_schedxaction_parent_class)->dispose(sxp);
}
static void
gnc_schedxaction_finalize(GObject* sxp)
{
G_OBJECT_CLASS(gnc_schedxaction_parent_class)->finalize(sxp);
}
/* Note that g_value_set_object() refs the object, as does
* g_object_get(). But g_object_get() only unrefs once when it disgorges
* the object, leaving an unbalanced ref, which leaks. So instead of
* using g_value_set_object(), use g_value_take_object() which doesn't
* ref the object when used in get_property().
*/
static void
gnc_schedxaction_get_property (GObject *object,
guint prop_id,
GValue *value,
GParamSpec *pspec)
{
SchedXaction *sx;
g_return_if_fail(GNC_IS_SCHEDXACTION(object));
sx = GNC_SCHEDXACTION(object);
switch (prop_id)
{
case PROP_NAME:
g_value_set_string(value, sx->name);
break;
case PROP_ENABLED:
g_value_set_boolean(value, sx->enabled);
break;
case PROP_NUM_OCCURANCE:
g_value_set_int(value, sx->num_occurances_total);
break;
case PROP_REM_OCCURANCE:
g_value_set_int(value, sx->num_occurances_remain);
break;
case PROP_AUTO_CREATE:
g_value_set_boolean(value, sx->autoCreateOption);
break;
case PROP_AUTO_CREATE_NOTIFY:
g_value_set_boolean(value, sx->autoCreateNotify);
break;
case PROP_ADVANCE_CREATION_DAYS:
g_value_set_int(value, sx->advanceCreateDays);
break;
case PROP_ADVANCE_REMINDER_DAYS:
g_value_set_int(value, sx->advanceRemindDays);
break;
case PROP_START_DATE:
g_value_set_boxed(value, &sx->start_date);
break;
case PROP_END_DATE:
/* g_value_set_boxed raises a critical error if sx->end_date
* is invalid */
if (g_date_valid (&sx->end_date))
g_value_set_boxed(value, &sx->end_date);
break;
case PROP_LAST_OCCURANCE_DATE:
/* g_value_set_boxed raises a critical error if sx->last_date
* is invalid */
if (g_date_valid (&sx->last_date))
g_value_set_boxed(value, &sx->last_date);
break;
case PROP_INSTANCE_COUNT:
g_value_set_int(value, sx->instance_num);
break;
case PROP_TEMPLATE_ACCOUNT:
g_value_take_object(value, sx->template_acct);
break;
default:
G_OBJECT_WARN_INVALID_PROPERTY_ID(object, prop_id, pspec);
break;
}
}
static void
gnc_schedxaction_set_property (GObject *object,
guint prop_id,
const GValue *value,
GParamSpec *pspec)
{
SchedXaction *sx;
g_return_if_fail(GNC_IS_SCHEDXACTION(object));
sx = GNC_SCHEDXACTION(object);
g_assert (qof_instance_get_editlevel(sx));
switch (prop_id)
{
case PROP_NAME:
xaccSchedXactionSetName(sx, g_value_get_string(value));
break;
case PROP_ENABLED:
xaccSchedXactionSetEnabled(sx, g_value_get_boolean(value));
break;
case PROP_NUM_OCCURANCE:
xaccSchedXactionSetNumOccur(sx, g_value_get_int(value));
break;
case PROP_REM_OCCURANCE:
xaccSchedXactionSetRemOccur(sx, g_value_get_int(value));
break;
case PROP_AUTO_CREATE:
xaccSchedXactionSetAutoCreate(sx, g_value_get_boolean(value), sx->autoCreateNotify);
break;
case PROP_AUTO_CREATE_NOTIFY:
xaccSchedXactionSetAutoCreate(sx, sx->autoCreateOption, g_value_get_boolean(value));
break;
case PROP_ADVANCE_CREATION_DAYS:
xaccSchedXactionSetAdvanceCreation(sx, g_value_get_int(value));
break;
case PROP_ADVANCE_REMINDER_DAYS:
xaccSchedXactionSetAdvanceReminder(sx, g_value_get_int(value));
break;
case PROP_START_DATE:
/* Note: when passed through a boxed gvalue, the julian value of the date is copied.
The date may appear invalid until a function that requires a dmy calculation is
called. */
xaccSchedXactionSetStartDate(sx, g_value_get_boxed(value));
break;
case PROP_END_DATE:
/* Note: when passed through a boxed gvalue, the julian value of the date is copied.
The date may appear invalid until a function that requires a dmy calculation is
called. */
xaccSchedXactionSetEndDate(sx, g_value_get_boxed(value));
break;
case PROP_LAST_OCCURANCE_DATE:
/* Note: when passed through a boxed gvalue, the julian value of the date is copied.
The date may appear invalid until a function that requires a dmy calculation is
called. */
xaccSchedXactionSetLastOccurDate(sx, g_value_get_boxed(value));
break;
case PROP_INSTANCE_COUNT:
gnc_sx_set_instance_count(sx, g_value_get_int(value));
break;
case PROP_TEMPLATE_ACCOUNT:
sx_set_template_account(sx, g_value_get_object(value));
break;
default:
G_OBJECT_WARN_INVALID_PROPERTY_ID(object, prop_id, pspec);
break;
}
}
static void
gnc_schedxaction_class_init (SchedXactionClass *klass)
{
GObjectClass *gobject_class = G_OBJECT_CLASS (klass);
gobject_class->dispose = gnc_schedxaction_dispose;
gobject_class->finalize = gnc_schedxaction_finalize;
gobject_class->set_property = gnc_schedxaction_set_property;
gobject_class->get_property = gnc_schedxaction_get_property;
g_object_class_install_property
(gobject_class,
PROP_NAME,
g_param_spec_string ("name",
"Scheduled Transaction Name",
"The name is an arbitrary string "
"assigned by the user. It is intended to "
"a short, 5 to 30 character long string "
"that is displayed by the GUI.",
NULL,
G_PARAM_READWRITE));
g_object_class_install_property
(gobject_class,
PROP_ENABLED,
g_param_spec_boolean ("enabled",
"Enabled",
"TRUE if the scheduled transaction is enabled.",
TRUE,
G_PARAM_READWRITE));
g_object_class_install_property
(gobject_class,
PROP_NUM_OCCURANCE,
g_param_spec_int ("num-occurance",
"Number of occurances",
"Total number of occurances for this scheduled transaction.",
0,
G_MAXINT16,
1,
G_PARAM_READWRITE));
g_object_class_install_property
(gobject_class,
PROP_REM_OCCURANCE,
g_param_spec_int ("rem-occurance",
"Number of occurances remaining",
"Remaining number of occurances for this scheduled transaction.",
0,
G_MAXINT16,
1,
G_PARAM_READWRITE));
g_object_class_install_property
(gobject_class,
PROP_AUTO_CREATE,
g_param_spec_boolean ("auto-create",
"Auto-create",
"TRUE if the transaction will be automatically "
"created when its time comes.",
FALSE,
G_PARAM_READWRITE));
g_object_class_install_property
(gobject_class,
PROP_AUTO_CREATE_NOTIFY,
g_param_spec_boolean ("auto-create-notify",
"Auto-create-notify",
"TRUE if the the user will be notified when the transaction "
"is automatically created.",
FALSE,
G_PARAM_READWRITE));
g_object_class_install_property
(gobject_class,
PROP_ADVANCE_CREATION_DAYS,
g_param_spec_int ("advance-creation-days",
"Days in advance to create",
"Number of days in advance to create this scheduled transaction.",
0,
G_MAXINT16,
0,
G_PARAM_READWRITE));
g_object_class_install_property
(gobject_class,
PROP_ADVANCE_REMINDER_DAYS,
g_param_spec_int ("advance-reminder-days",
"Days in advance to remind",
"Number of days in advance to remind about this scheduled transaction.",
0,
G_MAXINT16,
0,
G_PARAM_READWRITE));
g_object_class_install_property
(gobject_class,
PROP_START_DATE,
g_param_spec_boxed("start-date",
"Start Date",
"Date for the first occurence for the scheduled transaction.",
G_TYPE_DATE,
G_PARAM_READWRITE));
g_object_class_install_property
(gobject_class,
PROP_END_DATE,
g_param_spec_boxed("end-date",
"End Date",
"Date for the scheduled transaction to end.",
G_TYPE_DATE,
G_PARAM_READWRITE));
g_object_class_install_property
(gobject_class,
PROP_LAST_OCCURANCE_DATE,
g_param_spec_boxed("last-occurance-date",
"Last Occurance Date",
"Date for the last occurance of the scheduled transaction.",
G_TYPE_DATE,
G_PARAM_READWRITE));
g_object_class_install_property
(gobject_class,
PROP_INSTANCE_COUNT,
g_param_spec_int ("instance-count",
"Instance count",
"Number of instances of this scheduled transaction.",
0,
G_MAXINT16,
0,
G_PARAM_READWRITE));
g_object_class_install_property
(gobject_class,
PROP_TEMPLATE_ACCOUNT,
g_param_spec_object("template-account",
"Template account",
"Account which holds the template transactions.",
GNC_TYPE_ACCOUNT,
G_PARAM_READWRITE));
}
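/* Illustrative only (not part of the original file): because ordinary
 * GObject properties are installed above, a SchedXaction can also be
 * configured generically with g_object_set()/g_object_get() using the
 * registered property names.  Assuming `sx' is an existing SchedXaction:
 *
 *   gnc_sx_begin_edit(sx);
 *   g_object_set(G_OBJECT(sx),
 *                "name", "Monthly rent",
 *                "enabled", TRUE,
 *                "advance-creation-days", 3,
 *                NULL);
 *   gnc_sx_commit_edit(sx);
 *
 * The begin/commit pair matters because set_property() asserts that the
 * instance is inside an edit block (see the g_assert above).
 */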
static void
xaccSchedXactionInit(SchedXaction *sx, QofBook *book)
{
Account *ra;
const GncGUID *guid;
gchar guidstr[GUID_ENCODING_LENGTH+1];
qof_instance_init_data (&sx->inst, GNC_ID_SCHEDXACTION, book);
/* create a new template account for our splits */
sx->template_acct = xaccMallocAccount(book);
guid = qof_instance_get_guid( sx );
xaccAccountBeginEdit( sx->template_acct );
guid_to_string_buff( guid, guidstr );
xaccAccountSetName( sx->template_acct, guidstr);
xaccAccountSetCommodity
(sx->template_acct,
gnc_commodity_table_lookup( gnc_commodity_table_get_table(book),
GNC_COMMODITY_NS_TEMPLATE, "template") );
xaccAccountSetType( sx->template_acct, ACCT_TYPE_BANK );
xaccAccountCommitEdit( sx->template_acct );
ra = gnc_book_get_template_root( book );
gnc_account_append_child( ra, sx->template_acct );
}
SchedXaction*
xaccSchedXactionMalloc(QofBook *book)
{
SchedXaction *sx;
g_return_val_if_fail (book, NULL);
sx = g_object_new(GNC_TYPE_SCHEDXACTION, NULL);
xaccSchedXactionInit( sx, book );
qof_event_gen( &sx->inst, QOF_EVENT_CREATE , NULL);
return sx;
}
static void
sxprivTransMapDelete( gpointer data, gpointer user_data )
{
Transaction *t = (Transaction *) data;
xaccTransBeginEdit( t );
xaccTransDestroy( t );
xaccTransCommitEdit( t );
return;
}
static void
delete_template_trans(SchedXaction *sx)
{
GList *templ_acct_splits, *curr_split_listref;
Split *curr_split;
Transaction *split_trans;
GList *templ_acct_transactions = NULL;
templ_acct_splits
= xaccAccountGetSplitList(sx->template_acct);
for (curr_split_listref = templ_acct_splits;
curr_split_listref;
curr_split_listref = curr_split_listref->next)
{
curr_split = (Split *) curr_split_listref->data;
split_trans = xaccSplitGetParent(curr_split);
if (! (g_list_find(templ_acct_transactions, split_trans)))
{
templ_acct_transactions
= g_list_prepend(templ_acct_transactions, split_trans);
}
}
g_list_foreach(templ_acct_transactions,
sxprivTransMapDelete,
NULL);
return;
}
void
sx_set_template_account (SchedXaction *sx, Account *account)
{
Account *old;
old = sx->template_acct;
sx->template_acct = account;
if (old)
{
xaccAccountBeginEdit(old);
xaccAccountDestroy(old);
}
}
void
xaccSchedXactionDestroy( SchedXaction *sx )
{
qof_instance_set_destroying( QOF_INSTANCE(sx), TRUE );
gnc_sx_commit_edit( sx );
}
static void
xaccSchedXactionFree( SchedXaction *sx )
{
GList *l;
if ( sx == NULL ) return;
qof_event_gen( &sx->inst, QOF_EVENT_DESTROY , NULL);
if ( sx->name )
g_free( sx->name );
/*
* we have to delete the transactions in the
* template account ourselves
*/
delete_template_trans( sx );
/*
* xaccAccountDestroy removes the account from
* its group for us AFAICT. If shutting down,
* the account is being deleted separately.
*/
if (!qof_book_shutting_down(qof_instance_get_book(sx)))
{
xaccAccountBeginEdit(sx->template_acct);
xaccAccountDestroy(sx->template_acct);
}
for ( l = sx->deferredList; l; l = l->next )
{
gnc_sx_destroy_temporal_state( l->data );
l->data = NULL;
}
if ( sx->deferredList )
{
g_list_free( sx->deferredList );
sx->deferredList = NULL;
}
/* qof_instance_release (&sx->inst); */
g_object_unref( sx );
}
/* ============================================================ */
void
gnc_sx_begin_edit (SchedXaction *sx)
{
qof_begin_edit (&sx->inst);
}
static void sx_free(QofInstance* inst )
{
xaccSchedXactionFree( GNC_SX(inst) );
}
static void commit_err (QofInstance *inst, QofBackendError errcode)
{
g_critical("Failed to commit: %d", errcode);
gnc_engine_signal_commit_error( errcode );
}
static void commit_done(QofInstance *inst)
{
qof_event_gen (inst, QOF_EVENT_MODIFY, NULL);
}
void
gnc_sx_commit_edit (SchedXaction *sx)
{
if (!qof_commit_edit (QOF_INSTANCE(sx))) return;
qof_commit_edit_part2 (&sx->inst, commit_err, commit_done, sx_free);
}
/* ============================================================ */
GList*
gnc_sx_get_schedule(const SchedXaction *sx)
{
return sx->schedule;
}
void
gnc_sx_set_schedule(SchedXaction *sx, GList *schedule)
{
g_return_if_fail(sx);
gnc_sx_begin_edit(sx);
sx->schedule = schedule;
qof_instance_set_dirty(&sx->inst);
gnc_sx_commit_edit(sx);
}
gchar *
xaccSchedXactionGetName( const SchedXaction *sx )
{
return sx->name;
}
void
xaccSchedXactionSetName( SchedXaction *sx, const gchar *newName )
{
g_return_if_fail( newName != NULL );
gnc_sx_begin_edit(sx);
if ( sx->name != NULL )
{
g_free( sx->name );
sx->name = NULL;
}
sx->name = g_strdup( newName );
qof_instance_set_dirty(&sx->inst);
gnc_sx_commit_edit(sx);
}
const GDate*
xaccSchedXactionGetStartDate(const SchedXaction *sx )
{
g_assert (sx);
return &sx->start_date;
}
time64
xaccSchedXactionGetStartDateTT(const SchedXaction *sx )
{
g_assert (sx);
return gdate_to_time64(sx->start_date);
}
void
xaccSchedXactionSetStartDate( SchedXaction *sx, const GDate* newStart )
{
if ( newStart == NULL || !g_date_valid( newStart ))
{
/* XXX: I reject the bad data - is this the right
* thing to do <rgmerk>.
* This warning is only human readable - the caller
* doesn't know the call failed. This is bad
*/
g_critical("Invalid Start Date");
return;
}
gnc_sx_begin_edit(sx);
sx->start_date = *newStart;
qof_instance_set_dirty(&sx->inst);
gnc_sx_commit_edit(sx);
}
void
xaccSchedXactionSetStartDateTT( SchedXaction *sx, const time64 newStart )
{
if ( newStart == INT64_MAX )
{
/* XXX: I reject the bad data - is this the right
* thing to do <rgmerk>.
* This warning is only human readable - the caller
* doesn't know the call failed. This is bad
*/
g_critical("Invalid Start Date");
return;
}
gnc_sx_begin_edit(sx);
gnc_gdate_set_time64(&sx->start_date, newStart);
qof_instance_set_dirty(&sx->inst);
gnc_sx_commit_edit(sx);
}
gboolean
xaccSchedXactionHasEndDate( const SchedXaction *sx )
{
return sx != NULL && g_date_valid( &sx->end_date );
}
const GDate*
xaccSchedXactionGetEndDate(const SchedXaction *sx )
{
g_assert (sx);
return &sx->end_date;
}
void
xaccSchedXactionSetEndDate( SchedXaction *sx, const GDate *newEnd )
{
/* Note that an invalid GDate IS a permissible value: It means that
* the SX is to run "forever". See gnc_sxed_save_sx() and
* schedXact_editor_populate() in dialog-sx-editor.c.
*/
if (newEnd == NULL ||
(g_date_valid(newEnd) && g_date_compare( newEnd, &sx->start_date ) < 0 ))
{
/* XXX: I reject the bad data - is this the right
* thing to do <rgmerk>.
* This warning is only human readable - the caller
* doesn't know the call failed. This is bad
*/
g_critical("Bad End Date: Invalid or before Start Date");
return;
}
gnc_sx_begin_edit(sx);
sx->end_date = *newEnd;
qof_instance_set_dirty(&sx->inst);
gnc_sx_commit_edit(sx);
}
const GDate*
xaccSchedXactionGetLastOccurDate(const SchedXaction *sx )
{
return &sx->last_date;
}
time64
xaccSchedXactionGetLastOccurDateTT(const SchedXaction *sx )
{
return gdate_to_time64(sx->last_date);
}
void
xaccSchedXactionSetLastOccurDate(SchedXaction *sx, const GDate* new_last_occur)
{
g_return_if_fail (new_last_occur != NULL);
if (g_date_valid(&sx->last_date)
&& g_date_compare(&sx->last_date, new_last_occur) == 0)
return;
gnc_sx_begin_edit(sx);
sx->last_date = *new_last_occur;
qof_instance_set_dirty(&sx->inst);
gnc_sx_commit_edit(sx);
}
void
xaccSchedXactionSetLastOccurDateTT(SchedXaction *sx, time64 new_last_occur)
{
GDate last_occur;
g_return_if_fail (new_last_occur != INT64_MAX);
gnc_gdate_set_time64(&last_occur, new_last_occur);
if (g_date_valid(&sx->last_date)
&& g_date_compare(&sx->last_date, &last_occur) == 0)
return;
gnc_sx_begin_edit(sx);
sx->last_date = last_occur;
qof_instance_set_dirty(&sx->inst);
gnc_sx_commit_edit(sx);
}
gboolean
xaccSchedXactionHasOccurDef( const SchedXaction *sx )
{
return ( xaccSchedXactionGetNumOccur( sx ) != 0 );
}
gint
xaccSchedXactionGetNumOccur( const SchedXaction *sx )
{
return sx->num_occurances_total;
}
void
xaccSchedXactionSetNumOccur(SchedXaction *sx, gint new_num)
{
if (sx->num_occurances_total == new_num)
return;
gnc_sx_begin_edit(sx);
sx->num_occurances_remain = sx->num_occurances_total = new_num;
qof_instance_set_dirty(&sx->inst);
gnc_sx_commit_edit(sx);
}
gint
xaccSchedXactionGetRemOccur( const SchedXaction *sx )
{
return sx->num_occurances_remain;
}
void
xaccSchedXactionSetRemOccur(SchedXaction *sx, gint num_remain)
{
/* FIXME This condition can be tightened up */
if (num_remain > sx->num_occurances_total)
{
g_warning("number remaining [%d] > total occurrences [%d]",
num_remain, sx->num_occurances_total);
}
else
{
if (num_remain == sx->num_occurances_remain)
return;
gnc_sx_begin_edit(sx);
sx->num_occurances_remain = num_remain;
qof_instance_set_dirty(&sx->inst);
gnc_sx_commit_edit(sx);
}
}
gint gnc_sx_get_num_occur_daterange(const SchedXaction *sx, const GDate* start_date, const GDate* end_date)
{
gint result = 0;
SXTmpStateData *tmpState;
gboolean countFirstDate;
/* SX still active? If not, return now. */
if ((xaccSchedXactionHasOccurDef(sx)
&& xaccSchedXactionGetRemOccur(sx) <= 0)
|| (xaccSchedXactionHasEndDate(sx)
&& g_date_compare(xaccSchedXactionGetEndDate(sx), start_date) < 0))
{
return result;
}
tmpState = gnc_sx_create_temporal_state (sx);
/* Should we count the first valid date we encounter? Only if the
* SX has not yet occurred so far, or if its last valid date was
* before the start date. */
countFirstDate = !g_date_valid(&tmpState->last_date)
|| (g_date_compare(&tmpState->last_date, start_date) < 0);
/* No valid date? SX has never occurred so far. */
if (!g_date_valid(&tmpState->last_date))
{
/* SX has never occurred so far */
gnc_sx_incr_temporal_state (sx, tmpState);
if (xaccSchedXactionHasOccurDef(sx) && tmpState->num_occur_rem < 0)
{
gnc_sx_destroy_temporal_state (tmpState);
return result;
}
}
/* Increase the tmpState until we are in our interval of
* interest. Only calculate anything if the sx hasn't already
* ended. */
while (g_date_compare(&tmpState->last_date, start_date) < 0)
{
gnc_sx_incr_temporal_state (sx, tmpState);
if (xaccSchedXactionHasOccurDef(sx) && tmpState->num_occur_rem < 0)
{
gnc_sx_destroy_temporal_state (tmpState);
return result;
}
}
/* Now we are in our interval of interest. Increment the
* occurrence date until we are beyond the end of our
* interval. Make sure to check for invalid dates here: It means
* the SX has ended. */
while (g_date_valid(&tmpState->last_date)
&& (g_date_compare(&tmpState->last_date, end_date) <= 0)
&& (!xaccSchedXactionHasEndDate(sx)
|| g_date_compare(&tmpState->last_date, xaccSchedXactionGetEndDate(sx)) <= 0)
&& (!xaccSchedXactionHasOccurDef(sx)
/* The >=0 (i.e. the ==) is important here, otherwise
* we miss the last valid occurrence of a SX which is
* limited by num_occur */
|| tmpState->num_occur_rem >= 0))
{
++result;
gnc_sx_incr_temporal_state (sx, tmpState);
}
/* If the first valid date shouldn't be counted, decrease the
* result number by one. */
if (!countFirstDate && result > 0)
--result;
gnc_sx_destroy_temporal_state (tmpState);
return result;
}
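/* Illustrative only (not part of the original file): counting, say, the
 * January 2024 instances of an existing SchedXaction `sx' is just a
 * matter of building the two GDate bounds for the function above:
 *
 *   GDate range_start, range_end;
 *   gint n;
 *   g_date_set_dmy(&range_start, 1, G_DATE_JANUARY, 2024);
 *   g_date_set_dmy(&range_end, 31, G_DATE_JANUARY, 2024);
 *   n = gnc_sx_get_num_occur_daterange(sx, &range_start, &range_end);
 */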
gboolean
xaccSchedXactionGetEnabled( const SchedXaction *sx )
{
return sx->enabled;
}
void
xaccSchedXactionSetEnabled( SchedXaction *sx, gboolean newEnabled)
{
gnc_sx_begin_edit(sx);
sx->enabled = newEnabled;
qof_instance_set_dirty(&sx->inst);
gnc_sx_commit_edit(sx);
}
void
xaccSchedXactionGetAutoCreate( const SchedXaction *sx,
gboolean *outAutoCreate,
gboolean *outNotify )
{
if (outAutoCreate != NULL)
*outAutoCreate = sx->autoCreateOption;
if (outNotify != NULL)
*outNotify = sx->autoCreateNotify;
return;
}
void
xaccSchedXactionSetAutoCreate( SchedXaction *sx,
gboolean newAutoCreate,
gboolean newNotify )
{
gnc_sx_begin_edit(sx);
sx->autoCreateOption = newAutoCreate;
sx->autoCreateNotify = newNotify;
qof_instance_set_dirty(&sx->inst);
gnc_sx_commit_edit(sx);
return;
}
gint
xaccSchedXactionGetAdvanceCreation( const SchedXaction *sx )
{
return sx->advanceCreateDays;
}
void
xaccSchedXactionSetAdvanceCreation( SchedXaction *sx, gint createDays )
{
gnc_sx_begin_edit(sx);
sx->advanceCreateDays = createDays;
qof_instance_set_dirty(&sx->inst);
gnc_sx_commit_edit(sx);
}
gint
xaccSchedXactionGetAdvanceReminder( const SchedXaction *sx )
{
return sx->advanceRemindDays;
}
void
xaccSchedXactionSetAdvanceReminder( SchedXaction *sx, gint reminderDays )
{
gnc_sx_begin_edit(sx);
sx->advanceRemindDays = reminderDays;
qof_instance_set_dirty(&sx->inst);
gnc_sx_commit_edit(sx);
}
GDate
xaccSchedXactionGetNextInstance (const SchedXaction *sx, SXTmpStateData *tsd)
{
GDate prev_occur, next_occur;
g_date_clear( &prev_occur, 1 );
if ( tsd != NULL )
prev_occur = tsd->last_date;
/* If prev_occur is in the "cleared" state and sx->start_date isn't, then
* we're at the beginning. We want to pretend prev_occur is the day before
* the start_date in case the start_date is today so that the SX will fire
* today. If start_date isn't valid either then the SX will fire anyway, no
* harm done. prev_occur cannot be before start_date either.
*/
if (g_date_valid (&sx->start_date) && (!g_date_valid ( &prev_occur ) || g_date_compare (&prev_occur, &sx->start_date)<0))
{
/* We must be at the beginning. */
prev_occur = sx->start_date;
g_date_subtract_days (&prev_occur, 1 );
}
recurrenceListNextInstance(sx->schedule, &prev_occur, &next_occur);
if ( xaccSchedXactionHasEndDate( sx ) )
{
const GDate *end_date = xaccSchedXactionGetEndDate( sx );
if ( g_date_compare( &next_occur, end_date ) > 0 )
{
g_date_clear( &next_occur, 1 );
}
}
else if ( xaccSchedXactionHasOccurDef( sx ) )
{
if ((tsd && tsd->num_occur_rem == 0) ||
(!tsd && sx->num_occurances_remain == 0 ))
{
g_date_clear( &next_occur, 1 );
}
}
return next_occur;
}
gint
gnc_sx_get_instance_count( const SchedXaction *sx, SXTmpStateData *stateData )
{
gint toRet = -1;
SXTmpStateData *tsd;
if ( stateData )
{
tsd = (SXTmpStateData*)stateData;
toRet = tsd->num_inst;
}
else
{
toRet = sx->instance_num;
}
return toRet;
}
void
gnc_sx_set_instance_count(SchedXaction *sx, gint instance_num)
{
g_return_if_fail(sx);
if (sx->instance_num == instance_num)
return;
gnc_sx_begin_edit(sx);
sx->instance_num = instance_num;
qof_instance_set_dirty(&sx->inst);
gnc_sx_commit_edit(sx);
}
GList *
xaccSchedXactionGetSplits( const SchedXaction *sx )
{
g_return_val_if_fail( sx, NULL );
return xaccAccountGetSplitList(sx->template_acct);
}
static Split *
pack_split_info (TTSplitInfo *s_info, Account *parent_acct,
Transaction *parent_trans, QofBook *book)
{
Split *split;
const gchar *credit_formula;
const gchar *debit_formula;
const GncGUID *acc_guid;
split = xaccMallocSplit(book);
xaccSplitSetMemo(split,
gnc_ttsplitinfo_get_memo(s_info));
/* Set split-action with gnc_set_num_action which is the same as
* xaccSplitSetAction with these arguments */
gnc_set_num_action(NULL, split, NULL,
gnc_ttsplitinfo_get_action(s_info));
xaccAccountInsertSplit(parent_acct,
split);
credit_formula = gnc_ttsplitinfo_get_credit_formula(s_info);
debit_formula = gnc_ttsplitinfo_get_debit_formula(s_info);
acc_guid = qof_entity_get_guid(QOF_INSTANCE(gnc_ttsplitinfo_get_account(s_info)));
qof_instance_set (QOF_INSTANCE (split),
"sx-credit-formula", credit_formula,
"sx-debit-formula", debit_formula,
"sx-account", acc_guid,
NULL);
return split;
}
void
xaccSchedXactionSetTemplateTrans(SchedXaction *sx, GList *t_t_list,
QofBook *book)
{
Transaction *new_trans;
TTInfo *tti;
TTSplitInfo *s_info;
Split *new_split;
GList *split_list;
g_return_if_fail (book);
/* delete any old transactions, if there are any */
delete_template_trans( sx );
for (; t_t_list != NULL; t_t_list = t_t_list->next)
{
tti = t_t_list->data;
new_trans = xaccMallocTransaction(book);
xaccTransBeginEdit(new_trans);
xaccTransSetDescription(new_trans,
gnc_ttinfo_get_description(tti));
xaccTransSetDatePostedSecsNormalized(new_trans, gnc_time (NULL));
/* Set tran-num with gnc_set_num_action which is the same as
* xaccTransSetNum with these arguments */
gnc_set_num_action(new_trans, NULL,
gnc_ttinfo_get_num(tti), NULL);
xaccTransSetNotes (new_trans, gnc_ttinfo_get_notes (tti));
xaccTransSetCurrency( new_trans,
gnc_ttinfo_get_currency(tti) );
for (split_list = gnc_ttinfo_get_template_splits(tti);
split_list;
split_list = split_list->next)
{
s_info = split_list->data;
new_split = pack_split_info(s_info, sx->template_acct,
new_trans, book);
xaccTransAppendSplit(new_trans, new_split);
}
xaccTransCommitEdit(new_trans);
}
}
SXTmpStateData*
gnc_sx_create_temporal_state(const SchedXaction *sx )
{
SXTmpStateData *toRet =
g_new0( SXTmpStateData, 1 );
if (g_date_valid (&(sx->last_date)))
toRet->last_date = sx->last_date;
else
g_date_set_dmy (&(toRet->last_date), 1, 1, 1970);
toRet->num_occur_rem = sx->num_occurances_remain;
toRet->num_inst = sx->instance_num;
return toRet;
}
void
gnc_sx_incr_temporal_state(const SchedXaction *sx, SXTmpStateData *tsd )
{
g_return_if_fail(tsd != NULL);
tsd->last_date = xaccSchedXactionGetNextInstance (sx, tsd);
if (xaccSchedXactionHasOccurDef (sx))
{
--tsd->num_occur_rem;
}
++tsd->num_inst;
}
void
gnc_sx_destroy_temporal_state (SXTmpStateData *tsd)
{
g_free(tsd);
}
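/* Illustrative only (not part of the original file): the temporal-state
 * helpers above act as a cursor over future instances without mutating
 * the SX itself.  Assuming `sx' is an existing SchedXaction, the next
 * few upcoming dates can be walked like this:
 *
 *   SXTmpStateData *tsd = gnc_sx_create_temporal_state(sx);
 *   gint i;
 *   for (i = 0; i < 5; i++)
 *   {
 *       GDate next = xaccSchedXactionGetNextInstance(sx, tsd);
 *       if (!g_date_valid(&next))
 *           break;  // an invalid date means the schedule has ended
 *       // ... use `next' ...
 *       gnc_sx_incr_temporal_state(sx, tsd);
 *   }
 *   gnc_sx_destroy_temporal_state(tsd);
 */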
SXTmpStateData*
gnc_sx_clone_temporal_state (SXTmpStateData *tsd)
{
SXTmpStateData *toRet;
toRet = g_memdup (tsd, sizeof (SXTmpStateData));
return toRet;
}
static gint
_temporal_state_data_cmp( gconstpointer a, gconstpointer b )
{
const SXTmpStateData *tsd_a = (SXTmpStateData*)a;
const SXTmpStateData *tsd_b = (SXTmpStateData*)b;
if ( !tsd_a && !tsd_b )
return 0;
if (tsd_a == tsd_b)
return 0;
if ( !tsd_a )
return 1;
if ( !tsd_b )
return -1;
return g_date_compare( &tsd_a->last_date,
&tsd_b->last_date );
}
/**
* Adds an instance to the deferred list of the SX. Added instances are
* added in (date-)sorted order.
**/
void
gnc_sx_add_defer_instance( SchedXaction *sx, void *deferStateData )
{
sx->deferredList = g_list_insert_sorted( sx->deferredList,
deferStateData,
_temporal_state_data_cmp );
}
/**
* Removes an instance from the deferred list. The saved SXTmpStateData existed
* for comparison only, so destroy it.
**/
void
gnc_sx_remove_defer_instance( SchedXaction *sx, void *deferStateData )
{
GList *found_by_value;
found_by_value = g_list_find_custom(
sx->deferredList, deferStateData, _temporal_state_data_cmp);
if (found_by_value == NULL)
{
g_warning("unable to find deferred instance");
return;
}
gnc_sx_destroy_temporal_state(found_by_value->data);
sx->deferredList = g_list_delete_link(sx->deferredList, found_by_value);
}
/**
* Returns the defer list from the SX; this is a (date-)sorted
* temporal-state-data instance list. The list should not be modified by the
* caller; use the gnc_sx_{add,remove}_defer_instance() functions to modify
* the list.
*
* @param sx Scheduled transaction
* @return Defer list which must not be modified by the caller
**/
GList*
gnc_sx_get_defer_instances( SchedXaction *sx )
{
return sx->deferredList;
}
static void
destroy_sx_on_book_close(QofInstance *ent, gpointer data)
{
SchedXaction* sx = GNC_SCHEDXACTION(ent);
gnc_sx_begin_edit(sx);
xaccSchedXactionDestroy(sx);
}
/**
* Destroys all SXes in the book because the book is being destroyed.
*
* @param book Book being destroyed
*/
static void
gnc_sx_book_end(QofBook* book)
{
QofCollection *col;
col = qof_book_get_collection(book, GNC_ID_SCHEDXACTION);
qof_collection_foreach(col, destroy_sx_on_book_close, NULL);
}
#ifdef _MSC_VER
/* MSVC compiler doesn't have C99 "designated initializers"
* so we wrap them in a macro that is empty on MSVC. */
# define DI(x) /* */
#else
# define DI(x) x
#endif
static QofObject SXDesc =
{
DI(.interface_version = ) QOF_OBJECT_VERSION,
DI(.e_type = ) GNC_SX_ID,
DI(.type_label = ) "Scheduled Transaction",
DI(.create = ) (gpointer)xaccSchedXactionMalloc,
DI(.book_begin = ) NULL,
DI(.book_end = ) gnc_sx_book_end,
DI(.is_dirty = ) qof_collection_is_dirty,
DI(.mark_clean = ) qof_collection_mark_clean,
DI(.foreach = ) qof_collection_foreach,
DI(.printable = ) NULL,
DI(.version_cmp = ) (int (*)(gpointer, gpointer)) qof_instance_version_cmp,
};
gboolean
SXRegister(void)
{
static QofParam params[] =
{
{
GNC_SX_NAME, QOF_TYPE_STRING, (QofAccessFunc)xaccSchedXactionGetName,
(QofSetterFunc)xaccSchedXactionSetName
},
{
GNC_SX_START_DATE, QOF_TYPE_DATE, (QofAccessFunc)xaccSchedXactionGetStartDateTT,
(QofSetterFunc)xaccSchedXactionSetStartDateTT
},
{
GNC_SX_LAST_DATE, QOF_TYPE_DATE, (QofAccessFunc)xaccSchedXactionGetLastOccurDateTT,
(QofSetterFunc)xaccSchedXactionSetLastOccurDateTT
},
{
GNC_SX_NUM_OCCUR, QOF_TYPE_INT64, (QofAccessFunc)xaccSchedXactionGetNumOccur,
(QofSetterFunc)xaccSchedXactionSetNumOccur
},
{
GNC_SX_REM_OCCUR, QOF_TYPE_INT64, (QofAccessFunc)xaccSchedXactionGetRemOccur,
(QofSetterFunc)xaccSchedXactionSetRemOccur
},
{ QOF_PARAM_BOOK, QOF_ID_BOOK, (QofAccessFunc)qof_instance_get_book, NULL },
{ QOF_PARAM_GUID, QOF_TYPE_GUID, (QofAccessFunc)qof_instance_get_guid, NULL },
{ NULL },
};
qof_class_register(GNC_SX_ID, NULL, params);
return qof_object_register(&SXDesc);
}
| {
"language": "C"
} |
/*
* Error status watching mib group
*/
#ifndef _MIBGROUP_ERRORMIB_H
#define _MIBGROUP_ERRORMIB_H
void init_errormib(void);
config_require(util_funcs)
void setPerrorstatus (char *);
void seterrorstatus (char *, int);
extern FindVarMethod var_extensible_errors;
#include "mibdefs.h"
#endif /* _MIBGROUP_ERRORMIB_H */
| {
"language": "C"
} |
#ifndef KernelMemory_h
#define KernelMemory_h
#include <mach/mach.h>
#include <stdbool.h>
/***** mach_vm.h *****/
kern_return_t mach_vm_read(
vm_map_t target_task,
mach_vm_address_t address,
mach_vm_size_t size,
vm_offset_t* data,
mach_msg_type_number_t* dataCnt);
kern_return_t mach_vm_write(
vm_map_t target_task,
mach_vm_address_t address,
vm_offset_t data,
mach_msg_type_number_t dataCnt);
kern_return_t mach_vm_read_overwrite(
vm_map_t target_task,
mach_vm_address_t address,
mach_vm_size_t size,
mach_vm_address_t data,
mach_vm_size_t* outsize);
kern_return_t mach_vm_allocate(
vm_map_t target,
mach_vm_address_t* address,
mach_vm_size_t size,
int flags);
kern_return_t mach_vm_deallocate(
vm_map_t target,
mach_vm_address_t address,
mach_vm_size_t size);
kern_return_t mach_vm_protect(
vm_map_t target_task,
mach_vm_address_t address,
mach_vm_size_t size,
boolean_t set_maximum,
vm_prot_t new_protection);
extern mach_port_t tfp0;
size_t kread(uint64_t where, void* p, size_t size);
size_t kwrite(uint64_t where, const void* p, size_t size);
#define rk32(kaddr) ReadKernel32(kaddr)
#define rk64(kaddr) ReadKernel64(kaddr)
uint32_t ReadKernel32(uint64_t kaddr);
uint64_t ReadKernel64(uint64_t kaddr);
#define wk32(kaddr, val) WriteKernel32(kaddr, val)
#define wk64(kaddr, val) WriteKernel64(kaddr, val)
void WriteKernel32(uint64_t kaddr, uint32_t val);
void WriteKernel64(uint64_t kaddr, uint64_t val);
bool wkbuffer(uint64_t kaddr, void* buffer, size_t length);
bool rkbuffer(uint64_t kaddr, void* buffer, size_t length);
void kmemcpy(uint64_t dest, uint64_t src, uint32_t length);
void kmem_protect(uint64_t kaddr, uint32_t size, int prot);
uint64_t kmem_alloc(uint64_t size);
uint64_t kmem_alloc_wired(uint64_t size);
void kmem_free(uint64_t kaddr, uint64_t size);
void prepare_rk_via_kmem_read_port(mach_port_t port);
void prepare_rwk_via_tfp0(mach_port_t port);
void prepare_for_rw_with_fake_tfp0(mach_port_t fake_tfp0);
// query whether kmem read or write is present
bool have_kmem_read(void);
bool have_kmem_write(void);
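/* Illustrative only (not part of the original header): once a kernel
 * read/write primitive has been prepared (for example with
 * prepare_rwk_via_tfp0()), callers typically guard their accesses:
 *
 *   if (have_kmem_read()) {
 *       uint64_t value = rk64(some_kernel_address);  // 64-bit read
 *       uint8_t buf[16];
 *       rkbuffer(some_kernel_address, buf, sizeof(buf));
 *   }
 *
 * `some_kernel_address' is a placeholder; obtaining a valid kernel
 * address is outside the scope of this header.
 */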
#endif
| {
"language": "C"
} |
/* ssl/ssl23.h */
/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
* All rights reserved.
*
* This package is an SSL implementation written
* by Eric Young (eay@cryptsoft.com).
* The implementation was written so as to conform with Netscapes SSL.
*
* This library is free for commercial and non-commercial use as long as
* the following conditions are aheared to. The following conditions
* apply to all code found in this distribution, be it the RC4, RSA,
* lhash, DES, etc., code; not just the SSL code. The SSL documentation
* included with this distribution is covered by the same copyright terms
* except that the holder is Tim Hudson (tjh@cryptsoft.com).
*
* Copyright remains Eric Young's, and as such any Copyright notices in
* the code are not to be removed.
* If this package is used in a product, Eric Young should be given attribution
* as the author of the parts of the library used.
* This can be in the form of a textual message at program startup or
* in documentation (online or textual) provided with the package.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* "This product includes cryptographic software written by
* Eric Young (eay@cryptsoft.com)"
* The word 'cryptographic' can be left out if the rouines from the library
* being used are not cryptographic related :-).
* 4. If you include any Windows specific code (or a derivative thereof) from
* the apps directory (application code) you must include an acknowledgement:
* "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
*
* THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* The licence and distribution terms for any publically available version or
* derivative of this code cannot be changed. i.e. this code cannot simply be
* copied and put under another distribution licence
* [including the GNU Public Licence.]
*/
#ifndef HEADER_SSL23_H
# define HEADER_SSL23_H
#ifdef __cplusplus
extern "C" {
#endif
/*
* client
*/
/* write to server */
# define SSL23_ST_CW_CLNT_HELLO_A (0x210|SSL_ST_CONNECT)
# define SSL23_ST_CW_CLNT_HELLO_B (0x211|SSL_ST_CONNECT)
/* read from server */
# define SSL23_ST_CR_SRVR_HELLO_A (0x220|SSL_ST_CONNECT)
# define SSL23_ST_CR_SRVR_HELLO_B (0x221|SSL_ST_CONNECT)
/* server */
/* read from client */
# define SSL23_ST_SR_CLNT_HELLO_A (0x210|SSL_ST_ACCEPT)
# define SSL23_ST_SR_CLNT_HELLO_B (0x211|SSL_ST_ACCEPT)
#ifdef __cplusplus
}
#endif
#endif
| {
"language": "C"
} |
/*
* Copyright (c) 1992, 1993
* The Regents of the University of California. All rights reserved.
*
* This software was developed by the Computer Systems Engineering group
* at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
* contributed to Berkeley.
*
* All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the University of
* California, Lawrence Berkeley Laboratory.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the University of
* California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)ieee.h 8.1 (Berkeley) 6/11/93
*
* from: $Header: ieee.h,v 1.7 92/11/26 02:04:37 torek Exp $
*/
/*
* ieee.h defines the machine-dependent layout of the machine's IEEE
* floating point. It does *not* define (yet?) any of the rounding
* mode bits, exceptions, and so forth.
*/
/*
* Define the number of bits in each fraction and exponent.
*
* Note that 1.0 x 2^k == 0.1 x 2^(k+1) and that denorms are represented
* as fractions that look like 0.fffff x 2^(-exp_bias+1).  This means that
* the number 0.10000 x 2^-126, for instance, is the same as the normalized
* float 1.0 x 2^-127.  Thus, to represent 2^-128, we need one leading zero
* in the fraction; to represent 2^-129, we need two, and so on.  This
* implies that the smallest denormalized number is 2^(-exp_bias-fracbits+1)
* for whichever format we are talking about: for single precision, for
* instance, we get .00000000000000000000001 x 2^-126, or 1.0 x 2^-149, and
* -149 == -127 - 23 + 1.
*/
#define SNG_EXPBITS 8
#define SNG_FRACBITS 23
#define DBL_EXPBITS 11
#define DBL_FRACBITS 52
#ifdef notyet
#define E80_EXPBITS 15
#define E80_FRACBITS 64
#endif
#define EXT_EXPBITS 15
#define EXT_FRACBITS 112
struct ieee_single {
u_int sng_sign:1;
u_int sng_exp:8;
u_int sng_frac:23;
};
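/* Illustrative only (not part of the original header): on a target whose
 * bitfield layout matches the hardware representation, the struct can be
 * overlaid on a float through a union to pick the fields apart:
 *
 *   union { float f; struct ieee_single s; } u;
 *   u.f = -1.5f;
 *   // u.s.sng_sign == 1, u.s.sng_exp == 127 (biased), u.s.sng_frac == 0x400000
 *
 * Whether the fields line up this way depends on endianness and bitfield
 * ordering, which is exactly why this header is machine-dependent.
 */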
struct ieee_double {
u_int dbl_sign:1;
u_int dbl_exp:11;
u_int dbl_frach:20;
u_int dbl_fracl;
};
struct ieee_ext {
u_int ext_sign:1;
u_int ext_exp:15;
u_int ext_frach:16;
u_int ext_frachm;
u_int ext_fraclm;
u_int ext_fracl;
};
/*
* Floats whose exponent is in [1..INFNAN) (of whatever type) are
* `normal'. Floats whose exponent is INFNAN are either Inf or NaN.
* Floats whose exponent is zero are either zero (iff all fraction
* bits are zero) or subnormal values.
*
* A NaN is a `signalling NaN' if its QUIETNAN bit is clear in its
* high fraction; if the bit is set, it is a `quiet NaN'.
*/
#define SNG_EXP_INFNAN 255
#define DBL_EXP_INFNAN 2047
#define EXT_EXP_INFNAN 32767
#if 0
#define SNG_QUIETNAN (1 << 22)
#define DBL_QUIETNAN (1 << 19)
#define EXT_QUIETNAN (1 << 15)
#endif
/*
* Exponent biases.
*/
#define SNG_EXP_BIAS 127
#define DBL_EXP_BIAS 1023
#define EXT_EXP_BIAS 16383
| {
"language": "C"
} |
/*
* Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
* Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU General Public License version 2.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/hash.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/bit_spinlock.h>
#include <linux/percpu.h>
#include <linux/list_sort.h>
#include <linux/lockref.h>
#include <linux/rhashtable.h>
#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"
#include "bmap.h"
#define CREATE_TRACE_POINTS
#include "trace_gfs2.h"
struct gfs2_glock_iter {
struct gfs2_sbd *sdp; /* incore superblock */
struct rhashtable_iter hti; /* rhashtable iterator */
struct gfs2_glock *gl; /* current glock struct */
loff_t last_pos; /* last position */
};
typedef void (*glock_examiner) (struct gfs2_glock * gl);
static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
static struct dentry *gfs2_root;
static struct workqueue_struct *glock_workqueue;
struct workqueue_struct *gfs2_delete_workqueue;
static LIST_HEAD(lru_list);
static atomic_t lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(lru_lock);
#define GFS2_GL_HASH_SHIFT 15
#define GFS2_GL_HASH_SIZE BIT(GFS2_GL_HASH_SHIFT)
static const struct rhashtable_params ht_parms = {
.nelem_hint = GFS2_GL_HASH_SIZE * 3 / 4,
.key_len = offsetofend(struct lm_lockname, ln_type),
.key_offset = offsetof(struct gfs2_glock, gl_name),
.head_offset = offsetof(struct gfs2_glock, gl_node),
};
static struct rhashtable gl_hash_table;
#define GLOCK_WAIT_TABLE_BITS 12
#define GLOCK_WAIT_TABLE_SIZE (1 << GLOCK_WAIT_TABLE_BITS)
static wait_queue_head_t glock_wait_table[GLOCK_WAIT_TABLE_SIZE] __cacheline_aligned;
struct wait_glock_queue {
struct lm_lockname *name;
wait_queue_entry_t wait;
};
static int glock_wake_function(wait_queue_entry_t *wait, unsigned int mode,
int sync, void *key)
{
struct wait_glock_queue *wait_glock =
container_of(wait, struct wait_glock_queue, wait);
struct lm_lockname *wait_name = wait_glock->name;
struct lm_lockname *wake_name = key;
if (wake_name->ln_sbd != wait_name->ln_sbd ||
wake_name->ln_number != wait_name->ln_number ||
wake_name->ln_type != wait_name->ln_type)
return 0;
return autoremove_wake_function(wait, mode, sync, key);
}
static wait_queue_head_t *glock_waitqueue(struct lm_lockname *name)
{
u32 hash = jhash2((u32 *)name, sizeof(*name) / 4, 0);
return glock_wait_table + hash_32(hash, GLOCK_WAIT_TABLE_BITS);
}
/**
* wake_up_glock - Wake up waiters on a glock
* @gl: the glock
*/
static void wake_up_glock(struct gfs2_glock *gl)
{
wait_queue_head_t *wq = glock_waitqueue(&gl->gl_name);
if (waitqueue_active(wq))
__wake_up(wq, TASK_NORMAL, 1, &gl->gl_name);
}
static void gfs2_glock_dealloc(struct rcu_head *rcu)
{
struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);
if (gl->gl_ops->go_flags & GLOF_ASPACE) {
kmem_cache_free(gfs2_glock_aspace_cachep, gl);
} else {
kfree(gl->gl_lksb.sb_lvbptr);
kmem_cache_free(gfs2_glock_cachep, gl);
}
}
void gfs2_glock_free(struct gfs2_glock *gl)
{
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms);
smp_mb();
wake_up_glock(gl);
call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
if (atomic_dec_and_test(&sdp->sd_glock_disposal))
wake_up(&sdp->sd_glock_wait);
}
/**
* gfs2_glock_hold() - increment reference count on glock
* @gl: The glock to hold
*
*/
void gfs2_glock_hold(struct gfs2_glock *gl)
{
GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
lockref_get(&gl->gl_lockref);
}
/**
* demote_ok - Check to see if it's ok to unlock a glock
* @gl: the glock
*
* Returns: 1 if it's ok
*/
static int demote_ok(const struct gfs2_glock *gl)
{
const struct gfs2_glock_operations *glops = gl->gl_ops;
if (gl->gl_state == LM_ST_UNLOCKED)
return 0;
if (!list_empty(&gl->gl_holders))
return 0;
if (glops->go_demote_ok)
return glops->go_demote_ok(gl);
return 1;
}
void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
{
spin_lock(&lru_lock);
if (!list_empty(&gl->gl_lru))
list_del_init(&gl->gl_lru);
else
atomic_inc(&lru_count);
list_add_tail(&gl->gl_lru, &lru_list);
set_bit(GLF_LRU, &gl->gl_flags);
spin_unlock(&lru_lock);
}
static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
{
if (!(gl->gl_ops->go_flags & GLOF_LRU))
return;
spin_lock(&lru_lock);
if (!list_empty(&gl->gl_lru)) {
list_del_init(&gl->gl_lru);
atomic_dec(&lru_count);
clear_bit(GLF_LRU, &gl->gl_flags);
}
spin_unlock(&lru_lock);
}
/*
* Enqueue the glock on the work queue. Passes one glock reference on to the
* work queue.
*/
static void __gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
if (!queue_delayed_work(glock_workqueue, &gl->gl_work, delay)) {
/*
* We are holding the lockref spinlock, and the work was still
* queued above. The queued work (glock_work_func) takes that
* spinlock before dropping its glock reference(s), so it
* cannot have dropped them in the meantime.
*/
GLOCK_BUG_ON(gl, gl->gl_lockref.count < 2);
gl->gl_lockref.count--;
}
}
static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
spin_lock(&gl->gl_lockref.lock);
__gfs2_glock_queue_work(gl, delay);
spin_unlock(&gl->gl_lockref.lock);
}
static void __gfs2_glock_put(struct gfs2_glock *gl)
{
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
struct address_space *mapping = gfs2_glock2aspace(gl);
lockref_mark_dead(&gl->gl_lockref);
gfs2_glock_remove_from_lru(gl);
spin_unlock(&gl->gl_lockref.lock);
GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
trace_gfs2_glock_put(gl);
sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
}
/*
* Cause the glock to be put in work queue context.
*/
void gfs2_glock_queue_put(struct gfs2_glock *gl)
{
gfs2_glock_queue_work(gl, 0);
}
/**
* gfs2_glock_put() - Decrement reference count on glock
* @gl: The glock to put
*
*/
void gfs2_glock_put(struct gfs2_glock *gl)
{
if (lockref_put_or_lock(&gl->gl_lockref))
return;
__gfs2_glock_put(gl);
}
/**
* may_grant - check if it's ok to grant a new lock
* @gl: The glock
* @gh: The lock request which we wish to grant
*
* Returns: true if it's ok to grant the lock
*/
static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
{
const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);
if ((gh->gh_state == LM_ST_EXCLUSIVE ||
gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
return 0;
if (gl->gl_state == gh->gh_state)
return 1;
if (gh->gh_flags & GL_EXACT)
return 0;
if (gl->gl_state == LM_ST_EXCLUSIVE) {
if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
return 1;
if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
return 1;
}
if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
return 1;
return 0;
}
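/* Summarizing the checks above: if either this request or the head of the
 * queue wants LM_ST_EXCLUSIVE, only the head entry may proceed; a request
 * matching the current glock state is compatible; GL_EXACT forbids
 * anything but an exact match; SH/DF requests may share an EX glock when
 * the head entry asks for the same mode; and LM_FLAG_ANY is satisfied by
 * any locked state. */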
static void gfs2_holder_wake(struct gfs2_holder *gh)
{
clear_bit(HIF_WAIT, &gh->gh_iflags);
smp_mb__after_atomic();
wake_up_bit(&gh->gh_iflags, HIF_WAIT);
}
/**
* do_error - Something unexpected has happened during a lock request
*
*/
static void do_error(struct gfs2_glock *gl, const int ret)
{
struct gfs2_holder *gh, *tmp;
list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
if (test_bit(HIF_HOLDER, &gh->gh_iflags))
continue;
if (ret & LM_OUT_ERROR)
gh->gh_error = -EIO;
else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
gh->gh_error = GLR_TRYFAILED;
else
continue;
list_del_init(&gh->gh_list);
trace_gfs2_glock_queue(gh, 0);
gfs2_holder_wake(gh);
}
}
/**
* do_promote - promote as many requests as possible on the current queue
* @gl: The glock
*
* Returns: 1 if there is a blocked holder at the head of the list, or 2
* if a type specific operation is underway.
*/
static int do_promote(struct gfs2_glock *gl)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
const struct gfs2_glock_operations *glops = gl->gl_ops;
struct gfs2_holder *gh, *tmp;
int ret;
restart:
list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
if (test_bit(HIF_HOLDER, &gh->gh_iflags))
continue;
if (may_grant(gl, gh)) {
if (gh->gh_list.prev == &gl->gl_holders &&
glops->go_lock) {
spin_unlock(&gl->gl_lockref.lock);
/* FIXME: eliminate this eventually */
ret = glops->go_lock(gh);
spin_lock(&gl->gl_lockref.lock);
if (ret) {
if (ret == 1)
return 2;
gh->gh_error = ret;
list_del_init(&gh->gh_list);
trace_gfs2_glock_queue(gh, 0);
gfs2_holder_wake(gh);
goto restart;
}
set_bit(HIF_HOLDER, &gh->gh_iflags);
trace_gfs2_promote(gh, 1);
gfs2_holder_wake(gh);
goto restart;
}
set_bit(HIF_HOLDER, &gh->gh_iflags);
trace_gfs2_promote(gh, 0);
gfs2_holder_wake(gh);
continue;
}
if (gh->gh_list.prev == &gl->gl_holders)
return 1;
do_error(gl, 0);
break;
}
return 0;
}
/**
* find_first_waiter - find the first gh that's waiting for the glock
* @gl: the glock
*/
static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
{
struct gfs2_holder *gh;
list_for_each_entry(gh, &gl->gl_holders, gh_list) {
if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
return gh;
}
return NULL;
}
/**
* state_change - record that the glock is now in a different state
* @gl: the glock
* @new_state: the new state
*
*/
static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
int held1, held2;
held1 = (gl->gl_state != LM_ST_UNLOCKED);
held2 = (new_state != LM_ST_UNLOCKED);
if (held1 != held2) {
GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
if (held2)
gl->gl_lockref.count++;
else
gl->gl_lockref.count--;
}
if (held1 && held2 && list_empty(&gl->gl_holders))
clear_bit(GLF_QUEUED, &gl->gl_flags);
if (new_state != gl->gl_target)
/* shorten our minimum hold time */
gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR,
GL_GLOCK_MIN_HOLD);
gl->gl_state = new_state;
gl->gl_tchange = jiffies;
}
static void gfs2_demote_wake(struct gfs2_glock *gl)
{
gl->gl_demote_state = LM_ST_EXCLUSIVE;
clear_bit(GLF_DEMOTE, &gl->gl_flags);
smp_mb__after_atomic();
wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
}
/**
* finish_xmote - The DLM has replied to one of our lock requests
* @gl: The glock
* @ret: The status from the DLM
*
*/
static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
{
const struct gfs2_glock_operations *glops = gl->gl_ops;
struct gfs2_holder *gh;
unsigned state = ret & LM_OUT_ST_MASK;
int rv;
spin_lock(&gl->gl_lockref.lock);
trace_gfs2_glock_state_change(gl, state);
state_change(gl, state);
gh = find_first_waiter(gl);
/* Demote to UN request arrived during demote to SH or DF */
if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
gl->gl_target = LM_ST_UNLOCKED;
/* Check for state != intended state */
if (unlikely(state != gl->gl_target)) {
if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
/* move to back of queue and try next entry */
if (ret & LM_OUT_CANCELED) {
if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
list_move_tail(&gh->gh_list, &gl->gl_holders);
gh = find_first_waiter(gl);
gl->gl_target = gh->gh_state;
goto retry;
}
/* Some error or failed "try lock" - report it */
if ((ret & LM_OUT_ERROR) ||
(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
gl->gl_target = gl->gl_state;
do_error(gl, ret);
goto out;
}
}
switch(state) {
/* Unlocked due to conversion deadlock, try again */
case LM_ST_UNLOCKED:
retry:
do_xmote(gl, gh, gl->gl_target);
break;
/* Conversion fails, unlock and try again */
case LM_ST_SHARED:
case LM_ST_DEFERRED:
do_xmote(gl, gh, LM_ST_UNLOCKED);
break;
default: /* Everything else */
pr_err("wanted %u got %u\n", gl->gl_target, state);
GLOCK_BUG_ON(gl, 1);
}
spin_unlock(&gl->gl_lockref.lock);
return;
}
/* Fast path - we got what we asked for */
if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
gfs2_demote_wake(gl);
if (state != LM_ST_UNLOCKED) {
if (glops->go_xmote_bh) {
spin_unlock(&gl->gl_lockref.lock);
rv = glops->go_xmote_bh(gl, gh);
spin_lock(&gl->gl_lockref.lock);
if (rv) {
do_error(gl, rv);
goto out;
}
}
rv = do_promote(gl);
if (rv == 2)
goto out_locked;
}
out:
clear_bit(GLF_LOCK, &gl->gl_flags);
out_locked:
spin_unlock(&gl->gl_lockref.lock);
}
/**
* do_xmote - Calls the DLM to change the state of a lock
* @gl: The lock state
* @gh: The holder (only for promotes)
* @target: The target lock state
*
*/
static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
const struct gfs2_glock_operations *glops = gl->gl_ops;
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
unsigned int lck_flags = (unsigned int)(gh ? gh->gh_flags : 0);
int ret;
if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) &&
target != LM_ST_UNLOCKED)
return;
lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
LM_FLAG_PRIORITY);
GLOCK_BUG_ON(gl, gl->gl_state == target);
GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
glops->go_inval) {
set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
do_error(gl, 0); /* Fail queued try locks */
}
gl->gl_req = target;
set_bit(GLF_BLOCKING, &gl->gl_flags);
if ((gl->gl_req == LM_ST_UNLOCKED) ||
(gl->gl_state == LM_ST_EXCLUSIVE) ||
(lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
clear_bit(GLF_BLOCKING, &gl->gl_flags);
spin_unlock(&gl->gl_lockref.lock);
if (glops->go_sync)
glops->go_sync(gl);
if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
gfs2_glock_hold(gl);
if (sdp->sd_lockstruct.ls_ops->lm_lock) {
/* lock_dlm */
ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED &&
target == LM_ST_UNLOCKED &&
test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags)) {
finish_xmote(gl, target);
gfs2_glock_queue_work(gl, 0);
}
else if (ret) {
pr_err("lm_lock ret %d\n", ret);
GLOCK_BUG_ON(gl, !test_bit(SDF_SHUTDOWN,
&sdp->sd_flags));
}
} else { /* lock_nolock */
finish_xmote(gl, target);
gfs2_glock_queue_work(gl, 0);
}
spin_lock(&gl->gl_lockref.lock);
}
/**
* find_first_holder - find the first "holder" gh
* @gl: the glock
*/
static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
{
struct gfs2_holder *gh;
if (!list_empty(&gl->gl_holders)) {
gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
if (test_bit(HIF_HOLDER, &gh->gh_iflags))
return gh;
}
return NULL;
}
/**
* run_queue - do all outstanding tasks related to a glock
* @gl: The glock in question
* @nonblock: True if we must not block in run_queue
*
*/
static void run_queue(struct gfs2_glock *gl, const int nonblock)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
struct gfs2_holder *gh = NULL;
int ret;
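/*
 * GLF_LOCK serialises processing of the glock state machine; if it is
 * already set, whichever context holds it is responsible for running
 * the queue, so there is nothing for us to do here.
 */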
if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
return;
GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));
if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
gl->gl_demote_state != gl->gl_state) {
if (find_first_holder(gl))
goto out_unlock;
if (nonblock)
goto out_sched;
set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
gl->gl_target = gl->gl_demote_state;
} else {
if (test_bit(GLF_DEMOTE, &gl->gl_flags))
gfs2_demote_wake(gl);
ret = do_promote(gl);
if (ret == 0)
goto out_unlock;
if (ret == 2)
goto out;
gh = find_first_waiter(gl);
gl->gl_target = gh->gh_state;
if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
do_error(gl, 0); /* Fail queued try locks */
}
do_xmote(gl, gh, gl->gl_target);
out:
return;
out_sched:
clear_bit(GLF_LOCK, &gl->gl_flags);
smp_mb__after_atomic();
gl->gl_lockref.count++;
__gfs2_glock_queue_work(gl, 0);
return;
out_unlock:
clear_bit(GLF_LOCK, &gl->gl_flags);
smp_mb__after_atomic();
return;
}
static void delete_work_func(struct work_struct *work)
{
struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
struct inode *inode;
u64 no_addr = gl->gl_name.ln_number;
/* If someone's using this glock to create a new dinode, the block must
have been freed by another node, then re-used, in which case our
iopen callback is too late after the fact. Ignore it. */
if (test_bit(GLF_INODE_CREATING, &gl->gl_flags))
goto out;
inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
if (inode && !IS_ERR(inode)) {
d_prune_aliases(inode);
iput(inode);
}
out:
gfs2_glock_put(gl);
}
static void glock_work_func(struct work_struct *work)
{
unsigned long delay = 0;
struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
unsigned int drop_refs = 1;
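/*
 * The work item itself holds one glock reference; if a DLM reply was
 * pending, also drop the extra reference that was taken when the reply
 * was queued.
 */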
if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
finish_xmote(gl, gl->gl_reply);
drop_refs++;
}
spin_lock(&gl->gl_lockref.lock);
if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
gl->gl_state != LM_ST_UNLOCKED &&
gl->gl_demote_state != LM_ST_EXCLUSIVE) {
unsigned long holdtime, now = jiffies;
holdtime = gl->gl_tchange + gl->gl_hold_time;
if (time_before(now, holdtime))
delay = holdtime - now;
if (!delay) {
clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
set_bit(GLF_DEMOTE, &gl->gl_flags);
}
}
run_queue(gl, 0);
if (delay) {
/* Keep one glock reference for the work we requeue. */
drop_refs--;
if (gl->gl_name.ln_type != LM_TYPE_INODE)
delay = 0;
__gfs2_glock_queue_work(gl, delay);
}
/*
* Drop the remaining glock references manually here. (Mind that
* __gfs2_glock_queue_work depends on the lockref spinlock being held
* here as well.)
*/
gl->gl_lockref.count -= drop_refs;
if (!gl->gl_lockref.count) {
__gfs2_glock_put(gl);
return;
}
spin_unlock(&gl->gl_lockref.lock);
}
static struct gfs2_glock *find_insert_glock(struct lm_lockname *name,
struct gfs2_glock *new)
{
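/*
 * Look the glock up by name, or insert @new if nothing is found. If the
 * glock we find is dead (i.e. in the process of being freed), wait for
 * it to disappear and retry, so callers never see a dying glock.
 */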
struct wait_glock_queue wait;
wait_queue_head_t *wq = glock_waitqueue(name);
struct gfs2_glock *gl;
wait.name = name;
init_wait(&wait.wait);
wait.wait.func = glock_wake_function;
again:
prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
rcu_read_lock();
if (new) {
gl = rhashtable_lookup_get_insert_fast(&gl_hash_table,
&new->gl_node, ht_parms);
if (IS_ERR(gl))
goto out;
} else {
gl = rhashtable_lookup_fast(&gl_hash_table,
name, ht_parms);
}
if (gl && !lockref_get_not_dead(&gl->gl_lockref)) {
rcu_read_unlock();
schedule();
goto again;
}
out:
rcu_read_unlock();
finish_wait(wq, &wait.wait);
return gl;
}
/**
* gfs2_glock_get() - Get a glock, or create one if one doesn't exist
* @sdp: The GFS2 superblock
* @number: the lock number
* @glops: The glock_operations to use
* @create: If 0, don't create the glock if it doesn't exist
* @glp: the glock is returned here
*
* This does not lock a glock, just finds/creates structures for one.
*
* Returns: errno
*/
int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
const struct gfs2_glock_operations *glops, int create,
struct gfs2_glock **glp)
{
struct super_block *s = sdp->sd_vfs;
struct lm_lockname name = { .ln_number = number,
.ln_type = glops->go_type,
.ln_sbd = sdp };
struct gfs2_glock *gl, *tmp;
struct address_space *mapping;
struct kmem_cache *cachep;
int ret = 0;
gl = find_insert_glock(&name, NULL);
if (gl) {
*glp = gl;
return 0;
}
if (!create)
return -ENOENT;
if (glops->go_flags & GLOF_ASPACE)
cachep = gfs2_glock_aspace_cachep;
else
cachep = gfs2_glock_cachep;
gl = kmem_cache_alloc(cachep, GFP_NOFS);
if (!gl)
return -ENOMEM;
memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
if (glops->go_flags & GLOF_LVB) {
gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_NOFS);
if (!gl->gl_lksb.sb_lvbptr) {
kmem_cache_free(cachep, gl);
return -ENOMEM;
}
}
atomic_inc(&sdp->sd_glock_disposal);
gl->gl_node.next = NULL;
gl->gl_flags = 0;
gl->gl_name = name;
gl->gl_lockref.count = 1;
gl->gl_state = LM_ST_UNLOCKED;
gl->gl_target = LM_ST_UNLOCKED;
gl->gl_demote_state = LM_ST_EXCLUSIVE;
gl->gl_ops = glops;
gl->gl_dstamp = 0;
preempt_disable();
/* We use the global stats to estimate the initial per-glock stats */
gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type];
preempt_enable();
gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0;
gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0;
gl->gl_tchange = jiffies;
gl->gl_object = NULL;
gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
INIT_WORK(&gl->gl_delete, delete_work_func);
mapping = gfs2_glock2aspace(gl);
if (mapping) {
mapping->a_ops = &gfs2_meta_aops;
mapping->host = s->s_bdev->bd_inode;
mapping->flags = 0;
mapping_set_gfp_mask(mapping, GFP_NOFS);
mapping->private_data = NULL;
mapping->writeback_index = 0;
}
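/*
 * Try to publish the new glock. If another thread raced us and inserted
 * a glock for the same name first, return that one instead and free
 * ours below.
 */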
tmp = find_insert_glock(&name, gl);
if (!tmp) {
*glp = gl;
goto out;
}
if (IS_ERR(tmp)) {
ret = PTR_ERR(tmp);
goto out_free;
}
*glp = tmp;
out_free:
kfree(gl->gl_lksb.sb_lvbptr);
kmem_cache_free(cachep, gl);
atomic_dec(&sdp->sd_glock_disposal);
out:
return ret;
}
/**
* gfs2_holder_init - initialize a struct gfs2_holder in the default way
* @gl: the glock
* @state: the state we're requesting
* @flags: the modifier flags
* @gh: the holder structure
*
*/
void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, u16 flags,
struct gfs2_holder *gh)
{
INIT_LIST_HEAD(&gh->gh_list);
gh->gh_gl = gl;
gh->gh_ip = _RET_IP_;
gh->gh_owner_pid = get_pid(task_pid(current));
gh->gh_state = state;
gh->gh_flags = flags;
gh->gh_error = 0;
gh->gh_iflags = 0;
gfs2_glock_hold(gl);
}
/**
* gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
* @state: the state we're requesting
* @flags: the modifier flags
* @gh: the holder structure
*
* Don't mess with the glock.
*
*/
void gfs2_holder_reinit(unsigned int state, u16 flags, struct gfs2_holder *gh)
{
gh->gh_state = state;
gh->gh_flags = flags;
gh->gh_iflags = 0;
gh->gh_ip = _RET_IP_;
put_pid(gh->gh_owner_pid);
gh->gh_owner_pid = get_pid(task_pid(current));
}
/**
* gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
* @gh: the holder structure
*
*/
void gfs2_holder_uninit(struct gfs2_holder *gh)
{
put_pid(gh->gh_owner_pid);
gfs2_glock_put(gh->gh_gl);
gfs2_holder_mark_uninitialized(gh);
gh->gh_ip = 0;
}
/**
* gfs2_glock_wait - wait on a glock acquisition
* @gh: the glock holder
*
* Returns: 0 on success
*/
int gfs2_glock_wait(struct gfs2_holder *gh)
{
unsigned long time1 = jiffies;
might_sleep();
wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE);
if (time_after(jiffies, time1 + HZ)) /* have we waited > a second? */
/* Lengthen the minimum hold time. */
gh->gh_gl->gl_hold_time = min(gh->gh_gl->gl_hold_time +
GL_GLOCK_HOLD_INCR,
GL_GLOCK_MAX_HOLD);
return gh->gh_error;
}
/**
* handle_callback - process a demote request
* @gl: the glock
* @state: the state the caller wants us to change to
* @delay: if non-zero, delay the demote rather than acting on it immediately
* @remote: true if the request came from another node
*
* There are only two requests that we are going to see in actual
* practice: LM_ST_SHARED and LM_ST_UNLOCKED
*/
static void handle_callback(struct gfs2_glock *gl, unsigned int state,
unsigned long delay, bool remote)
{
int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;
set_bit(bit, &gl->gl_flags);
if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
gl->gl_demote_state = state;
gl->gl_demote_time = jiffies;
} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
gl->gl_demote_state != state) {
gl->gl_demote_state = LM_ST_UNLOCKED;
}
if (gl->gl_ops->go_callback)
gl->gl_ops->go_callback(gl, remote);
trace_gfs2_demote_rq(gl, remote);
}
void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
va_start(args, fmt);
if (seq) {
seq_vprintf(seq, fmt, args);
} else {
vaf.fmt = fmt;
vaf.va = &args;
pr_err("%pV", &vaf);
}
va_end(args);
}
/**
* add_to_queue - Add a holder to the wait queue (but look for recursion)
* @gh: the holder structure to add
*
* Eventually we should move the recursive locking trap to a
* debugging option or something like that. This is the fast
* path and needs to have the minimum number of distractions.
*
*/
static inline void add_to_queue(struct gfs2_holder *gh)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
struct gfs2_glock *gl = gh->gh_gl;
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
struct list_head *insert_pt = NULL;
struct gfs2_holder *gh2;
int try_futile = 0;
BUG_ON(gh->gh_owner_pid == NULL);
if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
BUG();
if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
if (test_bit(GLF_LOCK, &gl->gl_flags))
try_futile = !may_grant(gl, gh);
if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
goto fail;
}
list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
(gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
goto trap_recursive;
if (try_futile &&
!(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
fail:
gh->gh_error = GLR_TRYFAILED;
gfs2_holder_wake(gh);
return;
}
if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
continue;
if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
insert_pt = &gh2->gh_list;
}
set_bit(GLF_QUEUED, &gl->gl_flags);
trace_gfs2_glock_queue(gh, 1);
gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT);
gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT);
if (likely(insert_pt == NULL)) {
list_add_tail(&gh->gh_list, &gl->gl_holders);
if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
goto do_cancel;
return;
}
list_add_tail(&gh->gh_list, insert_pt);
do_cancel:
gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
spin_unlock(&gl->gl_lockref.lock);
if (sdp->sd_lockstruct.ls_ops->lm_cancel)
sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
spin_lock(&gl->gl_lockref.lock);
}
return;
trap_recursive:
pr_err("original: %pSR\n", (void *)gh2->gh_ip);
pr_err("pid: %d\n", pid_nr(gh2->gh_owner_pid));
pr_err("lock type: %d req lock state : %d\n",
gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
pr_err("new: %pSR\n", (void *)gh->gh_ip);
pr_err("pid: %d\n", pid_nr(gh->gh_owner_pid));
pr_err("lock type: %d req lock state : %d\n",
gh->gh_gl->gl_name.ln_type, gh->gh_state);
gfs2_dump_glock(NULL, gl);
BUG();
}
/**
* gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
* @gh: the holder structure
*
* if (gh->gh_flags & GL_ASYNC), this never returns an error
*
* Returns: 0, GLR_TRYFAILED, or errno on failure
*/
int gfs2_glock_nq(struct gfs2_holder *gh)
{
struct gfs2_glock *gl = gh->gh_gl;
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
int error = 0;
if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
return -EIO;
if (test_bit(GLF_LRU, &gl->gl_flags))
gfs2_glock_remove_from_lru(gl);
spin_lock(&gl->gl_lockref.lock);
add_to_queue(gh);
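/*
 * A LM_FLAG_NOEXP request may thaw a glock that was frozen during
 * recovery; kick the glock work so the saved DLM reply is processed.
 */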
if (unlikely((LM_FLAG_NOEXP & gh->gh_flags) &&
test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) {
set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
gl->gl_lockref.count++;
__gfs2_glock_queue_work(gl, 0);
}
run_queue(gl, 1);
spin_unlock(&gl->gl_lockref.lock);
if (!(gh->gh_flags & GL_ASYNC))
error = gfs2_glock_wait(gh);
return error;
}
/**
* gfs2_glock_poll - poll to see if an async request has been completed
* @gh: the holder
*
* Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
*/
int gfs2_glock_poll(struct gfs2_holder *gh)
{
return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
}
/**
* gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
* @gh: the glock holder
*
*/
void gfs2_glock_dq(struct gfs2_holder *gh)
{
struct gfs2_glock *gl = gh->gh_gl;
const struct gfs2_glock_operations *glops = gl->gl_ops;
unsigned delay = 0;
int fast_path = 0;
spin_lock(&gl->gl_lockref.lock);
if (gh->gh_flags & GL_NOCACHE)
handle_callback(gl, LM_ST_UNLOCKED, 0, false);
list_del_init(&gh->gh_list);
clear_bit(HIF_HOLDER, &gh->gh_iflags);
if (find_first_holder(gl) == NULL) {
if (glops->go_unlock) {
GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
spin_unlock(&gl->gl_lockref.lock);
glops->go_unlock(gh);
spin_lock(&gl->gl_lockref.lock);
clear_bit(GLF_LOCK, &gl->gl_flags);
}
if (list_empty(&gl->gl_holders) &&
!test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
!test_bit(GLF_DEMOTE, &gl->gl_flags))
fast_path = 1;
}
if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl) &&
(glops->go_flags & GLOF_LRU))
gfs2_glock_add_to_lru(gl);
trace_gfs2_glock_queue(gh, 0);
if (unlikely(!fast_path)) {
gl->gl_lockref.count++;
if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
!test_bit(GLF_DEMOTE, &gl->gl_flags) &&
gl->gl_name.ln_type == LM_TYPE_INODE)
delay = gl->gl_hold_time;
__gfs2_glock_queue_work(gl, delay);
}
spin_unlock(&gl->gl_lockref.lock);
}
void gfs2_glock_dq_wait(struct gfs2_holder *gh)
{
struct gfs2_glock *gl = gh->gh_gl;
gfs2_glock_dq(gh);
might_sleep();
wait_on_bit(&gl->gl_flags, GLF_DEMOTE, TASK_UNINTERRUPTIBLE);
}
/**
* gfs2_glock_dq_uninit - dequeue a holder from a glock and initialize it
* @gh: the holder structure
*
*/
void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
gfs2_glock_dq(gh);
gfs2_holder_uninit(gh);
}
/**
* gfs2_glock_nq_num - acquire a glock based on lock number
* @sdp: the filesystem
* @number: the lock number
* @glops: the glock operations for the type of glock
* @state: the state to acquire the glock in
* @flags: modifier flags for the acquisition
* @gh: the struct gfs2_holder
*
* Returns: errno
*/
int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
const struct gfs2_glock_operations *glops,
unsigned int state, u16 flags, struct gfs2_holder *gh)
{
struct gfs2_glock *gl;
int error;
error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
if (!error) {
error = gfs2_glock_nq_init(gl, state, flags, gh);
gfs2_glock_put(gl);
}
return error;
}
/**
* glock_compare - Compare two struct gfs2_glock structures for sorting
* @arg_a: the first structure
* @arg_b: the second structure
*
*/
static int glock_compare(const void *arg_a, const void *arg_b)
{
const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
const struct lm_lockname *b = &gh_b->gh_gl->gl_name;
if (a->ln_number > b->ln_number)
return 1;
if (a->ln_number < b->ln_number)
return -1;
BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
return 0;
}
/**
* nq_m_sync - synchronously acquire more than one glock in deadlock free order
* @num_gh: the number of structures
* @ghs: an array of struct gfs2_holder structures
*
* Returns: 0 on success (all glocks acquired),
* errno on failure (no glocks acquired)
*/
static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
struct gfs2_holder **p)
{
unsigned int x;
int error = 0;
for (x = 0; x < num_gh; x++)
p[x] = &ghs[x];
sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);
for (x = 0; x < num_gh; x++) {
p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
error = gfs2_glock_nq(p[x]);
if (error) {
while (x--)
gfs2_glock_dq(p[x]);
break;
}
}
return error;
}
/**
* gfs2_glock_nq_m - acquire multiple glocks
* @num_gh: the number of structures
* @ghs: an array of struct gfs2_holder structures
*
*
* Returns: 0 on success (all glocks acquired),
* errno on failure (no glocks acquired)
*/
int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
struct gfs2_holder *tmp[4];
struct gfs2_holder **pph = tmp;
int error = 0;
switch(num_gh) {
case 0:
return 0;
case 1:
ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
return gfs2_glock_nq(ghs);
default:
if (num_gh <= 4)
break;
pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS);
if (!pph)
return -ENOMEM;
}
error = nq_m_sync(num_gh, ghs, pph);
if (pph != tmp)
kfree(pph);
return error;
}
/**
* gfs2_glock_dq_m - release multiple glocks
* @num_gh: the number of structures
* @ghs: an array of struct gfs2_holder structures
*
*/
void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
while (num_gh--)
gfs2_glock_dq(&ghs[num_gh]);
}
void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
{
unsigned long delay = 0;
unsigned long holdtime;
unsigned long now = jiffies;
gfs2_glock_hold(gl);
holdtime = gl->gl_tchange + gl->gl_hold_time;
if (test_bit(GLF_QUEUED, &gl->gl_flags) &&
gl->gl_name.ln_type == LM_TYPE_INODE) {
if (time_before(now, holdtime))
delay = holdtime - now;
if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
delay = gl->gl_hold_time;
}
spin_lock(&gl->gl_lockref.lock);
handle_callback(gl, state, delay, true);
__gfs2_glock_queue_work(gl, delay);
spin_unlock(&gl->gl_lockref.lock);
}
/**
* gfs2_should_freeze - Figure out if glock should be frozen
* @gl: The glock in question
*
* Glocks are not frozen if (a) the result of the dlm operation is
* an error, (b) the locking operation was an unlock operation or
* (c) if there is a "noexp" flagged request anywhere in the queue
*
* Returns: 1 if freezing should occur, 0 otherwise
*/
static int gfs2_should_freeze(const struct gfs2_glock *gl)
{
const struct gfs2_holder *gh;
if (gl->gl_reply & ~LM_OUT_ST_MASK)
return 0;
if (gl->gl_target == LM_ST_UNLOCKED)
return 0;
list_for_each_entry(gh, &gl->gl_holders, gh_list) {
if (test_bit(HIF_HOLDER, &gh->gh_iflags))
continue;
if (LM_FLAG_NOEXP & gh->gh_flags)
return 0;
}
return 1;
}
/**
* gfs2_glock_complete - Callback used by locking
* @gl: Pointer to the glock
* @ret: The return value from the dlm
*
* The gl_reply field is protected by gl_lockref.lock so that it is ok
* to use a bitfield shared with other glock state fields.
*/
void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
{
struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
spin_lock(&gl->gl_lockref.lock);
gl->gl_reply = ret;
if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))) {
if (gfs2_should_freeze(gl)) {
set_bit(GLF_FROZEN, &gl->gl_flags);
spin_unlock(&gl->gl_lockref.lock);
return;
}
}
gl->gl_lockref.count++;
set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
__gfs2_glock_queue_work(gl, 0);
spin_unlock(&gl->gl_lockref.lock);
}
static int glock_cmp(void *priv, struct list_head *a, struct list_head *b)
{
struct gfs2_glock *gla, *glb;
gla = list_entry(a, struct gfs2_glock, gl_lru);
glb = list_entry(b, struct gfs2_glock, gl_lru);
if (gla->gl_name.ln_number > glb->gl_name.ln_number)
return 1;
if (gla->gl_name.ln_number < glb->gl_name.ln_number)
return -1;
return 0;
}
/**
* gfs2_dispose_glock_lru - Demote a list of glocks
* @list: The list to dispose of
*
* Disposing of glocks may involve disk accesses, so here we sort the
* glocks by number (i.e. disk location of the inodes) so that any such
* accesses are sent in order (mostly).
*
* Must be called under the lru_lock, but may drop and retake this
* lock. While the lru_lock is dropped, entries may vanish from the
* list, but no new entries will appear on the list (since it is
* private)
*/
static void gfs2_dispose_glock_lru(struct list_head *list)
__releases(&lru_lock)
__acquires(&lru_lock)
{
struct gfs2_glock *gl;
list_sort(NULL, list, glock_cmp);
while(!list_empty(list)) {
gl = list_entry(list->next, struct gfs2_glock, gl_lru);
list_del_init(&gl->gl_lru);
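/*
 * lru_lock is held here, so only try-lock the glock spinlock; if it is
 * contended, put the glock back on the LRU and move on to the next one.
 */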
if (!spin_trylock(&gl->gl_lockref.lock)) {
add_back_to_lru:
list_add(&gl->gl_lru, &lru_list);
atomic_inc(&lru_count);
continue;
}
if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
spin_unlock(&gl->gl_lockref.lock);
goto add_back_to_lru;
}
clear_bit(GLF_LRU, &gl->gl_flags);
gl->gl_lockref.count++;
if (demote_ok(gl))
handle_callback(gl, LM_ST_UNLOCKED, 0, false);
WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags));
__gfs2_glock_queue_work(gl, 0);
spin_unlock(&gl->gl_lockref.lock);
cond_resched_lock(&lru_lock);
}
}
/**
* gfs2_scan_glock_lru - Scan the LRU looking for locks to demote
* @nr: The number of entries to scan
*
* This function selects the entries on the LRU which are able to
* be demoted, and then kicks off the process by calling
* gfs2_dispose_glock_lru() above.
*/
static long gfs2_scan_glock_lru(int nr)
{
struct gfs2_glock *gl;
LIST_HEAD(skipped);
LIST_HEAD(dispose);
long freed = 0;
spin_lock(&lru_lock);
while ((nr-- >= 0) && !list_empty(&lru_list)) {
gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);
/* Test for being demotable */
if (!test_bit(GLF_LOCK, &gl->gl_flags)) {
list_move(&gl->gl_lru, &dispose);
atomic_dec(&lru_count);
freed++;
continue;
}
list_move(&gl->gl_lru, &skipped);
}
list_splice(&skipped, &lru_list);
if (!list_empty(&dispose))
gfs2_dispose_glock_lru(&dispose);
spin_unlock(&lru_lock);
return freed;
}
static unsigned long gfs2_glock_shrink_scan(struct shrinker *shrink,
struct shrink_control *sc)
{
if (!(sc->gfp_mask & __GFP_FS))
return SHRINK_STOP;
return gfs2_scan_glock_lru(sc->nr_to_scan);
}
static unsigned long gfs2_glock_shrink_count(struct shrinker *shrink,
struct shrink_control *sc)
{
return vfs_pressure_ratio(atomic_read(&lru_count));
}
static struct shrinker glock_shrinker = {
.seeks = DEFAULT_SEEKS,
.count_objects = gfs2_glock_shrink_count,
.scan_objects = gfs2_glock_shrink_scan,
};
/**
* glock_hash_walk - Call a function for each glock belonging to a filesystem
* @examiner: the function
* @sdp: the filesystem
*
* Note that the function can be called multiple times on the same
* object. So the user must ensure that the function can cope with
* that.
*/
static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
{
struct gfs2_glock *gl;
struct rhashtable_iter iter;
rhashtable_walk_enter(&gl_hash_table, &iter);
do {
gl = ERR_PTR(rhashtable_walk_start(&iter));
if (IS_ERR(gl))
goto walk_stop;
while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl))
if (gl->gl_name.ln_sbd == sdp &&
lockref_get_not_dead(&gl->gl_lockref))
examiner(gl);
walk_stop:
rhashtable_walk_stop(&iter);
} while (cond_resched(), gl == ERR_PTR(-EAGAIN));
rhashtable_walk_exit(&iter);
}
/**
* thaw_glock - thaw out a glock which has an unprocessed reply waiting
* @gl: The glock to thaw
*
*/
static void thaw_glock(struct gfs2_glock *gl)
{
if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags)) {
gfs2_glock_put(gl);
return;
}
set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
gfs2_glock_queue_work(gl, 0);
}
/**
* clear_glock - look at a glock and see if we can free it from glock cache
* @gl: the glock to look at
*
*/
static void clear_glock(struct gfs2_glock *gl)
{
gfs2_glock_remove_from_lru(gl);
spin_lock(&gl->gl_lockref.lock);
if (gl->gl_state != LM_ST_UNLOCKED)
handle_callback(gl, LM_ST_UNLOCKED, 0, false);
__gfs2_glock_queue_work(gl, 0);
spin_unlock(&gl->gl_lockref.lock);
}
/**
* gfs2_glock_thaw - Thaw any frozen glocks
* @sdp: The super block
*
*/
void gfs2_glock_thaw(struct gfs2_sbd *sdp)
{
glock_hash_walk(thaw_glock, sdp);
}
static void dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
{
spin_lock(&gl->gl_lockref.lock);
gfs2_dump_glock(seq, gl);
spin_unlock(&gl->gl_lockref.lock);
}
static void dump_glock_func(struct gfs2_glock *gl)
{
dump_glock(NULL, gl);
}
/**
* gfs2_gl_hash_clear - Empty out the glock hash table
* @sdp: the filesystem
*
* Called when unmounting the filesystem.
*/
void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
{
set_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags);
flush_workqueue(glock_workqueue);
glock_hash_walk(clear_glock, sdp);
flush_workqueue(glock_workqueue);
wait_event_timeout(sdp->sd_glock_wait,
atomic_read(&sdp->sd_glock_disposal) == 0,
HZ * 600);
glock_hash_walk(dump_glock_func, sdp);
}
void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
{
struct gfs2_glock *gl = ip->i_gl;
int ret;
ret = gfs2_truncatei_resume(ip);
gfs2_assert_withdraw(gl->gl_name.ln_sbd, ret == 0);
spin_lock(&gl->gl_lockref.lock);
clear_bit(GLF_LOCK, &gl->gl_flags);
run_queue(gl, 1);
spin_unlock(&gl->gl_lockref.lock);
}
static const char *state2str(unsigned state)
{
switch(state) {
case LM_ST_UNLOCKED:
return "UN";
case LM_ST_SHARED:
return "SH";
case LM_ST_DEFERRED:
return "DF";
case LM_ST_EXCLUSIVE:
return "EX";
}
return "??";
}
static const char *hflags2str(char *buf, u16 flags, unsigned long iflags)
{
char *p = buf;
if (flags & LM_FLAG_TRY)
*p++ = 't';
if (flags & LM_FLAG_TRY_1CB)
*p++ = 'T';
if (flags & LM_FLAG_NOEXP)
*p++ = 'e';
if (flags & LM_FLAG_ANY)
*p++ = 'A';
if (flags & LM_FLAG_PRIORITY)
*p++ = 'p';
if (flags & GL_ASYNC)
*p++ = 'a';
if (flags & GL_EXACT)
*p++ = 'E';
if (flags & GL_NOCACHE)
*p++ = 'c';
if (test_bit(HIF_HOLDER, &iflags))
*p++ = 'H';
if (test_bit(HIF_WAIT, &iflags))
*p++ = 'W';
if (test_bit(HIF_FIRST, &iflags))
*p++ = 'F';
*p = 0;
return buf;
}
/**
* dump_holder - print information about a glock holder
* @seq: the seq_file struct
* @gh: the glock holder
*
*/
static void dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
{
struct task_struct *gh_owner = NULL;
char flags_buf[32];
rcu_read_lock();
if (gh->gh_owner_pid)
gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %pS\n",
state2str(gh->gh_state),
hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
gh->gh_error,
gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
gh_owner ? gh_owner->comm : "(ended)",
(void *)gh->gh_ip);
rcu_read_unlock();
}
static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
{
const unsigned long *gflags = &gl->gl_flags;
char *p = buf;
if (test_bit(GLF_LOCK, gflags))
*p++ = 'l';
if (test_bit(GLF_DEMOTE, gflags))
*p++ = 'D';
if (test_bit(GLF_PENDING_DEMOTE, gflags))
*p++ = 'd';
if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
*p++ = 'p';
if (test_bit(GLF_DIRTY, gflags))
*p++ = 'y';
if (test_bit(GLF_LFLUSH, gflags))
*p++ = 'f';
if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
*p++ = 'i';
if (test_bit(GLF_REPLY_PENDING, gflags))
*p++ = 'r';
if (test_bit(GLF_INITIAL, gflags))
*p++ = 'I';
if (test_bit(GLF_FROZEN, gflags))
*p++ = 'F';
if (test_bit(GLF_QUEUED, gflags))
*p++ = 'q';
if (test_bit(GLF_LRU, gflags))
*p++ = 'L';
if (gl->gl_object)
*p++ = 'o';
if (test_bit(GLF_BLOCKING, gflags))
*p++ = 'b';
*p = 0;
return buf;
}
/**
* gfs2_dump_glock - print information about a glock
* @seq: The seq_file struct
* @gl: the glock
*
* The file format is as follows:
* One line per object, capital letters are used to indicate objects
* G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented,
* other objects are indented by a single space and follow the glock to
* which they are related. Fields are indicated by lower case letters
* followed by a colon and the field value, except for strings which are in
* [] so that it is possible to see if they are composed of spaces, for
* example. The fields are n = number (id of the object), f = flags,
* t = type, s = state, r = refcount, e = error, p = pid.
*
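* Example (illustrative only, all values below are made up):
*
*   G: s:SH n:2/805f f:lq t:SH d:EX/0 a:0 v:0 r:3 m:200
*    H: s:SH f:H e:0 p:1234 [ls] gfs2_inode_lookup+0x1c8/0x340
*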
*/
void gfs2_dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
{
const struct gfs2_glock_operations *glops = gl->gl_ops;
unsigned long long dtime;
const struct gfs2_holder *gh;
char gflags_buf[32];
dtime = jiffies - gl->gl_demote_time;
dtime *= 1000000/HZ; /* demote time in uSec */
if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
dtime = 0;
gfs2_print_dbg(seq, "G: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d v:%d r:%d m:%ld\n",
state2str(gl->gl_state),
gl->gl_name.ln_type,
(unsigned long long)gl->gl_name.ln_number,
gflags2str(gflags_buf, gl),
state2str(gl->gl_target),
state2str(gl->gl_demote_state), dtime,
atomic_read(&gl->gl_ail_count),
atomic_read(&gl->gl_revokes),
(int)gl->gl_lockref.count, gl->gl_hold_time);
list_for_each_entry(gh, &gl->gl_holders, gh_list)
dump_holder(seq, gh);
if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
glops->go_dump(seq, gl);
}
static int gfs2_glstats_seq_show(struct seq_file *seq, void *iter_ptr)
{
struct gfs2_glock *gl = iter_ptr;
seq_printf(seq, "G: n:%u/%llx rtt:%llu/%llu rttb:%llu/%llu irt:%llu/%llu dcnt: %llu qcnt: %llu\n",
gl->gl_name.ln_type,
(unsigned long long)gl->gl_name.ln_number,
(unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTT],
(unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR],
(unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTB],
(unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVARB],
(unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRT],
(unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRTVAR],
(unsigned long long)gl->gl_stats.stats[GFS2_LKS_DCOUNT],
(unsigned long long)gl->gl_stats.stats[GFS2_LKS_QCOUNT]);
return 0;
}
static const char *gfs2_gltype[] = {
"type",
"reserved",
"nondisk",
"inode",
"rgrp",
"meta",
"iopen",
"flock",
"plock",
"quota",
"journal",
};
static const char *gfs2_stype[] = {
[GFS2_LKS_SRTT] = "srtt",
[GFS2_LKS_SRTTVAR] = "srttvar",
[GFS2_LKS_SRTTB] = "srttb",
[GFS2_LKS_SRTTVARB] = "srttvarb",
[GFS2_LKS_SIRT] = "sirt",
[GFS2_LKS_SIRTVAR] = "sirtvar",
[GFS2_LKS_DCOUNT] = "dlm",
[GFS2_LKS_QCOUNT] = "queue",
};
#define GFS2_NR_SBSTATS (ARRAY_SIZE(gfs2_gltype) * ARRAY_SIZE(gfs2_stype))
static int gfs2_sbstats_seq_show(struct seq_file *seq, void *iter_ptr)
{
struct gfs2_sbd *sdp = seq->private;
loff_t pos = *(loff_t *)iter_ptr;
unsigned index = pos >> 3;
unsigned subindex = pos & 0x07;
int i;
if (index == 0 && subindex != 0)
return 0;
seq_printf(seq, "%-10s %8s:", gfs2_gltype[index],
(index == 0) ? "cpu": gfs2_stype[subindex]);
for_each_possible_cpu(i) {
const struct gfs2_pcpu_lkstats *lkstats = per_cpu_ptr(sdp->sd_lkstats, i);
if (index == 0)
seq_printf(seq, " %15u", i);
else
seq_printf(seq, " %15llu", (unsigned long long)lkstats->
lkstats[index - 1].stats[subindex]);
}
seq_putc(seq, '\n');
return 0;
}
int __init gfs2_glock_init(void)
{
int i, ret;
ret = rhashtable_init(&gl_hash_table, &ht_parms);
if (ret < 0)
return ret;
glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
WQ_HIGHPRI | WQ_FREEZABLE, 0);
if (!glock_workqueue) {
rhashtable_destroy(&gl_hash_table);
return -ENOMEM;
}
gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
WQ_MEM_RECLAIM | WQ_FREEZABLE,
0);
if (!gfs2_delete_workqueue) {
destroy_workqueue(glock_workqueue);
rhashtable_destroy(&gl_hash_table);
return -ENOMEM;
}
ret = register_shrinker(&glock_shrinker);
if (ret) {
destroy_workqueue(gfs2_delete_workqueue);
destroy_workqueue(glock_workqueue);
rhashtable_destroy(&gl_hash_table);
return ret;
}
for (i = 0; i < GLOCK_WAIT_TABLE_SIZE; i++)
init_waitqueue_head(glock_wait_table + i);
return 0;
}
void gfs2_glock_exit(void)
{
unregister_shrinker(&glock_shrinker);
rhashtable_destroy(&gl_hash_table);
destroy_workqueue(glock_workqueue);
destroy_workqueue(gfs2_delete_workqueue);
}
static void gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
{
while ((gi->gl = rhashtable_walk_next(&gi->hti))) {
if (IS_ERR(gi->gl)) {
if (PTR_ERR(gi->gl) == -EAGAIN)
continue;
gi->gl = NULL;
return;
}
/* Skip entries for other sb and dead entries */
if (gi->sdp == gi->gl->gl_name.ln_sbd &&
!__lockref_is_dead(&gi->gl->gl_lockref))
return;
}
}
static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(RCU)
{
struct gfs2_glock_iter *gi = seq->private;
loff_t n = *pos;
rhashtable_walk_enter(&gl_hash_table, &gi->hti);
if (rhashtable_walk_start(&gi->hti) != 0)
return NULL;
do {
gfs2_glock_iter_next(gi);
} while (gi->gl && n--);
gi->last_pos = *pos;
return gi->gl;
}
static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
loff_t *pos)
{
struct gfs2_glock_iter *gi = seq->private;
(*pos)++;
gi->last_pos = *pos;
gfs2_glock_iter_next(gi);
return gi->gl;
}
static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
__releases(RCU)
{
struct gfs2_glock_iter *gi = seq->private;
gi->gl = NULL;
rhashtable_walk_stop(&gi->hti);
rhashtable_walk_exit(&gi->hti);
}
static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
{
dump_glock(seq, iter_ptr);
return 0;
}
static void *gfs2_sbstats_seq_start(struct seq_file *seq, loff_t *pos)
{
preempt_disable();
if (*pos >= GFS2_NR_SBSTATS)
return NULL;
return pos;
}
static void *gfs2_sbstats_seq_next(struct seq_file *seq, void *iter_ptr,
loff_t *pos)
{
(*pos)++;
if (*pos >= GFS2_NR_SBSTATS)
return NULL;
return pos;
}
static void gfs2_sbstats_seq_stop(struct seq_file *seq, void *iter_ptr)
{
preempt_enable();
}
static const struct seq_operations gfs2_glock_seq_ops = {
.start = gfs2_glock_seq_start,
.next = gfs2_glock_seq_next,
.stop = gfs2_glock_seq_stop,
.show = gfs2_glock_seq_show,
};
static const struct seq_operations gfs2_glstats_seq_ops = {
.start = gfs2_glock_seq_start,
.next = gfs2_glock_seq_next,
.stop = gfs2_glock_seq_stop,
.show = gfs2_glstats_seq_show,
};
static const struct seq_operations gfs2_sbstats_seq_ops = {
.start = gfs2_sbstats_seq_start,
.next = gfs2_sbstats_seq_next,
.stop = gfs2_sbstats_seq_stop,
.show = gfs2_sbstats_seq_show,
};
#define GFS2_SEQ_GOODSIZE min(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER, 65536UL)
static int __gfs2_glocks_open(struct inode *inode, struct file *file,
const struct seq_operations *ops)
{
int ret = seq_open_private(file, ops, sizeof(struct gfs2_glock_iter));
if (ret == 0) {
struct seq_file *seq = file->private_data;
struct gfs2_glock_iter *gi = seq->private;
gi->sdp = inode->i_private;
seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
if (seq->buf)
seq->size = GFS2_SEQ_GOODSIZE;
gi->gl = NULL;
}
return ret;
}
static int gfs2_glocks_open(struct inode *inode, struct file *file)
{
return __gfs2_glocks_open(inode, file, &gfs2_glock_seq_ops);
}
static int gfs2_glocks_release(struct inode *inode, struct file *file)
{
struct seq_file *seq = file->private_data;
struct gfs2_glock_iter *gi = seq->private;
gi->gl = NULL;
return seq_release_private(inode, file);
}
static int gfs2_glstats_open(struct inode *inode, struct file *file)
{
return __gfs2_glocks_open(inode, file, &gfs2_glstats_seq_ops);
}
static int gfs2_sbstats_open(struct inode *inode, struct file *file)
{
int ret = seq_open(file, &gfs2_sbstats_seq_ops);
if (ret == 0) {
struct seq_file *seq = file->private_data;
seq->private = inode->i_private; /* sdp */
}
return ret;
}
static const struct file_operations gfs2_glocks_fops = {
.owner = THIS_MODULE,
.open = gfs2_glocks_open,
.read = seq_read,
.llseek = seq_lseek,
.release = gfs2_glocks_release,
};
static const struct file_operations gfs2_glstats_fops = {
.owner = THIS_MODULE,
.open = gfs2_glstats_open,
.read = seq_read,
.llseek = seq_lseek,
.release = gfs2_glocks_release,
};
static const struct file_operations gfs2_sbstats_fops = {
.owner = THIS_MODULE,
.open = gfs2_sbstats_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
{
struct dentry *dent;
dent = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
if (IS_ERR_OR_NULL(dent))
goto fail;
sdp->debugfs_dir = dent;
dent = debugfs_create_file("glocks",
S_IFREG | S_IRUGO,
sdp->debugfs_dir, sdp,
&gfs2_glocks_fops);
if (IS_ERR_OR_NULL(dent))
goto fail;
sdp->debugfs_dentry_glocks = dent;
dent = debugfs_create_file("glstats",
S_IFREG | S_IRUGO,
sdp->debugfs_dir, sdp,
&gfs2_glstats_fops);
if (IS_ERR_OR_NULL(dent))
goto fail;
sdp->debugfs_dentry_glstats = dent;
dent = debugfs_create_file("sbstats",
S_IFREG | S_IRUGO,
sdp->debugfs_dir, sdp,
&gfs2_sbstats_fops);
if (IS_ERR_OR_NULL(dent))
goto fail;
sdp->debugfs_dentry_sbstats = dent;
return 0;
fail:
gfs2_delete_debugfs_file(sdp);
return dent ? PTR_ERR(dent) : -ENOMEM;
}
void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
{
if (sdp->debugfs_dir) {
if (sdp->debugfs_dentry_glocks) {
debugfs_remove(sdp->debugfs_dentry_glocks);
sdp->debugfs_dentry_glocks = NULL;
}
if (sdp->debugfs_dentry_glstats) {
debugfs_remove(sdp->debugfs_dentry_glstats);
sdp->debugfs_dentry_glstats = NULL;
}
if (sdp->debugfs_dentry_sbstats) {
debugfs_remove(sdp->debugfs_dentry_sbstats);
sdp->debugfs_dentry_sbstats = NULL;
}
debugfs_remove(sdp->debugfs_dir);
sdp->debugfs_dir = NULL;
}
}
int gfs2_register_debugfs(void)
{
gfs2_root = debugfs_create_dir("gfs2", NULL);
if (IS_ERR(gfs2_root))
return PTR_ERR(gfs2_root);
return gfs2_root ? 0 : -ENOMEM;
}
void gfs2_unregister_debugfs(void)
{
debugfs_remove(gfs2_root);
gfs2_root = NULL;
}
| {
"language": "C"
} |
/*
* Sony Playstation (PSX) STR File Demuxer
* Copyright (c) 2003 The FFmpeg project
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* PSX STR file demuxer
* by Mike Melanson (melanson@pcisys.net)
* This module handles streams that have been ripped from Sony Playstation
* CD games. This demuxer can handle either raw STR files (which are just
* concatenations of raw compact disc sectors) or STR files with 0x2C-byte
* RIFF headers, followed by CD sectors.
*/
#include "libavutil/channel_layout.h"
#include "libavutil/internal.h"
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "internal.h"
#define RIFF_TAG MKTAG('R', 'I', 'F', 'F')
#define CDXA_TAG MKTAG('C', 'D', 'X', 'A')
#define RAW_CD_SECTOR_SIZE 2352
#define RAW_CD_SECTOR_DATA_SIZE 2304
#define VIDEO_DATA_CHUNK_SIZE 0x7E0
#define VIDEO_DATA_HEADER_SIZE 0x38
#define RIFF_HEADER_SIZE 0x2C
#define CDXA_TYPE_MASK 0x0E
#define CDXA_TYPE_DATA 0x08
#define CDXA_TYPE_AUDIO 0x04
#define CDXA_TYPE_VIDEO 0x02
#define STR_MAGIC (0x80010160)
typedef struct StrChannel {
/* video parameters */
int video_stream_index;
AVPacket tmp_pkt;
/* audio parameters */
int audio_stream_index;
} StrChannel;
typedef struct StrDemuxContext {
/* a STR file can contain up to 32 channels of data */
StrChannel channels[32];
} StrDemuxContext;
static const uint8_t sync_header[12] = {0x00,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00};
static int str_probe(AVProbeData *p)
{
const uint8_t *sector= p->buf;
const uint8_t *end= sector + p->buf_size;
int aud=0, vid=0;
if (p->buf_size < RAW_CD_SECTOR_SIZE)
return 0;
if ((AV_RL32(&p->buf[0]) == RIFF_TAG) &&
(AV_RL32(&p->buf[8]) == CDXA_TAG)) {
/* RIFF header seen; skip 0x2C bytes */
sector += RIFF_HEADER_SIZE;
}
while (end - sector >= RAW_CD_SECTOR_SIZE) {
/* look for CD sync header (00, 0xFF x 10, 00) */
if (memcmp(sector,sync_header,sizeof(sync_header)))
return 0;
if (sector[0x11] >= 32)
return 0;
switch (sector[0x12] & CDXA_TYPE_MASK) {
case CDXA_TYPE_DATA:
case CDXA_TYPE_VIDEO: {
int current_sector = AV_RL16(&sector[0x1C]);
int sector_count = AV_RL16(&sector[0x1E]);
int frame_size = AV_RL32(&sector[0x24]);
if(!( frame_size>=0
&& current_sector < sector_count
&& sector_count*VIDEO_DATA_CHUNK_SIZE >=frame_size)){
return 0;
}
vid++;
}
break;
case CDXA_TYPE_AUDIO:
if(sector[0x13]&0x2A)
return 0;
aud++;
break;
default:
if(sector[0x12] & CDXA_TYPE_MASK)
return 0;
}
sector += RAW_CD_SECTOR_SIZE;
}
/* MPEG files (like those ripped from VCDs) can also look like this;
* only return half certainty */
if(vid+aud > 3) return AVPROBE_SCORE_EXTENSION;
else if(vid+aud) return 1;
else return 0;
}
static int str_read_header(AVFormatContext *s)
{
AVIOContext *pb = s->pb;
StrDemuxContext *str = s->priv_data;
unsigned char sector[RAW_CD_SECTOR_SIZE];
int start;
int i;
/* skip over any RIFF header */
if (avio_read(pb, sector, RIFF_HEADER_SIZE) != RIFF_HEADER_SIZE)
return AVERROR(EIO);
if (AV_RL32(&sector[0]) == RIFF_TAG)
start = RIFF_HEADER_SIZE;
else
start = 0;
avio_seek(pb, start, SEEK_SET);
for(i=0; i<32; i++){
str->channels[i].video_stream_index=
str->channels[i].audio_stream_index= -1;
}
s->ctx_flags |= AVFMTCTX_NOHEADER;
return 0;
}
static int str_read_packet(AVFormatContext *s,
AVPacket *ret_pkt)
{
AVIOContext *pb = s->pb;
StrDemuxContext *str = s->priv_data;
unsigned char sector[RAW_CD_SECTOR_SIZE];
int channel;
AVPacket *pkt;
AVStream *st;
while (1) {
if (avio_read(pb, sector, RAW_CD_SECTOR_SIZE) != RAW_CD_SECTOR_SIZE)
return AVERROR(EIO);
channel = sector[0x11];
if (channel >= 32)
return AVERROR_INVALIDDATA;
switch (sector[0x12] & CDXA_TYPE_MASK) {
case CDXA_TYPE_DATA:
case CDXA_TYPE_VIDEO:
{
int current_sector = AV_RL16(&sector[0x1C]);
int sector_count = AV_RL16(&sector[0x1E]);
int frame_size = AV_RL32(&sector[0x24]);
if(!( frame_size>=0
&& current_sector < sector_count
&& sector_count*VIDEO_DATA_CHUNK_SIZE >=frame_size)){
av_log(s, AV_LOG_ERROR, "Invalid parameters %d %d %d\n", current_sector, sector_count, frame_size);
break;
}
if(str->channels[channel].video_stream_index < 0){
/* allocate a new AVStream */
st = avformat_new_stream(s, NULL);
if (!st)
return AVERROR(ENOMEM);
avpriv_set_pts_info(st, 64, 1, 15);
str->channels[channel].video_stream_index = st->index;
st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
st->codecpar->codec_id = AV_CODEC_ID_MDEC;
st->codecpar->codec_tag = 0; /* no fourcc */
st->codecpar->width = AV_RL16(&sector[0x28]);
st->codecpar->height = AV_RL16(&sector[0x2A]);
}
/* if this is the first sector of the frame, allocate a pkt */
pkt = &str->channels[channel].tmp_pkt;
if(pkt->size != sector_count*VIDEO_DATA_CHUNK_SIZE){
if(pkt->data)
av_log(s, AV_LOG_ERROR, "mismatching sector_count\n");
av_packet_unref(pkt);
if (av_new_packet(pkt, sector_count*VIDEO_DATA_CHUNK_SIZE))
return AVERROR(EIO);
memset(pkt->data, 0, sector_count*VIDEO_DATA_CHUNK_SIZE);
pkt->pos= avio_tell(pb) - RAW_CD_SECTOR_SIZE;
pkt->stream_index =
str->channels[channel].video_stream_index;
}
memcpy(pkt->data + current_sector*VIDEO_DATA_CHUNK_SIZE,
sector + VIDEO_DATA_HEADER_SIZE,
VIDEO_DATA_CHUNK_SIZE);
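/* Last sector of the frame: hand the accumulated packet to the caller
 * and reset tmp_pkt so the next frame starts fresh. */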
if (current_sector == sector_count-1) {
pkt->size= frame_size;
*ret_pkt = *pkt;
pkt->data= NULL;
pkt->size= -1;
pkt->buf = NULL;
return 0;
}
}
break;
case CDXA_TYPE_AUDIO:
if(str->channels[channel].audio_stream_index < 0){
int fmt = sector[0x13];
/* allocate a new AVStream */
st = avformat_new_stream(s, NULL);
if (!st)
return AVERROR(ENOMEM);
str->channels[channel].audio_stream_index = st->index;
st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
st->codecpar->codec_id = AV_CODEC_ID_ADPCM_XA;
st->codecpar->codec_tag = 0; /* no fourcc */
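/* Coding-info byte from the sector header: bit 0 selects stereo and
 * bit 2 selects the 18.9 kHz half rate (37.8 kHz otherwise). */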
if (fmt & 1) {
st->codecpar->channels = 2;
st->codecpar->channel_layout = AV_CH_LAYOUT_STEREO;
} else {
st->codecpar->channels = 1;
st->codecpar->channel_layout = AV_CH_LAYOUT_MONO;
}
st->codecpar->sample_rate = (fmt&4)?18900:37800;
// st->codecpar->bit_rate = 0; //FIXME;
st->codecpar->block_align = 128;
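/* One packet carries one 2304-byte sector: 18 sound groups of 224
 * samples shared across the channels, hence this time base together
 * with pkt->duration == 1 below. */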
avpriv_set_pts_info(st, 64, 18 * 224 / st->codecpar->channels,
st->codecpar->sample_rate);
st->start_time = 0;
}
pkt = ret_pkt;
if (av_new_packet(pkt, 2304))
return AVERROR(EIO);
memcpy(pkt->data,sector+24,2304);
pkt->stream_index =
str->channels[channel].audio_stream_index;
pkt->duration = 1;
return 0;
default:
av_log(s, AV_LOG_WARNING, "Unknown sector type %02X\n", sector[0x12]);
/* drop the sector and move on */
break;
}
if (avio_feof(pb))
return AVERROR(EIO);
}
}
static int str_read_close(AVFormatContext *s)
{
StrDemuxContext *str = s->priv_data;
int i;
for(i=0; i<32; i++){
if(str->channels[i].tmp_pkt.data)
av_packet_unref(&str->channels[i].tmp_pkt);
}
return 0;
}
AVInputFormat ff_str_demuxer = {
.name = "psxstr",
.long_name = NULL_IF_CONFIG_SMALL("Sony Playstation STR"),
.priv_data_size = sizeof(StrDemuxContext),
.read_probe = str_probe,
.read_header = str_read_header,
.read_packet = str_read_packet,
.read_close = str_read_close,
.flags = AVFMT_NO_BYTE_SEEK,
};
| {
"language": "C"
} |
/* opensslconf.h */
/* WARNING: Generated automatically from opensslconf.h.in by Configure. */
/* OpenSSL was configured with the following options: */
#ifndef OPENSSL_SYSNAME_WIN64A
# define OPENSSL_SYSNAME_WIN64A
#endif
#ifndef OPENSSL_DOING_MAKEDEPEND
#ifndef OPENSSL_NO_EC_NISTP_64_GCC_128
# define OPENSSL_NO_EC_NISTP_64_GCC_128
#endif
#ifndef OPENSSL_NO_GMP
# define OPENSSL_NO_GMP
#endif
#ifndef OPENSSL_NO_JPAKE
# define OPENSSL_NO_JPAKE
#endif
#ifndef OPENSSL_NO_KRB5
# define OPENSSL_NO_KRB5
#endif
#ifndef OPENSSL_NO_MD2
# define OPENSSL_NO_MD2
#endif
#ifndef OPENSSL_NO_RC5
# define OPENSSL_NO_RC5
#endif
#ifndef OPENSSL_NO_RFC3779
# define OPENSSL_NO_RFC3779
#endif
#ifndef OPENSSL_NO_SCTP
# define OPENSSL_NO_SCTP
#endif
#ifndef OPENSSL_NO_STORE
# define OPENSSL_NO_STORE
#endif
#endif /* OPENSSL_DOING_MAKEDEPEND */
#ifndef OPENSSL_THREADS
# define OPENSSL_THREADS
#endif
#ifndef OPENSSL_NO_ASM
# define OPENSSL_NO_ASM
#endif
/* The OPENSSL_NO_* macros are also defined as NO_* if the application
asks for it. This is a transient feature that is provided for those
who haven't had the time to do the appropriate changes in their
applications. */
#ifdef OPENSSL_ALGORITHM_DEFINES
# if defined(OPENSSL_NO_EC_NISTP_64_GCC_128) && !defined(NO_EC_NISTP_64_GCC_128)
# define NO_EC_NISTP_64_GCC_128
# endif
# if defined(OPENSSL_NO_GMP) && !defined(NO_GMP)
# define NO_GMP
# endif
# if defined(OPENSSL_NO_JPAKE) && !defined(NO_JPAKE)
# define NO_JPAKE
# endif
# if defined(OPENSSL_NO_KRB5) && !defined(NO_KRB5)
# define NO_KRB5
# endif
# if defined(OPENSSL_NO_MD2) && !defined(NO_MD2)
# define NO_MD2
# endif
# if defined(OPENSSL_NO_RC5) && !defined(NO_RC5)
# define NO_RC5
# endif
# if defined(OPENSSL_NO_RFC3779) && !defined(NO_RFC3779)
# define NO_RFC3779
# endif
# if defined(OPENSSL_NO_SCTP) && !defined(NO_SCTP)
# define NO_SCTP
# endif
# if defined(OPENSSL_NO_STORE) && !defined(NO_STORE)
# define NO_STORE
# endif
#endif
/* crypto/opensslconf.h.in */
/* Generate 80386 code? */
#undef I386_ONLY
#if !(defined(VMS) || defined(__VMS)) /* VMS uses logical names instead */
#if defined(HEADER_CRYPTLIB_H) && !defined(OPENSSLDIR)
#define ENGINESDIR "..\\OpenSSL.x64/lib/engines"
#define OPENSSLDIR "..\\OpenSSL.x64/..\\OpenSSL.x64/ssl"
#endif
#endif
#undef OPENSSL_UNISTD
#define OPENSSL_UNISTD <unistd.h>
#undef OPENSSL_EXPORT_VAR_AS_FUNCTION
#define OPENSSL_EXPORT_VAR_AS_FUNCTION
#if defined(HEADER_IDEA_H) && !defined(IDEA_INT)
#define IDEA_INT unsigned int
#endif
#if defined(HEADER_MD2_H) && !defined(MD2_INT)
#define MD2_INT unsigned int
#endif
#if defined(HEADER_RC2_H) && !defined(RC2_INT)
/* I need to put in a mod for the alpha - eay */
#define RC2_INT unsigned int
#endif
#if defined(HEADER_RC4_H)
#if !defined(RC4_INT)
/* using int types makes the structure larger but makes the code faster
 * on most boxes I have tested - up to 20% faster. */
/*
* I don't know what "most" means, but declaring "int" is a must on:
* - Intel P6 because partial register stalls are very expensive;
* - elder Alpha because it lacks byte load/store instructions;
*/
#define RC4_INT unsigned int
#endif
#if !defined(RC4_CHUNK)
/*
* This enables code handling data aligned at natural CPU word
* boundary. See crypto/rc4/rc4_enc.c for further details.
*/
#define RC4_CHUNK unsigned long long
#endif
#endif
#if (defined(HEADER_NEW_DES_H) || defined(HEADER_DES_H)) && !defined(DES_LONG)
/* If this is set to 'unsigned int' on a DEC Alpha, this gives about a
 * 20% speed up (longs are 8 bytes, ints are 4). */
#ifndef DES_LONG
#define DES_LONG unsigned int
#endif
#endif
#if defined(HEADER_BN_H) && !defined(CONFIG_HEADER_BN_H)
#define CONFIG_HEADER_BN_H
#undef BN_LLONG
/* Should we define BN_DIV2W here? */
/* Only one for the following should be defined */
#undef SIXTY_FOUR_BIT_LONG
#define SIXTY_FOUR_BIT
#undef THIRTY_TWO_BIT
#endif
#if defined(HEADER_RC4_LOCL_H) && !defined(CONFIG_HEADER_RC4_LOCL_H)
#define CONFIG_HEADER_RC4_LOCL_H
/* if this is defined, data[i] is used instead of *data; this is a 20%
 * speedup on x86 */
#undef RC4_INDEX
#endif
#if defined(HEADER_BF_LOCL_H) && !defined(CONFIG_HEADER_BF_LOCL_H)
#define CONFIG_HEADER_BF_LOCL_H
#undef BF_PTR
#endif /* HEADER_BF_LOCL_H */
#if defined(HEADER_DES_LOCL_H) && !defined(CONFIG_HEADER_DES_LOCL_H)
#define CONFIG_HEADER_DES_LOCL_H
#ifndef DES_DEFAULT_OPTIONS
/* the following is tweaked by a config script, which is why it is a
 * protected undef/define */
#ifndef DES_PTR
#undef DES_PTR
#endif
/* This helps the C compiler generate the correct code for multiple functional
 * units. It reduces register dependencies at the expense of 2 more
 * registers */
#ifndef DES_RISC1
#undef DES_RISC1
#endif
#ifndef DES_RISC2
#undef DES_RISC2
#endif
#if defined(DES_RISC1) && defined(DES_RISC2)
YOU SHOULD NOT HAVE BOTH DES_RISC1 AND DES_RISC2 DEFINED!!!!!
#endif
/* Unroll the inner loop; this sometimes helps, sometimes hinders.
 * Very much CPU dependent */
#ifndef DES_UNROLL
#undef DES_UNROLL
#endif
/* These default values were supplied by
* Peter Gutman <pgut001@cs.auckland.ac.nz>
* They are only used if nothing else has been defined */
#if !defined(DES_PTR) && !defined(DES_RISC1) && !defined(DES_RISC2) && !defined(DES_UNROLL)
/* Special defines which change the way the code is built depending on the
CPU and OS. For SGI machines you can use _MIPS_SZLONG (32 or 64) to find
even newer MIPS CPUs, but at the moment one size fits all for
optimization options. Older Sparcs work better with only UNROLL, but
there's no way to tell at compile time what it is you're running on */
#if defined( sun ) /* Newer Sparc's */
# define DES_PTR
# define DES_RISC1
# define DES_UNROLL
#elif defined( __ultrix ) /* Older MIPS */
# define DES_PTR
# define DES_RISC2
# define DES_UNROLL
#elif defined( __osf1__ ) /* Alpha */
# define DES_PTR
# define DES_RISC2
#elif defined ( _AIX ) /* RS6000 */
/* Unknown */
#elif defined( __hpux ) /* HP-PA */
/* Unknown */
#elif defined( __aux ) /* 68K */
/* Unknown */
#elif defined( __dgux ) /* 88K (but P6 in latest boxes) */
# define DES_UNROLL
#elif defined( __sgi ) /* Newer MIPS */
# define DES_PTR
# define DES_RISC2
# define DES_UNROLL
#elif defined(i386) || defined(__i386__) /* x86 boxes, should be gcc */
# define DES_PTR
# define DES_RISC1
# define DES_UNROLL
#endif /* Systems-specific speed defines */
#endif
#endif /* DES_DEFAULT_OPTIONS */
#endif /* HEADER_DES_LOCL_H */
| {
"language": "C"
} |
/* Copyright (c) 2014-2017, ARM Limited and Contributors
*
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge,
* to any person obtaining a copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
* INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef TEXT_H
#define TEXT_H
#include "Matrix.h"
#include <GLES3/gl3.h>
namespace AstcTextures
{
/**
* \brief Vertex shader source code for text rendering.
*/
const char fontVertexShaderSource[] =
{
"#version 300 es\n"
"uniform mat4 u_m4Projection;\n"
"in vec4 a_v4Position;\n"
"in vec4 a_v4FontColor;\n"
"in vec2 a_v2TexCoord;\n"
"out vec4 v_v4FontColor;\n"
"out vec2 v_v2TexCoord;\n"
"void main() {\n"
" v_v4FontColor = a_v4FontColor;\n"
" v_v2TexCoord = a_v2TexCoord;\n"
" gl_Position = u_m4Projection * a_v4Position;\n"
"}\n"
};
/**
* \brief Fragment shader source code for text rendering.
*/
const char fontFragmentShaderSource[] =
{
"#version 300 es\n"
"precision mediump float;\n"
"uniform sampler2D u_s2dTexture;\n"
"in vec2 v_v2TexCoord;\n"
"in vec4 v_v4FontColor;\n"
"out vec4 color;\n"
"void main() {\n"
" vec4 v4Texel = texture(u_s2dTexture, v_v2TexCoord);\n"
" color = v_v4FontColor * v4Texel;\n"
"}\n"
};
/**
* \brief Load texture data from a file into memory.
*
* \param[in] filename The filename of the texture to load.
* \param[out] textureData Pointer to the texture that has been loaded.
*/
void loadData(const char* filename, unsigned char** textureData);
/**
* \brief Type representing texture coordinates.
*/
typedef struct Vec2
{
int x;
int y;
} Vec2;
/**
* \brief Functions for drawing text in OpenGL ES
*
* Uses a texture with images of alphanumeric and punctuation symbols.
* The class converts strings into texture coordinates in order to render the correct symbol from the texture for each character of the string.
*/
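/*
 * Illustrative usage sketch (not part of the original documentation; the
 * resource path and window size below are made up, and the call sequence is
 * inferred from the declarations in this header):
 *
 *   AstcTextures::Text text("assets/", 1280, 720);
 *   text.clear();
 *   text.addString(10, 10, "ASTC textures demo", 255, 255, 255, 255);
 *   // ... then once per frame, after the scene has been rendered:
 *   text.draw();
 */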
class Text
{
private:
static const char textureFilename[];
static const char vertexShaderFilename[];
static const char fragmentShaderFilename[];
/**
* \brief Scaling factor to use when rendering the text.
* \warning
* Experimental: allows drawing characters bigger than the texture was.
* Range 0.75-3.0 seems to work reasonably.
*/
static const float scale;
Matrix projectionMatrix;
int numberOfCharacters;
float* textVertex;
float* textTextureCoordinates;
float* color;
GLshort* textIndex;
int m_iLocPosition;
int m_iLocProjection;
int m_iLocTextColor;
int m_iLocTexCoord;
int m_iLocTexture;
GLuint vertexShaderID;
GLuint fragmentShaderID;
GLuint programID;
GLuint textureID;
public:
/**
* \brief The width (in pixels) of the characters in the text texture.
* \warning Change only if the text texture is changed and the width of the characters is different.
*/
static const int textureCharacterWidth;
/**
* \brief The height (in pixels) of the characters in the text texture.
* \warning Change only if the text texture is changed and the height of the characters is different.
*/
static const int textureCharacterHeight;
/**
* \brief Constructor for Text.
*
* \param[in] resourceDirectory Path to the resources. Where the textures and shaders are located.
* \param[in] windowWidth The width of the window (in pixels) that the text is being used in.
* \param[in] windowHeight The height of the window (in pixels) that the text is being used in.
*/
Text(const char* resourceDirectory, int windowWidth, int windowHeight);
/**
* \brief Overloaded default constructor.
*/
Text(void);
/**
* \brief Overloaded default destructor.
*/
~Text(void);
/**
* \brief Removes the current string from the class.
*
* Should be called before adding a new string to render using addString().
*/
void clear(void);
/**
* \brief Add a std::string to be drawn to the screen.
*
* \param[in] xPosition The X position (in pixels) to start drawing the text. Measured from the left of the screen.
* \param[in] yPosition The Y position (in pixels) to start drawing the text. Measured from the bottom of the screen.
* \param[in] string The string to be rendered on the screen.
* \param[in] red The red component of the text colour (accepts values 0-255).
* \param[in] green The green component of the text colour (accepts values 0-255).
* \param[in] blue The blue component of the text colour (accepts values 0-255).
* \param[in] alpha The alpha component of the text colour (accepts values 0-255). Affects the transparency of the text.
*/
void addString(int xPosition, int yPosition, const char* string, int red, int green, int blue, int alpha);
/**
* \brief Draw the text to the screen.
*
* Should be called each time through the render loop so that the text is drawn every frame.
*/
void draw(void);
};
}
#endif /* TEXT_H */
| {
"language": "C"
} |
# Patch against libcharset version 1.4
--- libiconv-1.12/libcharset//lib/localcharset.c 2006-10-18 07:55:49.000000000 -0400
+++ localcharset.c 2008-05-20 18:36:24.000000000 -0400
@@ -103,8 +103,8 @@
static const char * volatile charset_aliases;
/* Return a pointer to the contents of the charset.alias file. */
-static const char *
-get_charset_aliases (void)
+const char *
+_g_locale_get_charset_aliases (void)
{
const char *cp;
@@ -280,14 +280,10 @@
If the canonical name cannot be determined, the result is a non-canonical
name. */
-#ifdef STATIC
-STATIC
-#endif
const char *
-locale_charset (void)
+_g_locale_charset_raw (void)
{
const char *codeset;
- const char *aliases;
#if !(defined WIN32_NATIVE || defined OS2)
@@ -436,12 +432,20 @@
#endif
+ return codeset;
+}
+
+const char *
+_g_locale_charset_unalias (const char *codeset)
+{
+ const char *aliases;
+
if (codeset == NULL)
/* The canonical name cannot be determined. */
codeset = "";
/* Resolve alias. */
- for (aliases = get_charset_aliases ();
+ for (aliases = _g_locale_get_charset_aliases ();
*aliases != '\0';
aliases += strlen (aliases) + 1, aliases += strlen (aliases) + 1)
if (strcmp (codeset, aliases) == 0
--- libiconv-1.12/libcharset//include/libcharset.h.in 2005-05-19 13:14:56.000000000 -0400
+++ libcharset.h 2008-05-20 18:39:44.000000000 -0400
@@ -19,7 +19,7 @@
#ifndef _LIBCHARSET_H
#define _LIBCHARSET_H
-#include <localcharset.h>
+#include "localcharset.h"
#ifdef __cplusplus
--- libiconv-1.12/libcharset//include/localcharset.h.in 2005-05-19 13:14:57.000000000 -0400
+++ localcharset.h 2008-05-20 18:36:24.000000000 -0400
@@ -31,8 +31,9 @@
The result must not be freed; it is statically allocated.
If the canonical name cannot be determined, the result is a non-canonical
name. */
-extern const char * locale_charset (void);
-
+extern const char * _g_locale_charset_raw (void);
+extern const char * _g_locale_charset_unalias (const char *codeset);
+extern const char * _g_locale_get_charset_aliases (void);
#ifdef __cplusplus
}
| {
"language": "C"
} |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#define nv04_dmaobj(p) container_of((p), struct nv04_dmaobj, base)
#include "user.h"
#include <core/gpuobj.h>
#include <subdev/fb.h>
#include <subdev/mmu/vmm.h>
#include <nvif/class.h>
struct nv04_dmaobj {
struct nvkm_dmaobj base;
bool clone;
u32 flags0;
u32 flags2;
};
static int
nv04_dmaobj_bind(struct nvkm_dmaobj *base, struct nvkm_gpuobj *parent,
int align, struct nvkm_gpuobj **pgpuobj)
{
struct nv04_dmaobj *dmaobj = nv04_dmaobj(base);
struct nvkm_device *device = dmaobj->base.dma->engine.subdev.device;
u64 offset = dmaobj->base.start & 0xfffff000;
u64 adjust = dmaobj->base.start & 0x00000fff;
u32 length = dmaobj->base.limit - dmaobj->base.start;
int ret;
if (dmaobj->clone) {
struct nvkm_memory *pgt =
device->mmu->vmm->pd->pt[0]->memory;
if (!dmaobj->base.start)
return nvkm_gpuobj_wrap(pgt, pgpuobj);
nvkm_kmap(pgt);
offset = nvkm_ro32(pgt, 8 + (offset >> 10));
offset &= 0xfffff000;
nvkm_done(pgt);
}
ret = nvkm_gpuobj_new(device, 16, align, false, parent, pgpuobj);
if (ret == 0) {
nvkm_kmap(*pgpuobj);
nvkm_wo32(*pgpuobj, 0x00, dmaobj->flags0 | (adjust << 20));
nvkm_wo32(*pgpuobj, 0x04, length);
nvkm_wo32(*pgpuobj, 0x08, dmaobj->flags2 | offset);
nvkm_wo32(*pgpuobj, 0x0c, dmaobj->flags2 | offset);
nvkm_done(*pgpuobj);
}
return ret;
}
static const struct nvkm_dmaobj_func
nv04_dmaobj_func = {
.bind = nv04_dmaobj_bind,
};
int
nv04_dmaobj_new(struct nvkm_dma *dma, const struct nvkm_oclass *oclass,
void *data, u32 size, struct nvkm_dmaobj **pdmaobj)
{
struct nvkm_device *device = dma->engine.subdev.device;
struct nv04_dmaobj *dmaobj;
int ret;
if (!(dmaobj = kzalloc(sizeof(*dmaobj), GFP_KERNEL)))
return -ENOMEM;
*pdmaobj = &dmaobj->base;
ret = nvkm_dmaobj_ctor(&nv04_dmaobj_func, dma, oclass,
&data, &size, &dmaobj->base);
if (ret)
return ret;
if (dmaobj->base.target == NV_MEM_TARGET_VM) {
if (device->mmu->func == &nv04_mmu)
dmaobj->clone = true;
dmaobj->base.target = NV_MEM_TARGET_PCI;
dmaobj->base.access = NV_MEM_ACCESS_RW;
}
dmaobj->flags0 = oclass->base.oclass;
switch (dmaobj->base.target) {
case NV_MEM_TARGET_VRAM:
dmaobj->flags0 |= 0x00003000;
break;
case NV_MEM_TARGET_PCI:
dmaobj->flags0 |= 0x00023000;
break;
case NV_MEM_TARGET_PCI_NOSNOOP:
dmaobj->flags0 |= 0x00033000;
break;
default:
return -EINVAL;
}
switch (dmaobj->base.access) {
case NV_MEM_ACCESS_RO:
dmaobj->flags0 |= 0x00004000;
break;
case NV_MEM_ACCESS_WO:
dmaobj->flags0 |= 0x00008000;
/* fall through */
case NV_MEM_ACCESS_RW:
dmaobj->flags2 |= 0x00000002;
break;
default:
return -EINVAL;
}
return 0;
}
| {
"language": "C"
} |
/****************************** Module Header ******************************\
* Module Name: handtabl.c
*
* Copyright (c) 1985-95, Microsoft Corporation
*
* Implements the USER handle table.
*
* 01-13-92 ScottLu Created.
\***************************************************************************/
#include "precomp.h"
#pragma hdrstop
/*
* Turning this variable on results in lock tracking, for debugging
* purposes. This is FALSE by default.
*/
#ifdef DEBUG_LOCKS
BOOL gfTrackLocks = TRUE;
#else
BOOL gfTrackLocks = FALSE;
#endif
/*
* Handle table allocation globals. The purpose of keeping per-page free
* lists is to keep the table as small as is practical and to minimize
* the number of pages touched while performing handle table operations.
*/
#define CPAGEENTRIESINIT 4
typedef struct _HANDLEPAGE {
DWORD iheLimit; /* first handle index past the end of the page */
DWORD iheFree; /* first free handle in the page */
} HANDLEPAGE, *PHANDLEPAGE;
DWORD gcHandlePages;
PHANDLEPAGE gpHandlePages;
CONST BYTE gabObjectCreateFlags[TYPE_CTYPES] = {
0, /* free */
OCF_THREADOWNED | OCF_MARKTHREAD | OCF_USEQUOTA, /* window */
OCF_PROCESSOWNED, /* menu */
OCF_PROCESSOWNED | OCF_USEQUOTA, /* cursor/icon */
OCF_THREADOWNED | OCF_USEQUOTA, /* hswpi (SetWindowPos Information) */
OCF_THREADOWNED | OCF_MARKTHREAD, /* hook */
OCF_THREADOWNED | OCF_USEQUOTA, /* thread info object (internal) */
0, /* clipboard data (internal) */
OCF_THREADOWNED, /* CALLPROCDATA */
OCF_PROCESSOWNED | OCF_USEQUOTA, /* accel table */
OCF_THREADOWNED | OCF_USEQUOTA, /* dde access */
OCF_THREADOWNED | OCF_MARKTHREAD | OCF_USEQUOTA, /* dde conversation */
OCF_THREADOWNED | OCF_MARKTHREAD | OCF_USEQUOTA, /* ddex */
OCF_PROCESSOWNED, /* zombie */
OCF_PROCESSOWNED, /* keyboard layout */
OCF_PROCESSOWNED, /* keyboard file */
#ifdef FE_IME
OCF_THREADOWNED | OCF_MARKTHREAD, /* input context */
#endif
};
/*
* Tag array for objects allocated from pool
*/
CONST DWORD gdwAllocTag[TYPE_CTYPES] = {
0, /* free */
0, /* window */
0, /* menu */
TAG_CURSOR, /* cursor/icon */
TAG_SWP, /* hswpi (SetWindowPos Information) */
0, /* hook */
TAG_THREADINFO, /* thread info object (internal) */
TAG_CLIPBOARD, /* clipboard data (internal) */
0, /* CALLPROCDATA */
TAG_ACCEL, /* accel table */
TAG_DDE9, /* dde access */
TAG_DDEa, /* dde conversation */
TAG_DDEb, /* ddex */
0, /* zombie */
TAG_KBDLAYOUT, /* keyboard layout */
TAG_KBDFILE, /* keyboard file */
#ifdef FE_IME
0, /* input context */
#endif
};
#ifdef DEBUG
PVOID LockRecordLookasideBase;
PVOID LockRecordLookasideBounds;
ZONE_HEADER LockRecordLookasideZone;
ULONG AllocLockRecordHiWater;
ULONG AllocLockRecordCalls;
ULONG AllocLockRecordSlowCalls;
ULONG DelLockRecordCalls;
ULONG DelLockRecordSlowCalls;
NTSTATUS InitLockRecordLookaside();
void FreeLockRecord(PLR plr);
#endif
void HMDestroyUnlockedObject(PHE phe);
void HMRecordLock(PVOID ppobj, PVOID pobj, DWORD cLockObj, PVOID pfn);
BOOL HMUnrecordLock(PVOID ppobj, PVOID pobj);
VOID ShowLocks(PHE);
BOOL HMRelocateLockRecord(PVOID ppobjNew, int cbDelta);
/***************************************************************************\
* HMInitHandleTable
*
* Initialize the handle table. Unused entries are linked together.
*
* 01-13-92 ScottLu Created.
\***************************************************************************/
#define CHANDLEENTRIESINIT 200
#define CLOCKENTRIESINIT 100
BOOL HMInitHandleTable(
PVOID pReadOnlySharedSectionBase)
{
int i;
PHE pheT;
NTSTATUS Status;
/*
* Allocate the handle page array. Make it big enough
* for 4 pages, which should be sufficient for nearly
* all instances.
*/
gpHandlePages = UserAllocPool(CPAGEENTRIESINIT * sizeof(HANDLEPAGE),
TAG_SYSTEM);
if (gpHandlePages == NULL)
return FALSE;
#ifdef DEBUG
if (!NT_SUCCESS(InitLockRecordLookaside()))
return FALSE;
#endif
/*
* Allocate the array. We have the space from
* NtCurrentPeb()->ReadOnlySharedMemoryBase to
* NtCurrentPeb()->ReadOnlySharedMemoryHeap reserved for
* the handle table. All we need to do is commit the pages.
*
* Compute the minimum size of the table. The allocation will
* round this up to the next page size.
*/
gpsi->cbHandleTable = PAGE_SIZE;
Status = CommitReadOnlyMemory(ghReadOnlySharedSection,
gpsi->cbHandleTable, 0);
gSharedInfo.aheList = pReadOnlySharedSectionBase;
gpsi->cHandleEntries = gpsi->cbHandleTable / sizeof(HANDLEENTRY);
gcHandlePages = 1;
/*
* Put these free handles on the free list. The last free handle points
* to NULL. Use indexes; the handle table may move around in memory when
* growing.
*/
RtlZeroMemory(gSharedInfo.aheList, gpsi->cHandleEntries * sizeof(HANDLEENTRY));
for (pheT = gSharedInfo.aheList, i = 0; i < (int)gpsi->cHandleEntries; i++, pheT++) {
pheT->phead = ((PHEAD)(((PBYTE)i) + 1));
pheT->bType = TYPE_FREE;
pheT->wUniq = 1;
}
(pheT - 1)->phead = NULL;
/*
* Reserve the first handle table entry so that PW(NULL) maps to a
* NULL pointer. Set it to TYPE_FREE so the cleanup code doesn't think
* it is allocated. Set wUniq to 1 so that RevalidateHandles on NULL
* will fail.
*/
gpHandlePages[0].iheFree = 1;
gpHandlePages[0].iheLimit = gpsi->cHandleEntries;
RtlZeroMemory(&gSharedInfo.aheList[0], sizeof(HANDLEENTRY));
gSharedInfo.aheList[0].bType = TYPE_FREE;
gSharedInfo.aheList[0].wUniq = 1;
return TRUE;
}
/***************************************************************************\
* HMGrowHandleTable
*
* Grows the handle table. Assumes the handle table already exists.
*
* 01-13-92 ScottLu Created.
\***************************************************************************/
BOOL HMGrowHandleTable()
{
DWORD i;
PHE pheT;
PVOID p;
PHANDLEPAGE phpNew;
DWORD dwCommitOffset;
NTSTATUS Status;
/*
* If we've run out of handle space, fail.
*/
i = gpsi->cHandleEntries;
if (i & ~HMINDEXBITS)
return FALSE;
/*
* Grow the page table if need be.
*/
i = gcHandlePages + 1;
if (i > CPAGEENTRIESINIT) {
DWORD dwSize = gcHandlePages * sizeof(HANDLEPAGE);
phpNew = UserReAllocPool(gpHandlePages, dwSize, dwSize + sizeof(HANDLEPAGE),
TAG_SYSTEM);
if (phpNew == NULL)
return FALSE;
gpHandlePages = phpNew;
}
/*
* Commit some more pages to the table. First find the
* address where the commitment needs to be.
*/
p = (PBYTE)gSharedInfo.aheList + gpsi->cbHandleTable;
if (p >= ghheapSharedRO) {
return FALSE;
}
dwCommitOffset = (ULONG)((PBYTE)p - (PBYTE)gpReadOnlySharedSectionBase);
Status = CommitReadOnlyMemory(ghReadOnlySharedSection,
PAGE_SIZE, dwCommitOffset);
if (!NT_SUCCESS(Status))
return FALSE;
phpNew = &gpHandlePages[gcHandlePages++];
/*
* Update the global information to include the new
* page.
*/
phpNew->iheFree = gpsi->cHandleEntries;
gpsi->cbHandleTable += PAGE_SIZE;
/*
* Check for handle overflow
*/
gpsi->cHandleEntries = gpsi->cbHandleTable / sizeof(HANDLEENTRY);
if (gpsi->cHandleEntries & ~HMINDEXBITS)
gpsi->cHandleEntries = (HMINDEXBITS + 1);
phpNew->iheLimit = gpsi->cHandleEntries;
/*
* Link all the new handle entries together.
*/
i = phpNew->iheFree;
RtlZeroMemory(&gSharedInfo.aheList[i],
(gpsi->cHandleEntries - i) * sizeof(HANDLEENTRY));
for (pheT = &gSharedInfo.aheList[i]; i < gpsi->cHandleEntries; i++, pheT++) {
pheT->phead = ((PHEAD)(((PBYTE)i) + 1));
pheT->bType = TYPE_FREE;
pheT->wUniq = 1;
}
/*
* There are no old free entries (since we're growing the table), so the
* last new free handle points to 0.
*/
(pheT - 1)->phead = 0;
return TRUE;
}
/***************************************************************************\
* HMAllocObject
*
* Allocs a handle by removing it from the free list.
*
* 01-13-92 ScottLu Created.
\***************************************************************************/
#define TRACE_OBJECT_ALLOCS 0
#define TYPE_MAXTYPES 20
#if (DBG || TRACE_OBJECT_ALLOCS)
DWORD acurObjectCount[TYPE_MAXTYPES] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
DWORD amaxObjectCount[TYPE_MAXTYPES] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
DWORD atotObjectCount[TYPE_MAXTYPES] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
DWORD abasObjectCount[TYPE_MAXTYPES] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
DWORD asizObjectCount[TYPE_MAXTYPES] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
#endif // TRACE_OBJECT_ALLOCS
/***************************************************************************\
* HMAllocObject
*
* Allocs a non-secure object by allocating a handle and memory for
* the object.
*
* 01-13-92 ScottLu Created.
\***************************************************************************/
PVOID HMAllocObject(
PTHREADINFO ptiOwner,
PDESKTOP pdeskSrc,
BYTE bType,
DWORD size)
{
DWORD i;
PHEAD phead;
PHE pheT;
DWORD iheFreeOdd = 0;
DWORD iheFree;
PHANDLEPAGE php;
BOOL fPoolAlloc = TRUE;
BYTE bCreateFlags;
/*
* If there are no more free handles, grow the table.
*/
TryFreeHandle:
iheFree = 0;
php = gpHandlePages;
for (i = 0; i < gcHandlePages; ++i, ++php)
if (php->iheFree != 0) {
iheFree = php->iheFree;
break;
}
if (iheFree == 0) {
HMGrowHandleTable();
/*
* If the table didn't grow, get out.
*/
if (i == gcHandlePages) {
RIPMSG0(RIP_WARNING, "USER: HMAllocObject: could not grow handle space\n");
return NULL;
}
/*
* Because the handle page table may have moved,
* recalc the page entry pointer.
*/
php = &gpHandlePages[i];
iheFree = php->iheFree;
UserAssert(iheFree);
}
/*
* NOTE: the next two tests will nicely fail if iheFree == 0
*
* If the next handle is 0xFFFF, we need to treat it specially because
* internally 0xFFFF is a constant.
*/
if (LOWORD(iheFree) == 0xFFFF) {
/*
* Reserve this table entry so that PW(FFFF) maps to a
* NULL pointer. Set it to TYPE_FREE so the cleanup code doesn't think
* it is allocated. Set wUniq to 1 so that RevalidateHandles on FFFF
* will fail.
*/
pheT = &gSharedInfo.aheList[iheFree];
php->iheFree = (DWORD)pheT->phead;
RtlZeroMemory(pheT, sizeof(HANDLEENTRY));
pheT->bType = TYPE_FREE;
pheT->wUniq = 1;
goto TryFreeHandle;
}
/*
* Some wow apps, like WinProj, require even Window handles so we'll
 * accommodate them; build a list of the odd handles so they won't get lost
*/
if ((bType == TYPE_WINDOW) && (iheFree & 1)) {
/*
* The handle following iheFree is the next handle to try
*/
pheT = &gSharedInfo.aheList[iheFree];
php->iheFree = (DWORD)pheT->phead;
/*
* add the old first free HE to the free odd list (of indices)
*/
pheT->phead = (PHEAD)iheFreeOdd;
iheFreeOdd = pheT - gSharedInfo.aheList;
goto TryFreeHandle;
}
if (iheFree == 0) {
RIPMSG0(RIP_WARNING, "USER: HMAllocObject: out of handles\n");
/*
* In a very rare case we can't allocate any more handles but
 * we had some odd handles that couldn't be used; they now
 * become the free list, though usually iheFreeOdd == 0.
*/
php->iheFree = iheFreeOdd;
return NULL;
}
/*
* Now we have a free handle we can use, iheFree, so link in the Odd
* handles we couldn't use
*/
if (iheFreeOdd) {
DWORD iheNextFree;
/*
 * link the start of the free odd list right after the first free,
 * then walk the odd list until the end and link the end of the
 * odd list into the start of the free list.
*/
pheT = &gSharedInfo.aheList[iheFree];
iheNextFree = (DWORD)pheT->phead;
pheT->phead = (PHEAD)iheFreeOdd;
while (pheT->phead)
pheT = &gSharedInfo.aheList[(DWORD)pheT->phead];
pheT->phead = (PHEAD)iheNextFree;
}
/*
* Try to allocate the object. If this fails, bail out.
*/
bCreateFlags = gabObjectCreateFlags[bType];
switch (bType) {
case TYPE_WINDOW:
if (pdeskSrc == NULL) {
phead = (PHEAD)UserAllocPoolWithQuota(size, TAG_WINDOW);
break;
}
/*
* Fall through
*/
case TYPE_MENU:
case TYPE_HOOK:
case TYPE_CALLPROC:
#ifdef FE_IME
case TYPE_INPUTCONTEXT:
#endif
fPoolAlloc = FALSE;
/*
* Fail the allocation if the desktop is destroyed.
* LATER: GerardoB.
 * Change DesktopAlloc so it takes the pdesk; then move this check
* in there. Sometimes we call DesktopAlloc directly (TYPE_CLASS).
*/
if (pdeskSrc->dwDTFlags & DF_DESTROYED) {
RIPMSG1(RIP_WARNING, "HMAllocObject: pdeskSrc is destroyed:%#lx", pdeskSrc);
return NULL;
}
phead = (PHEAD)DesktopAlloc(pdeskSrc->hheapDesktop, size);
if (phead == NULL)
break;
LockDesktop(&((PSHROBJHEAD)phead)->rpdesk, pdeskSrc);
((PSHROBJHEAD)phead)->pSelf = (PBYTE)phead;
break;
default:
if (bCreateFlags & OCF_USEQUOTA)
phead = (PHEAD)UserAllocPoolWithQuota(size, gdwAllocTag[bType]);
else
phead = (PHEAD)UserAllocPool(size, gdwAllocTag[bType]);
break;
}
if (phead == NULL) {
RIPERR0(ERROR_NOT_ENOUGH_MEMORY,
RIP_WARNING,
"USER: HMAllocObject: out of memory\n");
return NULL;
}
/*
* If the memory came from pool, zero it.
*/
if (fPoolAlloc)
RtlZeroMemory(phead, size);
/*
* The free handle pointer points to the next free handle.
*/
pheT = &gSharedInfo.aheList[iheFree];
php->iheFree = (DWORD)pheT->phead;
/*
* Track high water mark for handle allocation.
*/
if ((DWORD)iheFree > giheLast) {
giheLast = iheFree;
}
/*
* Setup the handle contents, plus initialize the object header.
*/
pheT->bType = bType;
pheT->phead = phead;
if (bCreateFlags & OCF_PROCESSOWNED) {
if (ptiOwner != NULL) {
((PPROCOBJHEAD)phead)->ppi = ptiOwner->ppi;
if ((ptiOwner->TIF_flags & TIF_16BIT) && (ptiOwner->ptdb)) {
((PPROCOBJHEAD)phead)->hTaskWow = ptiOwner->ptdb->hTaskWow;
} else {
((PPROCOBJHEAD)phead)->hTaskWow = 0;
}
pheT->pOwner = ptiOwner->ppi;
} else
pheT->pOwner = NULL;
} else if (bCreateFlags & OCF_THREADOWNED) {
if (bCreateFlags & OCF_MARKTHREAD)
((PTHROBJHEAD)phead)->pti = ptiOwner;
pheT->pOwner = ptiOwner;
}
phead->h = HMHandleFromIndex(iheFree);
/*
* Return a handle entry pointer.
*/
return pheT->phead;
}
#if 0
#define HANDLEF_FREECHECK 0x80
VOID CheckHMTable(
PVOID pobj)
{
PHE pheT, pheMax;
if (giheLast) {
pheMax = &gSharedInfo.aheList[giheLast];
for (pheT = gSharedInfo.aheList; pheT <= pheMax; pheT++) {
if (pheT->bType == TYPE_FREE) {
continue;
}
if (pheT->phead == pobj && !(pheT->bFlags & HANDLEF_FREECHECK)) {
UserAssert(FALSE);
}
}
}
}
#endif
/***************************************************************************\
* HMFreeObject
*
* This destroys an object - the handle and the referenced memory. To check
* to see if destroying is ok, HMMarkObjectDestroy() should be called.
*
* 01-13-92 ScottLu Created.
\***************************************************************************/
BOOL HMFreeObject(
PVOID pobj)
{
PHE pheT;
WORD wUniqT;
PHANDLEPAGE php;
DWORD i;
DWORD iheCurrent;
PSHROBJHEAD phead;
PDESKTOP pdesk;
#ifdef DEBUG
PLR plrT, plrNextT;
#endif
/*
* Free the object first.
*/
pheT = HMPheFromObject(pobj);
UserAssert(((PHEAD)pobj)->cLockObj == 0);
#ifndef DEBUG
switch(pheT->bType) {
case TYPE_MENU:
case TYPE_WINDOW:
case TYPE_HOOK:
case TYPE_CALLPROC:
#ifdef FE_IME
case TYPE_INPUTCONTEXT:
#endif
phead = (PSHROBJHEAD)pobj;
pdesk = phead->rpdesk;
if (pdesk != NULL) {
UnlockDesktop(&phead->rpdesk);
DesktopFree(pdesk->hheapDesktop, (HANDLE)phead);
} else {
UserFreePool(phead);
}
break;
case TYPE_SETWINDOWPOS:
if (((PSMWP)(pobj))->acvr != NULL)
UserFreePool(((PSMWP)(pobj))->acvr);
// FALL THROUGH!!!
default:
UserFreePool((HANDLE)pobj);
break;
}
#else // DEBUG
#if 0
pheT->bFlags |= HANDLEF_FREECHECK; // marker for later check.
#endif
/*
* Validate by going through the handle entry so that we make sure pobj
* is not just pointing off into space. This may GP fault, but that's
* ok: this case should not ever happen if we're bug free.
*/
if (HMRevalidateHandle(pheT->phead->h) == NULL)
goto AlreadyFree;
switch (pheT->bType) {
#if 0
case TYPE_CURSOR:
/*
* Search all caches and make sure this bugger is not referenced
* and that the caches are cool.
*/
UserAssert(!(((PCURSOR)pobj)->CURSORF_flags & CURSORF_LINKED));
{
PCURSOR *ppcurT, *ppcurFirst;
PPROCESSINFO ppi;
ppcurFirst = &gpcurFirst;
for (ppcurT = ppcurFirst; *ppcurT != NULL; ppcurT = &((*ppcurT)->pcurNext)) {
if (*ppcurT == pobj) {
UserAssert(FALSE);
}
UserAssert(HtoP(PtoH(*ppcurT)));
}
for (ppi = gppiStarting; ppi != NULL; ppi = ppi->ppiNext) {
ppcurFirst = &ppi->pCursorCache;
for (ppcurT = ppcurFirst; *ppcurT != NULL; ppcurT = &((*ppcurT)->pcurNext)) {
if (*ppcurT == pobj) {
UserAssert(FALSE);
}
UserAssert(HtoP(PtoH(*ppcurT)));
}
}
}
UserFreePool((HANDLE)pobj);
break;
#endif
case TYPE_MENU:
case TYPE_WINDOW:
case TYPE_HOOK:
case TYPE_CALLPROC:
#ifdef FE_IME
case TYPE_INPUTCONTEXT:
#endif
phead = (PSHROBJHEAD)pobj;
pdesk = phead->rpdesk;
if (pdesk != NULL) {
UnlockDesktop(&phead->rpdesk);
if (DesktopFree(pdesk->hheapDesktop, pheT->phead))
goto AlreadyFree;
} else {
UserFreePool(phead);
}
break;
case TYPE_SETWINDOWPOS:
if (((PSMWP)(pobj))->acvr != NULL)
UserFreePool(((PSMWP)(pobj))->acvr);
/*
* fall through to default case.
*/
default:
UserFreePool((HANDLE)pobj);
break;
}
if (pheT->bType == TYPE_FREE) {
AlreadyFree:
RIPMSG1(RIP_ERROR, "Object already freed!!! %08lx", pheT);
return FALSE;
}
/*
* Go through and delete the lock records, if they exist.
*/
for (plrT = pheT->plr; plrT != NULL; plrT = plrNextT) {
/*
* Remember the next one before freeing this one.
*/
plrNextT = plrT->plrNext;
FreeLockRecord((HANDLE)plrT);
}
#endif
/*
* Clear the handle contents. Need to remember the uniqueness across
* the clear. Also, advance uniqueness on free so that uniqueness checking
* against old handles also fails.
*/
wUniqT = (WORD)((pheT->wUniq + 1) & HMUNIQBITS);
RtlZeroMemory(pheT, sizeof(HANDLEENTRY));
pheT->wUniq = wUniqT;
/*
* Change the handle type to TYPE_FREE so we know what type this handle
* is.
*/
pheT->bType = TYPE_FREE;
/*
* Put the handle on the free list of the appropriate page.
*/
php = gpHandlePages;
iheCurrent = pheT - gSharedInfo.aheList;
for (i = 0; i < gcHandlePages; ++i, ++php) {
if (iheCurrent < php->iheLimit) {
pheT->phead = (PHEAD)php->iheFree;
php->iheFree = iheCurrent;
break;
}
}
pheT->pOwner = NULL;
return TRUE;
}
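/*
 * Hedged illustration (compiled out; not in the original source): why
 * advancing wUniq in HMFreeObject invalidates stale handles. The 16-bit
 * index / 16-bit uniqueness split below is an assumption for the sketch;
 * the real layout is defined by HMINDEXBITS and HMUNIQBITS.
 */
#if 0
BOOL ToyRevalidateHandle(
    DWORD h)
{
    PHE phe = &gSharedInfo.aheList[h & 0x0000FFFF];  /* assumed index bits */
    WORD wUniq = (WORD)(h >> 16);                    /* assumed uniq bits */

    /*
     * A handle captured before the object was freed still carries the old
     * wUniq, so the comparison fails against the advanced value.
     */
    return phe->bType != TYPE_FREE && phe->wUniq == wUniq;
}
#endif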
/***************************************************************************\
* HMMarkObjectDestroy
*
* Marks an object for destruction, returns TRUE if object can be destroyed.
*
* 02-10-92 ScottLu Created.
\***************************************************************************/
BOOL HMMarkObjectDestroy(
PVOID pobj)
{
PHE phe;
phe = HMPheFromObject(pobj);
#ifdef DEBUG
/*
* Record where the object was marked for destruction.
*/
if (gfTrackLocks) {
if (!(phe->bFlags & HANDLEF_DESTROY)) {
PVOID pfn1, pfn2;
RtlGetCallersAddress(&pfn1, &pfn2);
HMRecordLock(pfn1, pobj, ((PHEAD)pobj)->cLockObj, 0);
}
}
#endif
/*
* Set the destroy flag so our unlock code will know we're trying to
* destroy this object.
*/
phe->bFlags |= HANDLEF_DESTROY;
/*
* If this object can't be destroyed, then CLEAR the HANDLEF_INDESTROY
* flag - because this object won't be currently "in destruction"!
* (if we didn't clear it, when it was unlocked it wouldn't get destroyed).
*/
if (((PHEAD)pobj)->cLockObj != 0) {
phe->bFlags &= ~HANDLEF_INDESTROY;
/*
* Return FALSE because we can't destroy this object.
*/
return FALSE;
}
#ifdef DEBUG
/*
* Ensure that this function only returns TRUE once.
*/
UserAssert(!(phe->bFlags & HANDLEF_MARKED_OK));
phe->bFlags |= HANDLEF_MARKED_OK;
#endif
/*
* Return TRUE because Lock count is zero - ok to destroy this object.
*/
return TRUE;
}
/***************************************************************************\
* HMDestroyObject
*
* This routine handles destruction of non-secure objects.
*
* 10-13-94 JimA Created.
\***************************************************************************/
BOOL HMDestroyObject(
PVOID pobj)
{
PHE phe;
phe = HMPheFromObject(pobj);
/*
* First mark the object for destruction. This tells the locking code
* that we want to destroy this object when the lock count goes to 0.
* If this returns FALSE, we can't destroy the object yet (and can't get
* rid of security yet either.)
*/
if (!HMMarkObjectDestroy(pobj))
return FALSE;
/*
* Ok to destroy... Free the handle (which will free the object
* and the handle).
*/
HMFreeObject(pobj);
return TRUE;
}
/***************************************************************************\
* HMRecordLock
*
* This routine records a lock on a "lock list", so that locks and unlocks
* can be tracked in the debugger. Only called if gfTrackLocks == TRUE.
*
* 02-27-92 ScottLu Created.
\***************************************************************************/
#ifdef DEBUG
NTSTATUS
InitLockRecordLookaside()
{
ULONG BlockSize;
ULONG InitialSegmentSize;
BlockSize = (sizeof(LOCKRECORD) + 7) & ~7;
InitialSegmentSize = 1000 * BlockSize + sizeof(ZONE_SEGMENT_HEADER);
LockRecordLookasideBase = UserAllocPool(InitialSegmentSize, TAG_LOOKASIDE);
if ( !LockRecordLookasideBase ) {
return STATUS_NO_MEMORY;
}
LockRecordLookasideBounds = (PVOID)((PUCHAR)LockRecordLookasideBase + InitialSegmentSize);
return ExInitializeZone(&LockRecordLookasideZone,
BlockSize,
LockRecordLookasideBase,
InitialSegmentSize);
}
PLR AllocLockRecord()
{
PLR plr;
/*
* Attempt to get a LOCKRECORD from the zone. If this fails, then
 * allocate the LOCKRECORD from pool
*/
plr = ExAllocateFromZone(&LockRecordLookasideZone);
if ( !plr ) {
/*
 * Allocate a lock record from pool instead.
*/
AllocLockRecordSlowCalls++;
if ((plr = (PLR)UserAllocPool(sizeof(LOCKRECORD), TAG_LOCKRECORD)) == NULL)
return NULL;
}
RtlZeroMemory(plr, sizeof(*plr));
AllocLockRecordCalls++;
    if (AllocLockRecordCalls - DelLockRecordCalls > AllocLockRecordHiWater) {
        AllocLockRecordHiWater = AllocLockRecordCalls - DelLockRecordCalls;
}
return plr;
}
void FreeLockRecord(
PLR plr)
{
DelLockRecordCalls++;
/*
* If the plr was from zone, then free to zone
*/
if ( (PVOID)plr >= LockRecordLookasideBase && (PVOID)plr < LockRecordLookasideBounds ) {
ExFreeToZone(&LockRecordLookasideZone, plr);
} else {
DelLockRecordSlowCalls++;
UserFreePool((HLOCAL)plr);
}
}
void HMRecordLock(
PVOID ppobj,
PVOID pobj,
DWORD cLockObj,
PVOID pfn)
{
PHE phe;
PLR plr;
int i;
phe = HMPheFromObject(pobj);
if ((plr = AllocLockRecord()) == NULL)
return;
plr->plrNext = phe->plr;
phe->plr = plr;
if (((PHEAD)pobj)->cLockObj > cLockObj) {
i = (int)cLockObj;
i = -i;
cLockObj = (DWORD)i;
}
plr->ppobj = ppobj;
plr->cLockObj = cLockObj;
plr->pfn = pfn;
return;
}
#endif // DEBUG
/***************************************************************************\
* HMLockObject
*
* This routine locks an object. This is a macro in retail systems.
*
* 02-24-92 ScottLu Created.
\***************************************************************************/
#ifdef DEBUG
void HMLockObject(
PVOID pobj)
{
HANDLE h;
PVOID pobjValidate;
/*
* Validate by going through the handle entry so that we make sure pobj
* is not just pointing off into space. This may GP fault, but that's
* ok: this case should not ever happen if we're bug free.
*/
h = HMPheFromObject(pobj)->phead->h;
pobjValidate = HMRevalidateHandle(h);
if (!pobj || pobj != pobjValidate) {
RIPMSG2(RIP_ERROR,
"HMLockObject called with invalid object = %08lx, handle = %08lx",
pobj, h);
return;
}
/*
* Inc the reference count.
*/
((PHEAD)pobj)->cLockObj++;
if (((PHEAD)pobj)->cLockObj == 0)
RIPMSG1(RIP_ERROR, "Object lock count has overflowed: %08lx", pobj);
}
#endif // DEBUG
/***************************************************************************\
* HMUnlockObject
*
* This routine unlocks an object. pobj is returned if the object is still
* around after the unlock.
*
* 01-21-92 ScottLu Created.
\***************************************************************************/
PVOID HMUnlockObjectInternal(
PVOID pobj)
{
PHE phe;
/*
* The object is not reference counted. If the object is not a zombie,
* return success because the object is still around.
*/
phe = HMPheFromObject(pobj);
if (!(phe->bFlags & HANDLEF_DESTROY))
return pobj;
/*
* We're destroying the object based on an unlock... Make sure it isn't
* currently being destroyed! (It is valid to have lock counts go from
* 0 to != 0 to 0 during destruction... don't want recursion into
 * the destroy routine.)
*/
if (phe->bFlags & HANDLEF_INDESTROY)
return pobj;
HMDestroyUnlockedObject(phe);
return NULL;
}
/***************************************************************************\
* HMAssignmentLock
*
* This api is used for structure and global variable assignment.
* Returns pobjOld if the object was *not* destroyed. Means the object is
* still valid.
*
* 02-24-92 ScottLu Created.
\***************************************************************************/
PVOID FASTCALL HMAssignmentLock(
PVOID *ppobj,
PVOID pobj)
{
PVOID pobjOld;
pobjOld = *ppobj;
*ppobj = pobj;
/*
* Unlocks the old, locks the new.
*/
if (pobjOld != NULL) {
#ifdef DEBUG
PVOID pfn1, pfn2;
/*
* If DEBUG && gfTrackLocks, track assignment locks.
*/
if (gfTrackLocks) {
RtlGetCallersAddress(&pfn1, &pfn2);
if (!HMUnrecordLock(ppobj, pobjOld)) {
HMRecordLock(ppobj, pobjOld, ((PHEAD)pobjOld)->cLockObj - 1, pfn1);
}
}
#endif
/*
* if we are locking in the same object that is there then
* it is a no-op but we don't want to do the Unlock and the Lock
* because the unlock could free object and the lock would lock
* in a freed pointer; 6410.
*/
if (pobjOld == pobj) {
return pobjOld;
}
}
if (pobj != NULL) {
#ifdef DEBUG
PVOID pfn1, pfn2;
UserAssert(HMValidateHandle(((PHEAD)pobj)->h, TYPE_GENERIC));
/*
* If DEBUG && gfTrackLocks, track assignment locks.
*/
if (gfTrackLocks) {
RtlGetCallersAddress(&pfn1, &pfn2);
HMRecordLock(ppobj, pobj, ((PHEAD)pobj)->cLockObj + 1, pfn1);
if (HMIsMarkDestroy(pobj))
RIPMSG1(RIP_WARNING, "Locking object marked for destruction (%lX)", pobj);
}
#endif
HMLockObject(pobj);
}
/*
* This unlock has been moved from up above, so that we implement a
* "lock before unlock" strategy. Just in case pobjOld was the
* only object referencing pobj, pobj won't go away when we unlock
* pobjNew -- it will have been locked above.
*/
if (pobjOld) {
pobjOld = HMUnlockObject(pobjOld);
}
return pobjOld;
}
/***************************************************************************\
* HMAssignmentUnlock
*
* This api is used for structure and global variable assignment.
* Returns pobjOld if the object was *not* destroyed. Means the object is
* still valid.
*
* 02-24-92 ScottLu Created.
\***************************************************************************/
PVOID FASTCALL HMAssignmentUnlock(
PVOID *ppobj)
{
PVOID pobjOld;
pobjOld = *ppobj;
*ppobj = NULL;
/*
* Unlocks the old, locks the new.
*/
if (pobjOld != NULL) {
#ifdef DEBUG
PVOID pfn1, pfn2;
/*
* If DEBUG && gfTrackLocks, track assignment locks.
*/
if (gfTrackLocks) {
RtlGetCallersAddress(&pfn1, &pfn2);
if (!HMUnrecordLock(ppobj, pobjOld)) {
HMRecordLock(ppobj, pobjOld, ((PHEAD)pobjOld)->cLockObj - 1, pfn1);
}
}
#endif
pobjOld = HMUnlockObject(pobjOld);
}
return pobjOld;
}
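/*
 * Hedged usage sketch (compiled out; not in the original source): the
 * assignment-lock idiom the two routines above support. The FOO structure
 * and its pwndOwner member are hypothetical; only HMAssignmentLock and
 * HMAssignmentUnlock are taken from this file.
 */
#if 0
typedef struct tagFOO {
    PWND pwndOwner;
} FOO;

void SetFooOwner(
    FOO *pfoo,
    PWND pwndNew)
{
    /*
     * Locks pwndNew first, then unlocks the previous owner, so an object
     * referenced only through this field cannot vanish mid-assignment.
     */
    HMAssignmentLock((PVOID *)&pfoo->pwndOwner, pwndNew);
}

void ClearFooOwner(
    FOO *pfoo)
{
    /*
     * NULLs the field and unlocks; the old object is destroyed here if it
     * was marked HANDLEF_DESTROY and this was its last lock.
     */
    HMAssignmentUnlock((PVOID *)&pfoo->pwndOwner);
}
#endif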
/***************************************************************************\
* IsValidThreadLock
*
* This routine checks to make sure that the thread lock structures passed
* in are valid.
*
* 03-17-92 ScottLu Created.
\***************************************************************************/
#ifdef DEBUG
VOID
IsValidThreadLock(
PTHREADINFO pti,
PTL ptl)
{
PETHREAD Thread = PsGetCurrentThread();
if (ptl->pti != pti) {
RIPMSG1(RIP_ERROR,
"This thread lock does not belong to this thread %08lx\n",
ptl);
}
UserAssert((DWORD)ptl > (DWORD)&Thread);
UserAssert((DWORD)ptl < (DWORD)KeGetCurrentThread()->StackBase);
}
#endif
/***************************************************************************\
* ValidateThreadLocks
*
* This routine validates the thread lock list of a thread.
*
* 03-10-92 ScottLu Created.
\***************************************************************************/
#ifdef DEBUG
void
ValidateThreadLocks(
PTL NewLock,
PTL OldLock)
{
PTHREADINFO ptiCurrent = PtiCurrent();
/*
* Validate the new thread lock.
*/
IsValidThreadLock(ptiCurrent, NewLock);
/*
* Loop through the list of thread locks and check to make sure the
* new lock is not in the list and that list is valid.
*/
while (OldLock != NULL) {
/*
* The new lock must not be the same as the old lock.
*/
if (NewLock == OldLock) {
RIPMSG1(RIP_ERROR,
"This thread lock address is already in the thread list %08lx\n",
NewLock);
}
/*
* Validate the old thread lock.
*/
IsValidThreadLock(ptiCurrent, OldLock);
OldLock = OldLock->next;
}
}
#endif
/***************************************************************************\
* ThreadLock
*
* This api is used for locking objects across callbacks, so they are still
* there when the callback returns.
*
* 03-04-92 ScottLu Created.
\***************************************************************************/
#ifdef DEBUG
void
ThreadLock(
PVOID pobj,
PTL ptl)
{
PTHREADINFO ptiCurrent;
PVOID pfnT;
/*
* This is a handy place, because it is called so often, to see if User is
* eating up too much stack.
*/
UserAssert(((DWORD)&pfnT - (DWORD)KeGetCurrentThread()->StackLimit) > KERNEL_STACK_MINIMUM_RESERVE);
/*
* Store the address of the object in the thread lock structure and
* link the structure into the thread lock list.
*
* N.B. The lock structure is always linked into the thread lock list
* regardless of whether the object address is NULL. The reason
* this is done is so the lock address does not need to be passed
* to the unlock function since the first entry in the lock list
* is always the entry to be unlocked.
*/
UserAssert(HtoP(PtoH(pobj)) == pobj);
UserAssert(!(PpiCurrent()->W32PF_Flags & W32PF_TERMINATED));
ptiCurrent = PtiCurrent();
UserAssert(ptiCurrent);
/*
* Get the callers address and validate the thread lock list.
*/
RtlGetCallersAddress(&ptl->pfn, &pfnT);
ptl->pti = ptiCurrent;
ValidateThreadLocks(ptl, ptiCurrent->ptl);
ptl->next = ptiCurrent->ptl;
ptiCurrent->ptl = ptl;
ptl->pobj = pobj;
if (pobj != NULL) {
HMLockObject(pobj);
}
return;
}
#endif
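/*
 * Hedged usage sketch (compiled out; not in the original source): the
 * stack-based pattern ThreadLock/ThreadUnlock implement. xxxCallClient is
 * a stand-in for any routine that may leave the critical section.
 */
#if 0
void xxxExampleCallback(
    PWND pwnd)
{
    TL tlpwnd;

    ThreadLock(pwnd, &tlpwnd);      /* keep pwnd alive across the callback */
    xxxCallClient(pwnd);            /* may leave the critical section */

    /*
     * ThreadUnlock returns NULL if the window was destroyed while we were
     * away, so only touch pwnd again if the return is non-NULL.
     */
    if (ThreadUnlock(&tlpwnd) != NULL) {
        /* pwnd is still valid here */
    }
}
#endif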
/*
* The thread locking routines should be optimized for time, not size,
* since they get called so often.
*/
#pragma optimize("t", on)
/***************************************************************************\
* ThreadUnlock1
*
* This api unlocks a thread locked object. Returns pobj if the object
* was *not* destroyed (meaning the pointer is still valid).
*
* N.B. In a free build the first entry in the thread lock list is unlocked.
*
* 03-04-92 ScottLu Created.
\***************************************************************************/
#ifdef DEBUG
PVOID
ThreadUnlock1(
PTL ptlIn)
#else
PVOID
ThreadUnlock1(
VOID)
#endif
{
PHEAD phead;
PTHREADINFO ptiCurrent;
PTL ptl;
/*
* Remove the thread lock structure from the thread lock list.
*/
ptiCurrent = PtiCurrent();
ptl = ptiCurrent->ptl;
/*
* make sure that the ptl we are looking at is on the stack before
* our current stack position but not before the beginning of the stack
*/
UserAssert((DWORD)ptl > (DWORD)&ptl);
UserAssert((DWORD)ptl < (DWORD)KeGetCurrentThread()->StackBase);
UserAssert(((DWORD)ptl->next > (DWORD)&ptl) || ((DWORD)ptl->next == 0));
UserAssert(ptlIn == ptl);
ptiCurrent->ptl = ptl->next;
#ifdef DEBUG
/*
* Validate the thread lock list.
*/
ValidateThreadLocks(ptl, ptiCurrent->ptl);
#endif
/*
* If the object address is not NULL, then unlock the object.
*/
phead = (PHEAD)(ptl->pobj);
if (phead != NULL) {
/*
* Unlock the object.
*/
phead = (PHEAD)HMUnlockObject(phead);
}
return (PVOID)phead;
}
/*
* Switch back to default optimization.
*/
#pragma optimize("", on)
/***************************************************************************\
* CheckLock
*
* This routine only exists in DEBUG builds - it checks to make sure objects
* are thread locked.
*
* 03-09-92 ScottLu Created.
\***************************************************************************/
#ifdef DEBUG
void CheckLock(
PVOID pobj)
{
PTHREADINFO ptiCurrent;
PTL ptl;
if (pobj == NULL)
return;
// if (KeGetPreviousMode() != UserMode)
// return;
ptiCurrent = PtiCurrentShared();
UserAssert(ptiCurrent);
for (ptl = ptiCurrent->ptl; ptl != NULL; ptl=ptl->next) {
if (ptl->pobj == pobj) return;
}
/*
* WM_FINALDESTROY messages get sent without thread locking, so if
* marked for destruction, don't print the message.
*/
if (HMPheFromObject(pobj)->bFlags & HANDLEF_DESTROY)
return;
RIPMSG1(RIP_ERROR, "Object not thread locked! 0x%08lx", pobj);
}
#endif
/***************************************************************************\
* HMDestroyUnlockedObject
*
* We're destroying the object based on an unlock... which means we could
* be destroying this object in a context different than the one that
* created it. This is very important to understand since in lots of code
* the "current thread" is referenced and assumed as the creator.
*
* 02-10-92 ScottLu Created.
\***************************************************************************/
void HMDestroyUnlockedObject(
PHE phe)
{
PTHREADINFO ptiCurrent;
/*
* The object has been unlocked and needs to be destroyed. Change
* the ownership on this object to be the current thread: this'll
* make sure DestroyWindow() doesn't send destroy messages across
* threads.
*/
if (gabObjectCreateFlags[phe->bType] & OCF_PROCESSOWNED) {
((PPROCOBJHEAD)phe->phead)->ppi = (PPROCESSINFO)phe->pOwner =
PpiCurrent();
}
/*
* Remember that we're destroying this object so we don't try to destroy
* it again when the lock count goes from != 0 to 0 (especially true
* for thread locks).
*/
phe->bFlags |= HANDLEF_INDESTROY;
/*
* This'll call the destroy handler for this object type.
*/
switch(phe->bType) {
case TYPE_CURSOR:
_DestroyCursor((PCURSOR)phe->phead, CURSOR_THREADCLEANUP);
break;
case TYPE_HOOK:
FreeHook((PHOOK)phe->phead);
break;
case TYPE_ACCELTABLE:
case TYPE_SETWINDOWPOS:
case TYPE_CALLPROC:
/*
* Mark the object for destruction - if it says it's ok to free,
* then free it.
*/
if (HMMarkObjectDestroy(phe->phead))
HMFreeObject(phe->phead);
break;
case TYPE_MENU:
_DestroyMenu((PMENU)phe->phead);
break;
case TYPE_WINDOW:
ptiCurrent = PtiCurrent();
if ((PTHREADINFO)phe->pOwner != ptiCurrent) {
UserAssert(PsGetCurrentThread()->Tcb.Win32Thread);
if(PsGetCurrentThread()->Tcb.Win32Thread == NULL)
break;
HMChangeOwnerThread(phe->phead, ptiCurrent);
}
xxxDestroyWindow((PWND)phe->phead);
break;
case TYPE_DDECONV:
FreeDdeConv((PDDECONV)phe->phead);
break;
case TYPE_DDEXACT:
FreeDdeXact((PXSTATE)phe->phead);
break;
case TYPE_KBDLAYOUT:
DestroyKL((PKL)phe->phead);
break;
case TYPE_KBDFILE:
/*
* Remove keyboard file from global list.
*/
RemoveKeyboardLayoutFile((PKBDFILE)phe->phead);
UserFreePool(((PKBDFILE)phe->phead)->hBase);
HMFreeObject(phe->phead);
break;
#ifdef FE_IME
case TYPE_INPUTCONTEXT:
FreeInputContext((PIMC)phe->phead);
break;
#endif
}
}
/***************************************************************************\
* HMChangeOwnerThread
*
* Changes the owning thread of an object.
*
* 09-13-93 JimA Created.
\***************************************************************************/
VOID HMChangeOwnerThread(
PVOID pobj,
PTHREADINFO pti)
{
PHE phe = HMPheFromObject(pobj);
PTHREADINFO ptiOld = ((PTHROBJHEAD)(pobj))->pti;
PWND pwnd;
PPCLS ppcls;
PPROCESSINFO ppi;
if (gabObjectCreateFlags[phe->bType] & OCF_MARKTHREAD)
((PTHROBJHEAD)(pobj))->pti = pti;
phe->pOwner = pti;
/*
* If this is a window, update the window counts.
*/
if (phe->bType == TYPE_WINDOW) {
UserAssert(ptiOld->cWindows > 0);
ptiOld->cWindows--;
pti->cWindows++;
/*
* If the owning process is changing, fix up
* the window class.
*/
if (pti->ppi != ptiOld->ppi) {
pwnd = (PWND)pobj;
ppcls = GetClassPtr(pwnd->pcls->atomClassName, pti->ppi, hModuleWin);
UserAssert(ppcls);
if (ppcls == NULL) {
if (pwnd->head.rpdesk)
ppi = pwnd->head.rpdesk->rpwinstaParent->ptiDesktop->ppi;
else
ppi = PpiCurrent();
ppcls = GetClassPtr(gpsi->atomSysClass[ICLS_ICONTITLE], ppi, hModuleWin);
}
UserAssert(ppcls);
DereferenceClass(pwnd);
pwnd->pcls = *ppcls;
ReferenceClass(pwnd->pcls, pwnd);
}
}
}
/***************************************************************************\
* DestroyThreadsObjects
*
* Goes through the handle table list and destroy all objects owned by this
* thread, because the thread is going away (either nicely, it faulted, or
* was terminated). It is ok to destroy the objects in any order, because
* object locking will ensure that they get destroyed in the right order.
*
* This routine gets called in the context of the thread that is exiting.
*
* 02-08-92 ScottLu Created.
\***************************************************************************/
VOID DestroyThreadsObjects()
{
PTHREADINFO ptiCurrent;
HANDLEENTRY volatile * (*pphe);
PHE pheT;
DWORD i;
ptiCurrent = PtiCurrent();
/*
* Before any window destruction occurs, we need to destroy any dcs
* in use in the dc cache. When a dc is checked out, it is marked owned,
* which makes gdi's process cleanup code delete it when a process
* goes away. We need to similarly destroy the cache entry of any dcs
* in use by the exiting process.
*/
DestroyCacheDCEntries(ptiCurrent);
/*
* Remove any thread locks that may exist for this thread.
*/
while (ptiCurrent->ptl != NULL) {
UserAssert((DWORD)ptiCurrent->ptl > (DWORD)&i);
UserAssert((DWORD)ptiCurrent->ptl < (DWORD)KeGetCurrentThread()->StackBase);
ThreadUnlock(ptiCurrent->ptl);
}
while (ptiCurrent->ptlOb != NULL) {
ThreadUnlockObject(ptiCurrent);
}
while (ptiCurrent->ptlPool != NULL) {
ThreadUnlockAndFreePool(ptiCurrent, ptiCurrent->ptlPool);
}
/*
* Loop through the table destroying all objects created by the current
* thread. All objects will get destroyed in their proper order simply
* because of the object locking.
*/
pphe = &gSharedInfo.aheList;
for (i = 0; i <= giheLast; i++) {
/*
* This pointer is done this way because it can change when we leave
* the critical section below. The above volatile ensures that we
* always use the most current value
*/
pheT = (PHE)((*pphe) + i);
/*
* Check against free before we look at pti... because pq is stored
* in the object itself, which won't be there if TYPE_FREE.
*/
if (pheT->bType == TYPE_FREE)
continue;
/*
 * If a menu references a window owned by this thread, unlock
* the window. This is done to prevent calling xxxDestroyWindow
* during process cleanup.
*/
if (gabObjectCreateFlags[pheT->bType] & OCF_PROCESSOWNED) {
if (pheT->bType == TYPE_MENU) {
PWND pwnd = ((PMENU)pheT->phead)->spwndNotify;
if (pwnd != NULL && GETPTI(pwnd) == ptiCurrent)
Unlock(&((PMENU)pheT->phead)->spwndNotify);
}
continue;
}
/*
* Destroy those objects created by this queue.
*/
if ((PTHREADINFO)pheT->pOwner != ptiCurrent)
continue;
/*
* Make sure this object isn't already marked to be destroyed - we'll
* do no good if we try to destroy it now since it is locked.
*/
if (pheT->bFlags & HANDLEF_DESTROY) {
continue;
}
/*
* Destroy this object.
*/
HMDestroyUnlockedObject(pheT);
}
}
#ifdef DEBUG
LPCSTR aszObjectTypes[TYPE_CTYPES] = {
"Free",
"Window",
"Menu",
"Icon/Cursor",
"WPI(SWP) structure",
"Hook",
"ThreadInfo",
"Input Queue",
"CallProcData",
"Accelerator",
"DDE access",
"DDE conv",
"DDE Transaction",
"Zombie",
"Keyboard Layout",
#ifdef FE_IME
"Input Context",
#endif
};
#endif
#ifdef DEBUG
VOID ShowLocks(
PHE phe)
{
PLR plr = phe->plr;
INT c;
KdPrint(("USERSRV: Lock records for %s %lx:\n",
aszObjectTypes[phe->bType], phe->phead->h));
/*
* We have the handle entry: 'head' and 'he' are both filled in. Dump
* the lock records. Remember the first record is the last transaction!!
*/
c = 0;
while (plr != NULL) {
char achPrint[80];
if (plr->pfn == NULL) {
strcpy(achPrint, "Destroyed with");
} else if ((int)plr->cLockObj <= 0) {
strcpy(achPrint, " Unlock");
} else {
/*
* Find corresponding unlock;
*/
{
PLR plrUnlock;
DWORD cT;
DWORD cUnlock;
plrUnlock = phe->plr;
cT = 0;
cUnlock = (DWORD)-1;
while (plrUnlock != plr) {
if (plrUnlock->ppobj == plr->ppobj) {
if ((int)plrUnlock->cLockObj <= 0) {
// a matching unlock found
cUnlock = cT;
} else {
// the unlock #cUnlock matches this lock #cT, thus
// #cUnlock is not the unlock we were looking for.
cUnlock = (DWORD)-1;
}
}
plrUnlock = plrUnlock->plrNext;
cT++;
}
if (cUnlock == (DWORD)-1) {
/*
* Corresponding unlock not found!
* This may not mean something is wrong: the structure
* containing the pointer to the object may have moved
* during a reallocation. This can cause ppobj at Unlock
* time to differ from that recorded at Lock time.
* (Warning: moving structures like this may cause a Lock
* and an Unlock to be misidentified as a pair, if by a
* stroke of incredibly bad luck, the new location of a
* pointer to an object is now where an old pointer to the
* same object used to be)
*/
sprintf(achPrint, "Unmatched Lock");
} else {
sprintf(achPrint, "lock #%ld", cUnlock);
}
}
}
KdPrint((" %s cLock=%d, pobj at 0x%08lx, code at 0x%08lx\n",
achPrint,
abs((int)plr->cLockObj),
plr->ppobj,
plr->pfn));
plr = plr->plrNext;
c++;
}
RIPMSG1(RIP_WARNING, " 0x%lx records\n", c);
}
#endif
/***************************************************************************\
* DestroyProcessesObjects
*
* Goes through the handle table list and destroy all objects owned by this
* process, because the process is going away (either nicely, it faulted, or
* was terminated). It is ok to destroy the objects in any order, because
* object locking will ensure that they get destroyed in the right order.
*
* This routine gets called in the context of the last thread in the process.
*
* 08-17-92 JimA Created.
\***************************************************************************/
VOID DestroyProcessesObjects(
PPROCESSINFO ppi)
{
PHE pheT, pheMax;
/*
* Loop through the table destroying all objects created by the current
* process. All objects will get destroyed in their proper order simply
* because of the object locking.
*/
pheMax = &gSharedInfo.aheList[giheLast];
for (pheT = gSharedInfo.aheList; pheT <= pheMax; pheT++) {
/*
* Check against free before we look at ppi... because pq is stored
* in the object itself, which won't be there if TYPE_FREE.
*/
if (pheT->bType == TYPE_FREE)
continue;
/*
* Destroy those objects created by this queue.
*/
if (!(gabObjectCreateFlags[pheT->bType] & OCF_PROCESSOWNED) ||
(PPROCESSINFO)pheT->pOwner != ppi)
continue;
/*
* Make sure this object isn't already marked to be destroyed - we'll
* do no good if we try to destroy it now since it is locked.
*/
if (pheT->bFlags & HANDLEF_DESTROY) {
/*
* Clear this so it isn't referenced after being freed.
*/
pheT->pOwner = NULL;
continue;
}
/*
* Destroy this object.
*/
HMDestroyUnlockedObject(pheT);
}
}
/***************************************************************************\
* MarkThreadsObjects
*
* This is called for the *final* exiting condition when a thread
* may have objects still around... in which case their owner must
* be changed to something "safe" that won't be going away.
*
* 03-02-92 ScottLu Created.
\***************************************************************************/
void MarkThreadsObjects(
PTHREADINFO pti)
{
PHE pheT, pheMax;
pheMax = &gSharedInfo.aheList[giheLast];
for (pheT = gSharedInfo.aheList; pheT <= pheMax; pheT++) {
/*
* Check against free before we look at pti... because pti is stored
* in the object itself, which won't be there if TYPE_FREE.
*/
if (pheT->bType == TYPE_FREE)
continue;
/*
* Change ownership!
*/
if (gabObjectCreateFlags[pheT->bType] & OCF_PROCESSOWNED ||
(PTHREADINFO)pheT->pOwner != pti)
continue;
HMChangeOwnerThread(pheT->phead, gptiRit);
#ifdef DEBUG
#ifdef DEBUG_LOCKS
/*
* Object still around: print warning message.
*/
if (pheT->bFlags & HANDLEF_DESTROY) {
if ((pheT->phead->cLockObj == 1)
&& (pheT->bFlags & HANDLEF_INWAITFORDEATH)) {
RIPMSG1(RIP_WARNING,
"USERSRV Warning: Only killer has thread object 0x%08lx locked (OK).\n",
pheT->phead->h);
} else {
RIPMSG2(RIP_WARNING,
"USERSRV Warning: Zombie %s 0x%08lx still locked\n",
aszObjectTypes[pheT->bType], pheT->phead->h);
}
} else {
RIPMSG1(RIP_WARNING, "USERSRV Warning: Thread object 0x%08lx not destroyed.\n", pheT->phead->h);
}
if (gfTrackLocks) {
ShowLocks(pheT);
}
#endif // DEBUG_LOCKS
#endif // DEBUG
}
}
/***************************************************************************\
* HMRelocateLockRecord
*
* If a pointer to a locked object has been relocated, then this routine will
* adjust the lock record accordingly. Must be called after the relocation.
*
* The arguments are:
* ppobjNew - the address of the new pointer
* MUST already contain the pointer to the object!!
* cbDelta - the amount by which this pointer was moved.
*
* Using this routine appropriately will prevent spurious "unmatched lock"
* reports. See mnchange.c for an example.
*
*
* 03-18-93 IanJa Created.
\***************************************************************************/
#ifdef DEBUG
BOOL HMRelocateLockRecord(
PVOID ppobjNew,
int cbDelta)
{
PHE phe;
PVOID ppobjOld = (PBYTE)ppobjNew - cbDelta;
PHEAD pobj;
PLR plr;
if (ppobjNew == NULL) {
return FALSE;
}
pobj = *(PHEAD *)ppobjNew;
if (pobj == NULL) {
return FALSE;
}
phe = HMPheFromObject(pobj);
if (phe->phead != pobj) {
KdPrint(("HmRelocateLockRecord(%lx, %lx) - %lx is bad pobj\n",
ppobjNew, cbDelta, pobj));
return FALSE;
}
plr = phe->plr;
while (plr != NULL) {
if (plr->ppobj == ppobjOld) {
(PBYTE)(plr->ppobj) += cbDelta;
return TRUE;
}
plr = plr->plrNext;
}
KdPrint(("HmRelocateLockRecord(%lx, %lx) - couldn't find lock record\n",
ppobjNew, cbDelta));
ShowLocks(phe);
return FALSE;
}
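/*
 * Hedged usage sketch (compiled out; not in the original source): fixing up
 * a lock record after a structure holding a locked pointer is reallocated
 * and moves. The FOO structure and its spwndNotify member are hypothetical;
 * see mnchange.c for the real example referenced above.
 */
#if 0
FOO *GrowFoo(
    FOO *pfoo,
    DWORD cbOld,
    DWORD cbNew)
{
    FOO *pfooNew = UserReAllocPool(pfoo, cbOld, cbNew, TAG_SYSTEM);

    if (pfooNew != NULL && pfooNew != pfoo) {
        /*
         * The locked pointer now lives at a new address, so adjust its
         * lock record by the distance the structure moved.
         */
        HMRelocateLockRecord(&pfooNew->spwndNotify,
                             (int)((PBYTE)pfooNew - (PBYTE)pfoo));
    }
    return pfooNew;
}
#endif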
BOOL HMUnrecordLock(
PVOID ppobj,
PVOID pobj)
{
PHE phe;
PLR plr;
PLR *pplr;
phe = HMPheFromObject(pobj);
pplr = &(phe->plr);
plr = *pplr;
/*
* Find corresponding lock;
*/
while (plr != NULL) {
if (plr->ppobj == ppobj) {
/*
* Remove the lock from the list...
*/
*pplr = plr->plrNext; // unlink it
plr->plrNext = NULL; // make the dead entry safe (?)
/*
* ...and free it.
*/
FreeLockRecord(plr);
return TRUE;
}
pplr = &(plr->plrNext);
plr = *pplr;
}
return FALSE;
}
#endif // DEBUG
/***************************************************************************\
* HMGetStats
*
* This function traverses the handle table and calculates statistics,
* either for the entire system or for objects owned by a specific
* process (and its threads)
*
* Parameters:
* hProcess - handle to the process to query for information about
* iPidType - whether to query for just the process pointed
* to by hProcess or all objects in the table
* (OBJECT_OWNER_CURRENT vs. OBJECT_OWNER_IGNORE)
* pResults - Pointer to the buffer to fill the data into
* cjResultSize - Size of the buffer to fill
*
* The user buffer is expected to be zero'd.
*
* returns: Success state
* 07-31-95 t-andsko Created.
\***************************************************************************/
#define OBJECT_OWNER_IGNORE (0x0001)
#define NUM_USER_TYPES (TYPE_CTYPES + 1)
NTSTATUS HMGetStats(
IN HANDLE hProcess,
IN int iPidType,
OUT PVOID pResults,
IN UINT cjResultSize)
{
NTSTATUS iRet = STATUS_SUCCESS;
LPDWORD pdwRes = (DWORD *) pResults; //Pointer to the result buffer
PEPROCESS peProcessInfo; //Pointer to process structure
PHE pheCurPos; //Current position in the table
PHE pheMax; //address of last table entry
//Check permissions flag
if (!( (*(DWORD *)NtGlobalFlag) & FLG_POOL_ENABLE_TAGGING))
{
iRet = STATUS_ACCESS_DENIED;
return iRet;
}
//Check buffer is large enough to take the results
if (cjResultSize < NUM_USER_TYPES)
{
iRet = STATUS_BUFFER_TOO_SMALL;
return iRet;
}
if (iPidType == OBJECT_OWNER_CURRENT)
{
//Get pointer to EPROCESS structure from the handle
iRet = PsLookupProcessByProcessId(hProcess, &peProcessInfo);
}
if (NT_SUCCESS(iRet))
{
try
{
//Test buffer
ProbeForWrite(pResults, cjResultSize, sizeof(UCHAR));
//now traverse the handle table to count
pheMax = &gSharedInfo.aheList[giheLast];
for(pheCurPos = gSharedInfo.aheList; pheCurPos <= pheMax; pheCurPos++)
{
if (iPidType == (int) OBJECT_OWNER_IGNORE)
{
if (pheCurPos->bType != TYPE_GENERIC)
pdwRes[pheCurPos->bType]++;
}
else
{
if ((pheCurPos->bType == TYPE_FREE) || (pheCurPos->bType == TYPE_GENERIC))
continue;
UserAssert(pheCurPos->bType < NUM_USER_TYPES);
if (gabObjectCreateFlags[pheCurPos->bType] & OCF_PROCESSOWNED)
{
//Object is owned by process
//some objects may not have an owner
if (pheCurPos->pOwner)
{
if (((PPROCESSINFO)pheCurPos->pOwner)->
Process == peProcessInfo)
{
pdwRes[pheCurPos->bType]++;
}
}
}
else if (gabObjectCreateFlags[pheCurPos->bType] & OCF_THREADOWNED)
{
//Object owned by thread
if (pheCurPos->pOwner)
{
//dereference the thread and from there the process
if (((PTHREADINFO)pheCurPos->pOwner)->Thread->ThreadsProcess == peProcessInfo)
{
pdwRes[pheCurPos->bType]++;
}
}
}
}
}
}
except (EXCEPTION_EXECUTE_HANDLER)
{
iRet = STATUS_ACCESS_VIOLATION;
}
}
return iRet;
}
/***************************************************************************\
* KernelPtoH
*
* This function is called from the client to convert pool-based object
* pointers, such as a cursor pointer, to a handle.
*
* HISTORY:
* 11/22/95 BradG Created
\***************************************************************************/
HANDLE KernelPtoH( PVOID pObj ) {
HANDLE h;
UserAssert( pObj != NULL );
try {
h = PtoHq( pObj );
} except (EXCEPTION_EXECUTE_HANDLER) {
h = NULL;
}
return h;
}
| {
"language": "C"
} |
/*
* Copyright 2002-2018 The OpenSSL Project Authors. All Rights Reserved.
*
* Licensed under the OpenSSL license (the "License"). You may not use
* this file except in compliance with the License. You can obtain a copy
* in the file LICENSE in the source distribution or at
* https://www.openssl.org/source/license.html
*/
#include "../bn_lcl.h"
#if !(defined(__GNUC__) && __GNUC__>=2)
# include "../bn_asm.c" /* kind of dirty hack for Sun Studio */
#else
/*-
* x86_64 BIGNUM accelerator version 0.1, December 2002.
*
* Implemented by Andy Polyakov <appro@openssl.org> for the OpenSSL
* project.
*
* Rights for redistribution and usage in source and binary forms are
* granted according to the OpenSSL license. Warranty of any kind is
* disclaimed.
*
* Q. Version 0.1? It doesn't sound like Andy, he used to assign real
* versions, like 1.0...
* A. Well, that's because this code is basically a quick-n-dirty
* proof-of-concept hack. As you can see it's implemented with
* inline assembler, which means that you're bound to GCC and that
* there might be enough room for further improvement.
*
* Q. Why inline assembler?
* A. x86_64 features own ABI which I'm not familiar with. This is
* why I decided to let the compiler take care of subroutine
* prologue/epilogue as well as register allocation. For reference.
* Win64 implements different ABI for AMD64, different from Linux.
*
* Q. How much faster does it get?
* A. 'apps/openssl speed rsa dsa' output with no-asm:
*
* sign verify sign/s verify/s
* rsa 512 bits 0.0006s 0.0001s 1683.8 18456.2
* rsa 1024 bits 0.0028s 0.0002s 356.0 6407.0
* rsa 2048 bits 0.0172s 0.0005s 58.0 1957.8
* rsa 4096 bits 0.1155s 0.0018s 8.7 555.6
* sign verify sign/s verify/s
* dsa 512 bits 0.0005s 0.0006s 2100.8 1768.3
* dsa 1024 bits 0.0014s 0.0018s 692.3 559.2
* dsa 2048 bits 0.0049s 0.0061s 204.7 165.0
*
* 'apps/openssl speed rsa dsa' output with this module:
*
* sign verify sign/s verify/s
* rsa 512 bits 0.0004s 0.0000s 2767.1 33297.9
* rsa 1024 bits 0.0012s 0.0001s 867.4 14674.7
* rsa 2048 bits 0.0061s 0.0002s 164.0 5270.0
* rsa 4096 bits 0.0384s 0.0006s 26.1 1650.8
* sign verify sign/s verify/s
* dsa 512 bits 0.0002s 0.0003s 4442.2 3786.3
* dsa 1024 bits 0.0005s 0.0007s 1835.1 1497.4
* dsa 2048 bits 0.0016s 0.0020s 620.4 504.6
*
* For the reference. IA-32 assembler implementation performs
* very much like 64-bit code compiled with no-asm on the same
* machine.
*/
# undef mul
# undef mul_add
/*-
* "m"(a), "+m"(r) is the way to favor DirectPath µ-code;
* "g"(0) let the compiler to decide where does it
* want to keep the value of zero;
*/
# define mul_add(r,a,word,carry) do { \
register BN_ULONG high,low; \
asm ("mulq %3" \
: "=a"(low),"=d"(high) \
: "a"(word),"m"(a) \
: "cc"); \
asm ("addq %2,%0; adcq %3,%1" \
: "+r"(carry),"+d"(high)\
: "a"(low),"g"(0) \
: "cc"); \
asm ("addq %2,%0; adcq %3,%1" \
: "+m"(r),"+d"(high) \
: "r"(carry),"g"(0) \
: "cc"); \
carry=high; \
} while (0)
# define mul(r,a,word,carry) do { \
register BN_ULONG high,low; \
asm ("mulq %3" \
: "=a"(low),"=d"(high) \
: "a"(word),"g"(a) \
: "cc"); \
asm ("addq %2,%0; adcq %3,%1" \
: "+r"(carry),"+d"(high)\
: "a"(low),"g"(0) \
: "cc"); \
(r)=carry, carry=high; \
} while (0)
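/*-
 * Hedged reference sketch (compiled out; not part of OpenSSL): what the
 * mul_add() and mul() macros above compute, written with the unsigned
 * __int128 type that the GCC-only guard on this file already implies.
 */
# if 0
static void mul_add_ref(BN_ULONG *r, BN_ULONG a, BN_ULONG w, BN_ULONG *carry)
{
    /* *r += a * w + *carry; the new carry is the high half of the result. */
    unsigned __int128 t = (unsigned __int128)a * w + *r + *carry;
    *r = (BN_ULONG)t;
    *carry = (BN_ULONG)(t >> 64);
}

static void mul_ref(BN_ULONG *r, BN_ULONG a, BN_ULONG w, BN_ULONG *carry)
{
    /* *r = low half of a * w + *carry; the new carry is the high half. */
    unsigned __int128 t = (unsigned __int128)a * w + *carry;
    *r = (BN_ULONG)t;
    *carry = (BN_ULONG)(t >> 64);
}
# endif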
# undef sqr
# define sqr(r0,r1,a) \
asm ("mulq %2" \
: "=a"(r0),"=d"(r1) \
: "a"(a) \
: "cc");
BN_ULONG bn_mul_add_words(BN_ULONG *rp, const BN_ULONG *ap, int num,
BN_ULONG w)
{
BN_ULONG c1 = 0;
if (num <= 0)
return c1;
while (num & ~3) {
mul_add(rp[0], ap[0], w, c1);
mul_add(rp[1], ap[1], w, c1);
mul_add(rp[2], ap[2], w, c1);
mul_add(rp[3], ap[3], w, c1);
ap += 4;
rp += 4;
num -= 4;
}
if (num) {
mul_add(rp[0], ap[0], w, c1);
if (--num == 0)
return c1;
mul_add(rp[1], ap[1], w, c1);
if (--num == 0)
return c1;
mul_add(rp[2], ap[2], w, c1);
return c1;
}
return c1;
}
BN_ULONG bn_mul_words(BN_ULONG *rp, const BN_ULONG *ap, int num, BN_ULONG w)
{
BN_ULONG c1 = 0;
if (num <= 0)
return c1;
while (num & ~3) {
mul(rp[0], ap[0], w, c1);
mul(rp[1], ap[1], w, c1);
mul(rp[2], ap[2], w, c1);
mul(rp[3], ap[3], w, c1);
ap += 4;
rp += 4;
num -= 4;
}
if (num) {
mul(rp[0], ap[0], w, c1);
if (--num == 0)
return c1;
mul(rp[1], ap[1], w, c1);
if (--num == 0)
return c1;
mul(rp[2], ap[2], w, c1);
}
return c1;
}
void bn_sqr_words(BN_ULONG *r, const BN_ULONG *a, int n)
{
if (n <= 0)
return;
while (n & ~3) {
sqr(r[0], r[1], a[0]);
sqr(r[2], r[3], a[1]);
sqr(r[4], r[5], a[2]);
sqr(r[6], r[7], a[3]);
a += 4;
r += 8;
n -= 4;
}
if (n) {
sqr(r[0], r[1], a[0]);
if (--n == 0)
return;
sqr(r[2], r[3], a[1]);
if (--n == 0)
return;
sqr(r[4], r[5], a[2]);
}
}
BN_ULONG bn_div_words(BN_ULONG h, BN_ULONG l, BN_ULONG d)
{
BN_ULONG ret, waste;
asm("divq %4":"=a"(ret), "=d"(waste)
: "a"(l), "d"(h), "r"(d)
: "cc");
return ret;
}
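/*
 * bn_add_words/bn_sub_words below are word-at-a-time loops: the index
 * register walks ap/bp/rp, adcq/sbbq propagates the carry or borrow from
 * word to word, and the trailing "sbbq %0,%0" turns the final flag into
 * 0 or ~0, which the "ret & 1" return masks down to 0 or 1.
 */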
BN_ULONG bn_add_words(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,
int n)
{
BN_ULONG ret;
size_t i = 0;
if (n <= 0)
return 0;
asm volatile (" subq %0,%0 \n" /* clear carry */
" jmp 1f \n"
".p2align 4 \n"
"1: movq (%4,%2,8),%0 \n"
" adcq (%5,%2,8),%0 \n"
" movq %0,(%3,%2,8) \n"
" lea 1(%2),%2 \n"
" dec %1 \n"
" jnz 1b \n"
" sbbq %0,%0 \n"
:"=&r" (ret), "+c"(n), "+r"(i)
:"r"(rp), "r"(ap), "r"(bp)
:"cc", "memory");
return ret & 1;
}
# ifndef SIMICS
BN_ULONG bn_sub_words(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,
int n)
{
BN_ULONG ret;
size_t i = 0;
if (n <= 0)
return 0;
asm volatile (" subq %0,%0 \n" /* clear borrow */
" jmp 1f \n"
".p2align 4 \n"
"1: movq (%4,%2,8),%0 \n"
" sbbq (%5,%2,8),%0 \n"
" movq %0,(%3,%2,8) \n"
" lea 1(%2),%2 \n"
" dec %1 \n"
" jnz 1b \n"
" sbbq %0,%0 \n"
:"=&r" (ret), "+c"(n), "+r"(i)
:"r"(rp), "r"(ap), "r"(bp)
:"cc", "memory");
return ret & 1;
}
# else
/* Simics 1.4<7 has buggy sbbq:-( */
# define BN_MASK2 0xffffffffffffffffL
BN_ULONG bn_sub_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
{
BN_ULONG t1, t2;
int c = 0;
if (n <= 0)
return (BN_ULONG)0;
for (;;) {
t1 = a[0];
t2 = b[0];
r[0] = (t1 - t2 - c) & BN_MASK2;
if (t1 != t2)
c = (t1 < t2);
if (--n <= 0)
break;
t1 = a[1];
t2 = b[1];
r[1] = (t1 - t2 - c) & BN_MASK2;
if (t1 != t2)
c = (t1 < t2);
if (--n <= 0)
break;
t1 = a[2];
t2 = b[2];
r[2] = (t1 - t2 - c) & BN_MASK2;
if (t1 != t2)
c = (t1 < t2);
if (--n <= 0)
break;
t1 = a[3];
t2 = b[3];
r[3] = (t1 - t2 - c) & BN_MASK2;
if (t1 != t2)
c = (t1 < t2);
if (--n <= 0)
break;
a += 4;
b += 4;
r += 4;
}
return c;
}
# endif
/* mul_add_c(a,b,c0,c1,c2) -- c+=a*b for three word number c=(c2,c1,c0) */
/* mul_add_c2(a,b,c0,c1,c2) -- c+=2*a*b for three word number c=(c2,c1,c0) */
/* sqr_add_c(a,i,c0,c1,c2) -- c+=a[i]^2 for three word number c=(c2,c1,c0) */
/*
* sqr_add_c2(a,i,c0,c1,c2) -- c+=2*a[i]*a[j] for three word number
* c=(c2,c1,c0)
*/
/*
* Keep in mind that carrying into high part of multiplication result
* can not overflow, because it cannot be all-ones.
*/
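/*
 * For reference: with 64-bit words the product of two operands is at most
 * (2^64-1)^2 = 2^128 - 2^65 + 1, so the high half of a full multiply is at
 * most 2^64-2 (0xFFFFFFFFFFFFFFFE). Adding the single carry bit produced by
 * the low-half addition therefore cannot wrap the high half, which is what
 * the "adcq" chains in the macros below rely on.
 */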
# if 0
/* original macros are kept for reference purposes */
# define mul_add_c(a,b,c0,c1,c2) do { \
BN_ULONG ta = (a), tb = (b); \
BN_ULONG lo, hi; \
BN_UMULT_LOHI(lo,hi,ta,tb); \
c0 += lo; hi += (c0<lo)?1:0; \
c1 += hi; c2 += (c1<hi)?1:0; \
} while(0)
# define mul_add_c2(a,b,c0,c1,c2) do { \
BN_ULONG ta = (a), tb = (b); \
BN_ULONG lo, hi, tt; \
BN_UMULT_LOHI(lo,hi,ta,tb); \
c0 += lo; tt = hi+((c0<lo)?1:0); \
c1 += tt; c2 += (c1<tt)?1:0; \
c0 += lo; hi += (c0<lo)?1:0; \
c1 += hi; c2 += (c1<hi)?1:0; \
} while(0)
# define sqr_add_c(a,i,c0,c1,c2) do { \
BN_ULONG ta = (a)[i]; \
BN_ULONG lo, hi; \
BN_UMULT_LOHI(lo,hi,ta,ta); \
c0 += lo; hi += (c0<lo)?1:0; \
c1 += hi; c2 += (c1<hi)?1:0; \
} while(0)
# else
# define mul_add_c(a,b,c0,c1,c2) do { \
BN_ULONG t1,t2; \
asm ("mulq %3" \
: "=a"(t1),"=d"(t2) \
: "a"(a),"m"(b) \
: "cc"); \
asm ("addq %3,%0; adcq %4,%1; adcq %5,%2" \
: "+r"(c0),"+r"(c1),"+r"(c2) \
: "r"(t1),"r"(t2),"g"(0) \
: "cc"); \
} while (0)
# define sqr_add_c(a,i,c0,c1,c2) do { \
BN_ULONG t1,t2; \
asm ("mulq %2" \
: "=a"(t1),"=d"(t2) \
: "a"(a[i]) \
: "cc"); \
asm ("addq %3,%0; adcq %4,%1; adcq %5,%2" \
: "+r"(c0),"+r"(c1),"+r"(c2) \
: "r"(t1),"r"(t2),"g"(0) \
: "cc"); \
} while (0)
# define mul_add_c2(a,b,c0,c1,c2) do { \
BN_ULONG t1,t2; \
asm ("mulq %3" \
: "=a"(t1),"=d"(t2) \
: "a"(a),"m"(b) \
: "cc"); \
asm ("addq %3,%0; adcq %4,%1; adcq %5,%2" \
: "+r"(c0),"+r"(c1),"+r"(c2) \
: "r"(t1),"r"(t2),"g"(0) \
: "cc"); \
asm ("addq %3,%0; adcq %4,%1; adcq %5,%2" \
: "+r"(c0),"+r"(c1),"+r"(c2) \
: "r"(t1),"r"(t2),"g"(0) \
: "cc"); \
} while (0)
# endif
# define sqr_add_c2(a,i,j,c0,c1,c2) \
mul_add_c2((a)[i],(a)[j],c0,c1,c2)
void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
{
BN_ULONG c1, c2, c3;
c1 = 0;
c2 = 0;
c3 = 0;
mul_add_c(a[0], b[0], c1, c2, c3);
r[0] = c1;
c1 = 0;
mul_add_c(a[0], b[1], c2, c3, c1);
mul_add_c(a[1], b[0], c2, c3, c1);
r[1] = c2;
c2 = 0;
mul_add_c(a[2], b[0], c3, c1, c2);
mul_add_c(a[1], b[1], c3, c1, c2);
mul_add_c(a[0], b[2], c3, c1, c2);
r[2] = c3;
c3 = 0;
mul_add_c(a[0], b[3], c1, c2, c3);
mul_add_c(a[1], b[2], c1, c2, c3);
mul_add_c(a[2], b[1], c1, c2, c3);
mul_add_c(a[3], b[0], c1, c2, c3);
r[3] = c1;
c1 = 0;
mul_add_c(a[4], b[0], c2, c3, c1);
mul_add_c(a[3], b[1], c2, c3, c1);
mul_add_c(a[2], b[2], c2, c3, c1);
mul_add_c(a[1], b[3], c2, c3, c1);
mul_add_c(a[0], b[4], c2, c3, c1);
r[4] = c2;
c2 = 0;
mul_add_c(a[0], b[5], c3, c1, c2);
mul_add_c(a[1], b[4], c3, c1, c2);
mul_add_c(a[2], b[3], c3, c1, c2);
mul_add_c(a[3], b[2], c3, c1, c2);
mul_add_c(a[4], b[1], c3, c1, c2);
mul_add_c(a[5], b[0], c3, c1, c2);
r[5] = c3;
c3 = 0;
mul_add_c(a[6], b[0], c1, c2, c3);
mul_add_c(a[5], b[1], c1, c2, c3);
mul_add_c(a[4], b[2], c1, c2, c3);
mul_add_c(a[3], b[3], c1, c2, c3);
mul_add_c(a[2], b[4], c1, c2, c3);
mul_add_c(a[1], b[5], c1, c2, c3);
mul_add_c(a[0], b[6], c1, c2, c3);
r[6] = c1;
c1 = 0;
mul_add_c(a[0], b[7], c2, c3, c1);
mul_add_c(a[1], b[6], c2, c3, c1);
mul_add_c(a[2], b[5], c2, c3, c1);
mul_add_c(a[3], b[4], c2, c3, c1);
mul_add_c(a[4], b[3], c2, c3, c1);
mul_add_c(a[5], b[2], c2, c3, c1);
mul_add_c(a[6], b[1], c2, c3, c1);
mul_add_c(a[7], b[0], c2, c3, c1);
r[7] = c2;
c2 = 0;
mul_add_c(a[7], b[1], c3, c1, c2);
mul_add_c(a[6], b[2], c3, c1, c2);
mul_add_c(a[5], b[3], c3, c1, c2);
mul_add_c(a[4], b[4], c3, c1, c2);
mul_add_c(a[3], b[5], c3, c1, c2);
mul_add_c(a[2], b[6], c3, c1, c2);
mul_add_c(a[1], b[7], c3, c1, c2);
r[8] = c3;
c3 = 0;
mul_add_c(a[2], b[7], c1, c2, c3);
mul_add_c(a[3], b[6], c1, c2, c3);
mul_add_c(a[4], b[5], c1, c2, c3);
mul_add_c(a[5], b[4], c1, c2, c3);
mul_add_c(a[6], b[3], c1, c2, c3);
mul_add_c(a[7], b[2], c1, c2, c3);
r[9] = c1;
c1 = 0;
mul_add_c(a[7], b[3], c2, c3, c1);
mul_add_c(a[6], b[4], c2, c3, c1);
mul_add_c(a[5], b[5], c2, c3, c1);
mul_add_c(a[4], b[6], c2, c3, c1);
mul_add_c(a[3], b[7], c2, c3, c1);
r[10] = c2;
c2 = 0;
mul_add_c(a[4], b[7], c3, c1, c2);
mul_add_c(a[5], b[6], c3, c1, c2);
mul_add_c(a[6], b[5], c3, c1, c2);
mul_add_c(a[7], b[4], c3, c1, c2);
r[11] = c3;
c3 = 0;
mul_add_c(a[7], b[5], c1, c2, c3);
mul_add_c(a[6], b[6], c1, c2, c3);
mul_add_c(a[5], b[7], c1, c2, c3);
r[12] = c1;
c1 = 0;
mul_add_c(a[6], b[7], c2, c3, c1);
mul_add_c(a[7], b[6], c2, c3, c1);
r[13] = c2;
c2 = 0;
mul_add_c(a[7], b[7], c3, c1, c2);
r[14] = c3;
r[15] = c1;
}
void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
{
BN_ULONG c1, c2, c3;
c1 = 0;
c2 = 0;
c3 = 0;
mul_add_c(a[0], b[0], c1, c2, c3);
r[0] = c1;
c1 = 0;
mul_add_c(a[0], b[1], c2, c3, c1);
mul_add_c(a[1], b[0], c2, c3, c1);
r[1] = c2;
c2 = 0;
mul_add_c(a[2], b[0], c3, c1, c2);
mul_add_c(a[1], b[1], c3, c1, c2);
mul_add_c(a[0], b[2], c3, c1, c2);
r[2] = c3;
c3 = 0;
mul_add_c(a[0], b[3], c1, c2, c3);
mul_add_c(a[1], b[2], c1, c2, c3);
mul_add_c(a[2], b[1], c1, c2, c3);
mul_add_c(a[3], b[0], c1, c2, c3);
r[3] = c1;
c1 = 0;
mul_add_c(a[3], b[1], c2, c3, c1);
mul_add_c(a[2], b[2], c2, c3, c1);
mul_add_c(a[1], b[3], c2, c3, c1);
r[4] = c2;
c2 = 0;
mul_add_c(a[2], b[3], c3, c1, c2);
mul_add_c(a[3], b[2], c3, c1, c2);
r[5] = c3;
c3 = 0;
mul_add_c(a[3], b[3], c1, c2, c3);
r[6] = c1;
r[7] = c2;
}
void bn_sqr_comba8(BN_ULONG *r, const BN_ULONG *a)
{
BN_ULONG c1, c2, c3;
c1 = 0;
c2 = 0;
c3 = 0;
sqr_add_c(a, 0, c1, c2, c3);
r[0] = c1;
c1 = 0;
sqr_add_c2(a, 1, 0, c2, c3, c1);
r[1] = c2;
c2 = 0;
sqr_add_c(a, 1, c3, c1, c2);
sqr_add_c2(a, 2, 0, c3, c1, c2);
r[2] = c3;
c3 = 0;
sqr_add_c2(a, 3, 0, c1, c2, c3);
sqr_add_c2(a, 2, 1, c1, c2, c3);
r[3] = c1;
c1 = 0;
sqr_add_c(a, 2, c2, c3, c1);
sqr_add_c2(a, 3, 1, c2, c3, c1);
sqr_add_c2(a, 4, 0, c2, c3, c1);
r[4] = c2;
c2 = 0;
sqr_add_c2(a, 5, 0, c3, c1, c2);
sqr_add_c2(a, 4, 1, c3, c1, c2);
sqr_add_c2(a, 3, 2, c3, c1, c2);
r[5] = c3;
c3 = 0;
sqr_add_c(a, 3, c1, c2, c3);
sqr_add_c2(a, 4, 2, c1, c2, c3);
sqr_add_c2(a, 5, 1, c1, c2, c3);
sqr_add_c2(a, 6, 0, c1, c2, c3);
r[6] = c1;
c1 = 0;
sqr_add_c2(a, 7, 0, c2, c3, c1);
sqr_add_c2(a, 6, 1, c2, c3, c1);
sqr_add_c2(a, 5, 2, c2, c3, c1);
sqr_add_c2(a, 4, 3, c2, c3, c1);
r[7] = c2;
c2 = 0;
sqr_add_c(a, 4, c3, c1, c2);
sqr_add_c2(a, 5, 3, c3, c1, c2);
sqr_add_c2(a, 6, 2, c3, c1, c2);
sqr_add_c2(a, 7, 1, c3, c1, c2);
r[8] = c3;
c3 = 0;
sqr_add_c2(a, 7, 2, c1, c2, c3);
sqr_add_c2(a, 6, 3, c1, c2, c3);
sqr_add_c2(a, 5, 4, c1, c2, c3);
r[9] = c1;
c1 = 0;
sqr_add_c(a, 5, c2, c3, c1);
sqr_add_c2(a, 6, 4, c2, c3, c1);
sqr_add_c2(a, 7, 3, c2, c3, c1);
r[10] = c2;
c2 = 0;
sqr_add_c2(a, 7, 4, c3, c1, c2);
sqr_add_c2(a, 6, 5, c3, c1, c2);
r[11] = c3;
c3 = 0;
sqr_add_c(a, 6, c1, c2, c3);
sqr_add_c2(a, 7, 5, c1, c2, c3);
r[12] = c1;
c1 = 0;
sqr_add_c2(a, 7, 6, c2, c3, c1);
r[13] = c2;
c2 = 0;
sqr_add_c(a, 7, c3, c1, c2);
r[14] = c3;
r[15] = c1;
}
void bn_sqr_comba4(BN_ULONG *r, const BN_ULONG *a)
{
BN_ULONG c1, c2, c3;
c1 = 0;
c2 = 0;
c3 = 0;
sqr_add_c(a, 0, c1, c2, c3);
r[0] = c1;
c1 = 0;
sqr_add_c2(a, 1, 0, c2, c3, c1);
r[1] = c2;
c2 = 0;
sqr_add_c(a, 1, c3, c1, c2);
sqr_add_c2(a, 2, 0, c3, c1, c2);
r[2] = c3;
c3 = 0;
sqr_add_c2(a, 3, 0, c1, c2, c3);
sqr_add_c2(a, 2, 1, c1, c2, c3);
r[3] = c1;
c1 = 0;
sqr_add_c(a, 2, c2, c3, c1);
sqr_add_c2(a, 3, 1, c2, c3, c1);
r[4] = c2;
c2 = 0;
sqr_add_c2(a, 3, 2, c3, c1, c2);
r[5] = c3;
c3 = 0;
sqr_add_c(a, 3, c1, c2, c3);
r[6] = c1;
r[7] = c2;
}
#endif
| {
"language": "C"
} |
#pragma once
#ifndef ON
#define ON true
#endif
#ifndef OFF
#define OFF false
#endif
#ifndef TRUE
#define TRUE true
#endif
#ifndef FALSE
#define FALSE false
#endif
#ifdef __APPLE__
#define CEF_LIBRARY "@CEF_LIBRARY@"
#endif
#ifdef _WIN32
#define EXPERIMENTAL_SHARED_TEXTURE_SUPPORT_ENABLED \
@EXPERIMENTAL_SHARED_TEXTURE_SUPPORT_ENABLED@
#else
#define EXPERIMENTAL_SHARED_TEXTURE_SUPPORT_ENABLED false
#endif
| {
"language": "C"
} |
// Copyright 2011 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS-IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//
// Given a sequence of S2Points assumed to be the center of level-k cells,
// compresses it into a stream using the following method:
// - decompose the points into (face, si, ti) tuples (see s2coords.h)
// - run-length encode the faces, combining face number and count into a
// varint32. See the Faces class in s2point_compression.cc.
// - right shift the (si, ti) to remove the part that's constant for all cells
// of level-k. The result is called the (pi, qi) space.
// - 2nd derivative encode the pi and qi sequences (linear prediction)
// - zig-zag encode all derivative values but the first, which cannot be
// negative
// - interleave the zig-zag encoded values
// - encode the first interleaved value in a fixed length encoding
// (varint would make this value larger)
// - encode the remaining interleaved values as varint64s, as the
// derivative encoding should make the values small.
// In addition, provides a lossless method to compress a sequence of points even
// if some points are not the center of level-k cells. These points are stored
// exactly, using 3 double precision values, after the above encoded string,
// together with their index in the sequence (this leads to some redundancy - it
// is expected that only a small fraction of the points are not cell centers).
//
// Requires that the encoder was constructed with the no-arg constructor, as
// Ensure() will be called to allocate space.
//
// To encode leaf cells, this requires 8 bytes for the first vertex plus
// an average of 3.8 bytes for each additional vertex, when computed on
// Google's geographic repository.
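//
// As an illustration of the zig-zag step (a sketch, not part of this API):
// signed deltas are mapped to unsigned values so small magnitudes stay small
// under varint encoding, e.g. for 64-bit deltas
//   encoded = ((uint64_t)d << 1) ^ (uint64_t)(d >> 63);  // -1->1, 1->2, -2->3
//   decoded = (int64_t)(encoded >> 1) ^ -(int64_t)(encoded & 1);
// while the 2nd derivative step stores d2[i] = (p[i]-p[i-1]) - (p[i-1]-p[i-2]),
// which stays near zero for points that advance at a roughly constant rate.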
#ifndef S2_S2POINT_COMPRESSION_H_
#define S2_S2POINT_COMPRESSION_H_
#include "s2/third_party/absl/types/span.h"
#include "s2/_fp_contract_off.h"
#include "s2/s1angle.h"
class Decoder;
class Encoder;
// The XYZ and face,si,ti coordinates of an S2Point and, if this point is equal
// to the center of an S2Cell, the level of this cell (-1 otherwise).
struct S2XYZFaceSiTi {
S2Point xyz;
int face;
unsigned int si;
unsigned int ti;
int cell_level;
};
// Encode the points in the encoder, using an optimized compressed format for
// points at the center of a cell at 'level', plus 3 double values for the
// others.
void S2EncodePointsCompressed(absl::Span<const S2XYZFaceSiTi> points,
int level, Encoder* encoder);
// Decode points encoded with S2EncodePointsCompressed. Requires that the
// level is the level that was used in S2EncodePointsCompressed. Ensures
// that the decoded points equal the encoded points. Returns true on success.
bool S2DecodePointsCompressed(Decoder* decoder, int level,
absl::Span<S2Point> points);
#endif // S2_S2POINT_COMPRESSION_H_
| {
"language": "C"
} |
/*
* iSER transport for the Open iSCSI Initiator & iSER transport internals
*
* Copyright (C) 2004 Dmitry Yusupov
* Copyright (C) 2004 Alex Aizman
* Copyright (C) 2005 Mike Christie
* based on code maintained by open-iscsi@googlegroups.com
*
* Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
* Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __ISCSI_ISER_H__
#define __ISCSI_ISER_H__
#include <linux/types.h>
#include <linux/net.h>
#include <scsi/libiscsi.h>
#include <scsi/scsi_transport_iscsi.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/mutex.h>
#include <linux/mempool.h>
#include <linux/uio.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_fmr_pool.h>
#include <rdma/rdma_cm.h>
#define DRV_NAME "iser"
#define PFX DRV_NAME ": "
#define DRV_VER "0.1"
#define DRV_DATE "May 7th, 2006"
#define iser_dbg(fmt, arg...) \
do { \
if (iser_debug_level > 1) \
printk(KERN_DEBUG PFX "%s:" fmt,\
__func__ , ## arg); \
} while (0)
#define iser_warn(fmt, arg...) \
do { \
if (iser_debug_level > 0) \
printk(KERN_DEBUG PFX "%s:" fmt,\
__func__ , ## arg); \
} while (0)
#define iser_err(fmt, arg...) \
do { \
printk(KERN_ERR PFX "%s:" fmt, \
__func__ , ## arg); \
} while (0)
#define SHIFT_4K 12
#define SIZE_4K (1UL << SHIFT_4K)
#define MASK_4K (~(SIZE_4K-1))
/* support up to 512KB in one RDMA */
#define ISCSI_ISER_SG_TABLESIZE (0x80000 >> SHIFT_4K)
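/* i.e. 0x80000 bytes (512KB) split into 4K pages gives a 128-entry SG table */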
#define ISER_DEF_CMD_PER_LUN 128
/* QP settings */
/* Maximal bounds on received asynchronous PDUs */
#define ISER_MAX_RX_MISC_PDUS 4 /* NOOP_IN(2) , ASYNC_EVENT(2) */
#define ISER_MAX_TX_MISC_PDUS 6 /* NOOP_OUT(2), TEXT(1), *
* SCSI_TMFUNC(2), LOGOUT(1) */
#define ISER_QP_MAX_RECV_DTOS (ISCSI_DEF_XMIT_CMDS_MAX)
#define ISER_MIN_POSTED_RX (ISCSI_DEF_XMIT_CMDS_MAX >> 2)
/* the max TX (send) WR supported by the iSER QP is defined by *
* max_send_wr = T * (1 + D) + C ; D is how many inflight dataouts we expect *
* to have at max for SCSI command. The tx posting & completion handling code *
* supports -EAGAIN scheme where tx is suspended till the QP has room for more *
* send WR. D=8 comes from 64K/8K */
#define ISER_INFLIGHT_DATAOUTS 8
#define ISER_QP_MAX_REQ_DTOS (ISCSI_DEF_XMIT_CMDS_MAX * \
(1 + ISER_INFLIGHT_DATAOUTS) + \
ISER_MAX_TX_MISC_PDUS + \
ISER_MAX_RX_MISC_PDUS)
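/* e.g. assuming the usual libiscsi default of ISCSI_DEF_XMIT_CMDS_MAX == 128, *
 * this evaluates to 128 * (1 + 8) + 6 + 4 = 1162 send WRs for the QP         */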
#define ISER_VER 0x10
#define ISER_WSV 0x08
#define ISER_RSV 0x04
struct iser_hdr {
u8 flags;
u8 rsvd[3];
__be32 write_stag; /* write rkey */
__be64 write_va;
__be32 read_stag; /* read rkey */
__be64 read_va;
} __attribute__((packed));
/* Constant PDU lengths calculations */
#define ISER_HEADERS_LEN (sizeof(struct iser_hdr) + sizeof(struct iscsi_hdr))
#define ISER_RECV_DATA_SEG_LEN 128
#define ISER_RX_PAYLOAD_SIZE (ISER_HEADERS_LEN + ISER_RECV_DATA_SEG_LEN)
#define ISER_RX_LOGIN_SIZE (ISER_HEADERS_LEN + ISCSI_DEF_MAX_RECV_SEG_LEN)
/* Length of an object name string */
#define ISER_OBJECT_NAME_SIZE 64
enum iser_ib_conn_state {
ISER_CONN_INIT, /* descriptor allocd, no conn */
ISER_CONN_PENDING, /* in the process of being established */
ISER_CONN_UP, /* up and running */
ISER_CONN_TERMINATING, /* in the process of being terminated */
ISER_CONN_DOWN, /* shut down */
ISER_CONN_STATES_NUM
};
enum iser_task_status {
ISER_TASK_STATUS_INIT = 0,
ISER_TASK_STATUS_STARTED,
ISER_TASK_STATUS_COMPLETED
};
enum iser_data_dir {
ISER_DIR_IN = 0, /* to initiator */
ISER_DIR_OUT, /* from initiator */
ISER_DIRS_NUM
};
struct iser_data_buf {
void *buf; /* pointer to the sg list */
unsigned int size; /* num entries of this sg */
unsigned long data_len; /* total data len */
unsigned int dma_nents; /* returned by dma_map_sg */
char *copy_buf; /* allocated copy buf for SGs unaligned *
* for rdma which are copied */
struct scatterlist sg_single; /* SG-ified clone of a non SG SC or *
* unaligned SG */
};
/* fwd declarations */
struct iser_device;
struct iscsi_iser_conn;
struct iscsi_iser_task;
struct iscsi_endpoint;
struct iser_mem_reg {
u32 lkey;
u32 rkey;
u64 va;
u64 len;
void *mem_h;
int is_fmr;
};
struct iser_regd_buf {
struct iser_mem_reg reg; /* memory registration info */
void *virt_addr;
struct iser_device *device; /* device->device for dma_unmap */
enum dma_data_direction direction; /* direction for dma_unmap */
unsigned int data_size;
};
enum iser_desc_type {
ISCSI_TX_CONTROL ,
ISCSI_TX_SCSI_COMMAND,
ISCSI_TX_DATAOUT
};
struct iser_tx_desc {
struct iser_hdr iser_header;
struct iscsi_hdr iscsi_header;
enum iser_desc_type type;
u64 dma_addr;
/* sg[0] points to iser/iscsi headers, sg[1] optionally points to either
of immediate data, unsolicited data-out or control (login,text) */
struct ib_sge tx_sg[2];
int num_sge;
};
#define ISER_RX_PAD_SIZE (256 - (ISER_RX_PAYLOAD_SIZE + \
sizeof(u64) + sizeof(struct ib_sge)))
struct iser_rx_desc {
struct iser_hdr iser_header;
struct iscsi_hdr iscsi_header;
char data[ISER_RECV_DATA_SEG_LEN];
u64 dma_addr;
struct ib_sge rx_sg;
char pad[ISER_RX_PAD_SIZE];
} __attribute__((packed));
struct iser_device {
struct ib_device *ib_device;
struct ib_pd *pd;
struct ib_cq *rx_cq;
struct ib_cq *tx_cq;
struct ib_mr *mr;
struct tasklet_struct cq_tasklet;
struct ib_event_handler event_handler;
struct list_head ig_list; /* entry in ig devices list */
int refcount;
};
struct iser_conn {
struct iscsi_iser_conn *iser_conn; /* iser conn for upcalls */
struct iscsi_endpoint *ep;
enum iser_ib_conn_state state; /* rdma connection state */
atomic_t refcount;
spinlock_t lock; /* used for state changes */
struct iser_device *device; /* device context */
struct rdma_cm_id *cma_id; /* CMA ID */
struct ib_qp *qp; /* QP */
struct ib_fmr_pool *fmr_pool; /* pool of IB FMRs */
wait_queue_head_t wait; /* waitq for conn/disconn */
int post_recv_buf_count; /* posted rx count */
atomic_t post_send_buf_count; /* posted tx count */
char name[ISER_OBJECT_NAME_SIZE];
struct iser_page_vec *page_vec; /* represents SG to fmr maps*
* maps serialized as tx is*/
struct list_head conn_list; /* entry in ig conn list */
char *login_buf;
u64 login_dma;
unsigned int rx_desc_head;
struct iser_rx_desc *rx_descs;
struct ib_recv_wr rx_wr[ISER_MIN_POSTED_RX];
};
struct iscsi_iser_conn {
struct iscsi_conn *iscsi_conn;/* ptr to iscsi conn */
struct iser_conn *ib_conn; /* iSER IB conn */
};
struct iscsi_iser_task {
struct iser_tx_desc desc;
struct iscsi_iser_conn *iser_conn;
enum iser_task_status status;
int command_sent; /* set if command sent */
int dir[ISER_DIRS_NUM]; /* set if dir use*/
struct iser_regd_buf rdma_regd[ISER_DIRS_NUM];/* regd rdma buf */
struct iser_data_buf data[ISER_DIRS_NUM]; /* orig. data des*/
struct iser_data_buf data_copy[ISER_DIRS_NUM];/* contig. copy */
int headers_initialized;
};
struct iser_page_vec {
u64 *pages;
int length;
int offset;
int data_size;
};
struct iser_global {
struct mutex device_list_mutex;/* */
struct list_head device_list; /* all iSER devices */
struct mutex connlist_mutex;
struct list_head connlist; /* all iSER IB connections */
struct kmem_cache *desc_cache;
};
extern struct iser_global ig;
extern int iser_debug_level;
/* allocate connection resources needed for rdma functionality */
int iser_conn_set_full_featured_mode(struct iscsi_conn *conn);
int iser_send_control(struct iscsi_conn *conn,
struct iscsi_task *task);
int iser_send_command(struct iscsi_conn *conn,
struct iscsi_task *task);
int iser_send_data_out(struct iscsi_conn *conn,
struct iscsi_task *task,
struct iscsi_data *hdr);
void iscsi_iser_recv(struct iscsi_conn *conn,
struct iscsi_hdr *hdr,
char *rx_data,
int rx_data_len);
void iser_conn_init(struct iser_conn *ib_conn);
void iser_conn_get(struct iser_conn *ib_conn);
int iser_conn_put(struct iser_conn *ib_conn, int destroy_cma_id_allowed);
void iser_conn_terminate(struct iser_conn *ib_conn);
void iser_rcv_completion(struct iser_rx_desc *desc,
unsigned long dto_xfer_len,
struct iser_conn *ib_conn);
void iser_snd_completion(struct iser_tx_desc *desc, struct iser_conn *ib_conn);
void iser_task_rdma_init(struct iscsi_iser_task *task);
void iser_task_rdma_finalize(struct iscsi_iser_task *task);
void iser_free_rx_descriptors(struct iser_conn *ib_conn);
void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *task,
enum iser_data_dir cmd_dir);
int iser_reg_rdma_mem(struct iscsi_iser_task *task,
enum iser_data_dir cmd_dir);
int iser_connect(struct iser_conn *ib_conn,
struct sockaddr_in *src_addr,
struct sockaddr_in *dst_addr,
int non_blocking);
int iser_reg_page_vec(struct iser_conn *ib_conn,
struct iser_page_vec *page_vec,
struct iser_mem_reg *mem_reg);
void iser_unreg_mem(struct iser_mem_reg *mem_reg);
int iser_post_recvl(struct iser_conn *ib_conn);
int iser_post_recvm(struct iser_conn *ib_conn, int count);
int iser_post_send(struct iser_conn *ib_conn, struct iser_tx_desc *tx_desc);
int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
struct iser_data_buf *data,
enum iser_data_dir iser_dir,
enum dma_data_direction dma_dir);
void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task);
int iser_initialize_task_headers(struct iscsi_task *task,
struct iser_tx_desc *tx_desc);
int iser_alloc_rx_descriptors(struct iser_conn *ib_conn);
#endif
| {
"language": "C"
} |
/*
* zero.c -- Gadget Zero, for USB development
*
* Copyright (C) 2003-2004 David Brownell
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The names of the above-listed copyright holders may not be used
* to endorse or promote products derived from this software without
* specific prior written permission.
*
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
* later version.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
* IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Gadget Zero only needs two bulk endpoints, and is an example of how you
* can write a hardware-agnostic gadget driver running inside a USB device.
*
* Hardware details are visible (see CONFIG_USB_ZERO_* below) but don't
* affect most of the driver.
*
* Use it with the Linux host/master side "usbtest" driver to get a basic
* functional test of your device-side usb stack, or with "usb-skeleton".
*
* It supports two similar configurations. One sinks whatever the usb host
* writes, and in return sources zeroes. The other loops whatever the host
* writes back, so the host can read it. Module options include:
*
* buflen=N default N=4096, buffer size used
* qlen=N default N=32, how many buffers in the loopback queue
* loopdefault default false, list loopback config first
*
* Many drivers will only have one configuration, letting them be much
* simpler if they also don't support high speed operation (like this
* driver does).
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/uts.h>
#include <linux/version.h>
#include <linux/device.h>
#include <linux/moduleparam.h>
#include <linux/proc_fs.h>
#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/system.h>
#include <asm/unaligned.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
# include <linux/usb/ch9.h>
#else
# include <linux/usb_ch9.h>
#endif
#include <linux/usb_gadget.h>
/*-------------------------------------------------------------------------*/
/*-------------------------------------------------------------------------*/
static int utf8_to_utf16le(const char *s, u16 *cp, unsigned len)
{
int count = 0;
u8 c;
u16 uchar;
/* this insists on correct encodings, though not minimal ones.
* BUT it currently rejects legit 4-byte UTF-8 code points,
* which need surrogate pairs. (Unicode 3.1 can use them.)
*/
while (len != 0 && (c = (u8) *s++) != 0) {
if (unlikely(c & 0x80)) {
// 2-byte sequence:
// 00000yyyyyxxxxxx = 110yyyyy 10xxxxxx
if ((c & 0xe0) == 0xc0) {
uchar = (c & 0x1f) << 6;
c = (u8) *s++;
if ((c & 0xc0) != 0xc0)
goto fail;
c &= 0x3f;
uchar |= c;
// 3-byte sequence (most CJKV characters):
// zzzzyyyyyyxxxxxx = 1110zzzz 10yyyyyy 10xxxxxx
} else if ((c & 0xf0) == 0xe0) {
uchar = (c & 0x0f) << 12;
c = (u8) *s++;
if ((c & 0xc0) != 0xc0)
goto fail;
c &= 0x3f;
uchar |= c << 6;
c = (u8) *s++;
if ((c & 0xc0) != 0xc0)
goto fail;
c &= 0x3f;
uchar |= c;
/* no bogus surrogates */
if (0xd800 <= uchar && uchar <= 0xdfff)
goto fail;
// 4-byte sequence (surrogate pairs, currently rare):
// 11101110wwwwzzzzyy + 110111yyyyxxxxxx
// = 11110uuu 10uuzzzz 10yyyyyy 10xxxxxx
// (uuuuu = wwww + 1)
// FIXME accept the surrogate code points (only)
} else
goto fail;
} else
uchar = c;
put_unaligned (cpu_to_le16 (uchar), cp++);
count++;
len--;
}
return count;
fail:
return -1;
}
/**
* usb_gadget_get_string - fill out a string descriptor
* @table: of c strings encoded using UTF-8
* @id: string id, from low byte of wValue in get string descriptor
* @buf: at least 256 bytes
*
* Finds the UTF-8 string matching the ID, and converts it into a
* string descriptor in utf16-le.
* Returns length of descriptor (always even) or negative errno
*
 * If your driver needs strings in multiple languages, you'll probably
* "switch (wIndex) { ... }" in your ep0 string descriptor logic,
* using this routine after choosing which set of UTF-8 strings to use.
* Note that US-ASCII is a strict subset of UTF-8; any string bytes with
* the eighth bit set will be multibyte UTF-8 characters, not ISO-8859/1
* characters (which are also widely used in C strings).
*/
int
usb_gadget_get_string (struct usb_gadget_strings *table, int id, u8 *buf)
{
struct usb_string *s;
int len;
/* descriptor 0 has the language id */
if (id == 0) {
buf [0] = 4;
buf [1] = USB_DT_STRING;
buf [2] = (u8) table->language;
buf [3] = (u8) (table->language >> 8);
return 4;
}
for (s = table->strings; s && s->s; s++)
if (s->id == id)
break;
/* unrecognized: stall. */
if (!s || !s->s)
return -EINVAL;
/* string descriptors have length, tag, then UTF16-LE text */
len = min ((size_t) 126, strlen (s->s));
memset (buf + 2, 0, 2 * len); /* zero all the bytes */
len = utf8_to_utf16le(s->s, (u16 *)&buf[2], len);
if (len < 0)
return -EINVAL;
buf [0] = (len + 1) * 2;
buf [1] = USB_DT_STRING;
return buf [0];
}
/*-------------------------------------------------------------------------*/
/*-------------------------------------------------------------------------*/
/**
* usb_descriptor_fillbuf - fill buffer with descriptors
* @buf: Buffer to be filled
* @buflen: Size of buf
* @src: Array of descriptor pointers, terminated by null pointer.
*
* Copies descriptors into the buffer, returning the length or a
* negative error code if they can't all be copied. Useful when
* assembling descriptors for an associated set of interfaces used
* as part of configuring a composite device; or in other cases where
* sets of descriptors need to be marshaled.
*/
int
usb_descriptor_fillbuf(void *buf, unsigned buflen,
const struct usb_descriptor_header **src)
{
u8 *dest = buf;
if (!src)
return -EINVAL;
/* fill buffer from src[] until null descriptor ptr */
for (; 0 != *src; src++) {
unsigned len = (*src)->bLength;
if (len > buflen)
return -EINVAL;
memcpy(dest, *src, len);
buflen -= len;
dest += len;
}
return dest - (u8 *)buf;
}
/**
 * usb_gadget_config_buf - builds a complete configuration descriptor
* @config: Header for the descriptor, including characteristics such
* as power requirements and number of interfaces.
* @desc: Null-terminated vector of pointers to the descriptors (interface,
* endpoint, etc) defining all functions in this device configuration.
* @buf: Buffer for the resulting configuration descriptor.
* @length: Length of buffer. If this is not big enough to hold the
* entire configuration descriptor, an error code will be returned.
*
* This copies descriptors into the response buffer, building a descriptor
* for that configuration. It returns the buffer length or a negative
* status code. The config.wTotalLength field is set to match the length
* of the result, but other descriptor fields (including power usage and
* interface count) must be set by the caller.
*
* Gadget drivers could use this when constructing a config descriptor
* in response to USB_REQ_GET_DESCRIPTOR. They will need to patch the
* resulting bDescriptorType value if USB_DT_OTHER_SPEED_CONFIG is needed.
*/
int usb_gadget_config_buf(
const struct usb_config_descriptor *config,
void *buf,
unsigned length,
const struct usb_descriptor_header **desc
)
{
struct usb_config_descriptor *cp = buf;
int len;
/* config descriptor first */
if (length < USB_DT_CONFIG_SIZE || !desc)
return -EINVAL;
*cp = *config;
/* then interface/endpoint/class/vendor/... */
len = usb_descriptor_fillbuf(USB_DT_CONFIG_SIZE + (u8*)buf,
length - USB_DT_CONFIG_SIZE, desc);
if (len < 0)
return len;
len += USB_DT_CONFIG_SIZE;
if (len > 0xffff)
return -EINVAL;
/* patch up the config descriptor */
cp->bLength = USB_DT_CONFIG_SIZE;
cp->bDescriptorType = USB_DT_CONFIG;
cp->wTotalLength = cpu_to_le16(len);
cp->bmAttributes |= USB_CONFIG_ATT_ONE;
return len;
}
/*-------------------------------------------------------------------------*/
/*-------------------------------------------------------------------------*/
#define RBUF_LEN (1024*1024)
static int rbuf_start;
static int rbuf_len;
static __u8 rbuf[RBUF_LEN];
/*-------------------------------------------------------------------------*/
#define DRIVER_VERSION "St Patrick's Day 2004"
static const char shortname [] = "zero";
static const char longname [] = "YAMAHA YST-MS35D USB Speaker ";
static const char source_sink [] = "source and sink data";
static const char loopback [] = "loop input to output";
/*-------------------------------------------------------------------------*/
/*
* driver assumes self-powered hardware, and
* has no way for users to trigger remote wakeup.
*
* this version autoconfigures as much as possible,
* which is reasonable for most "bulk-only" drivers.
*/
static const char *EP_IN_NAME; /* source */
static const char *EP_OUT_NAME; /* sink */
/*-------------------------------------------------------------------------*/
/* big enough to hold our biggest descriptor */
#define USB_BUFSIZ 512
struct zero_dev {
spinlock_t lock;
struct usb_gadget *gadget;
struct usb_request *req; /* for control responses */
/* when configured, we have one of two configs:
* - source data (in to host) and sink it (out from host)
* - or loop it back (out from host back in to host)
*/
u8 config;
struct usb_ep *in_ep, *out_ep;
/* autoresume timer */
struct timer_list resume;
};
#define xprintk(d,level,fmt,args...) \
dev_printk(level , &(d)->gadget->dev , fmt , ## args)
#ifdef DEBUG
#define DBG(dev,fmt,args...) \
xprintk(dev , KERN_DEBUG , fmt , ## args)
#else
#define DBG(dev,fmt,args...) \
do { } while (0)
#endif /* DEBUG */
#ifdef VERBOSE
#define VDBG DBG
#else
#define VDBG(dev,fmt,args...) \
do { } while (0)
#endif /* VERBOSE */
#define ERROR(dev,fmt,args...) \
xprintk(dev , KERN_ERR , fmt , ## args)
#define WARN(dev,fmt,args...) \
xprintk(dev , KERN_WARNING , fmt , ## args)
#define INFO(dev,fmt,args...) \
xprintk(dev , KERN_INFO , fmt , ## args)
/*-------------------------------------------------------------------------*/
static unsigned buflen = 4096;
static unsigned qlen = 32;
static unsigned pattern = 0;
module_param (buflen, uint, S_IRUGO|S_IWUSR);
module_param (qlen, uint, S_IRUGO|S_IWUSR);
module_param (pattern, uint, S_IRUGO|S_IWUSR);
/*
* if it's nonzero, autoresume says how many seconds to wait
* before trying to wake up the host after suspend.
*/
static unsigned autoresume = 0;
module_param (autoresume, uint, 0);
/*
* Normally the "loopback" configuration is second (index 1) so
* it's not the default. Here's where to change that order, to
* work better with hosts where config changes are problematic.
* Or controllers (like superh) that only support one config.
*/
static int loopdefault = 0;
module_param (loopdefault, bool, S_IRUGO|S_IWUSR);
/*-------------------------------------------------------------------------*/
/* Thanks to NetChip Technologies for donating this product ID.
*
* DO NOT REUSE THESE IDs with a protocol-incompatible driver!! Ever!!
* Instead: allocate your own, using normal USB-IF procedures.
*/
#ifndef CONFIG_USB_ZERO_HNPTEST
#define DRIVER_VENDOR_NUM 0x0525 /* NetChip */
#define DRIVER_PRODUCT_NUM 0xa4a0 /* Linux-USB "Gadget Zero" */
#else
#define DRIVER_VENDOR_NUM 0x1a0a /* OTG test device IDs */
#define DRIVER_PRODUCT_NUM 0xbadd
#endif
/*-------------------------------------------------------------------------*/
/*
* DESCRIPTORS ... most are static, but strings and (full)
* configuration descriptors are built on demand.
*/
/*
#define STRING_MANUFACTURER 25
#define STRING_PRODUCT 42
#define STRING_SERIAL 101
*/
#define STRING_MANUFACTURER 1
#define STRING_PRODUCT 2
#define STRING_SERIAL 3
#define STRING_SOURCE_SINK 250
#define STRING_LOOPBACK 251
/*
* This device advertises two configurations; these numbers work
* on a pxa250 as well as more flexible hardware.
*/
#define CONFIG_SOURCE_SINK 3
#define CONFIG_LOOPBACK 2
/*
static struct usb_device_descriptor
device_desc = {
.bLength = sizeof device_desc,
.bDescriptorType = USB_DT_DEVICE,
.bcdUSB = __constant_cpu_to_le16 (0x0200),
.bDeviceClass = USB_CLASS_VENDOR_SPEC,
.idVendor = __constant_cpu_to_le16 (DRIVER_VENDOR_NUM),
.idProduct = __constant_cpu_to_le16 (DRIVER_PRODUCT_NUM),
.iManufacturer = STRING_MANUFACTURER,
.iProduct = STRING_PRODUCT,
.iSerialNumber = STRING_SERIAL,
.bNumConfigurations = 2,
};
*/
static struct usb_device_descriptor
device_desc = {
.bLength = sizeof device_desc,
.bDescriptorType = USB_DT_DEVICE,
.bcdUSB = __constant_cpu_to_le16 (0x0100),
.bDeviceClass = USB_CLASS_PER_INTERFACE,
.bDeviceSubClass = 0,
.bDeviceProtocol = 0,
.bMaxPacketSize0 = 64,
.bcdDevice = __constant_cpu_to_le16 (0x0100),
.idVendor = __constant_cpu_to_le16 (0x0499),
.idProduct = __constant_cpu_to_le16 (0x3002),
.iManufacturer = STRING_MANUFACTURER,
.iProduct = STRING_PRODUCT,
.iSerialNumber = STRING_SERIAL,
.bNumConfigurations = 1,
};
static struct usb_config_descriptor
z_config = {
.bLength = sizeof z_config,
.bDescriptorType = USB_DT_CONFIG,
/* compute wTotalLength on the fly */
.bNumInterfaces = 2,
.bConfigurationValue = 1,
.iConfiguration = 0,
.bmAttributes = 0x40,
.bMaxPower = 0, /* self-powered */
};
static struct usb_otg_descriptor
otg_descriptor = {
.bLength = sizeof otg_descriptor,
.bDescriptorType = USB_DT_OTG,
.bmAttributes = USB_OTG_SRP,
};
/* one interface in each configuration */
#ifdef CONFIG_USB_GADGET_DUALSPEED
/*
* usb 2.0 devices need to expose both high speed and full speed
* descriptors, unless they only run at full speed.
*
* that means alternate endpoint descriptors (bigger packets)
* and a "device qualifier" ... plus more construction options
* for the config descriptor.
*/
static struct usb_qualifier_descriptor
dev_qualifier = {
.bLength = sizeof dev_qualifier,
.bDescriptorType = USB_DT_DEVICE_QUALIFIER,
.bcdUSB = __constant_cpu_to_le16 (0x0200),
.bDeviceClass = USB_CLASS_VENDOR_SPEC,
.bNumConfigurations = 2,
};
struct usb_cs_as_general_descriptor {
__u8 bLength;
__u8 bDescriptorType;
__u8 bDescriptorSubType;
__u8 bTerminalLink;
__u8 bDelay;
__u16 wFormatTag;
} __attribute__ ((packed));
struct usb_cs_as_format_descriptor {
__u8 bLength;
__u8 bDescriptorType;
__u8 bDescriptorSubType;
__u8 bFormatType;
__u8 bNrChannels;
__u8 bSubframeSize;
__u8 bBitResolution;
__u8 bSamfreqType;
__u8 tLowerSamFreq[3];
__u8 tUpperSamFreq[3];
} __attribute__ ((packed));
static const struct usb_interface_descriptor
z_audio_control_if_desc = {
.bLength = sizeof z_audio_control_if_desc,
.bDescriptorType = USB_DT_INTERFACE,
.bInterfaceNumber = 0,
.bAlternateSetting = 0,
.bNumEndpoints = 0,
.bInterfaceClass = USB_CLASS_AUDIO,
.bInterfaceSubClass = 0x1,
.bInterfaceProtocol = 0,
.iInterface = 0,
};
static const struct usb_interface_descriptor
z_audio_if_desc = {
.bLength = sizeof z_audio_if_desc,
.bDescriptorType = USB_DT_INTERFACE,
.bInterfaceNumber = 1,
.bAlternateSetting = 0,
.bNumEndpoints = 0,
.bInterfaceClass = USB_CLASS_AUDIO,
.bInterfaceSubClass = 0x2,
.bInterfaceProtocol = 0,
.iInterface = 0,
};
static const struct usb_interface_descriptor
z_audio_if_desc2 = {
.bLength = sizeof z_audio_if_desc,
.bDescriptorType = USB_DT_INTERFACE,
.bInterfaceNumber = 1,
.bAlternateSetting = 1,
.bNumEndpoints = 1,
.bInterfaceClass = USB_CLASS_AUDIO,
.bInterfaceSubClass = 0x2,
.bInterfaceProtocol = 0,
.iInterface = 0,
};
static const struct usb_cs_as_general_descriptor
z_audio_cs_as_if_desc = {
.bLength = 7,
.bDescriptorType = 0x24,
.bDescriptorSubType = 0x01,
.bTerminalLink = 0x01,
.bDelay = 0x0,
.wFormatTag = __constant_cpu_to_le16 (0x0001)
};
static const struct usb_cs_as_format_descriptor
z_audio_cs_as_format_desc = {
.bLength = 0xe,
.bDescriptorType = 0x24,
.bDescriptorSubType = 2,
.bFormatType = 1,
.bNrChannels = 1,
.bSubframeSize = 1,
.bBitResolution = 8,
.bSamfreqType = 0,
.tLowerSamFreq = {0x7e, 0x13, 0x00},
.tUpperSamFreq = {0xe2, 0xd6, 0x00},
};
static const struct usb_endpoint_descriptor
z_iso_ep = {
.bLength = 0x09,
.bDescriptorType = 0x05,
.bEndpointAddress = 0x04,
.bmAttributes = 0x09,
.wMaxPacketSize = 0x0038,
.bInterval = 0x01,
.bRefresh = 0x00,
.bSynchAddress = 0x00,
};
static char z_iso_ep2[] = {0x07, 0x25, 0x01, 0x00, 0x02, 0x00, 0x02};
// 9 bytes
static char z_ac_interface_header_desc[] =
{ 0x09, 0x24, 0x01, 0x00, 0x01, 0x2b, 0x00, 0x01, 0x01 };
// 12 bytes
static char z_0[] = {0x0c, 0x24, 0x02, 0x01, 0x01, 0x01, 0x00, 0x02,
0x03, 0x00, 0x00, 0x00};
// 13 bytes
static char z_1[] = {0x0d, 0x24, 0x06, 0x02, 0x01, 0x02, 0x15, 0x00,
0x02, 0x00, 0x02, 0x00, 0x00};
// 9 bytes
static char z_2[] = {0x09, 0x24, 0x03, 0x03, 0x01, 0x03, 0x00, 0x02,
0x00};
static char za_0[] = {0x09, 0x04, 0x01, 0x02, 0x01, 0x01, 0x02, 0x00,
0x00};
static char za_1[] = {0x07, 0x24, 0x01, 0x01, 0x00, 0x01, 0x00};
static char za_2[] = {0x0e, 0x24, 0x02, 0x01, 0x02, 0x01, 0x08, 0x00,
0x7e, 0x13, 0x00, 0xe2, 0xd6, 0x00};
static char za_3[] = {0x09, 0x05, 0x04, 0x09, 0x70, 0x00, 0x01, 0x00,
0x00};
static char za_4[] = {0x07, 0x25, 0x01, 0x00, 0x02, 0x00, 0x02};
static char za_5[] = {0x09, 0x04, 0x01, 0x03, 0x01, 0x01, 0x02, 0x00,
0x00};
static char za_6[] = {0x07, 0x24, 0x01, 0x01, 0x00, 0x01, 0x00};
static char za_7[] = {0x0e, 0x24, 0x02, 0x01, 0x01, 0x02, 0x10, 0x00,
0x7e, 0x13, 0x00, 0xe2, 0xd6, 0x00};
static char za_8[] = {0x09, 0x05, 0x04, 0x09, 0x70, 0x00, 0x01, 0x00,
0x00};
static char za_9[] = {0x07, 0x25, 0x01, 0x00, 0x02, 0x00, 0x02};
static char za_10[] = {0x09, 0x04, 0x01, 0x04, 0x01, 0x01, 0x02, 0x00,
0x00};
static char za_11[] = {0x07, 0x24, 0x01, 0x01, 0x00, 0x01, 0x00};
static char za_12[] = {0x0e, 0x24, 0x02, 0x01, 0x02, 0x02, 0x10, 0x00,
0x73, 0x13, 0x00, 0xe2, 0xd6, 0x00};
static char za_13[] = {0x09, 0x05, 0x04, 0x09, 0xe0, 0x00, 0x01, 0x00,
0x00};
static char za_14[] = {0x07, 0x25, 0x01, 0x00, 0x02, 0x00, 0x02};
static char za_15[] = {0x09, 0x04, 0x01, 0x05, 0x01, 0x01, 0x02, 0x00,
0x00};
static char za_16[] = {0x07, 0x24, 0x01, 0x01, 0x00, 0x01, 0x00};
static char za_17[] = {0x0e, 0x24, 0x02, 0x01, 0x01, 0x03, 0x14, 0x00,
0x7e, 0x13, 0x00, 0xe2, 0xd6, 0x00};
static char za_18[] = {0x09, 0x05, 0x04, 0x09, 0xa8, 0x00, 0x01, 0x00,
0x00};
static char za_19[] = {0x07, 0x25, 0x01, 0x00, 0x02, 0x00, 0x02};
static char za_20[] = {0x09, 0x04, 0x01, 0x06, 0x01, 0x01, 0x02, 0x00,
0x00};
static char za_21[] = {0x07, 0x24, 0x01, 0x01, 0x00, 0x01, 0x00};
static char za_22[] = {0x0e, 0x24, 0x02, 0x01, 0x02, 0x03, 0x14, 0x00,
0x7e, 0x13, 0x00, 0xe2, 0xd6, 0x00};
static char za_23[] = {0x09, 0x05, 0x04, 0x09, 0x50, 0x01, 0x01, 0x00,
0x00};
static char za_24[] = {0x07, 0x25, 0x01, 0x00, 0x02, 0x00, 0x02};
static const struct usb_descriptor_header *z_function [] = {
(struct usb_descriptor_header *) &z_audio_control_if_desc,
(struct usb_descriptor_header *) &z_ac_interface_header_desc,
(struct usb_descriptor_header *) &z_0,
(struct usb_descriptor_header *) &z_1,
(struct usb_descriptor_header *) &z_2,
(struct usb_descriptor_header *) &z_audio_if_desc,
(struct usb_descriptor_header *) &z_audio_if_desc2,
(struct usb_descriptor_header *) &z_audio_cs_as_if_desc,
(struct usb_descriptor_header *) &z_audio_cs_as_format_desc,
(struct usb_descriptor_header *) &z_iso_ep,
(struct usb_descriptor_header *) &z_iso_ep2,
(struct usb_descriptor_header *) &za_0,
(struct usb_descriptor_header *) &za_1,
(struct usb_descriptor_header *) &za_2,
(struct usb_descriptor_header *) &za_3,
(struct usb_descriptor_header *) &za_4,
(struct usb_descriptor_header *) &za_5,
(struct usb_descriptor_header *) &za_6,
(struct usb_descriptor_header *) &za_7,
(struct usb_descriptor_header *) &za_8,
(struct usb_descriptor_header *) &za_9,
(struct usb_descriptor_header *) &za_10,
(struct usb_descriptor_header *) &za_11,
(struct usb_descriptor_header *) &za_12,
(struct usb_descriptor_header *) &za_13,
(struct usb_descriptor_header *) &za_14,
(struct usb_descriptor_header *) &za_15,
(struct usb_descriptor_header *) &za_16,
(struct usb_descriptor_header *) &za_17,
(struct usb_descriptor_header *) &za_18,
(struct usb_descriptor_header *) &za_19,
(struct usb_descriptor_header *) &za_20,
(struct usb_descriptor_header *) &za_21,
(struct usb_descriptor_header *) &za_22,
(struct usb_descriptor_header *) &za_23,
(struct usb_descriptor_header *) &za_24,
NULL,
};
/* maxpacket and other transfer characteristics vary by speed. */
#define ep_desc(g,hs,fs) (((g)->speed==USB_SPEED_HIGH)?(hs):(fs))
#else
/* if there's no high speed support, maxpacket doesn't change. */
#define ep_desc(g,hs,fs) fs
#endif /* !CONFIG_USB_GADGET_DUALSPEED */
static char manufacturer [40];
//static char serial [40];
static char serial [] = "Ser 00 em";
/* static strings, in UTF-8 */
static struct usb_string strings [] = {
{ STRING_MANUFACTURER, manufacturer, },
{ STRING_PRODUCT, longname, },
{ STRING_SERIAL, serial, },
{ STRING_LOOPBACK, loopback, },
{ STRING_SOURCE_SINK, source_sink, },
{ } /* end of list */
};
static struct usb_gadget_strings stringtab = {
.language = 0x0409, /* en-us */
.strings = strings,
};
/*
* config descriptors are also handcrafted. these must agree with code
* that sets configurations, and with code managing interfaces and their
* altsettings. other complexity may come from:
*
* - high speed support, including "other speed config" rules
* - multiple configurations
* - interfaces with alternate settings
* - embedded class or vendor-specific descriptors
*
* this handles high speed, and has a second config that could as easily
* have been an alternate interface setting (on most hardware).
*
* NOTE: to demonstrate (and test) more USB capabilities, this driver
* should include an altsetting to test interrupt transfers, including
* high bandwidth modes at high speed. (Maybe work like Intel's test
* device?)
*/
static int
config_buf (struct usb_gadget *gadget, u8 *buf, u8 type, unsigned index)
{
int len;
const struct usb_descriptor_header **function;
function = z_function;
len = usb_gadget_config_buf (&z_config, buf, USB_BUFSIZ, function);
if (len < 0)
return len;
((struct usb_config_descriptor *) buf)->bDescriptorType = type;
return len;
}
/*-------------------------------------------------------------------------*/
static struct usb_request *
alloc_ep_req (struct usb_ep *ep, unsigned length)
{
struct usb_request *req;
req = usb_ep_alloc_request (ep, GFP_ATOMIC);
if (req) {
req->length = length;
req->buf = usb_ep_alloc_buffer (ep, length,
&req->dma, GFP_ATOMIC);
if (!req->buf) {
usb_ep_free_request (ep, req);
req = NULL;
}
}
return req;
}
static void free_ep_req (struct usb_ep *ep, struct usb_request *req)
{
if (req->buf)
usb_ep_free_buffer (ep, req->buf, req->dma, req->length);
usb_ep_free_request (ep, req);
}
/*-------------------------------------------------------------------------*/
/* optionally require specific source/sink data patterns */
static int
check_read_data (
struct zero_dev *dev,
struct usb_ep *ep,
struct usb_request *req
)
{
unsigned i;
u8 *buf = req->buf;
for (i = 0; i < req->actual; i++, buf++) {
switch (pattern) {
/* all-zeroes has no synchronization issues */
case 0:
if (*buf == 0)
continue;
break;
/* mod63 stays in sync with short-terminated transfers,
* or otherwise when host and gadget agree on how large
* each usb transfer request should be. resync is done
* with set_interface or set_config.
*/
case 1:
if (*buf == (u8)(i % 63))
continue;
break;
}
ERROR (dev, "bad OUT byte, buf [%d] = %d\n", i, *buf);
usb_ep_set_halt (ep);
return -EINVAL;
}
return 0;
}
/*-------------------------------------------------------------------------*/
static void zero_reset_config (struct zero_dev *dev)
{
if (dev->config == 0)
return;
DBG (dev, "reset config\n");
/* just disable endpoints, forcing completion of pending i/o.
* all our completion handlers free their requests in this case.
*/
if (dev->in_ep) {
usb_ep_disable (dev->in_ep);
dev->in_ep = NULL;
}
if (dev->out_ep) {
usb_ep_disable (dev->out_ep);
dev->out_ep = NULL;
}
dev->config = 0;
del_timer (&dev->resume);
}
#define _write(f, buf, sz) (f->f_op->write(f, buf, sz, &f->f_pos))
static void
zero_isoc_complete (struct usb_ep *ep, struct usb_request *req)
{
struct zero_dev *dev = ep->driver_data;
int status = req->status;
int i, j;
switch (status) {
case 0: /* normal completion? */
//printk ("\nzero ---------------> isoc normal completion %d bytes\n", req->actual);
for (i=0, j=rbuf_start; i<req->actual; i++) {
//printk ("%02x ", ((__u8*)req->buf)[i]);
rbuf[j] = ((__u8*)req->buf)[i];
j++;
if (j >= RBUF_LEN) j=0;
}
rbuf_start = j;
//printk ("\n\n");
if (rbuf_len < RBUF_LEN) {
rbuf_len += req->actual;
if (rbuf_len > RBUF_LEN) {
rbuf_len = RBUF_LEN;
}
}
break;
/* this endpoint is normally active while we're configured */
case -ECONNABORTED: /* hardware forced ep reset */
case -ECONNRESET: /* request dequeued */
case -ESHUTDOWN: /* disconnect from host */
VDBG (dev, "%s gone (%d), %d/%d\n", ep->name, status,
req->actual, req->length);
if (ep == dev->out_ep)
check_read_data (dev, ep, req);
free_ep_req (ep, req);
return;
case -EOVERFLOW: /* buffer overrun on read means that
* we didn't provide a big enough
* buffer.
*/
default:
#if 1
DBG (dev, "%s complete --> %d, %d/%d\n", ep->name,
status, req->actual, req->length);
#endif
case -EREMOTEIO: /* short read */
break;
}
status = usb_ep_queue (ep, req, GFP_ATOMIC);
if (status) {
ERROR (dev, "kill %s: resubmit %d bytes --> %d\n",
ep->name, req->length, status);
usb_ep_set_halt (ep);
/* FIXME recover later ... somehow */
}
}
static struct usb_request *
zero_start_isoc_ep (struct usb_ep *ep, int gfp_flags)
{
struct usb_request *req;
int status;
req = alloc_ep_req (ep, 512);
if (!req)
return NULL;
req->complete = zero_isoc_complete;
status = usb_ep_queue (ep, req, gfp_flags);
if (status) {
struct zero_dev *dev = ep->driver_data;
ERROR (dev, "start %s --> %d\n", ep->name, status);
free_ep_req (ep, req);
req = NULL;
}
return req;
}
/* change our operational config. this code must agree with the code
* that returns config descriptors, and altsetting code.
*
* it's also responsible for power management interactions. some
* configurations might not work with our current power sources.
*
* note that some device controller hardware will constrain what this
* code can do, perhaps by disallowing more than one configuration or
* by limiting configuration choices (like the pxa2xx).
*/
static int
zero_set_config (struct zero_dev *dev, unsigned number, int gfp_flags)
{
int result = 0;
struct usb_gadget *gadget = dev->gadget;
const struct usb_endpoint_descriptor *d;
struct usb_ep *ep;
if (number == dev->config)
return 0;
zero_reset_config (dev);
gadget_for_each_ep (ep, gadget) {
if (strcmp (ep->name, "ep4") == 0) {
			d = (struct usb_endpoint_descriptor *)&za_23; // isoc ep desc for audio i/f alt setting 6
result = usb_ep_enable (ep, d);
if (result == 0) {
ep->driver_data = dev;
dev->in_ep = ep;
if (zero_start_isoc_ep (ep, gfp_flags) != 0) {
dev->in_ep = ep;
continue;
}
usb_ep_disable (ep);
result = -EIO;
}
}
}
dev->config = number;
return result;
}
/*-------------------------------------------------------------------------*/
static void zero_setup_complete (struct usb_ep *ep, struct usb_request *req)
{
if (req->status || req->actual != req->length)
DBG ((struct zero_dev *) ep->driver_data,
"setup complete --> %d, %d/%d\n",
req->status, req->actual, req->length);
}
/*
* The setup() callback implements all the ep0 functionality that's
* not handled lower down, in hardware or the hardware driver (like
* device and endpoint feature flags, and their status). It's all
* housekeeping for the gadget function we're implementing. Most of
* the work is in config-specific setup.
*/
static int
zero_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
{
struct zero_dev *dev = get_gadget_data (gadget);
struct usb_request *req = dev->req;
int value = -EOPNOTSUPP;
/* usually this stores reply data in the pre-allocated ep0 buffer,
* but config change events will reconfigure hardware.
*/
req->zero = 0;
switch (ctrl->bRequest) {
case USB_REQ_GET_DESCRIPTOR:
switch (ctrl->wValue >> 8) {
case USB_DT_DEVICE:
value = min (ctrl->wLength, (u16) sizeof device_desc);
memcpy (req->buf, &device_desc, value);
break;
#ifdef CONFIG_USB_GADGET_DUALSPEED
case USB_DT_DEVICE_QUALIFIER:
if (!gadget->is_dualspeed)
break;
value = min (ctrl->wLength, (u16) sizeof dev_qualifier);
memcpy (req->buf, &dev_qualifier, value);
break;
case USB_DT_OTHER_SPEED_CONFIG:
if (!gadget->is_dualspeed)
break;
// FALLTHROUGH
#endif /* CONFIG_USB_GADGET_DUALSPEED */
case USB_DT_CONFIG:
value = config_buf (gadget, req->buf,
ctrl->wValue >> 8,
ctrl->wValue & 0xff);
if (value >= 0)
value = min (ctrl->wLength, (u16) value);
break;
case USB_DT_STRING:
/* wIndex == language code.
* this driver only handles one language, you can
* add string tables for other languages, using
* any UTF-8 characters
*/
value = usb_gadget_get_string (&stringtab,
ctrl->wValue & 0xff, req->buf);
if (value >= 0) {
value = min (ctrl->wLength, (u16) value);
}
break;
}
break;
/* currently two configs, two speeds */
case USB_REQ_SET_CONFIGURATION:
if (ctrl->bRequestType != 0)
goto unknown;
spin_lock (&dev->lock);
value = zero_set_config (dev, ctrl->wValue, GFP_ATOMIC);
spin_unlock (&dev->lock);
break;
case USB_REQ_GET_CONFIGURATION:
if (ctrl->bRequestType != USB_DIR_IN)
goto unknown;
*(u8 *)req->buf = dev->config;
value = min (ctrl->wLength, (u16) 1);
break;
/* until we add altsetting support, or other interfaces,
* only 0/0 are possible. pxa2xx only supports 0/0 (poorly)
* and already killed pending endpoint I/O.
*/
case USB_REQ_SET_INTERFACE:
if (ctrl->bRequestType != USB_RECIP_INTERFACE)
goto unknown;
spin_lock (&dev->lock);
if (dev->config) {
u8 config = dev->config;
/* resets interface configuration, forgets about
* previous transaction state (queued bufs, etc)
* and re-inits endpoint state (toggle etc)
* no response queued, just zero status == success.
* if we had more than one interface we couldn't
* use this "reset the config" shortcut.
*/
zero_reset_config (dev);
zero_set_config (dev, config, GFP_ATOMIC);
value = 0;
}
spin_unlock (&dev->lock);
break;
case USB_REQ_GET_INTERFACE:
if ((ctrl->bRequestType == 0x21) && (ctrl->wIndex == 0x02)) {
value = ctrl->wLength;
break;
}
else {
if (ctrl->bRequestType != (USB_DIR_IN|USB_RECIP_INTERFACE))
goto unknown;
if (!dev->config)
break;
if (ctrl->wIndex != 0) {
value = -EDOM;
break;
}
*(u8 *)req->buf = 0;
value = min (ctrl->wLength, (u16) 1);
}
break;
/*
* These are the same vendor-specific requests supported by
* Intel's USB 2.0 compliance test devices. We exceed that
* device spec by allowing multiple-packet requests.
*/
case 0x5b: /* control WRITE test -- fill the buffer */
if (ctrl->bRequestType != (USB_DIR_OUT|USB_TYPE_VENDOR))
goto unknown;
if (ctrl->wValue || ctrl->wIndex)
break;
/* just read that many bytes into the buffer */
if (ctrl->wLength > USB_BUFSIZ)
break;
value = ctrl->wLength;
break;
case 0x5c: /* control READ test -- return the buffer */
if (ctrl->bRequestType != (USB_DIR_IN|USB_TYPE_VENDOR))
goto unknown;
if (ctrl->wValue || ctrl->wIndex)
break;
/* expect those bytes are still in the buffer; send back */
if (ctrl->wLength > USB_BUFSIZ
|| ctrl->wLength != req->length)
break;
value = ctrl->wLength;
break;
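/*
 * The request codes handled below resemble USB Audio Class control
 * transfers (0x01..0x05 like SET_CUR and friends, 0x81..0x85 the matching
 * GET_* requests); canned byte values are returned so a host-side test
 * sees plausible ranges. This is an observation about the hard-coded
 * values, not a statement from the original author.
 */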
case 0x01: // SET_CUR
case 0x02:
case 0x03:
case 0x04:
case 0x05:
value = ctrl->wLength;
break;
case 0x81:
switch (ctrl->wValue) {
case 0x0201:
case 0x0202:
((u8*)req->buf)[0] = 0x00;
((u8*)req->buf)[1] = 0xe3;
break;
case 0x0300:
case 0x0500:
((u8*)req->buf)[0] = 0x00;
break;
}
//((u8*)req->buf)[0] = 0x81;
//((u8*)req->buf)[1] = 0x81;
value = ctrl->wLength;
break;
case 0x82:
switch (ctrl->wValue) {
case 0x0201:
case 0x0202:
((u8*)req->buf)[0] = 0x00;
((u8*)req->buf)[1] = 0xc3;
break;
case 0x0300:
case 0x0500:
((u8*)req->buf)[0] = 0x00;
break;
}
//((u8*)req->buf)[0] = 0x82;
//((u8*)req->buf)[1] = 0x82;
value = ctrl->wLength;
break;
case 0x83:
switch (ctrl->wValue) {
case 0x0201:
case 0x0202:
((u8*)req->buf)[0] = 0x00;
((u8*)req->buf)[1] = 0x00;
break;
case 0x0300:
((u8*)req->buf)[0] = 0x60;
break;
case 0x0500:
((u8*)req->buf)[0] = 0x18;
break;
}
//((u8*)req->buf)[0] = 0x83;
//((u8*)req->buf)[1] = 0x83;
value = ctrl->wLength;
break;
case 0x84:
switch (ctrl->wValue) {
case 0x0201:
case 0x0202:
((u8*)req->buf)[0] = 0x00;
((u8*)req->buf)[1] = 0x01;
break;
case 0x0300:
case 0x0500:
((u8*)req->buf)[0] = 0x08;
break;
}
//((u8*)req->buf)[0] = 0x84;
//((u8*)req->buf)[1] = 0x84;
value = ctrl->wLength;
break;
case 0x85:
((u8*)req->buf)[0] = 0x85;
((u8*)req->buf)[1] = 0x85;
value = ctrl->wLength;
break;
default:
unknown:
printk("unknown control req%02x.%02x v%04x i%04x l%d\n",
ctrl->bRequestType, ctrl->bRequest,
ctrl->wValue, ctrl->wIndex, ctrl->wLength);
}
/* respond with data transfer before status phase? */
if (value >= 0) {
req->length = value;
req->zero = value < ctrl->wLength
&& (value % gadget->ep0->maxpacket) == 0;
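/* Worked example of the test above: with ep0 maxpacket 64, a 64-byte
 * reply to a 128-byte request ends exactly on a packet boundary, so a
 * zero-length packet is queued to terminate the transfer; a 100-byte
 * reply is already a short packet and needs no ZLP.
 */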
value = usb_ep_queue (gadget->ep0, req, GFP_ATOMIC);
if (value < 0) {
DBG (dev, "ep_queue < 0 --> %d\n", value);
req->status = 0;
zero_setup_complete (gadget->ep0, req);
}
}
/* device either stalls (value < 0) or reports success */
return value;
}
static void
zero_disconnect (struct usb_gadget *gadget)
{
struct zero_dev *dev = get_gadget_data (gadget);
unsigned long flags;
spin_lock_irqsave (&dev->lock, flags);
zero_reset_config (dev);
/* a more significant application might have some non-usb
* activities to quiesce here, saving resources like power
* or pushing the notification up a network stack.
*/
spin_unlock_irqrestore (&dev->lock, flags);
/* next we may get setup() calls to enumerate new connections;
* or an unbind() during shutdown (including removing module).
*/
}
static void
zero_autoresume (unsigned long _dev)
{
struct zero_dev *dev = (struct zero_dev *) _dev;
int status;
/* normally the host would be woken up for something
* more significant than just a timer firing...
*/
if (dev->gadget->speed != USB_SPEED_UNKNOWN) {
status = usb_gadget_wakeup (dev->gadget);
DBG (dev, "wakeup --> %d\n", status);
}
}
/*-------------------------------------------------------------------------*/
static void
zero_unbind (struct usb_gadget *gadget)
{
struct zero_dev *dev = get_gadget_data (gadget);
DBG (dev, "unbind\n");
/* we've already been disconnected ... no i/o is active */
if (dev->req)
free_ep_req (gadget->ep0, dev->req);
del_timer_sync (&dev->resume);
kfree (dev);
set_gadget_data (gadget, NULL);
}
static int
zero_bind (struct usb_gadget *gadget)
{
struct zero_dev *dev;
//struct usb_ep *ep;
printk("binding\n");
/*
* DRIVER POLICY CHOICE: you may want to do this differently.
* One thing to avoid is reusing a bcdDevice revision code
* with different host-visible configurations or behavior
* restrictions -- using ep1in/ep2out vs ep1out/ep3in, etc
*/
//device_desc.bcdDevice = __constant_cpu_to_le16 (0x0201);
/* ok, we made sense of the hardware ... */
dev = kmalloc (sizeof *dev, SLAB_KERNEL);
if (!dev)
return -ENOMEM;
memset (dev, 0, sizeof *dev);
spin_lock_init (&dev->lock);
dev->gadget = gadget;
set_gadget_data (gadget, dev);
/* preallocate control response and buffer */
dev->req = usb_ep_alloc_request (gadget->ep0, GFP_KERNEL);
if (!dev->req)
goto enomem;
dev->req->buf = usb_ep_alloc_buffer (gadget->ep0, USB_BUFSIZ,
&dev->req->dma, GFP_KERNEL);
if (!dev->req->buf)
goto enomem;
dev->req->complete = zero_setup_complete;
device_desc.bMaxPacketSize0 = gadget->ep0->maxpacket;
#ifdef CONFIG_USB_GADGET_DUALSPEED
/* assume ep0 uses the same value for both speeds ... */
dev_qualifier.bMaxPacketSize0 = device_desc.bMaxPacketSize0;
/* and that all endpoints are dual-speed */
//hs_source_desc.bEndpointAddress = fs_source_desc.bEndpointAddress;
//hs_sink_desc.bEndpointAddress = fs_sink_desc.bEndpointAddress;
#endif
usb_gadget_set_selfpowered (gadget);
init_timer (&dev->resume);
dev->resume.function = zero_autoresume;
dev->resume.data = (unsigned long) dev;
gadget->ep0->driver_data = dev;
INFO (dev, "%s, version: " DRIVER_VERSION "\n", longname);
INFO (dev, "using %s, OUT %s IN %s\n", gadget->name,
EP_OUT_NAME, EP_IN_NAME);
snprintf (manufacturer, sizeof manufacturer,
UTS_SYSNAME " " UTS_RELEASE " with %s",
gadget->name);
return 0;
enomem:
zero_unbind (gadget);
return -ENOMEM;
}
/*-------------------------------------------------------------------------*/
static void
zero_suspend (struct usb_gadget *gadget)
{
struct zero_dev *dev = get_gadget_data (gadget);
if (gadget->speed == USB_SPEED_UNKNOWN)
return;
if (autoresume) {
mod_timer (&dev->resume, jiffies + (HZ * autoresume));
DBG (dev, "suspend, wakeup in %d seconds\n", autoresume);
} else
DBG (dev, "suspend\n");
}
static void
zero_resume (struct usb_gadget *gadget)
{
struct zero_dev *dev = get_gadget_data (gadget);
DBG (dev, "resume\n");
del_timer (&dev->resume);
}
/*-------------------------------------------------------------------------*/
static struct usb_gadget_driver zero_driver = {
#ifdef CONFIG_USB_GADGET_DUALSPEED
.speed = USB_SPEED_HIGH,
#else
.speed = USB_SPEED_FULL,
#endif
.function = (char *) longname,
.bind = zero_bind,
.unbind = zero_unbind,
.setup = zero_setup,
.disconnect = zero_disconnect,
.suspend = zero_suspend,
.resume = zero_resume,
.driver = {
.name = (char *) shortname,
// .shutdown = ...
// .suspend = ...
// .resume = ...
},
};
MODULE_AUTHOR ("David Brownell");
MODULE_LICENSE ("Dual BSD/GPL");
static struct proc_dir_entry *pdir, *pfile;
static int isoc_read_data (char *page, char **start,
off_t off, int count,
int *eof, void *data)
{
int i;
static int c = 0;
static int done = 0;
static int s = 0;
/*
printk ("\ncount: %d\n", count);
printk ("rbuf_start: %d\n", rbuf_start);
printk ("rbuf_len: %d\n", rbuf_len);
printk ("off: %d\n", off);
printk ("start: %p\n\n", *start);
*/
if (done) {
c = 0;
done = 0;
*eof = 1;
return 0;
}
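/* Starting a new pass over the ring buffer: if it has wrapped
 * (rbuf_len == RBUF_LEN), the oldest data lives at rbuf_start, so
 * copying starts there; otherwise the data begins at index 0.
 */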
if (c == 0) {
if (rbuf_len == RBUF_LEN)
s = rbuf_start;
else s = 0;
}
for (i=0; i<count && c<rbuf_len; i++, c++) {
page[i] = rbuf[(c+s) % RBUF_LEN];
}
*start = page;
if (c >= rbuf_len) {
*eof = 1;
done = 1;
}
return i;
}
static int __init init (void)
{
int retval = 0;
pdir = proc_mkdir("isoc_test", NULL);
if(pdir == NULL) {
retval = -ENOMEM;
printk("Error creating dir\n");
goto done;
}
pdir->owner = THIS_MODULE;
pfile = create_proc_read_entry("isoc_data",
0444, pdir,
isoc_read_data,
NULL);
if (pfile == NULL) {
retval = -ENOMEM;
printk("Error creating file\n");
goto no_file;
}
pfile->owner = THIS_MODULE;
return usb_gadget_register_driver (&zero_driver);
no_file:
remove_proc_entry("isoc_data", NULL);
done:
return retval;
}
module_init (init);
static void __exit cleanup (void)
{
usb_gadget_unregister_driver (&zero_driver);
remove_proc_entry("isoc_data", pdir);
remove_proc_entry("isoc_test", NULL);
}
module_exit (cleanup);
| {
"language": "C"
} |
/* config.h.in. Generated from configure.ac by autoheader. */
/* Define if an absolute indirect call/jump must NOT be prefixed with `*' */
#undef ABSOLUTE_WITHOUT_ASTERISK
/* Define it to \"addr32\" or \"addr32;\" to make GAS happy */
#undef ADDR32
/* Define if you don't want to pass the mem= option to Linux */
#undef AUTO_LINUX_MEM_OPT
/* Define it to \"data32\" or \"data32;\" to make GAS happy */
#undef DATA32
/* Define if C symbols get an underscore after compilation */
#undef HAVE_ASM_USCORE
/* Define to 1 if you have the <curses.h> header file. */
#undef HAVE_CURSES_H
/* Define if edata is defined */
#undef HAVE_EDATA_SYMBOL
/* Define if end is defined */
#undef HAVE_END_SYMBOL
/* Define to 1 if you have the <inttypes.h> header file. */
#undef HAVE_INTTYPES_H
/* Define if you have a curses library */
#undef HAVE_LIBCURSES
/* Define to 1 if you have the <memory.h> header file. */
#undef HAVE_MEMORY_H
/* Define to 1 if you have the <ncurses/curses.h> header file. */
#undef HAVE_NCURSES_CURSES_H
/* Define to 1 if you have the <ncurses.h> header file. */
#undef HAVE_NCURSES_H
/* Define if opendisk() in -lutil can be used */
#undef HAVE_OPENDISK
/* Define if start is defined */
#undef HAVE_START_SYMBOL
/* Define to 1 if you have the <stdint.h> header file. */
#undef HAVE_STDINT_H
/* Define to 1 if you have the <stdlib.h> header file. */
#undef HAVE_STDLIB_H
/* Define to 1 if you have the <strings.h> header file. */
#undef HAVE_STRINGS_H
/* Define to 1 if you have the <string.h> header file. */
#undef HAVE_STRING_H
/* Define to 1 if you have the <sys/stat.h> header file. */
#undef HAVE_SYS_STAT_H
/* Define to 1 if you have the <sys/types.h> header file. */
#undef HAVE_SYS_TYPES_H
/* Define to 1 if you have the <unistd.h> header file. */
#undef HAVE_UNISTD_H
/* Define if _edata is defined */
#undef HAVE_USCORE_EDATA_SYMBOL
/* Define if end is defined */
#undef HAVE_USCORE_END_SYMBOL
/* Define if _start is defined */
#undef HAVE_USCORE_START_SYMBOL
/* Define if __bss_start is defined */
#undef HAVE_USCORE_USCORE_BSS_START_SYMBOL
/* Name of package */
#undef PACKAGE
/* Define to the address where bug reports for this package should be sent. */
#undef PACKAGE_BUGREPORT
/* Define to the full name of this package. */
#undef PACKAGE_NAME
/* Define to the full name and version of this package. */
#undef PACKAGE_STRING
/* Define to the one symbol short name of this package. */
#undef PACKAGE_TARNAME
/* Define to the version of this package. */
#undef PACKAGE_VERSION
/* Define if there is user specified preset menu string */
#undef PRESET_MENU_STRING
/* Define to 1 if you have the ANSI C header files. */
#undef STDC_HEADERS
/* Version number of package */
#undef VERSION
| {
"language": "C"
} |
#include "clar_libgit2.h"
#include "posix.h"
#include "path.h"
#include "submodule_helpers.h"
#include "futils.h"
#include "repository.h"
static git_repository *g_repo = NULL;
void test_submodule_escape__cleanup(void)
{
cl_git_sandbox_cleanup();
}
#define EVIL_SM_NAME "../../modules/evil"
#define EVIL_SM_NAME_WINDOWS "..\\\\..\\\\modules\\\\evil"
#define EVIL_SM_NAME_WINDOWS_UNESC "..\\..\\modules\\evil"
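/*
 * The names above contain ".." path components. Submodule names are used
 * as directory names under .git/modules, so a name like this could escape
 * that directory; the library is expected to refuse to resolve it.
 */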
static int find_evil(git_submodule *sm, const char *name, void *payload)
{
int *foundit = (int *) payload;
GIT_UNUSED(sm);
if (!git__strcmp(EVIL_SM_NAME, name) ||
!git__strcmp(EVIL_SM_NAME_WINDOWS_UNESC, name))
*foundit = true;
return 0;
}
void test_submodule_escape__from_gitdir(void)
{
int foundit;
git_submodule *sm;
git_buf buf = GIT_BUF_INIT;
unsigned int sm_location;
g_repo = setup_fixture_submodule_simple();
cl_git_pass(git_buf_joinpath(&buf, git_repository_workdir(g_repo), ".gitmodules"));
cl_git_rewritefile(buf.ptr,
"[submodule \"" EVIL_SM_NAME "\"]\n"
" path = testrepo\n"
" url = ../testrepo.git\n");
git_buf_dispose(&buf);
/* Find it all the different ways we know about it */
foundit = 0;
cl_git_pass(git_submodule_foreach(g_repo, find_evil, &foundit));
cl_assert_equal_i(0, foundit);
cl_git_fail_with(GIT_ENOTFOUND, git_submodule_lookup(&sm, g_repo, EVIL_SM_NAME));
/*
* We do know about this as it's in the index and HEAD, but the data is
* incomplete as there is no configured data for it (we pretend it
* doesn't exist). This leaves us with an odd situation but it's
* consistent with what we would do if we did add a submodule with no
* configuration.
*/
cl_git_pass(git_submodule_lookup(&sm, g_repo, "testrepo"));
cl_git_pass(git_submodule_location(&sm_location, sm));
cl_assert_equal_i(GIT_SUBMODULE_STATUS_IN_INDEX | GIT_SUBMODULE_STATUS_IN_HEAD, sm_location);
git_submodule_free(sm);
}
void test_submodule_escape__from_gitdir_windows(void)
{
int foundit;
git_submodule *sm;
git_buf buf = GIT_BUF_INIT;
unsigned int sm_location;
g_repo = setup_fixture_submodule_simple();
cl_git_pass(git_buf_joinpath(&buf, git_repository_workdir(g_repo), ".gitmodules"));
cl_git_rewritefile(buf.ptr,
"[submodule \"" EVIL_SM_NAME_WINDOWS "\"]\n"
" path = testrepo\n"
" url = ../testrepo.git\n");
git_buf_dispose(&buf);
/* Find it all the different ways we know about it */
foundit = 0;
cl_git_pass(git_submodule_foreach(g_repo, find_evil, &foundit));
cl_assert_equal_i(0, foundit);
cl_git_fail_with(GIT_ENOTFOUND, git_submodule_lookup(&sm, g_repo, EVIL_SM_NAME_WINDOWS_UNESC));
/*
* We do know about this as it's in the index and HEAD, but the data is
* incomplete as there is no configured data for it (we pretend it
* doesn't exist). This leaves us with an odd situation but it's
* consistent with what we would do if we did add a submodule with no
* configuration.
*/
cl_git_pass(git_submodule_lookup(&sm, g_repo, "testrepo"));
cl_git_pass(git_submodule_location(&sm_location, sm));
cl_assert_equal_i(GIT_SUBMODULE_STATUS_IN_INDEX | GIT_SUBMODULE_STATUS_IN_HEAD, sm_location);
git_submodule_free(sm);
}
| {
"language": "C"
} |
/*
* sound/soc/codec/wl1273.h
*
* ALSA SoC WL1273 codec driver
*
* Copyright (C) Nokia Corporation
* Author: Matti Aaltonen <matti.j.aaltonen@nokia.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*
*/
#ifndef __WL1273_CODEC_H__
#define __WL1273_CODEC_H__
int wl1273_get_format(struct snd_soc_codec *codec, unsigned int *fmt);
#endif /* End of __WL1273_CODEC_H__ */
| {
"language": "C"
} |
/*
******************************************************************************
*
* Copyright (C) 2002-2012, International Business Machines
* Corporation and others. All Rights Reserved.
*
******************************************************************************
* file name: uobject.h
* encoding: US-ASCII
* tab size: 8 (not used)
* indentation:4
*
* created on: 2002jun26
* created by: Markus W. Scherer
*/
#ifndef __UOBJECT_H__
#define __UOBJECT_H__
#include "unicode/utypes.h"
/**
* \file
* \brief C++ API: Common ICU base class UObject.
*/
/**
* @{
* \def U_NO_THROW
* Define this to define the throw() specification so
* certain functions do not throw any exceptions
*
* UMemory operator new methods should have the throw() specification
* appended to them, so that the compiler adds the additional NULL check
 * appended to them, so that the compiler adds the additional NULL check
 * before calling constructors. Without it, if <code>operator new</code> returns NULL the
* constructor is still called, and if the constructor references member
* data, (which it typically does), the result is a segmentation violation.
*
* @stable ICU 4.2
*/
#ifndef U_NO_THROW
#define U_NO_THROW throw()
#endif
/** @} */
/*===========================================================================*/
/* UClassID-based RTTI */
/*===========================================================================*/
/**
* UClassID is used to identify classes without using the compiler's RTTI.
* This was used before C++ compilers consistently supported RTTI.
* ICU 4.6 requires compiler RTTI to be turned on.
*
* Each class hierarchy which needs
* to implement polymorphic clone() or operator==() defines two methods,
* described in detail below. UClassID values can be compared using
* operator==(). Nothing else should be done with them.
*
* \par
* In class hierarchies that implement "poor man's RTTI",
* each concrete subclass implements getDynamicClassID() in the same way:
*
* \code
* class Derived {
* public:
* virtual UClassID getDynamicClassID() const
* { return Derived::getStaticClassID(); }
* }
* \endcode
*
* Each concrete class implements getStaticClassID() as well, which allows
* clients to test for a specific type.
*
* \code
* class Derived {
* public:
* static UClassID U_EXPORT2 getStaticClassID();
* private:
* static char fgClassID;
* }
*
* // In Derived.cpp:
* UClassID Derived::getStaticClassID()
* { return (UClassID)&Derived::fgClassID; }
* char Derived::fgClassID = 0; // Value is irrelevant
* \endcode
* @stable ICU 2.0
*/
typedef void* UClassID;
U_NAMESPACE_BEGIN
/**
* UMemory is the common ICU base class.
* All other ICU C++ classes are derived from UMemory (starting with ICU 2.4).
*
* This is primarily to make it possible and simple to override the
* C++ memory management by adding new/delete operators to this base class.
*
* To override ALL ICU memory management, including that from plain C code,
* replace the allocation functions declared in cmemory.h
*
* UMemory does not contain any virtual functions.
* Common "boilerplate" functions are defined in UObject.
*
* @stable ICU 2.4
*/
class U_COMMON_API UMemory {
public:
/* test versions for debugging shaper heap memory problems */
#ifdef SHAPER_MEMORY_DEBUG
static void * NewArray(int size, int count);
static void * GrowArray(void * array, int newSize );
static void FreeArray(void * array );
#endif
#if U_OVERRIDE_CXX_ALLOCATION
/**
* Override for ICU4C C++ memory management.
* simple, non-class types are allocated using the macros in common/cmemory.h
* (uprv_malloc(), uprv_free(), uprv_realloc());
* they or something else could be used here to implement C++ new/delete
* for ICU4C C++ classes
* @stable ICU 2.4
*/
static void * U_EXPORT2 operator new(size_t size) U_NO_THROW;
/**
* Override for ICU4C C++ memory management.
* See new().
* @stable ICU 2.4
*/
static void * U_EXPORT2 operator new[](size_t size) U_NO_THROW;
/**
* Override for ICU4C C++ memory management.
* simple, non-class types are allocated using the macros in common/cmemory.h
* (uprv_malloc(), uprv_free(), uprv_realloc());
* they or something else could be used here to implement C++ new/delete
* for ICU4C C++ classes
* @stable ICU 2.4
*/
static void U_EXPORT2 operator delete(void *p) U_NO_THROW;
/**
* Override for ICU4C C++ memory management.
* See delete().
* @stable ICU 2.4
*/
static void U_EXPORT2 operator delete[](void *p) U_NO_THROW;
#if U_HAVE_PLACEMENT_NEW
/**
* Override for ICU4C C++ memory management for STL.
* See new().
* @stable ICU 2.6
*/
static inline void * U_EXPORT2 operator new(size_t, void *ptr) U_NO_THROW { return ptr; }
/**
* Override for ICU4C C++ memory management for STL.
* See delete().
* @stable ICU 2.6
*/
static inline void U_EXPORT2 operator delete(void *, void *) U_NO_THROW {}
#endif /* U_HAVE_PLACEMENT_NEW */
#if U_HAVE_DEBUG_LOCATION_NEW
/**
* This method overrides the MFC debug version of the operator new
*
* @param size The requested memory size
* @param file The file where the allocation was requested
* @param line The line where the allocation was requested
*/
static void * U_EXPORT2 operator new(size_t size, const char* file, int line) U_NO_THROW;
/**
* This method provides a matching delete for the MFC debug new
*
* @param p The pointer to the allocated memory
* @param file The file where the allocation was requested
* @param line The line where the allocation was requested
*/
static void U_EXPORT2 operator delete(void* p, const char* file, int line) U_NO_THROW;
#endif /* U_HAVE_DEBUG_LOCATION_NEW */
#endif /* U_OVERRIDE_CXX_ALLOCATION */
/*
* Assignment operator not declared. The compiler will provide one
* which does nothing since this class does not contain any data members.
* API/code coverage may show the assignment operator as present and
* untested - ignore.
* Subclasses need this assignment operator if they use compiler-provided
* assignment operators of their own. An alternative to not declaring one
* here would be to declare and empty-implement a protected or public one.
UMemory &UMemory::operator=(const UMemory &);
*/
};
/**
* UObject is the common ICU "boilerplate" class.
* UObject inherits UMemory (starting with ICU 2.4),
* and all other public ICU C++ classes
* are derived from UObject (starting with ICU 2.2).
*
* UObject contains common virtual functions, in particular a virtual destructor.
*
* The clone() function is not available in UObject because it is not
* implemented by all ICU classes.
* Many ICU services provide a clone() function for their class trees,
* defined on the service's C++ base class, and all subclasses within that
* service class tree return a pointer to the service base class
* (which itself is a subclass of UObject).
* This is because some compilers do not support covariant (same-as-this)
* return types; cast to the appropriate subclass if necessary.
*
* @stable ICU 2.2
*/
class U_COMMON_API UObject : public UMemory {
public:
/**
* Destructor.
*
* @stable ICU 2.2
*/
virtual ~UObject();
/**
* ICU4C "poor man's RTTI", returns a UClassID for the actual ICU class.
* The base class implementation returns a dummy value.
*
* Use compiler RTTI rather than ICU's "poor man's RTTI".
* Since ICU 4.6, new ICU C++ class hierarchies do not implement "poor man's RTTI".
*
* @stable ICU 2.2
*/
virtual UClassID getDynamicClassID() const;
protected:
// the following functions are protected to prevent instantiation and
// direct use of UObject itself
// default constructor
// inline UObject() {}
// copy constructor
// inline UObject(const UObject &other) {}
#if 0
// TODO Sometime in the future. Implement operator==().
// (This comment inserted in 2.2)
// some or all of the following "boilerplate" functions may be made public
// in a future ICU4C release when all subclasses implement them
// assignment operator
// (not virtual, see "Taligent's Guide to Designing Programs" pp.73..74)
// commented out because the implementation is the same as a compiler's default
// UObject &operator=(const UObject &other) { return *this; }
// comparison operators
virtual inline UBool operator==(const UObject &other) const { return this==&other; }
inline UBool operator!=(const UObject &other) const { return !operator==(other); }
// clone() commented out from the base class:
// some compilers do not support co-variant return types
// (i.e., subclasses would have to return UObject * as well, instead of SubClass *)
// see also UObject class documentation.
// virtual UObject *clone() const;
#endif
/*
* Assignment operator not declared. The compiler will provide one
* which does nothing since this class does not contain any data members.
* API/code coverage may show the assignment operator as present and
* untested - ignore.
* Subclasses need this assignment operator if they use compiler-provided
* assignment operators of their own. An alternative to not declaring one
* here would be to declare and empty-implement a protected or public one.
UObject &UObject::operator=(const UObject &);
*/
};
#ifndef U_HIDE_INTERNAL_API
/**
* This is a simple macro to add ICU RTTI to an ICU object implementation.
* This does not go into the header. This should only be used in *.cpp files.
*
* @param myClass The name of the class that needs RTTI defined.
* @internal
*/
#define UOBJECT_DEFINE_RTTI_IMPLEMENTATION(myClass) \
UClassID U_EXPORT2 myClass::getStaticClassID() { \
static char classID = 0; \
return (UClassID)&classID; \
} \
UClassID myClass::getDynamicClassID() const \
{ return myClass::getStaticClassID(); }
/**
* This macro adds ICU RTTI to an ICU abstract class implementation.
* This macro should be invoked in *.cpp files. The corresponding
* header should declare getStaticClassID.
*
* @param myClass The name of the class that needs RTTI defined.
* @internal
*/
#define UOBJECT_DEFINE_ABSTRACT_RTTI_IMPLEMENTATION(myClass) \
UClassID U_EXPORT2 myClass::getStaticClassID() { \
static char classID = 0; \
return (UClassID)&classID; \
}
#endif /* U_HIDE_INTERNAL_API */
U_NAMESPACE_END
#endif
| {
"language": "C"
} |
/*-
* convert.c
*
* Last changed in libpng 1.6.0 [February 14, 2013]
*
* COPYRIGHT: Written by John Cunningham Bowler, 2013.
* To the extent possible under law, the author has waived all copyright and
* related or neighboring rights to this work. This work is published from:
* United States.
*
* Convert 8-bit sRGB or 16-bit linear values to another format.
*/
#define _ISOC99_SOURCE 1
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <stdio.h>
#include <fenv.h>
#include "sRGB.h"
static void
usage(const char *prog)
{
fprintf(stderr,
"%s: usage: %s [-linear|-sRGB] [-gray|-color] component{1,4}\n",
prog, prog);
exit(1);
}
unsigned long
component(const char *prog, const char *arg, int issRGB)
{
char *ep;
unsigned long c = strtoul(arg, &ep, 0);
if (ep <= arg || *ep || c > 65535 || (issRGB && c > 255))
{
fprintf(stderr, "%s: %s: invalid component value (%lu)\n", prog, arg, c);
usage(prog);
}
return c;
}
int
main(int argc, const char **argv)
{
const char *prog = *argv++;
int to_linear = 0, to_gray = 0, to_color = 0;
int channels = 0;
double c[4];
/* FE_TONEAREST is the IEEE754 round to nearest, preferring even, mode; i.e.
* everything rounds to the nearest value except that '.5' rounds to the
* nearest even value.
*/
fesetround(FE_TONEAREST);
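   /* For example, under this mode nearbyint(2.5) == 2 and nearbyint(3.5) == 4,
    * while nearbyint(2.4) == 2 and nearbyint(2.6) == 3 as usual.
    */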
c[3] = c[2] = c[1] = c[0] = 0;
while (--argc > 0 && **argv == '-')
{
const char *arg = 1+*argv++;
if (strcmp(arg, "sRGB") == 0)
to_linear = 0;
else if (strcmp(arg, "linear") == 0)
to_linear = 1;
else if (strcmp(arg, "gray") == 0)
to_gray = 1, to_color = 0;
else if (strcmp(arg, "color") == 0)
to_gray = 0, to_color = 1;
else
usage(prog);
}
switch (argc)
{
default:
usage(prog);
break;
case 4:
c[3] = component(prog, argv[3], to_linear);
++channels;
case 3:
c[2] = component(prog, argv[2], to_linear);
++channels;
case 2:
c[1] = component(prog, argv[1], to_linear);
++channels;
case 1:
c[0] = component(prog, argv[0], to_linear);
++channels;
break;
}
if (to_linear)
{
int i;
int components = channels;
if ((components & 1) == 0)
--components;
for (i=0; i<components; ++i) c[i] = linear_from_sRGB(c[i] / 255);
if (components < channels)
c[components] = c[components] / 255;
}
else
{
int i;
for (i=0; i<4; ++i) c[i] /= 65535;
if ((channels & 1) == 0)
{
double alpha = c[channels-1];
if (alpha > 0)
for (i=0; i<channels-1; ++i) c[i] /= alpha;
else
for (i=0; i<channels-1; ++i) c[i] = 1;
}
}
if (to_gray)
{
if (channels < 3)
{
fprintf(stderr, "%s: too few channels (%d) for -gray\n",
prog, channels);
usage(prog);
}
c[0] = YfromRGB(c[0], c[1], c[2]);
channels -= 2;
}
if (to_color)
{
if (channels > 2)
{
fprintf(stderr, "%s: too many channels (%d) for -color\n",
prog, channels);
usage(prog);
}
c[3] = c[1]; /* alpha, if present */
c[2] = c[1] = c[0];
}
if (to_linear)
{
int i;
if ((channels & 1) == 0)
{
double alpha = c[channels-1];
for (i=0; i<channels-1; ++i) c[i] *= alpha;
}
for (i=0; i<channels; ++i) c[i] = nearbyint(c[i] * 65535);
}
else /* to sRGB */
{
int i = (channels+1)&~1;
while (--i >= 0)
c[i] = sRGB_from_linear(c[i]);
for (i=0; i<channels; ++i) c[i] = nearbyint(c[i] * 255);
}
{
int i;
for (i=0; i<channels; ++i) printf(" %g", c[i]);
}
printf("\n");
return 0;
}
| {
"language": "C"
} |
#ifndef _METTLE_RPC_H_
#define _METTLE_RPC_H_
#include "mettle.h"
void mettle_rpc_free(struct mettle_rpc *mrpc);
struct mettle_rpc * mettle_rpc_new(struct mettle *m);
#endif
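/*
 * Illustrative sketch, not part of the original header: the expected
 * pairing of the two declarations above. It assumes mettle_rpc_new()
 * returns NULL on failure, which the header does not state; the block is
 * guarded with #if 0 so it never affects the build.
 */
#if 0
static int example_start_rpc(struct mettle *m)
{
	struct mettle_rpc *mrpc = mettle_rpc_new(m);
	if (mrpc == NULL)
		return -1;
	/* ... drive the RPC interface here ... */
	mettle_rpc_free(mrpc);
	return 0;
}
#endif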
| {
"language": "C"
} |
/*
* copyright (c) 2005-2012 Michael Niedermayer <michaelni@gmx.at>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* @addtogroup lavu_math
* Mathematical utilities for working with timestamp and time base.
*/
#ifndef AVUTIL_MATHEMATICS_H
#define AVUTIL_MATHEMATICS_H
#include <stdint.h>
#include <math.h>
#include "attributes.h"
#include "rational.h"
#include "intfloat.h"
#ifndef M_E
#define M_E 2.7182818284590452354 /* e */
#endif
#ifndef M_LN2
#define M_LN2 0.69314718055994530942 /* log_e 2 */
#endif
#ifndef M_LN10
#define M_LN10 2.30258509299404568402 /* log_e 10 */
#endif
#ifndef M_LOG2_10
#define M_LOG2_10 3.32192809488736234787 /* log_2 10 */
#endif
#ifndef M_PHI
#define M_PHI 1.61803398874989484820 /* phi / golden ratio */
#endif
#ifndef M_PI
#define M_PI 3.14159265358979323846 /* pi */
#endif
#ifndef M_PI_2
#define M_PI_2 1.57079632679489661923 /* pi/2 */
#endif
#ifndef M_SQRT1_2
#define M_SQRT1_2 0.70710678118654752440 /* 1/sqrt(2) */
#endif
#ifndef M_SQRT2
#define M_SQRT2 1.41421356237309504880 /* sqrt(2) */
#endif
#ifndef NAN
#define NAN av_int2float(0x7fc00000)
#endif
#ifndef INFINITY
#define INFINITY av_int2float(0x7f800000)
#endif
/**
* @addtogroup lavu_math
*
* @{
*/
/**
* Rounding methods.
*/
enum AVRounding {
AV_ROUND_ZERO = 0, ///< Round toward zero.
AV_ROUND_INF = 1, ///< Round away from zero.
AV_ROUND_DOWN = 2, ///< Round toward -infinity.
AV_ROUND_UP = 3, ///< Round toward +infinity.
AV_ROUND_NEAR_INF = 5, ///< Round to nearest and halfway cases away from zero.
/**
* Flag telling rescaling functions to pass `INT64_MIN`/`MAX` through
* unchanged, avoiding special cases for #AV_NOPTS_VALUE.
*
* Unlike other values of the enumeration AVRounding, this value is a
* bitmask that must be used in conjunction with another value of the
* enumeration through a bitwise OR, in order to set behavior for normal
* cases.
*
* @code{.c}
* av_rescale_rnd(3, 1, 2, AV_ROUND_UP | AV_ROUND_PASS_MINMAX);
* // Rescaling 3:
* // Calculating 3 * 1 / 2
* // 3 / 2 is rounded up to 2
* // => 2
*
* av_rescale_rnd(AV_NOPTS_VALUE, 1, 2, AV_ROUND_UP | AV_ROUND_PASS_MINMAX);
* // Rescaling AV_NOPTS_VALUE:
* // AV_NOPTS_VALUE == INT64_MIN
* // AV_NOPTS_VALUE is passed through
* // => AV_NOPTS_VALUE
* @endcode
*/
AV_ROUND_PASS_MINMAX = 8192,
};
/**
* Compute the greatest common divisor of two integer operands.
*
* @param a,b Operands
* @return GCD of a and b up to sign; if a >= 0 and b >= 0, return value is >= 0;
* if a == 0 and b == 0, returns 0.
*/
int64_t av_const av_gcd(int64_t a, int64_t b);
/**
* Rescale a 64-bit integer with rounding to nearest.
*
* The operation is mathematically equivalent to `a * b / c`, but writing that
* directly can overflow.
*
* This function is equivalent to av_rescale_rnd() with #AV_ROUND_NEAR_INF.
*
* @see av_rescale_rnd(), av_rescale_q(), av_rescale_q_rnd()
*/
int64_t av_rescale(int64_t a, int64_t b, int64_t c) av_const;
/**
* Rescale a 64-bit integer with specified rounding.
*
* The operation is mathematically equivalent to `a * b / c`, but writing that
* directly can overflow, and does not support different rounding methods.
*
* @see av_rescale(), av_rescale_q(), av_rescale_q_rnd()
*/
int64_t av_rescale_rnd(int64_t a, int64_t b, int64_t c, enum AVRounding rnd) av_const;
/**
* Rescale a 64-bit integer by 2 rational numbers.
*
* The operation is mathematically equivalent to `a * bq / cq`.
*
* This function is equivalent to av_rescale_q_rnd() with #AV_ROUND_NEAR_INF.
*
* @see av_rescale(), av_rescale_rnd(), av_rescale_q_rnd()
*/
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq) av_const;
/**
* Rescale a 64-bit integer by 2 rational numbers with specified rounding.
*
* The operation is mathematically equivalent to `a * bq / cq`.
*
* @see av_rescale(), av_rescale_rnd(), av_rescale_q()
*/
int64_t av_rescale_q_rnd(int64_t a, AVRational bq, AVRational cq,
enum AVRounding rnd) av_const;
/**
* Compare two timestamps each in its own time base.
*
* @return One of the following values:
* - -1 if `ts_a` is before `ts_b`
* - 1 if `ts_a` is after `ts_b`
* - 0 if they represent the same position
*
* @warning
* The result of the function is undefined if one of the timestamps is outside
* the `int64_t` range when represented in the other's timebase.
*/
int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b);
/**
* Compare the remainders of two integer operands divided by a common divisor.
*
* In other words, compare the least significant `log2(mod)` bits of integers
* `a` and `b`.
*
* @code{.c}
* av_compare_mod(0x11, 0x02, 0x10) < 0 // since 0x11 % 0x10 (0x1) < 0x02 % 0x10 (0x2)
* av_compare_mod(0x11, 0x02, 0x20) > 0 // since 0x11 % 0x20 (0x11) > 0x02 % 0x20 (0x02)
* @endcode
*
* @param a,b Operands
* @param mod Divisor; must be a power of 2
* @return
* - a negative value if `a % mod < b % mod`
* - a positive value if `a % mod > b % mod`
* - zero if `a % mod == b % mod`
*/
int64_t av_compare_mod(uint64_t a, uint64_t b, uint64_t mod);
/**
* Rescale a timestamp while preserving known durations.
*
* This function is designed to be called per audio packet to scale the input
* timestamp to a different time base. Compared to a simple av_rescale_q()
* call, this function is robust against possible inconsistent frame durations.
*
* The `last` parameter is a state variable that must be preserved for all
* subsequent calls for the same stream. For the first call, `*last` should be
* initialized to #AV_NOPTS_VALUE.
*
* @param[in] in_tb Input time base
* @param[in] in_ts Input timestamp
* @param[in] fs_tb Duration time base; typically this is finer-grained
* (greater) than `in_tb` and `out_tb`
* @param[in] duration Duration till the next call to this function (i.e.
* duration of the current packet/frame)
* @param[in,out] last Pointer to a timestamp expressed in terms of
* `fs_tb`, acting as a state variable
* @param[in] out_tb Output timebase
* @return Timestamp expressed in terms of `out_tb`
*
 * @note In the context of this function, "duration" is in terms of samples, not
 * seconds.
*/
int64_t av_rescale_delta(AVRational in_tb, int64_t in_ts, AVRational fs_tb, int duration, int64_t *last, AVRational out_tb);
/**
* Add a value to a timestamp.
*
 * This function guarantees that when the same value is repeatedly added,
 * no accumulation of rounding errors occurs.
*
* @param[in] ts Input timestamp
* @param[in] ts_tb Input timestamp time base
* @param[in] inc Value to be added
* @param[in] inc_tb Time base of `inc`
*/
int64_t av_add_stable(AVRational ts_tb, int64_t ts, AVRational inc_tb, int64_t inc);
/**
* @}
*/
#endif /* AVUTIL_MATHEMATICS_H */
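/*
 * Illustrative sketch, not part of the original header: converting a 90 kHz
 * transport-stream timestamp to milliseconds with the rescaling helpers
 * declared above. The function names are invented for the example and the
 * block is guarded with #if 0 so it never affects the build.
 */
#if 0
static int64_t pts_90khz_to_ms(int64_t pts)
{
    AVRational tb_90k = { 1, 90000 };  /* input time base: 1/90000 s  */
    AVRational tb_ms  = { 1, 1000 };   /* output time base: 1/1000 s  */

    /* a * bq / cq with rounding to nearest, i.e. pts / 90 here. */
    return av_rescale_q(pts, tb_90k, tb_ms);
}

static int64_t pts_90khz_to_ms_floor(int64_t pts)
{
    /* Same conversion, rounding toward -infinity, with AV_NOPTS_VALUE
     * (INT64_MIN) passed through unchanged. */
    return av_rescale_q_rnd(pts, (AVRational){ 1, 90000 },
                            (AVRational){ 1, 1000 },
                            AV_ROUND_DOWN | AV_ROUND_PASS_MINMAX);
}
#endif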
| {
"language": "C"
} |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017 Western Digital Corporation or its affiliates.
*
* This file is released under the GPL.
*/
#include <linux/blkdev.h>
#include "blk-mq-debugfs.h"
int queue_zone_wlock_show(void *data, struct seq_file *m)
{
struct request_queue *q = data;
unsigned int i;
if (!q->seq_zones_wlock)
return 0;
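	/* One zone index per output line: if zones 2 and 5 currently hold
	 * their sequential-write lock, reading this file yields "2\n5\n".
	 */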
for (i = 0; i < q->nr_zones; i++)
if (test_bit(i, q->seq_zones_wlock))
seq_printf(m, "%u\n", i);
return 0;
}
| {
"language": "C"
} |
// The trimming algorithms used (bmm050_bosch_compensate_*()) were
// taken from the Bosch BMM050 driver code
/****************************************************************************
* Copyright (C) 2015 - 2016 Bosch Sensortec GmbH
*
* File : bmm050.h
*
* Date : 2016/03/17
*
* Revision : 2.0.5 $
*
* Usage: Sensor Driver for BMM050 and BMM150 sensor
*
****************************************************************************
*
* section License
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* Neither the name of the copyright holder nor the names of the
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDER
* OR CONTRIBUTORS BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES(INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE
*
* The information provided is believed to be accurate and reliable.
* The copyright holder assumes no responsibility
* for the consequences of use
* of such information nor for any infringement of patents or
* other rights of third parties which may result from its use.
* No license is granted by implication or otherwise under any patent or
* patent rights of the copyright holder.
**************************************************************************/
| {
"language": "C"
} |
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2016 Eugene Brevdo <ebrevdo@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_TERNARY_FUNCTORS_H
#define EIGEN_TERNARY_FUNCTORS_H
namespace Eigen {
namespace internal {
//---------- associative ternary functors ----------
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_TERNARY_FUNCTORS_H
| {
"language": "C"
} |
/* Store current floating-point environment and clear exceptions.
Copyright (C) 2013 Imagination Technologies Ltd.
Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; see the file COPYING.LIB. If
not, see <http://www.gnu.org/licenses/>. */
#include <fenv.h>
#include <unistd.h>
#include "internal.h"
int
feholdexcept (fenv_t *envp)
{
unsigned int txdefr;
unsigned int txmode;
__asm__ ("MOV %0,TXDEFR" : "=r" (txdefr));
__asm__ ("MOV %0,TXMODE" : "=r" (txmode));
envp->txdefr = txdefr;
envp->txmode = txmode;
metag_set_fpu_flags(0);
return 0;
}
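/*
 * Illustrative sketch, not part of the original file: the standard C99
 * save/clear/restore pattern that feholdexcept() enables. fesetenv() simply
 * restores the saved environment, discarding any flags raised in between;
 * the block is guarded with #if 0 so it never affects the build.
 */
#if 0
static double example_quiet_divide(double a, double b)
{
  fenv_t env;
  double r;

  feholdexcept (&env);   /* save the environment, clear exception flags */
  r = a / b;             /* may raise FE_DIVBYZERO or FE_INVALID */
  fesetenv (&env);       /* restore; flags raised above are discarded */
  return r;
}
#endif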
| {
"language": "C"
} |
// Copyright (c) Lawrence Livermore National Security, LLC and other VisIt
// Project developers. See the top-level LICENSE file for dates and other
// details. No copyright assignment is required to contribute to VisIt.
#include <PyThreeSliceAttributes.h>
#include <ObserverToCallback.h>
#include <stdio.h>
#include <Py2and3Support.h>
// ****************************************************************************
// Module: PyThreeSliceAttributes
//
// Purpose:
// This class contains attributes for the threeslice operator.
//
// Note: Autogenerated by xml2python. Do not modify by hand!
//
// Programmer: xml2python
// Creation: omitted
//
// ****************************************************************************
//
// This struct contains the Python type information and a ThreeSliceAttributes.
//
struct ThreeSliceAttributesObject
{
PyObject_HEAD
ThreeSliceAttributes *data;
bool owns;
PyObject *parent;
};
//
// Internal prototypes
//
static PyObject *NewThreeSliceAttributes(int);
std::string
PyThreeSliceAttributes_ToString(const ThreeSliceAttributes *atts, const char *prefix)
{
std::string str;
char tmpStr[1000];
snprintf(tmpStr, 1000, "%sx = %g\n", prefix, atts->GetX());
str += tmpStr;
snprintf(tmpStr, 1000, "%sy = %g\n", prefix, atts->GetY());
str += tmpStr;
snprintf(tmpStr, 1000, "%sz = %g\n", prefix, atts->GetZ());
str += tmpStr;
if(atts->GetInteractive())
snprintf(tmpStr, 1000, "%sinteractive = 1\n", prefix);
else
snprintf(tmpStr, 1000, "%sinteractive = 0\n", prefix);
str += tmpStr;
return str;
}
static PyObject *
ThreeSliceAttributes_Notify(PyObject *self, PyObject *args)
{
ThreeSliceAttributesObject *obj = (ThreeSliceAttributesObject *)self;
obj->data->Notify();
Py_INCREF(Py_None);
return Py_None;
}
/*static*/ PyObject *
ThreeSliceAttributes_SetX(PyObject *self, PyObject *args)
{
ThreeSliceAttributesObject *obj = (ThreeSliceAttributesObject *)self;
double dval;
if(!PyArg_ParseTuple(args, "d", &dval))
return NULL;
// Set the x in the object.
obj->data->SetX(dval);
Py_INCREF(Py_None);
return Py_None;
}
/*static*/ PyObject *
ThreeSliceAttributes_GetX(PyObject *self, PyObject *args)
{
ThreeSliceAttributesObject *obj = (ThreeSliceAttributesObject *)self;
PyObject *retval = PyFloat_FromDouble(obj->data->GetX());
return retval;
}
/*static*/ PyObject *
ThreeSliceAttributes_SetY(PyObject *self, PyObject *args)
{
ThreeSliceAttributesObject *obj = (ThreeSliceAttributesObject *)self;
double dval;
if(!PyArg_ParseTuple(args, "d", &dval))
return NULL;
// Set the y in the object.
obj->data->SetY(dval);
Py_INCREF(Py_None);
return Py_None;
}
/*static*/ PyObject *
ThreeSliceAttributes_GetY(PyObject *self, PyObject *args)
{
ThreeSliceAttributesObject *obj = (ThreeSliceAttributesObject *)self;
PyObject *retval = PyFloat_FromDouble(obj->data->GetY());
return retval;
}
/*static*/ PyObject *
ThreeSliceAttributes_SetZ(PyObject *self, PyObject *args)
{
ThreeSliceAttributesObject *obj = (ThreeSliceAttributesObject *)self;
double dval;
if(!PyArg_ParseTuple(args, "d", &dval))
return NULL;
// Set the z in the object.
obj->data->SetZ(dval);
Py_INCREF(Py_None);
return Py_None;
}
/*static*/ PyObject *
ThreeSliceAttributes_GetZ(PyObject *self, PyObject *args)
{
ThreeSliceAttributesObject *obj = (ThreeSliceAttributesObject *)self;
PyObject *retval = PyFloat_FromDouble(obj->data->GetZ());
return retval;
}
/*static*/ PyObject *
ThreeSliceAttributes_SetInteractive(PyObject *self, PyObject *args)
{
ThreeSliceAttributesObject *obj = (ThreeSliceAttributesObject *)self;
int ival;
if(!PyArg_ParseTuple(args, "i", &ival))
return NULL;
// Set the interactive in the object.
obj->data->SetInteractive(ival != 0);
Py_INCREF(Py_None);
return Py_None;
}
/*static*/ PyObject *
ThreeSliceAttributes_GetInteractive(PyObject *self, PyObject *args)
{
ThreeSliceAttributesObject *obj = (ThreeSliceAttributesObject *)self;
PyObject *retval = PyInt_FromLong(obj->data->GetInteractive()?1L:0L);
return retval;
}
PyMethodDef PyThreeSliceAttributes_methods[THREESLICEATTRIBUTES_NMETH] = {
{"Notify", ThreeSliceAttributes_Notify, METH_VARARGS},
{"SetX", ThreeSliceAttributes_SetX, METH_VARARGS},
{"GetX", ThreeSliceAttributes_GetX, METH_VARARGS},
{"SetY", ThreeSliceAttributes_SetY, METH_VARARGS},
{"GetY", ThreeSliceAttributes_GetY, METH_VARARGS},
{"SetZ", ThreeSliceAttributes_SetZ, METH_VARARGS},
{"GetZ", ThreeSliceAttributes_GetZ, METH_VARARGS},
{"SetInteractive", ThreeSliceAttributes_SetInteractive, METH_VARARGS},
{"GetInteractive", ThreeSliceAttributes_GetInteractive, METH_VARARGS},
{NULL, NULL}
};
//
// Type functions
//
static void
ThreeSliceAttributes_dealloc(PyObject *v)
{
ThreeSliceAttributesObject *obj = (ThreeSliceAttributesObject *)v;
if(obj->parent != 0)
Py_DECREF(obj->parent);
if(obj->owns)
delete obj->data;
}
static PyObject *ThreeSliceAttributes_richcompare(PyObject *self, PyObject *other, int op);
PyObject *
PyThreeSliceAttributes_getattr(PyObject *self, char *name)
{
if(strcmp(name, "x") == 0)
return ThreeSliceAttributes_GetX(self, NULL);
if(strcmp(name, "y") == 0)
return ThreeSliceAttributes_GetY(self, NULL);
if(strcmp(name, "z") == 0)
return ThreeSliceAttributes_GetZ(self, NULL);
if(strcmp(name, "interactive") == 0)
return ThreeSliceAttributes_GetInteractive(self, NULL);
return Py_FindMethod(PyThreeSliceAttributes_methods, self, name);
}
int
PyThreeSliceAttributes_setattr(PyObject *self, char *name, PyObject *args)
{
// Create a tuple to contain the arguments since all of the Set
// functions expect a tuple.
PyObject *tuple = PyTuple_New(1);
PyTuple_SET_ITEM(tuple, 0, args);
Py_INCREF(args);
PyObject *obj = NULL;
if(strcmp(name, "x") == 0)
obj = ThreeSliceAttributes_SetX(self, tuple);
else if(strcmp(name, "y") == 0)
obj = ThreeSliceAttributes_SetY(self, tuple);
else if(strcmp(name, "z") == 0)
obj = ThreeSliceAttributes_SetZ(self, tuple);
else if(strcmp(name, "interactive") == 0)
obj = ThreeSliceAttributes_SetInteractive(self, tuple);
if(obj != NULL)
Py_DECREF(obj);
Py_DECREF(tuple);
if( obj == NULL)
PyErr_Format(PyExc_RuntimeError, "Unable to set unknown attribute: '%s'", name);
return (obj != NULL) ? 0 : -1;
}
static int
ThreeSliceAttributes_print(PyObject *v, FILE *fp, int flags)
{
ThreeSliceAttributesObject *obj = (ThreeSliceAttributesObject *)v;
fprintf(fp, "%s", PyThreeSliceAttributes_ToString(obj->data, "").c_str());
return 0;
}
PyObject *
ThreeSliceAttributes_str(PyObject *v)
{
ThreeSliceAttributesObject *obj = (ThreeSliceAttributesObject *)v;
return PyString_FromString(PyThreeSliceAttributes_ToString(obj->data,"").c_str());
}
//
// The doc string for the class.
//
#if PY_MAJOR_VERSION > 2 || (PY_MAJOR_VERSION == 2 && PY_MINOR_VERSION >= 5)
static const char *ThreeSliceAttributes_Purpose = "This class contains attributes for the threeslice operator.";
#else
static char *ThreeSliceAttributes_Purpose = "This class contains attributes for the threeslice operator.";
#endif
//
// Python Type Struct Def Macro from Py2and3Support.h
//
// VISIT_PY_TYPE_OBJ( VPY_TYPE,
// VPY_NAME,
// VPY_OBJECT,
// VPY_DEALLOC,
// VPY_PRINT,
// VPY_GETATTR,
// VPY_SETATTR,
// VPY_STR,
// VPY_PURPOSE,
// VPY_RICHCOMP,
// VPY_AS_NUMBER)
//
// The type description structure
//
VISIT_PY_TYPE_OBJ(ThreeSliceAttributesType, \
"ThreeSliceAttributes", \
ThreeSliceAttributesObject, \
ThreeSliceAttributes_dealloc, \
ThreeSliceAttributes_print, \
PyThreeSliceAttributes_getattr, \
PyThreeSliceAttributes_setattr, \
ThreeSliceAttributes_str, \
ThreeSliceAttributes_Purpose, \
ThreeSliceAttributes_richcompare, \
0); /* as_number*/
//
// Helper function for comparing.
//
static PyObject *
ThreeSliceAttributes_richcompare(PyObject *self, PyObject *other, int op)
{
// only compare against the same type
if ( Py_TYPE(self) != &ThreeSliceAttributesType
|| Py_TYPE(other) != &ThreeSliceAttributesType)
{
Py_INCREF(Py_NotImplemented);
return Py_NotImplemented;
}
PyObject *res = NULL;
ThreeSliceAttributes *a = ((ThreeSliceAttributesObject *)self)->data;
ThreeSliceAttributes *b = ((ThreeSliceAttributesObject *)other)->data;
switch (op)
{
case Py_EQ:
res = (*a == *b) ? Py_True : Py_False;
break;
case Py_NE:
res = (*a != *b) ? Py_True : Py_False;
break;
default:
res = Py_NotImplemented;
break;
}
Py_INCREF(res);
return res;
}
//
// Helper functions for object allocation.
//
static ThreeSliceAttributes *defaultAtts = 0;
static ThreeSliceAttributes *currentAtts = 0;
static PyObject *
NewThreeSliceAttributes(int useCurrent)
{
ThreeSliceAttributesObject *newObject;
newObject = PyObject_NEW(ThreeSliceAttributesObject, &ThreeSliceAttributesType);
if(newObject == NULL)
return NULL;
if(useCurrent && currentAtts != 0)
newObject->data = new ThreeSliceAttributes(*currentAtts);
else if(defaultAtts != 0)
newObject->data = new ThreeSliceAttributes(*defaultAtts);
else
newObject->data = new ThreeSliceAttributes;
newObject->owns = true;
newObject->parent = 0;
return (PyObject *)newObject;
}
static PyObject *
WrapThreeSliceAttributes(const ThreeSliceAttributes *attr)
{
ThreeSliceAttributesObject *newObject;
newObject = PyObject_NEW(ThreeSliceAttributesObject, &ThreeSliceAttributesType);
if(newObject == NULL)
return NULL;
newObject->data = (ThreeSliceAttributes *)attr;
newObject->owns = false;
newObject->parent = 0;
return (PyObject *)newObject;
}
///////////////////////////////////////////////////////////////////////////////
//
// Interface that is exposed to the VisIt module.
//
///////////////////////////////////////////////////////////////////////////////
PyObject *
ThreeSliceAttributes_new(PyObject *self, PyObject *args)
{
int useCurrent = 0;
if (!PyArg_ParseTuple(args, "i", &useCurrent))
{
if (!PyArg_ParseTuple(args, ""))
return NULL;
else
PyErr_Clear();
}
return (PyObject *)NewThreeSliceAttributes(useCurrent);
}
//
// Plugin method table. These methods are added to the visitmodule's methods.
//
static PyMethodDef ThreeSliceAttributesMethods[] = {
{"ThreeSliceAttributes", ThreeSliceAttributes_new, METH_VARARGS},
{NULL, NULL} /* Sentinel */
};
static Observer *ThreeSliceAttributesObserver = 0;
std::string
PyThreeSliceAttributes_GetLogString()
{
std::string s("ThreeSliceAtts = ThreeSliceAttributes()\n");
if(currentAtts != 0)
s += PyThreeSliceAttributes_ToString(currentAtts, "ThreeSliceAtts.");
return s;
}
static void
PyThreeSliceAttributes_CallLogRoutine(Subject *subj, void *data)
{
typedef void (*logCallback)(const std::string &);
logCallback cb = (logCallback)data;
if(cb != 0)
{
std::string s("ThreeSliceAtts = ThreeSliceAttributes()\n");
s += PyThreeSliceAttributes_ToString(currentAtts, "ThreeSliceAtts.");
cb(s);
}
}
void
PyThreeSliceAttributes_StartUp(ThreeSliceAttributes *subj, void *data)
{
if(subj == 0)
return;
currentAtts = subj;
PyThreeSliceAttributes_SetDefaults(subj);
//
// Create the observer that will be notified when the attributes change.
//
if(ThreeSliceAttributesObserver == 0)
{
ThreeSliceAttributesObserver = new ObserverToCallback(subj,
PyThreeSliceAttributes_CallLogRoutine, (void *)data);
}
}
void
PyThreeSliceAttributes_CloseDown()
{
delete defaultAtts;
defaultAtts = 0;
delete ThreeSliceAttributesObserver;
ThreeSliceAttributesObserver = 0;
}
PyMethodDef *
PyThreeSliceAttributes_GetMethodTable(int *nMethods)
{
*nMethods = 1;
return ThreeSliceAttributesMethods;
}
bool
PyThreeSliceAttributes_Check(PyObject *obj)
{
return (obj->ob_type == &ThreeSliceAttributesType);
}
ThreeSliceAttributes *
PyThreeSliceAttributes_FromPyObject(PyObject *obj)
{
ThreeSliceAttributesObject *obj2 = (ThreeSliceAttributesObject *)obj;
return obj2->data;
}
PyObject *
PyThreeSliceAttributes_New()
{
return NewThreeSliceAttributes(0);
}
PyObject *
PyThreeSliceAttributes_Wrap(const ThreeSliceAttributes *attr)
{
return WrapThreeSliceAttributes(attr);
}
void
PyThreeSliceAttributes_SetParent(PyObject *obj, PyObject *parent)
{
ThreeSliceAttributesObject *obj2 = (ThreeSliceAttributesObject *)obj;
obj2->parent = parent;
}
void
PyThreeSliceAttributes_SetDefaults(const ThreeSliceAttributes *atts)
{
if(defaultAtts)
delete defaultAtts;
defaultAtts = new ThreeSliceAttributes(*atts);
}
| {
"language": "C"
} |
/*
Copyright 2001-2005 - Cycling '74
Joshua Kit Clayton jkc@cycling74.com
*/
#include "jit.common.h"
#include "max.jit.mop.h"
typedef struct _max_jit_charmap
{
t_object ob;
void *obex;
} t_max_jit_charmap;
t_jit_err jit_charmap_init(void);
void *max_jit_charmap_new(t_symbol *s, long argc, t_atom *argv);
void max_jit_charmap_free(t_max_jit_charmap *x);
t_messlist *max_jit_charmap_class;
void ext_main(void *r)
{
void *p,*q;
jit_charmap_init();
setup(&max_jit_charmap_class, max_jit_charmap_new, (method)max_jit_charmap_free, (short)sizeof(t_max_jit_charmap),
0L, A_GIMME, 0);
p = max_jit_classex_setup(calcoffset(t_max_jit_charmap,obex));
q = jit_class_findbyname(gensym("jit_charmap"));
max_jit_classex_mop_wrap(p,q,0);
max_jit_classex_standard_wrap(p,q,0);
addmess((method)max_jit_mop_assist, "assist", A_CANT,0);
}
void max_jit_charmap_free(t_max_jit_charmap *x)
{
max_jit_mop_free(x);
jit_object_free(max_jit_obex_jitob_get(x));
max_jit_obex_free(x);
}
void *max_jit_charmap_new(t_symbol *s, long argc, t_atom *argv)
{
t_max_jit_charmap *x;
void *o;
if (x=(t_max_jit_charmap *)max_jit_obex_new(max_jit_charmap_class,gensym("jit_charmap"))) {
if (o=jit_object_new(gensym("jit_charmap"))) {
max_jit_mop_setup_simple(x,o,argc,argv);
max_jit_attr_args(x,argc,argv);
} else {
jit_object_error((t_object *)x,"jit.charmap: could not allocate object");
freeobject((t_object *) x);
x = NULL;
}
}
return (x);
}
| {
"language": "C"
} |
#ifndef __PERF_STRLIST_H
#define __PERF_STRLIST_H
#include <linux/rbtree.h>
#include <stdbool.h>
#include "rblist.h"
struct str_node {
struct rb_node rb_node;
const char *s;
};
struct strlist {
struct rblist rblist;
bool dupstr;
};
struct strlist *strlist__new(bool dupstr, const char *slist);
void strlist__delete(struct strlist *slist);
void strlist__remove(struct strlist *slist, struct str_node *sn);
int strlist__load(struct strlist *slist, const char *filename);
int strlist__add(struct strlist *slist, const char *str);
struct str_node *strlist__entry(const struct strlist *slist, unsigned int idx);
struct str_node *strlist__find(struct strlist *slist, const char *entry);
static inline bool strlist__has_entry(struct strlist *slist, const char *entry)
{
return strlist__find(slist, entry) != NULL;
}
static inline bool strlist__empty(const struct strlist *slist)
{
return rblist__empty(&slist->rblist);
}
static inline unsigned int strlist__nr_entries(const struct strlist *slist)
{
return rblist__nr_entries(&slist->rblist);
}
/* For strlist iteration */
static inline struct str_node *strlist__first(struct strlist *slist)
{
struct rb_node *rn = rb_first(&slist->rblist.entries);
return rn ? rb_entry(rn, struct str_node, rb_node) : NULL;
}
static inline struct str_node *strlist__next(struct str_node *sn)
{
struct rb_node *rn;
if (!sn)
return NULL;
rn = rb_next(&sn->rb_node);
return rn ? rb_entry(rn, struct str_node, rb_node) : NULL;
}
/**
 * strlist__for_each - iterate over a strlist
* @pos: the &struct str_node to use as a loop cursor.
* @slist: the &struct strlist for loop.
*/
#define strlist__for_each(pos, slist) \
for (pos = strlist__first(slist); pos; pos = strlist__next(pos))
/**
 * strlist__for_each_safe - iterate over a strlist safe against removal of
* str_node
* @pos: the &struct str_node to use as a loop cursor.
* @n: another &struct str_node to use as temporary storage.
* @slist: the &struct strlist for loop.
*/
#define strlist__for_each_safe(pos, n, slist) \
for (pos = strlist__first(slist), n = strlist__next(pos); pos;\
pos = n, n = strlist__next(n))
int strlist__parse_list(struct strlist *slist, const char *s);
#endif /* __PERF_STRLIST_H */
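/*
 * Illustrative sketch, not part of the original header: building a list of
 * duplicated strings and walking it with strlist__for_each(). The function
 * name is invented for the example and the block is guarded with #if 0 so
 * it never affects the build.
 */
#if 0
#include <stdio.h>

static void strlist_example(void)
{
	struct str_node *pos;
	struct strlist *slist = strlist__new(true, NULL);

	if (slist == NULL)
		return;

	strlist__add(slist, "cycles");
	strlist__add(slist, "instructions");

	strlist__for_each(pos, slist)
		printf("%s\n", pos->s);

	strlist__delete(slist);
}
#endif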
| {
"language": "C"
} |
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Dell WMI descriptor driver
*
* Copyright (c) 2017 Dell Inc.
*/
#ifndef _DELL_WMI_DESCRIPTOR_H_
#define _DELL_WMI_DESCRIPTOR_H_
#include <linux/wmi.h>
/* possible return values:
* -ENODEV: Descriptor GUID missing from WMI bus
* -EPROBE_DEFER: probing for dell-wmi-descriptor not yet run
* 0: valid descriptor, successfully probed
* < 0: invalid descriptor, don't probe dependent devices
*/
int dell_wmi_get_descriptor_valid(void);
bool dell_wmi_get_interface_version(u32 *version);
bool dell_wmi_get_size(u32 *size);
bool dell_wmi_get_hotfix(u32 *hotfix);
#endif
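/*
 * Illustrative sketch, not part of the original header: how a dependent
 * driver's probe path might consume the descriptor state documented above.
 * The function name is invented, the usual kernel headers are assumed, and
 * the block is guarded with #if 0 so it never affects the build.
 */
#if 0
static int example_dependent_probe(void)
{
	u32 interface_version;
	int ret;

	ret = dell_wmi_get_descriptor_valid();
	if (ret)
		return ret;	/* -EPROBE_DEFER, -ENODEV or an invalid descriptor */

	if (!dell_wmi_get_interface_version(&interface_version))
		return -ENODEV;

	/* ... continue probing against the reported interface version ... */
	return 0;
}
#endif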
| {
"language": "C"
} |
/*
* $Id: rwchinfl.c,v 1.10 2008-07-27 12:23:45 haley Exp $
*/
/************************************************************************
* *
* Copyright (C) 2000 *
* University Corporation for Atmospheric Research *
* All Rights Reserved *
* *
* The use of this Software is governed by a License Agreement. *
* *
************************************************************************/
#include <ncarg/c.h>
#include <stdlib.h>
#ifdef FreeBSD
#include <sys/filio.h>
#else
#include <sys/file.h>
#endif /* FreeBSD */
#include <fcntl.h>
#include <unistd.h>
#if defined (cray)
#include <fcntl.h>
#include <sys/types.h>
#include <fortran.h>
#endif
void NGCALLF(ngcpid,NGCPID)(ipid)
/*
* This routine gets the current process id.
*/
int *ipid;
{
*ipid = getpid();
}
void NGCALLF(ngclfi,NGCLFI)(fdes)
/*
* This routine closes an open file; the argument "fdes" is the file
* descriptor which was returned by ngofro, ngofrw, or ngofwo.
*/
int *fdes;
{
(void)close((int)*fdes);
}
void NGCALLF(ngexit,NGEXIT)(nerr)
int *nerr;
{
exit(*nerr);
}
void NGCALLF(ngofro,NGOFRO)(flnm,fdes,stat)
/*
* This routine opens an existing file for reading only. The argument
* "flnm" is input; it contains a file name, in the form of a FORTRAN
* CHARACTER*1 string terminated by a null. The argument "fdes"
* receives the file descriptor for the file. The argument "stat"
* receives a zero if the operation was successful, non-zero otherwise.
*/
#if defined (cray)
_fcd flnm;
#else
char flnm[];
#endif
int *fdes,*stat;
{
int st,flgs;
*stat = 0;
/*
* Attempt to open the file.
*/
flgs = O_RDONLY;
#if defined (cray)
st = open(_fcdtocp(flnm),flgs,0);
#else
st = open(flnm,flgs,0);
#endif
/*
* If "st" is -1, set error flag; otherwise, return file descriptor.
*/
if (st == -1)
*stat = 1;
else
*fdes = st;
}
void NGCALLF(ngofrw,NGOFRW)(flnm,fdes,stat)
/*
* This routine opens a file for reading and writing both. The argument
* "flnm" is input; it contains a file name, in the form of a FORTRAN
* CHARACTER*1 string terminated by a null. The argument "fdes"
* receives the file descriptor for the file. The argument "stat"
* receives a zero if the operation was successful, non-zero otherwise.
*/
#if defined (cray)
_fcd flnm;
#else
char flnm[];
#endif
int *fdes,*stat;
{
int st,flgs;
*stat = 0;
/*
* Attempt to open the file.
*/
flgs = O_RDWR | O_CREAT;
#if defined (cray)
st = open(_fcdtocp(flnm),flgs,0666);
#else
st = open(flnm,flgs,0666);
#endif
/*
* If "st" is -1, set error flag; otherwise, return file descriptor.
*/
if (st == -1)
*stat = 1;
else
*fdes = st;
}
void NGCALLF(ngofwo,NGOFWO)(flnm,fdes,stat)
/*
* This routine opens a file for writing only. The argument "flnm" is
* input; it contains a file name, in the form of a FORTRAN CHARACTER*1
* string terminated by a null. The argument "fdes" receives the file
* descriptor for the file. The argument "stat" receives a zero if the
* operation was successful, non-zero otherwise.
*/
#if defined (cray)
_fcd flnm;
#else
char flnm[];
#endif
int *fdes,*stat;
{
int st,flgs;
*stat = 0;
/*
* Attempt to open the file.
*/
flgs = O_WRONLY | O_CREAT;
#if defined (cray)
st = open(_fcdtocp(flnm),flgs,0666);
#else
st = open(flnm,flgs,0666);
#endif
/*
* If "st" is -1, set error flag; otherwise, return file descriptor.
*/
if (st == -1)
*stat = 1;
else
*fdes = st;
}
void NGCALLF(ngrdch,NGRDCH)(fdes,buff,lbuf,stat)
/*
* This routine reads characters (bytes) from a file to an internal
* buffer. The argument "fdes" is the file descriptor returned by the
* routine that opened the file. The argument "buff" is a FORTRAN
* CHARACTER*1 array. The argument "lbuf" says how many bytes are
* to be transferred. The argument "stat" is returned with a positive
* value indicating how many bytes were actually transferred, a zero
* value if an end-of-file was encountered, or a negative value if an
* error occurred.
*/
#if defined (cray)
_fcd buff;
#else
char buff[];
#endif
int *fdes,*lbuf,*stat;
{
int ret;
#if defined (cray)
ret = read((int)*fdes,_fcdtocp(buff),(int)*lbuf);
#else
ret = read((int)*fdes,buff,(int)*lbuf);
#endif
*stat = ret;
}
void NGCALLF(ngrdfl,NGRDFL)(fdes,buff,lbuf,stat)
/*
* This routine reads floats (reals) from a file to an internal buffer.
* The argument "fdes" is the file descriptor returned by the routine
* that opened the file. The argument "buff" is a FORTRAN REAL array.
* The argument "lbuf" says how many reals are to be transferred. The
* argument "stat" is returned with a positive value indicating how
* many reals were actually transferred, a zero value if an end-of-file
* was encountered, or a negative value if an error occurred.
*/
float buff[];
int *fdes,*lbuf,*stat;
{
int ret;
ret = read((int)*fdes,(char*)buff,(int)(*lbuf*sizeof(float)));
if (ret%sizeof(float)!=0) ret=-1;
else if (ret>0) ret=ret/sizeof(float);
*stat = ret;
}
void NGCALLF(ngrdin,NGRDIN)(fdes,buff,lbuf,stat)
/*
* This routine reads integers from a file to an internal buffer. The
* argument "fdes" is the file descriptor returned by the routine that
* opened the file. The argument "buff" is a FORTRAN INTEGER array.
* The argument "lbuf" says how many integers are to be transferred.
* The argument "stat" is returned with a positive value indicating
* how many integers were actually transferred, a zero value if an
* end-of-file was encountered, or a negative value if an error
* occurred.
*/
int buff[];
int *fdes,*lbuf,*stat;
{
int ret;
ret = read((int)*fdes,(char*)buff,(int)(*lbuf*sizeof(int)));
if (ret%sizeof(int)!=0) ret=-1;
else if (ret>0) ret=ret/sizeof(int);
*stat = ret;
}
void NGCALLF(ngrmfi,NGRMFI)(flnm)
/*
* This routine removes an existing file. The argument "flnm" is input;
* it contains a file name, in the form of a FORTRAN CHARACTER*1 string
* terminated by a null.
*/
#if defined (cray)
_fcd flnm;
#else
char flnm[];
#endif
{
#if defined (cray)
(void)unlink(_fcdtocp(flnm));
#else
(void)unlink(flnm);
#endif
}
void NGCALLF(ngwrch,NGWRCH)(fdes,buff,lbuf,stat)
/*
* This routine writes characters (bytes) to a file from an internal
* buffer. The argument "fdes" is the file descriptor returned by the
* routine that opened the file. The argument "buff" is a FORTRAN
* CHARACTER*1 array. The argument "lbuf" says how many bytes are to
* be transferred. The argument "stat" is returned with a positive
* value indicating how many bytes were actually transferred or a value
* less than or equal to zero if an error occurred.
*/
#if defined (cray)
_fcd buff;
#else
char buff[];
#endif
int *fdes,*lbuf,*stat;
{
int ret;
#if defined (cray)
ret = write((int)*fdes,_fcdtocp(buff),(int)*lbuf);
#else
ret = write((int)*fdes,buff,(int)*lbuf);
#endif
*stat = ret;
}
void NGCALLF(ngwrfl,NGWRFL)(fdes,buff,lbuf,stat)
/*
* This routine writes reals (floats) to a file from an internal
* buffer. The argument "fdes" is the file descriptor returned by the
* routine that opened the file. The argument "buff" is a FORTRAN
* REAL array. The argument "lbuf" says how many reals are to be
* transferred. The argument "stat" is returned with a positive value
* indicating how many reals were actually transferred or a value less
* than or equal to zero if an error occurred.
*/
float buff[];
int *fdes,*lbuf,*stat;
{
int ret;
ret = write((int)*fdes,(char*)buff,(int)(*lbuf*sizeof(float)));
if (ret%sizeof(float)!=0) ret=-1;
else if (ret>0) ret=ret/sizeof(float);
*stat = ret;
}
void NGCALLF(ngwrin,NGWRIN)(fdes,buff,lbuf,stat)
/*
* This routine writes integers to a file from an internal buffer. The
* argument "fdes" is the file descriptor returned by the routine that
* opened the file. The argument "buff" is a FORTRAN INTEGER array.
* The argument "lbuf" says how many integers are to be transferred.
* The argument "stat" is returned with a positive value indicating
* how many integers were actually transferred or a value less than or
* equal to zero if an error occurred.
*/
int buff[];
int *fdes,*lbuf,*stat;
{
int ret;
ret = write((int)*fdes,(char*)buff,(int)(*lbuf*sizeof(int)));
if (ret%sizeof(int)!=0) ret=-1;
else if (ret>0) ret=ret/sizeof(int);
*stat = ret;
}
void NGCALLF(ngseek,NGSEEK)(fdes,offs,orig,stat)
/*
* This routine repositions the read/write position of an open file.
* The argument "fdes" is the file descriptor returned by the routine
* that opened the file. The argument "offs" is the desired position,
* given as an offset from the origin specified by the argument "orig",
* which is a "0" to specify the beginning of the file, a 1 to specify
* the current position, or a 2 to specify the end of the file.
*/
int *fdes,*offs,*orig,*stat;
{
long ret;
ret = lseek((int)*fdes,(long)*offs,(int)*orig);
*stat = (int) ret;
}
| {
"language": "C"
} |
/** @file nim_tools_http.h
  * @brief HTTP transfer interfaces provided by NIM HTTP
  * @copyright (c) 2015-2016, NetEase Inc. All rights reserved
  * @author towik
  * @date 2015/4/30
  */
#ifndef NIM_TOOLS_HTTP_H
#define NIM_TOOLS_HTTP_H
#include "net/base/net_export.h"
#include "nim_tools_http_def.h"
#ifdef __cplusplus
extern "C"
{
#endif
/** @fn void nim_http_init()
  * NIM HTTP initialization
  * @return void  no return value
  */
NET_EXPORT void nim_http_init();
/** @fn void nim_http_uninit()
  * NIM HTTP deinitialization
  * @return void  no return value
  */
NET_EXPORT void nim_http_uninit();
/** @fn void nim_http_init_log(const char* log_file_path)
  * NIM HTTP set the log file path
  * @param[in] log_file_path  path where the log file is saved
  * @return void  no return value
  */
NET_EXPORT void nim_http_init_log(const char* log_file_path);
/** @fn bool nim_http_is_init_log()
  * NIM HTTP whether the log path has been set
  * @return bool  whether the log path has been set
  */
NET_EXPORT bool nim_http_is_init_log();
/** @fn HttpRequestID nim_http_post_request(HttpRequestHandle)
  * NIM HTTP start a task
  * @param[in] request_handle  http task handle
  * @return HttpRequestID  task id
  */
NET_EXPORT HttpRequestID nim_http_post_request(HttpRequestHandle request_handle);
/** @fn void nim_http_remove_request(HttpRequestID http_request_id)
  * NIM HTTP cancel a task
  * @param[in] http_request_id  task id
  * @return void  no return value
  */
NET_EXPORT void nim_http_remove_request(HttpRequestID http_request_id);
/** @fn HttpRequestHandle nim_http_create_download_file_request(const char *url, const char *download_file_path, nim_http_request_completed_cb complete_cb, const void *user_data)
  * NIM HTTP create a file download task
  * @param[in] url  resource address
  * @param[in] download_file_path  local path where the downloaded file is saved
  * @param[in] complete_cb  completion callback
  * @param[in] user_data  user-defined data
  * @return HttpRequestHandle  http task handle
  */
NET_EXPORT HttpRequestHandle nim_http_create_download_file_request(const char *url, const char *download_file_path,
nim_http_request_completed_cb complete_cb, const void *user_data);
/** @fn HttpRequestHandle nim_http_create_download_file_range_request(const char *url, const char *download_file_path, __int64 range_start, nim_http_request_completed_cb complete_cb, const void *user_data)
  * NIM HTTP create a file download task with resumable (range) download support
  * @param[in] url  resource address
  * @param[in] download_file_path  local path where the downloaded file is saved
  * @param[in] range_start  start offset of the download
  * @param[in] complete_cb  completion callback
  * @param[in] user_data  user-defined data
  * @return HttpRequestHandle  http task handle
  */
NET_EXPORT HttpRequestHandle nim_http_create_download_file_range_request(const char *url, const char *download_file_path,
__int64 range_start, nim_http_request_completed_cb complete_cb, const void *user_data);
/** @fn HttpRequestHandle nim_http_create_request(const char* url, const char* post_body, size_t post_body_size, nim_http_request_response_cb response_cb, const void* user_data)
  * NIM HTTP create a task
  * @param[in] url  resource address
  * @param[in] post_body  content to upload
  * @param[in] post_body_size  size of the content to upload
  * @param[in] response_cb  completion callback, delivers the response body
  * @param[in] user_data  user-defined data
  * @return HttpRequestHandle  http task handle
  */
NET_EXPORT HttpRequestHandle nim_http_create_request(const char* url, const char* post_body, size_t post_body_size,
nim_http_request_response_cb response_cb, const void* user_data);
/** @fn void nim_http_add_request_header(HttpRequestHandle request_handle, const char* key, const char* value)
  * NIM HTTP add a request header
  * @param[in] request_handle  http task handle
  * @param[in] key  header key
  * @param[in] value  header value
  * @return void  no return value
  */
NET_EXPORT void nim_http_add_request_header(HttpRequestHandle request_handle, const char* key, const char* value);
/** @fn void nim_http_set_request_progress_cb(HttpRequestHandle request_handle, nim_http_request_progress_cb progress_callback, const void* user_data)
  * NIM HTTP set the progress callback
  * @param[in] request_handle  http task handle
  * @param[in] progress_callback  progress callback function
  * @param[in] user_data  user-defined data
  * @return void  no return value
  */
NET_EXPORT void nim_http_set_request_progress_cb(HttpRequestHandle request_handle, nim_http_request_progress_cb progress_callback, const void* user_data);
/** @fn void nim_http_set_request_speed_cb(HttpRequestHandle request_handle, nim_http_request_speed_cb speed_callback, const void* user_data)
  * NIM HTTP set the real-time speed callback
  * @param[in] request_handle  http task handle
  * @param[in] speed_callback  speed callback function
  * @param[in] user_data  user-defined data
  * @return void  no return value
  */
NET_EXPORT void nim_http_set_request_speed_cb(HttpRequestHandle request_handle, nim_http_request_speed_cb speed_callback, const void* user_data);
/** @fn void nim_http_set_request_transfer_cb(HttpRequestHandle request_handle, nim_http_request_transfer_cb transfer_callback, const void* user_data)
  * NIM HTTP set the transfer-info callback
  * @param[in] request_handle  http task handle
  * @param[in] transfer_callback  completion callback, reports the actual transfer info
  * @param[in] user_data  user-defined data
  * @return void  no return value
  */
NET_EXPORT void nim_http_set_request_transfer_cb(HttpRequestHandle request_handle, nim_http_request_transfer_cb transfer_callback, const void* user_data);
/** @fn void nim_http_set_request_method_as_post(HttpRequestHandle request_handle)
  * NIM HTTP force the http request method to POST
  * @param[in] request_handle  http task handle
  * @return void  no return value
  */
NET_EXPORT void nim_http_set_request_method_as_post(HttpRequestHandle request_handle);
/** @fn void nim_http_set_timeout(HttpRequestHandle request_handle, int timeout_ms)
  * NIM HTTP set the timeout
  * @param[in] request_handle  http task handle
  * @param[in] timeout_ms  timeout in milliseconds
  * @return void  no return value
  */
NET_EXPORT void nim_http_set_timeout(HttpRequestHandle request_handle, int timeout_ms);
/** @fn void nim_http_set_low_speed(HttpRequestHandle request_handle, int low_speed_limit, int low_speed_time)
  * NIM HTTP set the minimum transfer speed
  * @param[in] request_handle  http task handle
  * @param[in] low_speed_limit  minimum number of bytes to be transferred (greater than 0)
  * @param[in] low_speed_time  number of seconds during which the transfer speed must not drop below low_speed_limit; otherwise the transfer is aborted (greater than 0)
  * @return void  no return value
  */
NET_EXPORT void nim_http_set_low_speed(HttpRequestHandle request_handle, int low_speed_limit, int low_speed_time);
/** @fn void nim_http_set_proxy(HttpRequestHandle request_handle, int type, const char* host, short port, const char* user, const char* pass)
  * NIM HTTP set a proxy
  * @param[in] request_handle  http task handle
  * @param[in] type  proxy type NIMProxyType
  * @param[in] host  proxy address
  * @param[in] port  proxy port
  * @param[in] user  proxy account
  * @param[in] pass  proxy password
  * @return void  no return value
  */
NET_EXPORT void nim_http_set_proxy(HttpRequestHandle request_handle, int type, const char* host, short port, const char* user, const char* pass);
/** @fn const char* const nim_http_get_response_head(HttpRequestID http_request_id)
  * NIM HTTP read the http headers of the response
  * @param[in] http_request_id  task id
  * @return char*  header information
  */
NET_EXPORT const char* const nim_http_get_response_head(HttpRequestID http_request_id);
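/* Minimal usage sketch (illustrative only; the URL, local path and the
  * on_done completion callback are hypothetical):
  *
  *	nim_http_init();
  *	HttpRequestHandle req = nim_http_create_download_file_request(
  *		"http://example.com/file.zip", "./file.zip", on_done, NULL);
  *	nim_http_set_timeout(req, 30000);
  *	HttpRequestID id = nim_http_post_request(req);
  *	// ... call nim_http_remove_request(id) to cancel if needed ...
  *	nim_http_uninit();
  */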
#ifdef __cplusplus
};
#endif //__cplusplus
#endif // NIM_TOOLS_HTTP_H
| {
"language": "C"
} |
/*
* \brief Schedules CPU shares for the execution time of a CPU
* \author Martin Stein
* \date 2014-10-09
*/
/*
* Copyright (C) 2014-2017 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU Affero General Public License version 3.
*/
#ifndef _CORE__KERNEL__CPU_SCHEDULER_H_
#define _CORE__KERNEL__CPU_SCHEDULER_H_
/* core includes */
#include <util.h>
#include <util/misc_math.h>
#include <kernel/configuration.h>
#include <kernel/double_list.h>
namespace Kernel
{
/**
* Priority of an unconsumed CPU claim versus other unconsumed CPU claims
*/
class Cpu_priority;
/**
* Scheduling context that is both claim and fill
*/
class Cpu_share;
/**
* Schedules CPU shares for the execution time of a CPU
*/
class Cpu_scheduler;
}
class Kernel::Cpu_priority
{
private:
unsigned _value;
public:
enum {
MIN = 0,
MAX = cpu_priorities - 1,
};
/**
* Construct priority with value 'v'
*/
Cpu_priority(signed const v) : _value(Genode::min(v, MAX)) { }
/*
* Standard operators
*/
Cpu_priority &operator =(signed const v)
{
_value = Genode::min(v, MAX);
return *this;
}
operator signed() const { return _value; }
};
class Kernel::Cpu_share
{
friend class Cpu_scheduler;
private:
Double_list_item<Cpu_share> _fill_item { *this };
Double_list_item<Cpu_share> _claim_item { *this };
signed const _prio;
unsigned _quota;
unsigned _claim;
unsigned _fill { 0 };
bool _ready { false };
public:
/**
* Constructor
*
* \param p claimed priority
* \param q claimed quota
*/
Cpu_share(signed const p, unsigned const q)
: _prio(p), _quota(q), _claim(q) { }
/*
* Accessors
*/
bool ready() const { return _ready; }
void quota(unsigned const q) { _quota = q; }
};
class Kernel::Cpu_scheduler
{
private:
typedef Cpu_share Share;
typedef Cpu_priority Prio;
Double_list<Cpu_share> _rcl[Prio::MAX + 1]; /* ready claims */
Double_list<Cpu_share> _ucl[Prio::MAX + 1]; /* unready claims */
Double_list<Cpu_share> _fills { }; /* ready fills */
Share &_idle;
Share *_head = nullptr;
unsigned _head_quota = 0;
bool _head_claims = false;
bool _head_yields = false;
unsigned const _quota;
unsigned _residual;
unsigned const _fill;
bool _need_to_schedule { true };
time_t _last_time { 0 };
template <typename F> void _for_each_prio(F f) {
for (signed p = Prio::MAX; p > Prio::MIN - 1; p--) { f(p); } }
static void _reset(Cpu_share &share);
void _reset_claims(unsigned const p);
void _next_round();
void _consumed(unsigned const q);
void _set_head(Share &s, unsigned const q, bool const c);
void _next_fill();
void _head_claimed(unsigned const r);
void _head_filled(unsigned const r);
bool _claim_for_head();
bool _fill_for_head();
unsigned _trim_consumption(unsigned &q);
/**
* Fill 's' becomes a claim due to a quota donation
*/
void _quota_introduction(Share &s);
/**
 * Claim 's' loses its state as a claim due to quota revocation
*/
void _quota_revokation(Share &s);
/**
* The quota of claim 's' changes to 'q'
*/
void _quota_adaption(Share &s, unsigned const q);
public:
/**
* Constructor
*
* \param i Gets scheduled with static quota when no other share
* is schedulable. Unremovable. All values get ignored.
* \param q total amount of time quota that can be claimed by shares
* \param f time-slice length of the fill round-robin
*/
Cpu_scheduler(Share &i, unsigned const q, unsigned const f);
bool need_to_schedule() { return _need_to_schedule; }
void timeout() { _need_to_schedule = true; }
/**
* Update head according to the consumed time
*/
void update(time_t time);
/**
 * Set 's1' ready and check whether this outdates the current head
*/
void ready_check(Share &s1);
/**
* Set share 's' ready
*/
void ready(Share &s);
/**
* Set share 's' unready
*/
void unready(Share &s);
/**
 * Current head loses its current claim/fill for this round
*/
void yield();
/**
* Remove share 's' from scheduler
*/
void remove(Share &s);
/**
* Insert share 's' into scheduler
*/
void insert(Share &s);
/**
* Set quota of share 's' to 'q'
*/
void quota(Share &s, unsigned const q);
/*
* Accessors
*/
Share &head() const;
unsigned head_quota() const {
return Genode::min(_head_quota, _residual); }
unsigned quota() const { return _quota; }
unsigned residual() const { return _residual; }
};
#endif /* _CORE__KERNEL__CPU_SCHEDULER_H_ */
| {
"language": "C"
} |
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (C) 2016 Oracle. All Rights Reserved.
* Author: Darrick J. Wong <darrick.wong@oracle.com>
*/
#ifndef __XFS_DEFER_H__
#define __XFS_DEFER_H__
struct xfs_btree_cur;
struct xfs_defer_op_type;
/*
* Header for deferred operation list.
*/
enum xfs_defer_ops_type {
XFS_DEFER_OPS_TYPE_BMAP,
XFS_DEFER_OPS_TYPE_REFCOUNT,
XFS_DEFER_OPS_TYPE_RMAP,
XFS_DEFER_OPS_TYPE_FREE,
XFS_DEFER_OPS_TYPE_AGFL_FREE,
XFS_DEFER_OPS_TYPE_MAX,
};
/*
* Save a log intent item and a list of extents, so that we can replay
* whatever action had to happen to the extent list and file the log done
* item.
*/
struct xfs_defer_pending {
struct list_head dfp_list; /* pending items */
struct list_head dfp_work; /* work items */
struct xfs_log_item *dfp_intent; /* log intent item */
struct xfs_log_item *dfp_done; /* log done item */
unsigned int dfp_count; /* # extent items */
enum xfs_defer_ops_type dfp_type;
};
void xfs_defer_add(struct xfs_trans *tp, enum xfs_defer_ops_type type,
struct list_head *h);
int xfs_defer_finish_noroll(struct xfs_trans **tp);
int xfs_defer_finish(struct xfs_trans **tp);
void xfs_defer_cancel(struct xfs_trans *);
void xfs_defer_move(struct xfs_trans *dtp, struct xfs_trans *stp);
/* Description of a deferred type. */
struct xfs_defer_op_type {
struct xfs_log_item *(*create_intent)(struct xfs_trans *tp,
struct list_head *items, unsigned int count, bool sort);
void (*abort_intent)(struct xfs_log_item *intent);
struct xfs_log_item *(*create_done)(struct xfs_trans *tp,
struct xfs_log_item *intent, unsigned int count);
int (*finish_item)(struct xfs_trans *tp, struct xfs_log_item *done,
struct list_head *item, struct xfs_btree_cur **state);
void (*finish_cleanup)(struct xfs_trans *tp,
struct xfs_btree_cur *state, int error);
void (*cancel_item)(struct list_head *item);
unsigned int max_items;
};
extern const struct xfs_defer_op_type xfs_bmap_update_defer_type;
extern const struct xfs_defer_op_type xfs_refcount_update_defer_type;
extern const struct xfs_defer_op_type xfs_rmap_update_defer_type;
extern const struct xfs_defer_op_type xfs_extent_free_defer_type;
extern const struct xfs_defer_op_type xfs_agfl_free_defer_type;
#endif /* __XFS_DEFER_H__ */
| {
"language": "C"
} |
/*====================================================================*
- Copyright (C) 2001 Leptonica. All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above
- copyright notice, this list of conditions and the following
- disclaimer in the documentation and/or other materials
- provided with the distribution.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ANY
- CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*====================================================================*/
#ifdef _WIN32
#ifndef LEPTONICA_LEPTWIN_H
#define LEPTONICA_LEPTWIN_H
#include "allheaders.h"
#include <windows.h>
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
LEPT_DLL extern HBITMAP pixGetWindowsHBITMAP( PIX *pixs );
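/*
 * Minimal usage sketch (illustrative only; the image path is hypothetical):
 *
 *     PIX *pixs = pixRead("image.png");
 *     if (pixs) {
 *         HBITMAP hbm = pixGetWindowsHBITMAP(pixs);
 *         // ... hand hbm to GDI calls ...
 *         DeleteObject(hbm);
 *         pixDestroy(&pixs);
 *     }
 */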
#ifdef __cplusplus
}
#endif /* __cplusplus */
#endif /* LEPTONICA_LEPTWIN_H */
#endif /* _WIN32 */
| {
"language": "C"
} |
/*
* Copyright 2012 Freescale Semiconductor, Inc.
* Copyright (C) 2012 Marek Vasut <marex@denx.de>
* on behalf of DENX Software Engineering GmbH
*
* The code contained herein is licensed under the GNU General Public
* License. You may obtain a copy of the GNU General Public License
* Version 2 or later at the following locations:
*
* http://www.opensource.org/licenses/gpl-license.html
* http://www.gnu.org/copyleft/gpl.html
*/
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>
#include <linux/usb/chipidea.h>
#include <linux/usb/of.h>
#include <linux/clk.h>
#include "ci.h"
#include "ci_hdrc_imx.h"
struct ci_hdrc_imx_platform_flag {
unsigned int flags;
bool runtime_pm;
};
static const struct ci_hdrc_imx_platform_flag imx23_usb_data = {
.flags = CI_HDRC_TURN_VBUS_EARLY_ON |
CI_HDRC_DISABLE_STREAMING,
};
static const struct ci_hdrc_imx_platform_flag imx27_usb_data = {
	.flags = CI_HDRC_DISABLE_STREAMING,
};
static const struct ci_hdrc_imx_platform_flag imx28_usb_data = {
.flags = CI_HDRC_IMX28_WRITE_FIX |
CI_HDRC_TURN_VBUS_EARLY_ON |
CI_HDRC_DISABLE_STREAMING,
};
static const struct ci_hdrc_imx_platform_flag imx6q_usb_data = {
.flags = CI_HDRC_SUPPORTS_RUNTIME_PM |
CI_HDRC_TURN_VBUS_EARLY_ON |
CI_HDRC_DISABLE_STREAMING,
};
static const struct ci_hdrc_imx_platform_flag imx6sl_usb_data = {
.flags = CI_HDRC_SUPPORTS_RUNTIME_PM |
CI_HDRC_TURN_VBUS_EARLY_ON |
CI_HDRC_DISABLE_HOST_STREAMING,
};
static const struct ci_hdrc_imx_platform_flag imx6sx_usb_data = {
.flags = CI_HDRC_SUPPORTS_RUNTIME_PM |
CI_HDRC_TURN_VBUS_EARLY_ON |
CI_HDRC_DISABLE_HOST_STREAMING,
};
static const struct ci_hdrc_imx_platform_flag imx6ul_usb_data = {
.flags = CI_HDRC_SUPPORTS_RUNTIME_PM |
CI_HDRC_TURN_VBUS_EARLY_ON,
};
static const struct ci_hdrc_imx_platform_flag imx7d_usb_data = {
.flags = CI_HDRC_SUPPORTS_RUNTIME_PM,
};
static const struct of_device_id ci_hdrc_imx_dt_ids[] = {
{ .compatible = "fsl,imx23-usb", .data = &imx23_usb_data},
{ .compatible = "fsl,imx28-usb", .data = &imx28_usb_data},
{ .compatible = "fsl,imx27-usb", .data = &imx27_usb_data},
{ .compatible = "fsl,imx6q-usb", .data = &imx6q_usb_data},
{ .compatible = "fsl,imx6sl-usb", .data = &imx6sl_usb_data},
{ .compatible = "fsl,imx6sx-usb", .data = &imx6sx_usb_data},
{ .compatible = "fsl,imx6ul-usb", .data = &imx6ul_usb_data},
{ .compatible = "fsl,imx7d-usb", .data = &imx7d_usb_data},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ci_hdrc_imx_dt_ids);
struct ci_hdrc_imx_data {
struct usb_phy *phy;
struct platform_device *ci_pdev;
struct clk *clk;
struct imx_usbmisc_data *usbmisc_data;
bool supports_runtime_pm;
bool in_lpm;
/* SoC before i.mx6 (except imx23/imx28) needs three clks */
bool need_three_clks;
struct clk *clk_ipg;
struct clk *clk_ahb;
struct clk *clk_per;
/* --------------------------------- */
};
/* Common functions shared by usbmisc drivers */
static struct imx_usbmisc_data *usbmisc_get_init_data(struct device *dev)
{
struct platform_device *misc_pdev;
struct device_node *np = dev->of_node;
struct of_phandle_args args;
struct imx_usbmisc_data *data;
int ret;
/*
* In case the fsl,usbmisc property is not present this device doesn't
* need usbmisc. Return NULL (which is no error here)
*/
if (!of_get_property(np, "fsl,usbmisc", NULL))
return NULL;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return ERR_PTR(-ENOMEM);
ret = of_parse_phandle_with_args(np, "fsl,usbmisc", "#index-cells",
0, &args);
if (ret) {
dev_err(dev, "Failed to parse property fsl,usbmisc, errno %d\n",
ret);
return ERR_PTR(ret);
}
data->index = args.args[0];
misc_pdev = of_find_device_by_node(args.np);
of_node_put(args.np);
if (!misc_pdev || !platform_get_drvdata(misc_pdev))
return ERR_PTR(-EPROBE_DEFER);
data->dev = &misc_pdev->dev;
if (of_find_property(np, "disable-over-current", NULL))
data->disable_oc = 1;
if (of_find_property(np, "over-current-active-high", NULL))
data->oc_polarity = 1;
if (of_find_property(np, "external-vbus-divider", NULL))
data->evdo = 1;
if (of_usb_get_phy_mode(np) == USBPHY_INTERFACE_MODE_ULPI)
data->ulpi = 1;
return data;
}
/* End of common functions shared by usbmisc drivers */
static int imx_get_clks(struct device *dev)
{
struct ci_hdrc_imx_data *data = dev_get_drvdata(dev);
int ret = 0;
data->clk_ipg = devm_clk_get(dev, "ipg");
if (IS_ERR(data->clk_ipg)) {
		/* If the platform only needs one clock */
data->clk = devm_clk_get(dev, NULL);
if (IS_ERR(data->clk)) {
ret = PTR_ERR(data->clk);
dev_err(dev,
"Failed to get clks, err=%ld,%ld\n",
PTR_ERR(data->clk), PTR_ERR(data->clk_ipg));
return ret;
}
return ret;
}
data->clk_ahb = devm_clk_get(dev, "ahb");
if (IS_ERR(data->clk_ahb)) {
ret = PTR_ERR(data->clk_ahb);
dev_err(dev,
"Failed to get ahb clock, err=%d\n", ret);
return ret;
}
data->clk_per = devm_clk_get(dev, "per");
if (IS_ERR(data->clk_per)) {
ret = PTR_ERR(data->clk_per);
dev_err(dev,
"Failed to get per clock, err=%d\n", ret);
return ret;
}
data->need_three_clks = true;
return ret;
}
static int imx_prepare_enable_clks(struct device *dev)
{
struct ci_hdrc_imx_data *data = dev_get_drvdata(dev);
int ret = 0;
if (data->need_three_clks) {
ret = clk_prepare_enable(data->clk_ipg);
if (ret) {
dev_err(dev,
"Failed to prepare/enable ipg clk, err=%d\n",
ret);
return ret;
}
ret = clk_prepare_enable(data->clk_ahb);
if (ret) {
dev_err(dev,
"Failed to prepare/enable ahb clk, err=%d\n",
ret);
clk_disable_unprepare(data->clk_ipg);
return ret;
}
ret = clk_prepare_enable(data->clk_per);
if (ret) {
dev_err(dev,
"Failed to prepare/enable per clk, err=%d\n",
ret);
clk_disable_unprepare(data->clk_ahb);
clk_disable_unprepare(data->clk_ipg);
return ret;
}
} else {
ret = clk_prepare_enable(data->clk);
if (ret) {
dev_err(dev,
"Failed to prepare/enable clk, err=%d\n",
ret);
return ret;
}
}
return ret;
}
static void imx_disable_unprepare_clks(struct device *dev)
{
struct ci_hdrc_imx_data *data = dev_get_drvdata(dev);
if (data->need_three_clks) {
clk_disable_unprepare(data->clk_per);
clk_disable_unprepare(data->clk_ahb);
clk_disable_unprepare(data->clk_ipg);
} else {
clk_disable_unprepare(data->clk);
}
}
static int ci_hdrc_imx_probe(struct platform_device *pdev)
{
struct ci_hdrc_imx_data *data;
struct ci_hdrc_platform_data pdata = {
.name = dev_name(&pdev->dev),
.capoffset = DEF_CAPOFFSET,
};
int ret;
const struct of_device_id *of_id;
const struct ci_hdrc_imx_platform_flag *imx_platform_flag;
of_id = of_match_device(ci_hdrc_imx_dt_ids, &pdev->dev);
if (!of_id)
return -ENODEV;
imx_platform_flag = of_id->data;
data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
platform_set_drvdata(pdev, data);
data->usbmisc_data = usbmisc_get_init_data(&pdev->dev);
if (IS_ERR(data->usbmisc_data))
return PTR_ERR(data->usbmisc_data);
ret = imx_get_clks(&pdev->dev);
if (ret)
return ret;
ret = imx_prepare_enable_clks(&pdev->dev);
if (ret)
return ret;
data->phy = devm_usb_get_phy_by_phandle(&pdev->dev, "fsl,usbphy", 0);
if (IS_ERR(data->phy)) {
ret = PTR_ERR(data->phy);
/* Return -EINVAL if no usbphy is available */
if (ret == -ENODEV)
ret = -EINVAL;
goto err_clk;
}
pdata.usb_phy = data->phy;
pdata.flags |= imx_platform_flag->flags;
if (pdata.flags & CI_HDRC_SUPPORTS_RUNTIME_PM)
data->supports_runtime_pm = true;
ret = imx_usbmisc_init(data->usbmisc_data);
if (ret) {
dev_err(&pdev->dev, "usbmisc init failed, ret=%d\n", ret);
goto err_clk;
}
data->ci_pdev = ci_hdrc_add_device(&pdev->dev,
pdev->resource, pdev->num_resources,
&pdata);
if (IS_ERR(data->ci_pdev)) {
ret = PTR_ERR(data->ci_pdev);
if (ret != -EPROBE_DEFER)
dev_err(&pdev->dev,
"ci_hdrc_add_device failed, err=%d\n", ret);
goto err_clk;
}
ret = imx_usbmisc_init_post(data->usbmisc_data);
if (ret) {
dev_err(&pdev->dev, "usbmisc post failed, ret=%d\n", ret);
goto disable_device;
}
if (data->supports_runtime_pm) {
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
}
device_set_wakeup_capable(&pdev->dev, true);
return 0;
disable_device:
ci_hdrc_remove_device(data->ci_pdev);
err_clk:
imx_disable_unprepare_clks(&pdev->dev);
return ret;
}
static int ci_hdrc_imx_remove(struct platform_device *pdev)
{
struct ci_hdrc_imx_data *data = platform_get_drvdata(pdev);
if (data->supports_runtime_pm) {
pm_runtime_get_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
}
ci_hdrc_remove_device(data->ci_pdev);
imx_disable_unprepare_clks(&pdev->dev);
return 0;
}
static void ci_hdrc_imx_shutdown(struct platform_device *pdev)
{
ci_hdrc_imx_remove(pdev);
}
#ifdef CONFIG_PM
static int imx_controller_suspend(struct device *dev)
{
struct ci_hdrc_imx_data *data = dev_get_drvdata(dev);
dev_dbg(dev, "at %s\n", __func__);
imx_disable_unprepare_clks(dev);
data->in_lpm = true;
return 0;
}
static int imx_controller_resume(struct device *dev)
{
struct ci_hdrc_imx_data *data = dev_get_drvdata(dev);
int ret = 0;
dev_dbg(dev, "at %s\n", __func__);
if (!data->in_lpm) {
WARN_ON(1);
return 0;
}
ret = imx_prepare_enable_clks(dev);
if (ret)
return ret;
data->in_lpm = false;
ret = imx_usbmisc_set_wakeup(data->usbmisc_data, false);
if (ret) {
dev_err(dev, "usbmisc set_wakeup failed, ret=%d\n", ret);
goto clk_disable;
}
return 0;
clk_disable:
imx_disable_unprepare_clks(dev);
return ret;
}
#ifdef CONFIG_PM_SLEEP
static int ci_hdrc_imx_suspend(struct device *dev)
{
int ret;
struct ci_hdrc_imx_data *data = dev_get_drvdata(dev);
if (data->in_lpm)
/* The core's suspend doesn't run */
return 0;
if (device_may_wakeup(dev)) {
ret = imx_usbmisc_set_wakeup(data->usbmisc_data, true);
if (ret) {
dev_err(dev, "usbmisc set_wakeup failed, ret=%d\n",
ret);
return ret;
}
}
return imx_controller_suspend(dev);
}
static int ci_hdrc_imx_resume(struct device *dev)
{
struct ci_hdrc_imx_data *data = dev_get_drvdata(dev);
int ret;
ret = imx_controller_resume(dev);
if (!ret && data->supports_runtime_pm) {
pm_runtime_disable(dev);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
}
return ret;
}
#endif /* CONFIG_PM_SLEEP */
static int ci_hdrc_imx_runtime_suspend(struct device *dev)
{
struct ci_hdrc_imx_data *data = dev_get_drvdata(dev);
int ret;
if (data->in_lpm) {
WARN_ON(1);
return 0;
}
ret = imx_usbmisc_set_wakeup(data->usbmisc_data, true);
if (ret) {
dev_err(dev, "usbmisc set_wakeup failed, ret=%d\n", ret);
return ret;
}
return imx_controller_suspend(dev);
}
static int ci_hdrc_imx_runtime_resume(struct device *dev)
{
return imx_controller_resume(dev);
}
#endif /* CONFIG_PM */
static const struct dev_pm_ops ci_hdrc_imx_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(ci_hdrc_imx_suspend, ci_hdrc_imx_resume)
SET_RUNTIME_PM_OPS(ci_hdrc_imx_runtime_suspend,
ci_hdrc_imx_runtime_resume, NULL)
};
static struct platform_driver ci_hdrc_imx_driver = {
.probe = ci_hdrc_imx_probe,
.remove = ci_hdrc_imx_remove,
.shutdown = ci_hdrc_imx_shutdown,
.driver = {
.name = "imx_usb",
.of_match_table = ci_hdrc_imx_dt_ids,
.pm = &ci_hdrc_imx_pm_ops,
},
};
module_platform_driver(ci_hdrc_imx_driver);
MODULE_ALIAS("platform:imx-usb");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("CI HDRC i.MX USB binding");
MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
MODULE_AUTHOR("Richard Zhao <richard.zhao@freescale.com>");
| {
"language": "C"
} |
/**
* Marlin 3D Printer Firmware
* Copyright (c) 2020 MarlinFirmware [https://github.com/MarlinFirmware/Marlin]
*
* Based on Sprinter and grbl.
* Copyright (c) 2011 Camiel Gubbels / Erik van der Zalm
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*
*/
#pragma once
/**
* Ultimaker pin assignments (Old electronics)
*/
/**
* Rev B 3 JAN 2017
*
* Details on pin definitions for M3, M4 & M5 spindle control commands and for
* the CASE_LIGHT_PIN are at the end of this file.
*
* This started out as an attempt to add pin definitions for M3, M4 & M5 spindle
* control commands but quickly turned into a head scratcher as the sources for
* the revisions provided inconsistent information.
*
* As best I can determine:
* 1.5.3 boards should use the pins_ULTIMAKER.h file which means the BOARD_INFO_NAME
* define in this file should say 1.5.3 rather than 1.5.4
* This file is meant for 1.1 - 1.3 boards.
* The endstops for the 1.0 boards use different definitions than on the 1.1 - 1.3
* boards.
*
* I've added sections that have the 1.0 and 1.5.3 + endstop definitions so you can
* easily switch if needed. I've also copied over the 1.5.3 + LCD definitions.
*
* To be 100% sure of the board you have:
* 1. In Configuration_adv.h enable "PINS_DEBUGGING"
 *      2. Compile & upload
 *      3. Enter the command "M43 W1 I1". This command will report the pin number and
* name of any pin that changes state.
* 4. Using a 1k (approximately) resistor pull the endstops and some of the LCD pins
* to ground and see what is reported.
* 5. If the reported pin doesn't match the file then try a different board revision
* and repeat steps 2 - 5
*/
#define BOARD_REV_1_1_TO_1_3
//#define BOARD_REV_1_0
//#define BOARD_REV_1_5
#if !defined(__AVR_ATmega1280__) && !defined(__AVR_ATmega2560__)
#error "Oops! Select 'Arduino/Genuino Mega or Mega 2560' in 'Tools > Board.'"
#endif
#ifdef BOARD_REV_1_1_TO_1_3
#define BOARD_INFO_NAME "Ultimaker 1.1-1.3"
#elif defined(BOARD_REV_1_0)
#define BOARD_INFO_NAME "Ultimaker 1.0"
#elif defined(BOARD_REV_1_5)
#define BOARD_INFO_NAME "Ultimaker 1.5"
#else
#define BOARD_INFO_NAME "Ultimaker 1.5.4+"
#endif
#define DEFAULT_MACHINE_NAME "Ultimaker"
#define DEFAULT_SOURCE_CODE_URL "https://github.com/Ultimaker/Marlin"
//
// Limit Switches
//
#if ENABLED(BOARD_REV_1_1_TO_1_3)
#define X_MIN_PIN 15 // SW1
#define X_MAX_PIN 14 // SW2
#define Y_MIN_PIN 17 // SW3
#define Y_MAX_PIN 16 // SW4
#define Z_MIN_PIN 19 // SW5
#define Z_MAX_PIN 18 // SW6
#endif
#if ENABLED(BOARD_REV_1_0)
#if HAS_CUTTER
#define X_STOP_PIN 13 // SW1 (didn't change) - also has a useable hardware PWM
#define Y_STOP_PIN 12 // SW2
#define Z_STOP_PIN 11 // SW3
#else
#define X_MIN_PIN 13 // SW1
#define X_MAX_PIN 12 // SW2
#define Y_MIN_PIN 11 // SW3
#define Y_MAX_PIN 10 // SW4
#define Z_MIN_PIN 9 // SW5
#define Z_MAX_PIN 8 // SW6
#endif
#endif
#if ENABLED(BOARD_REV_1_5)
#define X_MIN_PIN 22
#define X_MAX_PIN 24
#define Y_MIN_PIN 26
#define Y_MAX_PIN 28
#define Z_MIN_PIN 30
#define Z_MAX_PIN 32
#endif
//
// Z Probe (when not Z_MIN_PIN)
//
#if !defined(Z_MIN_PROBE_PIN) && !BOTH(HAS_CUTTER, BOARD_REV_1_0)
#define Z_MIN_PROBE_PIN Z_MAX_PIN
#endif
//
// Steppers
//
#define X_STEP_PIN 25
#define X_DIR_PIN 23
#define X_ENABLE_PIN 27
#define Y_STEP_PIN 31
#define Y_DIR_PIN 33
#define Y_ENABLE_PIN 29
#define Z_STEP_PIN 37
#define Z_DIR_PIN 39
#define Z_ENABLE_PIN 35
#if BOTH(HAS_CUTTER, BOARD_REV_1_1_TO_1_3) && EXTRUDERS == 1
// Move E0 to the spare and get Spindle/Laser signals from E0
#define E0_STEP_PIN 49
#define E0_DIR_PIN 47
#define E0_ENABLE_PIN 48
#else
#define E0_STEP_PIN 43
#define E0_DIR_PIN 45
#define E0_ENABLE_PIN 41
#define E1_STEP_PIN 49
#define E1_DIR_PIN 47
#define E1_ENABLE_PIN 48
#endif
//
// Temperature Sensors
//
#define TEMP_0_PIN 8 // Analog Input
#define TEMP_1_PIN 1 // Analog Input
//
// Heaters / Fans
//
#define HEATER_0_PIN 2
//#define HEATER_1_PIN 3 // used for case light Rev A said "1"
#define HEATER_BED_PIN 4
//
// LCD / Controller
//
#if ANY(BOARD_REV_1_0, BOARD_REV_1_1_TO_1_3)
#define LCD_PINS_RS 24
#define LCD_PINS_ENABLE 22
#define LCD_PINS_D4 36
#define LCD_PINS_D5 34
#define LCD_PINS_D6 32
#define LCD_PINS_D7 30
#elif BOTH(BOARD_REV_1_5, ULTRA_LCD)
#define BEEPER_PIN 18
#if ENABLED(NEWPANEL)
#define LCD_PINS_RS 20
#define LCD_PINS_ENABLE 17
#define LCD_PINS_D4 16
#define LCD_PINS_D5 21
#define LCD_PINS_D6 5
#define LCD_PINS_D7 6
// Buttons directly attached
#define BTN_EN1 40
#define BTN_EN2 42
#define BTN_ENC 19
#define SD_DETECT_PIN 38
#else // !NEWPANEL - Old style panel with shift register
// Buttons attached to a shift register
#define SHIFT_CLK 38
#define SHIFT_LD 42
#define SHIFT_OUT 40
#define SHIFT_EN 17
#define LCD_PINS_RS 16
#define LCD_PINS_ENABLE 5
#define LCD_PINS_D4 6
#define LCD_PINS_D5 21
#define LCD_PINS_D6 20
#define LCD_PINS_D7 19
#endif // !NEWPANEL
#endif
//
// case light - see spindle section for more info on available hardware PWMs
//
#if !PIN_EXISTS(CASE_LIGHT) && ENABLED(BOARD_REV_1_5)
#define CASE_LIGHT_PIN 7 // use PWM - MUST BE HARDWARE PWM
#endif
//
// M3/M4/M5 - Spindle/Laser Control
//
#if HAS_CUTTER
#if EITHER(BOARD_REV_1_0, BOARD_REV_1_5) // Use the last three SW positions
#define SPINDLE_DIR_PIN 10 // 1.0: SW4 1.5: EXP3-6 ("10")
#define SPINDLE_LASER_PWM_PIN 9 // 1.0: SW5 1.5: EXP3-7 ( "9") .. MUST BE HARDWARE PWM
#define SPINDLE_LASER_ENA_PIN 8 // 1.0: SW6 1.5: EXP3-8 ( "8") .. Pin should have a pullup!
#elif ENABLED(BOARD_REV_1_1_TO_1_3)
/**
* Only four hardware PWMs physically connected to anything on these boards:
*
* HEATER_0_PIN 2 silkscreen varies - usually "PWM 1" or "HEATER1"
* HEATER_1_PIN 3 silkscreen varies - usually "PWM 2" or "HEATER2"
* HEATER_BED_PIN 4 silkscreen varies - usually "PWM 3" or "HEATED BED"
* E0_DIR_PIN 45
*
* If one of the heaters is used then special precautions will usually be needed.
* They have an LED and resistor pullup to +24V which could damage 3.3V-5V ICs.
*/
#if EXTRUDERS == 1
#define SPINDLE_DIR_PIN 43
#define SPINDLE_LASER_PWM_PIN 45 // Hardware PWM
#define SPINDLE_LASER_ENA_PIN 41 // Pullup!
#elif TEMP_SENSOR_BED == 0 // Can't use E0 so see if HEATER_BED_PIN is available
#undef HEATER_BED_PIN
#define SPINDLE_DIR_PIN 38 // Probably pin 4 on 10 pin connector closest to the E0 socket
#define SPINDLE_LASER_PWM_PIN 4 // Hardware PWM - Special precautions usually needed.
#define SPINDLE_LASER_ENA_PIN 40 // Pullup! (Probably pin 6 on the 10-pin
// connector closest to the E0 socket)
#endif
#endif
#endif
/**
* Where to get the spindle signals on the E0 socket
*
* spindle signal socket name socket name
* -------
* SPINDLE_LASER_ENA_PIN /ENABLE *| |O VMOT
* MS1 O| |O GND
* MS2 O| |O 2B
* MS3 O| |O 2A
* /RESET O| |O 1A
* /SLEEP O| |O 1B
* SPINDLE_DIR_PIN STEP O| |O VDD
* SPINDLE_LASER_PWM_PIN DIR O| |O GND
* -------
* * - pin closest to MS1, MS2 & MS3 jumpers on the board
*
* Note: Socket names vary from vendor to vendor.
*/
| {
"language": "C"
} |
/* vim: set ts=8 sw=8 sts=8 noet tw=78:
*
* tup - A file-based build system
*
* Copyright (C) 2012-2020 Mike Shal <marfey@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#ifndef tup_compat_freebsd_h
#define tup_compat_freebsd_h
int clearenv(void);
#endif
| {
"language": "C"
} |
/*
* Copyright (c) 1988-1997 Sam Leffler
* Copyright (c) 1991-1997 Silicon Graphics, Inc.
*
* Permission to use, copy, modify, distribute, and sell this software and
* its documentation for any purpose is hereby granted without fee, provided
* that (i) the above copyright notices and this permission notice appear in
* all copies of the software and related documentation, and (ii) the names of
* Sam Leffler and Silicon Graphics may not be used in any advertising or
* publicity relating to the software without the specific, prior written
* permission of Sam Leffler and Silicon Graphics.
*
* THE SOFTWARE IS PROVIDED "AS-IS" AND WITHOUT WARRANTY OF ANY KIND,
* EXPRESS, IMPLIED OR OTHERWISE, INCLUDING WITHOUT LIMITATION, ANY
* WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
*
* IN NO EVENT SHALL SAM LEFFLER OR SILICON GRAPHICS BE LIABLE FOR
* ANY SPECIAL, INCIDENTAL, INDIRECT OR CONSEQUENTIAL DAMAGES OF ANY KIND,
* OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
* WHETHER OR NOT ADVISED OF THE POSSIBILITY OF DAMAGE, AND ON ANY THEORY OF
* LIABILITY, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
* OF THIS SOFTWARE.
*/
/*
* TIFF Library.
*
* JBIG Compression Algorithm Support.
* Contributed by Lee Howard <faxguy@deanox.com>
*
*/
#include "tiffiop.h"
#ifdef JBIG_SUPPORT
#include "jbig.h"
static int JBIGSetupDecode(TIFF* tif)
{
if (TIFFNumberOfStrips(tif) != 1)
{
TIFFErrorExt(tif->tif_clientdata, "JBIG", "Multistrip images not supported in decoder");
return 0;
}
return 1;
}
static int JBIGDecode(TIFF* tif, uint8* buffer, tmsize_t size, uint16 s)
{
struct jbg_dec_state decoder;
int decodeStatus = 0;
unsigned char* pImage = NULL;
unsigned long decodedSize;
(void) s;
if (isFillOrder(tif, tif->tif_dir.td_fillorder))
{
TIFFReverseBits(tif->tif_rawcp, tif->tif_rawcc);
}
jbg_dec_init(&decoder);
#if defined(HAVE_JBG_NEWLEN)
jbg_newlen(tif->tif_rawcp, (size_t)tif->tif_rawcc);
/*
* I do not check the return status of jbg_newlen because even if this
* function fails it does not necessarily mean that decoding the image
* will fail. It is generally only needed for received fax images
* that do not contain the actual length of the image in the BIE
* header. I do not log when an error occurs because that will cause
* problems when converting JBIG encoded TIFF's to
* PostScript. As long as the actual image length is contained in the
* BIE header jbg_dec_in should succeed.
*/
#endif /* HAVE_JBG_NEWLEN */
decodeStatus = jbg_dec_in(&decoder, (unsigned char*)tif->tif_rawcp,
(size_t)tif->tif_rawcc, NULL);
if (JBG_EOK != decodeStatus)
{
/*
* XXX: JBG_EN constant was defined in pre-2.0 releases of the
* JBIG-KIT. Since the 2.0 the error reporting functions were
* changed. We will handle both cases here.
*/
TIFFErrorExt(tif->tif_clientdata,
"JBIG", "Error (%d) decoding: %s",
decodeStatus,
#if defined(JBG_EN)
jbg_strerror(decodeStatus, JBG_EN)
#else
jbg_strerror(decodeStatus)
#endif
);
jbg_dec_free(&decoder);
return 0;
}
decodedSize = jbg_dec_getsize(&decoder);
if( (tmsize_t)decodedSize < size )
{
TIFFWarningExt(tif->tif_clientdata, "JBIG",
"Only decoded %lu bytes, whereas %lu requested",
decodedSize, (unsigned long)size);
}
else if( (tmsize_t)decodedSize > size )
{
TIFFErrorExt(tif->tif_clientdata, "JBIG",
"Decoded %lu bytes, whereas %lu were requested",
decodedSize, (unsigned long)size);
jbg_dec_free(&decoder);
return 0;
}
pImage = jbg_dec_getimage(&decoder, 0);
_TIFFmemcpy(buffer, pImage, decodedSize);
jbg_dec_free(&decoder);
tif->tif_rawcp += tif->tif_rawcc;
tif->tif_rawcc = 0;
return 1;
}
static int JBIGSetupEncode(TIFF* tif)
{
if (TIFFNumberOfStrips(tif) != 1)
{
TIFFErrorExt(tif->tif_clientdata, "JBIG", "Multistrip images not supported in encoder");
return 0;
}
return 1;
}
static int JBIGCopyEncodedData(TIFF* tif, unsigned char* pp, size_t cc, uint16 s)
{
(void) s;
while (cc > 0)
{
tmsize_t n = (tmsize_t)cc;
if (tif->tif_rawcc + n > tif->tif_rawdatasize)
{
n = tif->tif_rawdatasize - tif->tif_rawcc;
}
assert(n > 0);
_TIFFmemcpy(tif->tif_rawcp, pp, n);
tif->tif_rawcp += n;
tif->tif_rawcc += n;
pp += n;
cc -= (size_t)n;
if (tif->tif_rawcc >= tif->tif_rawdatasize &&
!TIFFFlushData1(tif))
{
return (-1);
}
}
return (1);
}
static void JBIGOutputBie(unsigned char* buffer, size_t len, void* userData)
{
TIFF* tif = (TIFF*)userData;
if (isFillOrder(tif, tif->tif_dir.td_fillorder))
{
TIFFReverseBits(buffer, (tmsize_t)len);
}
JBIGCopyEncodedData(tif, buffer, len, 0);
}
static int JBIGEncode(TIFF* tif, uint8* buffer, tmsize_t size, uint16 s)
{
TIFFDirectory* dir = &tif->tif_dir;
struct jbg_enc_state encoder;
(void) size, (void) s;
jbg_enc_init(&encoder,
dir->td_imagewidth,
dir->td_imagelength,
1,
&buffer,
JBIGOutputBie,
tif);
/*
* jbg_enc_out does the "real" encoding. As data is encoded,
* JBIGOutputBie is called, which writes the data to the directory.
*/
jbg_enc_out(&encoder);
jbg_enc_free(&encoder);
return 1;
}
int TIFFInitJBIG(TIFF* tif, int scheme)
{
assert(scheme == COMPRESSION_JBIG);
/*
* These flags are set so the JBIG Codec can control when to reverse
* bits and when not to and to allow the jbig decoder and bit reverser
* to write to memory when necessary.
*/
tif->tif_flags |= TIFF_NOBITREV;
tif->tif_flags &= ~TIFF_MAPPED;
/* Setup the function pointers for encode, decode, and cleanup. */
tif->tif_setupdecode = JBIGSetupDecode;
tif->tif_decodestrip = JBIGDecode;
tif->tif_setupencode = JBIGSetupEncode;
tif->tif_encodestrip = JBIGEncode;
return 1;
}
#endif /* JBIG_SUPPORT */
/* vim: set ts=8 sts=8 sw=8 noet: */
/*
* Local Variables:
* mode: c
* c-basic-offset: 8
* fill-column: 78
* End:
*/
| {
"language": "C"
} |
/*
* QEMU Hypervisor.framework (HVF) support
*
* Copyright Google Inc., 2017
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*
*/
/* header to be included in non-HVF-specific code */
#ifndef HVF_H
#define HVF_H
#include "cpu.h"
#include "qemu/bitops.h"
#include "exec/memory.h"
#include "sysemu/accel.h"
extern bool hvf_allowed;
#ifdef CONFIG_HVF
#include <Hypervisor/hv.h>
#include <Hypervisor/hv_vmx.h>
#include <Hypervisor/hv_error.h>
#include "target/i386/cpu.h"
uint32_t hvf_get_supported_cpuid(uint32_t func, uint32_t idx,
int reg);
#define hvf_enabled() (hvf_allowed)
#else
#define hvf_enabled() 0
#define hvf_get_supported_cpuid(func, idx, reg) 0
#endif
/* hvf_slot flags */
#define HVF_SLOT_LOG (1 << 0)
typedef struct hvf_slot {
uint64_t start;
uint64_t size;
uint8_t *mem;
int slot_id;
uint32_t flags;
MemoryRegion *region;
} hvf_slot;
typedef struct hvf_vcpu_caps {
uint64_t vmx_cap_pinbased;
uint64_t vmx_cap_procbased;
uint64_t vmx_cap_procbased2;
uint64_t vmx_cap_entry;
uint64_t vmx_cap_exit;
uint64_t vmx_cap_preemption_timer;
} hvf_vcpu_caps;
typedef struct HVFState {
AccelState parent;
hvf_slot slots[32];
int num_slots;
hvf_vcpu_caps *hvf_caps;
} HVFState;
extern HVFState *hvf_state;
void hvf_set_phys_mem(MemoryRegionSection *, bool);
void hvf_handle_io(CPUArchState *, uint16_t, void *,
int, int, int);
hvf_slot *hvf_find_overlap_slot(uint64_t, uint64_t);
/* Disable HVF if |disable| is 1, otherwise, enable it iff it is supported by
* the host CPU. Use hvf_enabled() after this to get the result. */
void hvf_disable(int disable);
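/* Minimal sketch of the intended call order (illustrative only):
 *
 *	hvf_disable(0);          // request HVF if the host CPU supports it
 *	if (hvf_enabled()) {
 *		// HVF acceleration is available; vCPUs go through hvf_init_vcpu()
 *	}
 */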
/* Returns non-0 if the host CPU supports the VMX "unrestricted guest" feature
* which allows the virtual CPU to directly run in "real mode". If true, this
* allows QEMU to run several vCPU threads in parallel (see cpus.c). Otherwise,
 * only a single TCG thread can run, and it will call HVF to run the current
* instructions, except in case of "real mode" (paging disabled, typically at
* boot time), or MMIO operations. */
int hvf_sync_vcpus(void);
int hvf_init_vcpu(CPUState *);
int hvf_vcpu_exec(CPUState *);
int hvf_smp_cpu_exec(CPUState *);
void hvf_cpu_synchronize_state(CPUState *);
void hvf_cpu_synchronize_post_reset(CPUState *);
void hvf_cpu_synchronize_post_init(CPUState *);
void _hvf_cpu_synchronize_post_init(CPUState *, run_on_cpu_data);
void hvf_vcpu_destroy(CPUState *);
void hvf_raise_event(CPUState *);
/* void hvf_reset_vcpu_state(void *opaque); */
void hvf_reset_vcpu(CPUState *);
void vmx_update_tpr(CPUState *);
void update_apic_tpr(CPUState *);
int hvf_put_registers(CPUState *);
void vmx_clear_int_window_exiting(CPUState *cpu);
#define TYPE_HVF_ACCEL ACCEL_CLASS_NAME("hvf")
#define HVF_STATE(obj) \
OBJECT_CHECK(HVFState, (obj), TYPE_HVF_ACCEL)
#endif
| {
"language": "C"
} |
/*
* MSF demuxer
* Copyright (c) 2015 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "internal.h"
static int msf_probe(AVProbeData *p)
{
if (memcmp(p->buf, "MSF", 3))
return 0;
if (AV_RB32(p->buf+8) <= 0)
return 0;
if (AV_RB32(p->buf+16) <= 0)
return 0;
if (AV_RB32(p->buf+4) > 16)
return AVPROBE_SCORE_MAX / 5; //unsupported / unknown codec
return AVPROBE_SCORE_MAX / 3 * 2;
}
static int msf_read_header(AVFormatContext *s)
{
unsigned codec, size;
AVStream *st;
int ret;
avio_skip(s->pb, 4);
st = avformat_new_stream(s, NULL);
if (!st)
return AVERROR(ENOMEM);
st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
codec = avio_rb32(s->pb);
st->codecpar->channels = avio_rb32(s->pb);
if (st->codecpar->channels <= 0 || st->codecpar->channels >= INT_MAX / 1024)
return AVERROR_INVALIDDATA;
size = avio_rb32(s->pb);
st->codecpar->sample_rate = avio_rb32(s->pb);
if (st->codecpar->sample_rate <= 0)
return AVERROR_INVALIDDATA;
// avio_rb32(s->pb); /* byte flags with encoder info */
switch (codec) {
case 0: st->codecpar->codec_id = AV_CODEC_ID_PCM_S16BE; break;
case 1: st->codecpar->codec_id = AV_CODEC_ID_PCM_S16LE; break;
case 3: st->codecpar->block_align = 16 * st->codecpar->channels;
st->codecpar->codec_id = AV_CODEC_ID_ADPCM_PSX; break;
case 4:
case 5:
case 6: st->codecpar->block_align = (codec == 4 ? 96 : codec == 5 ? 152 : 192) * st->codecpar->channels;
ret = ff_alloc_extradata(st->codecpar, 14);
if (ret < 0)
return ret;
memset(st->codecpar->extradata, 0, st->codecpar->extradata_size);
AV_WL16(st->codecpar->extradata, 1); /* version */
AV_WL16(st->codecpar->extradata+2, 2048 * st->codecpar->channels); /* unknown size */
AV_WL16(st->codecpar->extradata+6, codec == 4 ? 1 : 0); /* joint stereo */
AV_WL16(st->codecpar->extradata+8, codec == 4 ? 1 : 0); /* joint stereo (repeat?) */
AV_WL16(st->codecpar->extradata+10, 1);
st->codecpar->codec_id = AV_CODEC_ID_ATRAC3; break;
case 7: st->need_parsing = AVSTREAM_PARSE_FULL_RAW;
st->codecpar->codec_id = AV_CODEC_ID_MP3; break;
default:
avpriv_request_sample(s, "Codec %d", codec);
return AVERROR_PATCHWELCOME;
}
st->duration = av_get_audio_frame_duration2(st->codecpar, size);
avio_skip(s->pb, 0x40 - avio_tell(s->pb));
avpriv_set_pts_info(st, 64, 1, st->codecpar->sample_rate);
return 0;
}
static int msf_read_packet(AVFormatContext *s, AVPacket *pkt)
{
AVCodecParameters *par = s->streams[0]->codecpar;
return av_get_packet(s->pb, pkt, par->block_align ? par->block_align : 1024 * par->channels);
}
AVInputFormat ff_msf_demuxer = {
.name = "msf",
.long_name = NULL_IF_CONFIG_SMALL("Sony PS3 MSF"),
.read_probe = msf_probe,
.read_header = msf_read_header,
.read_packet = msf_read_packet,
.extensions = "msf",
};
| {
"language": "C"
} |
/*
* Copyright © 2007 Eugene Konev <ejka@openwrt.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
* TI AR7 flash partition table.
* Based on ar7 map by Felix Fietkau <nbd@openwrt.org>
*
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <uapi/linux/magic.h>
#define AR7_PARTS 4
#define ROOT_OFFSET 0xe0000
#define LOADER_MAGIC1 le32_to_cpu(0xfeedfa42)
#define LOADER_MAGIC2 le32_to_cpu(0xfeed1281)
struct ar7_bin_rec {
unsigned int checksum;
unsigned int length;
unsigned int address;
};
static int create_mtd_partitions(struct mtd_info *master,
struct mtd_partition **pparts,
struct mtd_part_parser_data *data)
{
struct ar7_bin_rec header;
unsigned int offset;
size_t len;
unsigned int pre_size = master->erasesize, post_size = 0;
unsigned int root_offset = ROOT_OFFSET;
int retries = 10;
struct mtd_partition *ar7_parts;
ar7_parts = kzalloc(sizeof(*ar7_parts) * AR7_PARTS, GFP_KERNEL);
if (!ar7_parts)
return -ENOMEM;
ar7_parts[0].name = "loader";
ar7_parts[0].offset = 0;
ar7_parts[0].size = master->erasesize;
ar7_parts[0].mask_flags = MTD_WRITEABLE;
ar7_parts[1].name = "config";
ar7_parts[1].offset = 0;
ar7_parts[1].size = master->erasesize;
ar7_parts[1].mask_flags = 0;
do { /* Try 10 blocks starting from master->erasesize */
offset = pre_size;
mtd_read(master, offset, sizeof(header), &len,
(uint8_t *)&header);
if (!strncmp((char *)&header, "TIENV0.8", 8))
ar7_parts[1].offset = pre_size;
if (header.checksum == LOADER_MAGIC1)
break;
if (header.checksum == LOADER_MAGIC2)
break;
pre_size += master->erasesize;
} while (retries--);
pre_size = offset;
if (!ar7_parts[1].offset) {
ar7_parts[1].offset = master->size - master->erasesize;
post_size = master->erasesize;
}
switch (header.checksum) {
case LOADER_MAGIC1:
while (header.length) {
offset += sizeof(header) + header.length;
mtd_read(master, offset, sizeof(header), &len,
(uint8_t *)&header);
}
root_offset = offset + sizeof(header) + 4;
break;
case LOADER_MAGIC2:
while (header.length) {
offset += sizeof(header) + header.length;
mtd_read(master, offset, sizeof(header), &len,
(uint8_t *)&header);
}
root_offset = offset + sizeof(header) + 4 + 0xff;
root_offset &= ~(uint32_t)0xff;
break;
default:
printk(KERN_WARNING "Unknown magic: %08x\n", header.checksum);
break;
}
mtd_read(master, root_offset, sizeof(header), &len, (u8 *)&header);
if (header.checksum != SQUASHFS_MAGIC) {
root_offset += master->erasesize - 1;
root_offset &= ~(master->erasesize - 1);
}
ar7_parts[2].name = "linux";
ar7_parts[2].offset = pre_size;
ar7_parts[2].size = master->size - pre_size - post_size;
ar7_parts[2].mask_flags = 0;
ar7_parts[3].name = "rootfs";
ar7_parts[3].offset = root_offset;
ar7_parts[3].size = master->size - root_offset - post_size;
ar7_parts[3].mask_flags = 0;
*pparts = ar7_parts;
return AR7_PARTS;
}
static struct mtd_part_parser ar7_parser = {
.owner = THIS_MODULE,
.parse_fn = create_mtd_partitions,
.name = "ar7part",
};
static int __init ar7_parser_init(void)
{
return register_mtd_parser(&ar7_parser);
}
static void __exit ar7_parser_exit(void)
{
deregister_mtd_parser(&ar7_parser);
}
module_init(ar7_parser_init);
module_exit(ar7_parser_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR( "Felix Fietkau <nbd@openwrt.org>, "
"Eugene Konev <ejka@openwrt.org>");
MODULE_DESCRIPTION("MTD partitioning for TI AR7");
| {
"language": "C"
} |
#ifndef COIN_GLHEADERS_H
#define COIN_GLHEADERS_H
/**************************************************************************\
* Copyright (c) Kongsberg Oil & Gas Technologies AS
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\**************************************************************************/
/*
* This header file is supposed to take care of all operating system
* dependent anomalies connected to including the gl.h header file.
*/
/* This define is at least needed before inclusion of the header files
that are part of NVidia's Linux drivers v41.91. Without it, none of
the extension and OpenGL 1.1+ function prototypes will be set up. */
#define GL_GLEXT_PROTOTYPES 1
/* #include <windows.h> - not needed on system */
#include <OpenGL/gl.h>
#include <OpenGL/glu.h>
#include <OpenGL/glext.h>
#endif /* ! COIN_GLHEADERS_H */
| {
"language": "C"
} |
/*
* Policy capability support functions
*/
#include <string.h>
#include <sepol/policydb/polcaps.h>
static const char *polcap_names[] = {
"network_peer_controls", /* POLICYDB_CAPABILITY_NETPEER */
"open_perms", /* POLICYDB_CAPABILITY_OPENPERM */
"extended_socket_class", /* POLICYDB_CAPABILITY_EXTSOCKCLASS */
"always_check_network", /* POLICYDB_CAPABILITY_ALWAYSNETWORK */
NULL
};
int sepol_polcap_getnum(const char *name)
{
int capnum;
for (capnum = 0; capnum <= POLICYDB_CAPABILITY_MAX; capnum++) {
if (polcap_names[capnum] == NULL)
continue;
if (strcasecmp(polcap_names[capnum], name) == 0)
return capnum;
}
return -1;
}
const char *sepol_polcap_getname(unsigned int capnum)
{
if (capnum > POLICYDB_CAPABILITY_MAX)
return NULL;
return polcap_names[capnum];
}
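/*
 * Illustrative only (not part of this file): how the two lookups above are
 * meant to round-trip. The capability name is one already present in
 * polcap_names[]; the printf is just for the sketch.
 *
 *   int num = sepol_polcap_getnum("open_perms");
 *   if (num >= 0)
 *       printf("capability %d is %s\n", num, sepol_polcap_getname(num));
 */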
| {
"language": "C"
} |
/*
* Copyright (C) 2014 Stefan Roese <sr@denx.de>
*
* SPDX-License-Identifier: GPL-2.0+
*/
#ifndef _CONFIG_SYNOLOGY_DS414_H
#define _CONFIG_SYNOLOGY_DS414_H
/*
* High Level Configuration Options (easy to change)
*/
#define CONFIG_DISPLAY_BOARDINFO_LATE
/*
* TEXT_BASE needs to be below 16MiB, since this area is scrubbed
* for DDR ECC byte filling in the SPL before loading the main
* U-Boot into it.
*/
#define CONFIG_SYS_TEXT_BASE 0x00800000
#define CONFIG_SYS_TCLK 250000000 /* 250MHz */
/*
* Commands configuration
*/
/* I2C */
#define CONFIG_SYS_I2C
#define CONFIG_SYS_I2C_MVTWSI
#define CONFIG_I2C_MVTWSI_BASE0 MVEBU_TWSI_BASE
#define CONFIG_SYS_I2C_SLAVE 0x0
#define CONFIG_SYS_I2C_SPEED 100000
/* SPI NOR flash default params, used by sf commands */
#define CONFIG_SF_DEFAULT_SPEED 1000000
#define CONFIG_SF_DEFAULT_MODE SPI_MODE_3
/* Environment in SPI NOR flash */
#define CONFIG_ENV_OFFSET 0x7E0000 /* RedBoot config partition in DTS */
#define CONFIG_ENV_SIZE (64 << 10) /* 64KiB */
#define CONFIG_ENV_SECT_SIZE (64 << 10) /* 64KiB sectors */
#define CONFIG_PHY_MARVELL /* there is a marvell phy */
#define CONFIG_PHY_ADDR { 0x1, 0x0 }
#define CONFIG_SYS_NETA_INTERFACE_TYPE PHY_INTERFACE_MODE_RGMII
#define CONFIG_SYS_ALT_MEMTEST
/* PCIe support */
#ifndef CONFIG_SPL_BUILD
#define CONFIG_PCI_MVEBU
#define CONFIG_PCI_SCAN_SHOW
#endif
/* USB/EHCI/XHCI configuration */
#define CONFIG_USB_MAX_CONTROLLER_COUNT 2
/* FIXME: broken XHCI support
* Below defines should enable support for the two rear USB3 ports. Sadly, this
* does not work because:
* - xhci-pci seems to not support DM_USB, so with that enabled it is not
* found.
* - USB init fails, controller does not respond in time */
#if !defined(CONFIG_USB_XHCI_HCD)
#define CONFIG_EHCI_IS_TDI
#endif
/* why is this only defined in mv-common.h if CONFIG_DM is undefined? */
/*
* mv-common.h should be defined after CMD configs since it used them
* to enable certain macros
*/
#include "mv-common.h"
/*
* Memory layout while starting into the bin_hdr via the
* BootROM:
*
* 0x4000.4000 - 0x4003.4000 headers space (192KiB)
* 0x4000.4030 bin_hdr start address
* 0x4003.4000 - 0x4004.7c00 BootROM memory allocations (15KiB)
* 0x4007.fffc BootROM stack top
*
* The address space between 0x4007.fffc and 0x400f.fff is not locked in
* L2 cache thus cannot be used.
*/
/* SPL */
/* Defines for SPL */
#define CONFIG_SPL_FRAMEWORK
#define CONFIG_SPL_TEXT_BASE 0x40004030
#define CONFIG_SPL_MAX_SIZE ((128 << 10) - 0x4030)
#define CONFIG_SPL_BSS_START_ADDR (0x40000000 + (128 << 10))
#define CONFIG_SPL_BSS_MAX_SIZE (16 << 10)
#ifdef CONFIG_SPL_BUILD
#define CONFIG_SYS_MALLOC_SIMPLE
#endif
#define CONFIG_SPL_STACK (0x40000000 + ((192 - 16) << 10))
#define CONFIG_SPL_BOOTROM_SAVE (CONFIG_SPL_STACK + 4)
/* SPL related SPI defines */
#define CONFIG_SPL_SPI_LOAD
#define CONFIG_SYS_SPI_U_BOOT_OFFS 0x24000
/* DS414 bus width is 32bits */
#define CONFIG_DDR_32BIT
/* Use random ethernet address if not configured */
#define CONFIG_LIB_RAND
#define CONFIG_NET_RANDOM_ETHADDR
/* Default Environment */
#define CONFIG_BOOTCOMMAND "sf read ${loadaddr} 0xd0000 0x700000; bootm"
#define CONFIG_LOADADDR 0x80000
#undef CONFIG_PREBOOT /* override preboot for USB and SPI flash init */
#define CONFIG_PREBOOT "usb start; sf probe"
#endif /* _CONFIG_SYNOLOGY_DS414_H */
| {
"language": "C"
} |
#ifdef RCSID
static char RCSid[] =
"$Header: d:/cvsroot/tads/TADS2/RUNSTAT.C,v 1.2 1999/05/17 02:52:13 MJRoberts Exp $";
#endif
/*
* Copyright (c) 1992, 2002 Michael J. Roberts. All Rights Reserved.
*
* Please see the accompanying license file, LICENSE.TXT, for information
* on using and copying this software.
*/
/*
Name
runstat.c - tads 1 compatible runstat()
Function
generates status line
Notes
none
Modified
04/04/92 MJRoberts - creation
*/
#include "os.h"
#include "std.h"
#include "mcm.h"
#include "obj.h"
#include "run.h"
#include "tio.h"
#include "voc.h"
#include "dat.h"
static runcxdef *runctx;
static voccxdef *vocctx;
static tiocxdef *tioctx;
void runstat(void)
{
objnum locobj;
int savemoremode;
/* get the location of the Me object */
runppr(runctx, vocctx->voccxme, PRP_LOCATION, 0);
    /* if that's not an object, there's nothing we can do */
if (runtostyp(runctx) != DAT_OBJECT)
{
rundisc(runctx);
return;
}
/* get Me.location */
locobj = runpopobj(runctx);
/* flush any pending output */
outflushn(0);
/* switch to output display mode 1 (status line) */
os_status(1);
/* turn off MORE mode */
savemoremode = setmore(0);
/* call the statusLine method of the current room */
runppr(runctx, locobj, PRP_STATUSLINE, 0);
/* if we're in the status line, make sure the line gets flushed */
if (os_get_status() != 0)
tioputs(tioctx, "\\n");
outflushn(0);
/* restore the previous MORE mode */
setmore(savemoremode);
/* switch to output display mode 0 (main text area) */
os_status(0);
}
void runistat(voccxdef *vctx, runcxdef *rctx, tiocxdef *tctx)
{
runctx = rctx;
vocctx = vctx;
tioctx = tctx;
}
| {
"language": "C"
} |
/*
* Copyright 2012 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_EXAMPLES_PEERCONNECTION_CLIENT_FLAGDEFS_H_
#define WEBRTC_EXAMPLES_PEERCONNECTION_CLIENT_FLAGDEFS_H_
#include "webrtc/rtc_base/flags.h"
extern const uint16_t kDefaultServerPort; // From defaults.[h|cc]
// Define flags for the peerconnect_client testing tool, in a separate
// header file so that they can be shared across the different main.cc's
// for each platform.
DEFINE_bool(help, false, "Prints this message");
DEFINE_bool(autoconnect, false, "Connect to the server without user "
"intervention.");
DEFINE_string(server, "localhost", "The server to connect to.");
DEFINE_int(port, kDefaultServerPort,
"The port on which the server is listening.");
DEFINE_bool(autocall, false, "Call the first available other client on "
"the server without user intervention. Note: this flag should only be set "
"to true on one of the two clients.");
#endif // WEBRTC_EXAMPLES_PEERCONNECTION_CLIENT_FLAGDEFS_H_
| {
"language": "C"
} |
/*
i2c-isch.c - Linux kernel driver for Intel SCH chipset SMBus
- Based on i2c-piix4.c
Copyright (c) 1998 - 2002 Frodo Looijaard <frodol@dds.nl> and
Philip Edelbrock <phil@netroedge.com>
- Intel SCH support
Copyright (c) 2007 - 2008 Jacob Jun Pan <jacob.jun.pan@intel.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
*/
/*
Supports:
Intel SCH chipsets (AF82US15W, AF82US15L, AF82UL11L)
Note: we assume there can only be one device, with one SMBus interface.
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/i2c.h>
#include <linux/io.h>
#include <linux/acpi.h>
/* SCH SMBus address offsets */
#define SMBHSTCNT (0 + sch_smba)
#define SMBHSTSTS (1 + sch_smba)
#define SMBHSTCLK (2 + sch_smba)
#define SMBHSTADD (4 + sch_smba) /* TSA */
#define SMBHSTCMD (5 + sch_smba)
#define SMBHSTDAT0 (6 + sch_smba)
#define SMBHSTDAT1 (7 + sch_smba)
#define SMBBLKDAT (0x20 + sch_smba)
/* Other settings */
#define MAX_RETRIES 5000
/* I2C constants */
#define SCH_QUICK 0x00
#define SCH_BYTE 0x01
#define SCH_BYTE_DATA 0x02
#define SCH_WORD_DATA 0x03
#define SCH_BLOCK_DATA 0x05
static unsigned short sch_smba;
static struct i2c_adapter sch_adapter;
static int backbone_speed = 33000; /* backbone speed in kHz */
module_param(backbone_speed, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(backbone_speed, "Backbone speed in kHz, (default = 33000)");
/*
* Start the i2c transaction -- the i2c_access will prepare the transaction
* and this function will execute it.
* return 0 for success and others for failure.
*/
static int sch_transaction(void)
{
int temp;
int result = 0;
int retries = 0;
dev_dbg(&sch_adapter.dev, "Transaction (pre): CNT=%02x, CMD=%02x, "
"ADD=%02x, DAT0=%02x, DAT1=%02x\n", inb(SMBHSTCNT),
inb(SMBHSTCMD), inb(SMBHSTADD), inb(SMBHSTDAT0),
inb(SMBHSTDAT1));
/* Make sure the SMBus host is ready to start transmitting */
temp = inb(SMBHSTSTS) & 0x0f;
if (temp) {
/* Can not be busy since we checked it in sch_access */
if (temp & 0x01) {
dev_dbg(&sch_adapter.dev, "Completion (%02x). "
"Clear...\n", temp);
}
if (temp & 0x06) {
dev_dbg(&sch_adapter.dev, "SMBus error (%02x). "
"Resetting...\n", temp);
}
outb(temp, SMBHSTSTS);
temp = inb(SMBHSTSTS) & 0x0f;
if (temp) {
dev_err(&sch_adapter.dev,
"SMBus is not ready: (%02x)\n", temp);
return -EAGAIN;
}
}
/* start the transaction by setting bit 4 */
outb(inb(SMBHSTCNT) | 0x10, SMBHSTCNT);
do {
usleep_range(100, 200);
temp = inb(SMBHSTSTS) & 0x0f;
} while ((temp & 0x08) && (retries++ < MAX_RETRIES));
/* If the SMBus is still busy, we give up */
if (retries > MAX_RETRIES) {
dev_err(&sch_adapter.dev, "SMBus Timeout!\n");
result = -ETIMEDOUT;
}
if (temp & 0x04) {
result = -EIO;
dev_dbg(&sch_adapter.dev, "Bus collision! SMBus may be "
"locked until next hard reset. (sorry!)\n");
/* Clock stops and slave is stuck in mid-transmission */
} else if (temp & 0x02) {
result = -EIO;
dev_err(&sch_adapter.dev, "Error: no response!\n");
} else if (temp & 0x01) {
dev_dbg(&sch_adapter.dev, "Post complete!\n");
outb(temp, SMBHSTSTS);
temp = inb(SMBHSTSTS) & 0x07;
if (temp & 0x06) {
/* Completion clear failed */
dev_dbg(&sch_adapter.dev, "Failed reset at end of "
"transaction (%02x), Bus error!\n", temp);
}
} else {
result = -ENXIO;
dev_dbg(&sch_adapter.dev, "No such address.\n");
}
dev_dbg(&sch_adapter.dev, "Transaction (post): CNT=%02x, CMD=%02x, "
"ADD=%02x, DAT0=%02x, DAT1=%02x\n", inb(SMBHSTCNT),
inb(SMBHSTCMD), inb(SMBHSTADD), inb(SMBHSTDAT0),
inb(SMBHSTDAT1));
return result;
}
/*
* This is the main access entry for i2c-sch access
* adap is i2c_adapter pointer, addr is the i2c device bus address, read_write
* (0 for read and 1 for write), size is i2c transaction type and data is the
* union of transaction for data to be transferred or data read from bus.
* return 0 for success and others for failure.
*/
static s32 sch_access(struct i2c_adapter *adap, u16 addr,
unsigned short flags, char read_write,
u8 command, int size, union i2c_smbus_data *data)
{
int i, len, temp, rc;
/* Make sure the SMBus host is not busy */
temp = inb(SMBHSTSTS) & 0x0f;
if (temp & 0x08) {
dev_dbg(&sch_adapter.dev, "SMBus busy (%02x)\n", temp);
return -EAGAIN;
}
temp = inw(SMBHSTCLK);
if (!temp) {
/*
* We can't determine if we have 33 or 25 MHz clock for
* SMBus, so expect 33 MHz and calculate a bus clock of
* 100 kHz. If we actually run at 25 MHz the bus will be
* run ~75 kHz instead which should do no harm.
*/
dev_notice(&sch_adapter.dev,
"Clock divider unitialized. Setting defaults\n");
outw(backbone_speed / (4 * 100), SMBHSTCLK);
}
dev_dbg(&sch_adapter.dev, "access size: %d %s\n", size,
(read_write)?"READ":"WRITE");
switch (size) {
case I2C_SMBUS_QUICK:
outb((addr << 1) | read_write, SMBHSTADD);
size = SCH_QUICK;
break;
case I2C_SMBUS_BYTE:
outb((addr << 1) | read_write, SMBHSTADD);
if (read_write == I2C_SMBUS_WRITE)
outb(command, SMBHSTCMD);
size = SCH_BYTE;
break;
case I2C_SMBUS_BYTE_DATA:
outb((addr << 1) | read_write, SMBHSTADD);
outb(command, SMBHSTCMD);
if (read_write == I2C_SMBUS_WRITE)
outb(data->byte, SMBHSTDAT0);
size = SCH_BYTE_DATA;
break;
case I2C_SMBUS_WORD_DATA:
outb((addr << 1) | read_write, SMBHSTADD);
outb(command, SMBHSTCMD);
if (read_write == I2C_SMBUS_WRITE) {
outb(data->word & 0xff, SMBHSTDAT0);
outb((data->word & 0xff00) >> 8, SMBHSTDAT1);
}
size = SCH_WORD_DATA;
break;
case I2C_SMBUS_BLOCK_DATA:
outb((addr << 1) | read_write, SMBHSTADD);
outb(command, SMBHSTCMD);
if (read_write == I2C_SMBUS_WRITE) {
len = data->block[0];
if (len == 0 || len > I2C_SMBUS_BLOCK_MAX)
return -EINVAL;
outb(len, SMBHSTDAT0);
for (i = 1; i <= len; i++)
outb(data->block[i], SMBBLKDAT+i-1);
}
size = SCH_BLOCK_DATA;
break;
default:
dev_warn(&adap->dev, "Unsupported transaction %d\n", size);
return -EOPNOTSUPP;
}
dev_dbg(&sch_adapter.dev, "write size %d to 0x%04x\n", size, SMBHSTCNT);
outb((inb(SMBHSTCNT) & 0xb0) | (size & 0x7), SMBHSTCNT);
rc = sch_transaction();
if (rc) /* Error in transaction */
return rc;
if ((read_write == I2C_SMBUS_WRITE) || (size == SCH_QUICK))
return 0;
switch (size) {
case SCH_BYTE:
case SCH_BYTE_DATA:
data->byte = inb(SMBHSTDAT0);
break;
case SCH_WORD_DATA:
data->word = inb(SMBHSTDAT0) + (inb(SMBHSTDAT1) << 8);
break;
case SCH_BLOCK_DATA:
data->block[0] = inb(SMBHSTDAT0);
if (data->block[0] == 0 || data->block[0] > I2C_SMBUS_BLOCK_MAX)
return -EPROTO;
for (i = 1; i <= data->block[0]; i++)
data->block[i] = inb(SMBBLKDAT+i-1);
break;
}
return 0;
}
static u32 sch_func(struct i2c_adapter *adapter)
{
return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE |
I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA |
I2C_FUNC_SMBUS_BLOCK_DATA;
}
static const struct i2c_algorithm smbus_algorithm = {
.smbus_xfer = sch_access,
.functionality = sch_func,
};
static struct i2c_adapter sch_adapter = {
.owner = THIS_MODULE,
.class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
.algo = &smbus_algorithm,
};
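/*
 * Illustrative only (not part of the driver): once this adapter is
 * registered, a client driver for a device on the bus reaches sch_access()
 * through the generic SMBus helpers, e.g.
 *
 *   s32 val = i2c_smbus_read_byte_data(client, 0x00);
 *   if (val >= 0)
 *       dev_info(&client->dev, "reg 0x00 = 0x%02x\n", val);
 *
 * The client and the register offset here are hypothetical.
 */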
static int smbus_sch_probe(struct platform_device *dev)
{
struct resource *res;
int retval;
res = platform_get_resource(dev, IORESOURCE_IO, 0);
if (!res)
return -EBUSY;
if (!devm_request_region(&dev->dev, res->start, resource_size(res),
dev->name)) {
dev_err(&dev->dev, "SMBus region 0x%x already in use!\n",
sch_smba);
return -EBUSY;
}
sch_smba = res->start;
dev_dbg(&dev->dev, "SMBA = 0x%X\n", sch_smba);
/* set up the sysfs linkage to our parent device */
sch_adapter.dev.parent = &dev->dev;
snprintf(sch_adapter.name, sizeof(sch_adapter.name),
"SMBus SCH adapter at %04x", sch_smba);
retval = i2c_add_adapter(&sch_adapter);
if (retval) {
dev_err(&dev->dev, "Couldn't register adapter!\n");
sch_smba = 0;
}
return retval;
}
static int smbus_sch_remove(struct platform_device *pdev)
{
if (sch_smba) {
i2c_del_adapter(&sch_adapter);
sch_smba = 0;
}
return 0;
}
static struct platform_driver smbus_sch_driver = {
.driver = {
.name = "isch_smbus",
},
.probe = smbus_sch_probe,
.remove = smbus_sch_remove,
};
module_platform_driver(smbus_sch_driver);
MODULE_AUTHOR("Jacob Pan <jacob.jun.pan@intel.com>");
MODULE_DESCRIPTION("Intel SCH SMBus driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:isch_smbus");
| {
"language": "C"
} |
// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <stdio.h>
#include "esp_attr.h"
#include "esp_err.h"
#include "esp_log.h"
#include "trax.h"
#include "soc/sensitive_reg.h"
#include "sdkconfig.h"
#define TRACEMEM_MUX_BLK0_NUM 19
#define TRACEMEM_MUX_BLK1_NUM 20
static const char *__attribute__((unused)) TAG = "trax";
// IDF-1785
int trax_enable(trax_ena_select_t which)
{
#ifndef CONFIG_ESP32S3_TRAX
ESP_LOGE(TAG, "Trax_enable called, but trax is disabled in menuconfig!");
return ESP_ERR_NO_MEM;
#endif
if (which != TRAX_ENA_PRO) {
return ESP_ERR_INVALID_ARG;
}
// REG_WRITE(DPORT_PMS_OCCUPY_3_REG, BIT(TRACEMEM_MUX_BLK1_NUM-4));
return ESP_OK;
}
| {
"language": "C"
} |
/*
* AMD 10Gb Ethernet driver
*
* This file is available to you under your choice of the following two
* licenses:
*
* License 1: GPLv2
*
* Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
*
* This file is free software; you may copy, redistribute and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or (at
* your option) any later version.
*
* This file is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* This file incorporates work covered by the following copyright and
* permission notice:
* The Synopsys DWC ETHER XGMAC Software Driver and documentation
* (hereinafter "Software") is an unsupported proprietary work of Synopsys,
* Inc. unless otherwise expressly agreed to in writing between Synopsys
* and you.
*
* The Software IS NOT an item of Licensed Software or Licensed Product
* under any End User Software License Agreement or Agreement for Licensed
* Product with Synopsys or any supplement thereto. Permission is hereby
* granted, free of charge, to any person obtaining a copy of this software
* annotated with this license and the Software, to deal in the Software
* without restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is furnished
* to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
* BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
* PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*
*
* License 2: Modified BSD
*
* Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Advanced Micro Devices, Inc. nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* This file incorporates work covered by the following copyright and
* permission notice:
* The Synopsys DWC ETHER XGMAC Software Driver and documentation
* (hereinafter "Software") is an unsupported proprietary work of Synopsys,
* Inc. unless otherwise expressly agreed to in writing between Synopsys
* and you.
*
* The Software IS NOT an item of Licensed Software or Licensed Product
* under any End User Software License Agreement or Agreement for Licensed
* Product with Synopsys or any supplement thereto. Permission is hereby
* granted, free of charge, to any person obtaining a copy of this software
* annotated with this license and the Software, to deal in the Software
* without restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is furnished
* to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
* BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
* PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/module.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/io.h>
#include <linux/notifier.h>
#include "xgbe.h"
#include "xgbe-common.h"
MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(XGBE_DRV_VERSION);
MODULE_DESCRIPTION(XGBE_DRV_DESC);
static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, " Network interface message level setting");
static const u32 default_msg_level = (NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
NETIF_MSG_IFUP);
static void xgbe_default_config(struct xgbe_prv_data *pdata)
{
DBGPR("-->xgbe_default_config\n");
pdata->blen = DMA_SBMR_BLEN_64;
pdata->pbl = DMA_PBL_128;
pdata->aal = 1;
pdata->rd_osr_limit = 8;
pdata->wr_osr_limit = 8;
pdata->tx_sf_mode = MTL_TSF_ENABLE;
pdata->tx_threshold = MTL_TX_THRESHOLD_64;
pdata->tx_osp_mode = DMA_OSP_ENABLE;
pdata->rx_sf_mode = MTL_RSF_DISABLE;
pdata->rx_threshold = MTL_RX_THRESHOLD_64;
pdata->pause_autoneg = 1;
pdata->tx_pause = 1;
pdata->rx_pause = 1;
pdata->phy_speed = SPEED_UNKNOWN;
pdata->power_down = 0;
DBGPR("<--xgbe_default_config\n");
}
static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
{
xgbe_init_function_ptrs_dev(&pdata->hw_if);
xgbe_init_function_ptrs_phy(&pdata->phy_if);
xgbe_init_function_ptrs_i2c(&pdata->i2c_if);
xgbe_init_function_ptrs_desc(&pdata->desc_if);
pdata->vdata->init_function_ptrs_phy_impl(&pdata->phy_if);
}
struct xgbe_prv_data *xgbe_alloc_pdata(struct device *dev)
{
struct xgbe_prv_data *pdata;
struct net_device *netdev;
netdev = alloc_etherdev_mq(sizeof(struct xgbe_prv_data),
XGBE_MAX_DMA_CHANNELS);
if (!netdev) {
dev_err(dev, "alloc_etherdev_mq failed\n");
return ERR_PTR(-ENOMEM);
}
SET_NETDEV_DEV(netdev, dev);
pdata = netdev_priv(netdev);
pdata->netdev = netdev;
pdata->dev = dev;
spin_lock_init(&pdata->lock);
spin_lock_init(&pdata->xpcs_lock);
mutex_init(&pdata->rss_mutex);
spin_lock_init(&pdata->tstamp_lock);
mutex_init(&pdata->i2c_mutex);
init_completion(&pdata->i2c_complete);
init_completion(&pdata->mdio_complete);
INIT_LIST_HEAD(&pdata->vxlan_ports);
pdata->msg_enable = netif_msg_init(debug, default_msg_level);
set_bit(XGBE_DOWN, &pdata->dev_state);
set_bit(XGBE_STOPPED, &pdata->dev_state);
return pdata;
}
void xgbe_free_pdata(struct xgbe_prv_data *pdata)
{
struct net_device *netdev = pdata->netdev;
free_netdev(netdev);
}
void xgbe_set_counts(struct xgbe_prv_data *pdata)
{
/* Set all the function pointers */
xgbe_init_all_fptrs(pdata);
/* Populate the hardware features */
xgbe_get_all_hw_features(pdata);
/* Set default max values if not provided */
if (!pdata->tx_max_channel_count)
pdata->tx_max_channel_count = pdata->hw_feat.tx_ch_cnt;
if (!pdata->rx_max_channel_count)
pdata->rx_max_channel_count = pdata->hw_feat.rx_ch_cnt;
if (!pdata->tx_max_q_count)
pdata->tx_max_q_count = pdata->hw_feat.tx_q_cnt;
if (!pdata->rx_max_q_count)
pdata->rx_max_q_count = pdata->hw_feat.rx_q_cnt;
/* Calculate the number of Tx and Rx rings to be created
* -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
* the number of Tx queues to the number of Tx channels
* enabled
* -Rx (DMA) Channels do not map 1-to-1 so use the actual
* number of Rx queues or maximum allowed
*/
pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(),
pdata->hw_feat.tx_ch_cnt);
pdata->tx_ring_count = min_t(unsigned int, pdata->tx_ring_count,
pdata->tx_max_channel_count);
pdata->tx_ring_count = min_t(unsigned int, pdata->tx_ring_count,
pdata->tx_max_q_count);
pdata->tx_q_count = pdata->tx_ring_count;
pdata->rx_ring_count = min_t(unsigned int, num_online_cpus(),
pdata->hw_feat.rx_ch_cnt);
pdata->rx_ring_count = min_t(unsigned int, pdata->rx_ring_count,
pdata->rx_max_channel_count);
pdata->rx_q_count = min_t(unsigned int, pdata->hw_feat.rx_q_cnt,
pdata->rx_max_q_count);
if (netif_msg_probe(pdata)) {
dev_dbg(pdata->dev, "TX/RX DMA channel count = %u/%u\n",
pdata->tx_ring_count, pdata->rx_ring_count);
dev_dbg(pdata->dev, "TX/RX hardware queue count = %u/%u\n",
pdata->tx_q_count, pdata->rx_q_count);
}
}
int xgbe_config_netdev(struct xgbe_prv_data *pdata)
{
struct net_device *netdev = pdata->netdev;
struct device *dev = pdata->dev;
int ret;
netdev->irq = pdata->dev_irq;
netdev->base_addr = (unsigned long)pdata->xgmac_regs;
memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len);
/* Initialize ECC timestamps */
pdata->tx_sec_period = jiffies;
pdata->tx_ded_period = jiffies;
pdata->rx_sec_period = jiffies;
pdata->rx_ded_period = jiffies;
pdata->desc_sec_period = jiffies;
pdata->desc_ded_period = jiffies;
/* Issue software reset to device */
ret = pdata->hw_if.exit(pdata);
if (ret) {
dev_err(dev, "software reset failed\n");
return ret;
}
/* Set default configuration data */
xgbe_default_config(pdata);
/* Set the DMA mask */
ret = dma_set_mask_and_coherent(dev,
DMA_BIT_MASK(pdata->hw_feat.dma_width));
if (ret) {
dev_err(dev, "dma_set_mask_and_coherent failed\n");
return ret;
}
/* Set default max values if not provided */
if (!pdata->tx_max_fifo_size)
pdata->tx_max_fifo_size = pdata->hw_feat.tx_fifo_size;
if (!pdata->rx_max_fifo_size)
pdata->rx_max_fifo_size = pdata->hw_feat.rx_fifo_size;
/* Set and validate the number of descriptors for a ring */
BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_TX_DESC_CNT);
pdata->tx_desc_count = XGBE_TX_DESC_CNT;
BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_RX_DESC_CNT);
pdata->rx_desc_count = XGBE_RX_DESC_CNT;
/* Adjust the number of queues based on interrupts assigned */
if (pdata->channel_irq_count) {
pdata->tx_ring_count = min_t(unsigned int, pdata->tx_ring_count,
pdata->channel_irq_count);
pdata->rx_ring_count = min_t(unsigned int, pdata->rx_ring_count,
pdata->channel_irq_count);
if (netif_msg_probe(pdata))
dev_dbg(pdata->dev,
"adjusted TX/RX DMA channel count = %u/%u\n",
pdata->tx_ring_count, pdata->rx_ring_count);
}
/* Initialize RSS hash key */
netdev_rss_key_fill(pdata->rss_key, sizeof(pdata->rss_key));
XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
/* Call MDIO/PHY initialization routine */
pdata->debugfs_an_cdr_workaround = pdata->vdata->an_cdr_workaround;
ret = pdata->phy_if.phy_init(pdata);
if (ret)
return ret;
/* Set device operations */
netdev->netdev_ops = xgbe_get_netdev_ops();
netdev->ethtool_ops = xgbe_get_ethtool_ops();
#ifdef CONFIG_AMD_XGBE_DCB
netdev->dcbnl_ops = xgbe_get_dcbnl_ops();
#endif
/* Set device features */
netdev->hw_features = NETIF_F_SG |
NETIF_F_IP_CSUM |
NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM |
NETIF_F_TSO |
NETIF_F_TSO6 |
NETIF_F_GRO |
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_FILTER;
if (pdata->hw_feat.rss)
netdev->hw_features |= NETIF_F_RXHASH;
if (pdata->hw_feat.vxn) {
netdev->hw_enc_features = NETIF_F_SG |
NETIF_F_IP_CSUM |
NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM |
NETIF_F_TSO |
NETIF_F_TSO6 |
NETIF_F_GRO |
NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM |
NETIF_F_RX_UDP_TUNNEL_PORT;
netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM |
NETIF_F_RX_UDP_TUNNEL_PORT;
pdata->vxlan_offloads_set = 1;
pdata->vxlan_features = NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM |
NETIF_F_RX_UDP_TUNNEL_PORT;
}
netdev->vlan_features |= NETIF_F_SG |
NETIF_F_IP_CSUM |
NETIF_F_IPV6_CSUM |
NETIF_F_TSO |
NETIF_F_TSO6;
netdev->features |= netdev->hw_features;
pdata->netdev_features = netdev->features;
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->min_mtu = 0;
netdev->max_mtu = XGMAC_JUMBO_PACKET_MTU;
/* Use default watchdog timeout */
netdev->watchdog_timeo = 0;
xgbe_init_rx_coalesce(pdata);
xgbe_init_tx_coalesce(pdata);
netif_carrier_off(netdev);
ret = register_netdev(netdev);
if (ret) {
dev_err(dev, "net device registration failed\n");
return ret;
}
if (IS_REACHABLE(CONFIG_PTP_1588_CLOCK))
xgbe_ptp_register(pdata);
xgbe_debugfs_init(pdata);
netif_dbg(pdata, drv, pdata->netdev, "%u Tx software queues\n",
pdata->tx_ring_count);
netif_dbg(pdata, drv, pdata->netdev, "%u Rx software queues\n",
pdata->rx_ring_count);
return 0;
}
void xgbe_deconfig_netdev(struct xgbe_prv_data *pdata)
{
struct net_device *netdev = pdata->netdev;
xgbe_debugfs_exit(pdata);
if (IS_REACHABLE(CONFIG_PTP_1588_CLOCK))
xgbe_ptp_unregister(pdata);
unregister_netdev(netdev);
pdata->phy_if.phy_exit(pdata);
}
static int xgbe_netdev_event(struct notifier_block *nb, unsigned long event,
void *data)
{
struct net_device *netdev = netdev_notifier_info_to_dev(data);
struct xgbe_prv_data *pdata = netdev_priv(netdev);
if (netdev->netdev_ops != xgbe_get_netdev_ops())
goto out;
switch (event) {
case NETDEV_CHANGENAME:
xgbe_debugfs_rename(pdata);
break;
default:
break;
}
out:
return NOTIFY_DONE;
}
static struct notifier_block xgbe_netdev_notifier = {
.notifier_call = xgbe_netdev_event,
};
static int __init xgbe_mod_init(void)
{
int ret;
ret = register_netdevice_notifier(&xgbe_netdev_notifier);
if (ret)
return ret;
ret = xgbe_platform_init();
if (ret)
return ret;
ret = xgbe_pci_init();
if (ret)
return ret;
return 0;
}
static void __exit xgbe_mod_exit(void)
{
xgbe_pci_exit();
xgbe_platform_exit();
unregister_netdevice_notifier(&xgbe_netdev_notifier);
}
module_init(xgbe_mod_init);
module_exit(xgbe_mod_exit);
| {
"language": "C"
} |
/*
* Copyright (c) 2006 Oracle. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/in.h>
#include <net/tcp.h>
#include "rds.h"
#include "tcp.h"
/*
* cheesy, but simple..
*/
static void rds_tcp_accept_worker(struct work_struct *work);
static DECLARE_WORK(rds_tcp_listen_work, rds_tcp_accept_worker);
static struct socket *rds_tcp_listen_sock;
static int rds_tcp_accept_one(struct socket *sock)
{
struct socket *new_sock = NULL;
struct rds_connection *conn;
int ret;
struct inet_sock *inet;
ret = sock_create_lite(sock->sk->sk_family, sock->sk->sk_type,
sock->sk->sk_protocol, &new_sock);
if (ret)
goto out;
new_sock->type = sock->type;
new_sock->ops = sock->ops;
ret = sock->ops->accept(sock, new_sock, O_NONBLOCK);
if (ret < 0)
goto out;
rds_tcp_tune(new_sock);
inet = inet_sk(new_sock->sk);
rdsdebug("accepted tcp %pI4:%u -> %pI4:%u\n",
&inet->inet_saddr, ntohs(inet->inet_sport),
&inet->inet_daddr, ntohs(inet->inet_dport));
conn = rds_conn_create(inet->inet_saddr, inet->inet_daddr,
&rds_tcp_transport, GFP_KERNEL);
if (IS_ERR(conn)) {
ret = PTR_ERR(conn);
goto out;
}
/*
* see the comment above rds_queue_delayed_reconnect()
*/
if (!rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING)) {
if (rds_conn_state(conn) == RDS_CONN_UP)
rds_tcp_stats_inc(s_tcp_listen_closed_stale);
else
rds_tcp_stats_inc(s_tcp_connect_raced);
rds_conn_drop(conn);
ret = 0;
goto out;
}
rds_tcp_set_callbacks(new_sock, conn);
rds_connect_complete(conn);
new_sock = NULL;
ret = 0;
out:
if (new_sock)
sock_release(new_sock);
return ret;
}
static void rds_tcp_accept_worker(struct work_struct *work)
{
while (rds_tcp_accept_one(rds_tcp_listen_sock) == 0)
cond_resched();
}
void rds_tcp_listen_data_ready(struct sock *sk, int bytes)
{
void (*ready)(struct sock *sk, int bytes);
rdsdebug("listen data ready sk %p\n", sk);
read_lock_bh(&sk->sk_callback_lock);
ready = sk->sk_user_data;
if (!ready) { /* check for teardown race */
ready = sk->sk_data_ready;
goto out;
}
/*
* ->sk_data_ready is also called for a newly established child socket
* before it has been accepted and the accepter has set up their
* data_ready.. we only want to queue listen work for our listening
* socket
*/
if (sk->sk_state == TCP_LISTEN)
queue_work(rds_wq, &rds_tcp_listen_work);
out:
read_unlock_bh(&sk->sk_callback_lock);
ready(sk, bytes);
}
int rds_tcp_listen_init(void)
{
struct sockaddr_in sin;
struct socket *sock = NULL;
int ret;
ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
if (ret < 0)
goto out;
sock->sk->sk_reuse = 1;
rds_tcp_nonagle(sock);
write_lock_bh(&sock->sk->sk_callback_lock);
sock->sk->sk_user_data = sock->sk->sk_data_ready;
sock->sk->sk_data_ready = rds_tcp_listen_data_ready;
write_unlock_bh(&sock->sk->sk_callback_lock);
sin.sin_family = PF_INET,
sin.sin_addr.s_addr = (__force u32)htonl(INADDR_ANY);
sin.sin_port = (__force u16)htons(RDS_TCP_PORT);
ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin));
if (ret < 0)
goto out;
ret = sock->ops->listen(sock, 64);
if (ret < 0)
goto out;
rds_tcp_listen_sock = sock;
sock = NULL;
out:
if (sock)
sock_release(sock);
return ret;
}
void rds_tcp_listen_stop(void)
{
struct socket *sock = rds_tcp_listen_sock;
struct sock *sk;
if (!sock)
return;
sk = sock->sk;
/* serialize with and prevent further callbacks */
lock_sock(sk);
write_lock_bh(&sk->sk_callback_lock);
if (sk->sk_user_data) {
sk->sk_data_ready = sk->sk_user_data;
sk->sk_user_data = NULL;
}
write_unlock_bh(&sk->sk_callback_lock);
release_sock(sk);
/* wait for accepts to stop and close the socket */
flush_workqueue(rds_wq);
sock_release(sock);
rds_tcp_listen_sock = NULL;
}
| {
"language": "C"
} |
// Copyright 2010 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Low-level API for VP8 decoder
//
// Author: Skal (pascal.massimino@gmail.com)
#ifndef WEBP_DEC_VP8_DEC_H_
#define WEBP_DEC_VP8_DEC_H_
#include "src/webp/decode.h"
#ifdef __cplusplus
extern "C" {
#endif
//------------------------------------------------------------------------------
// Lower-level API
//
// These functions provide fine-grained control of the decoding process.
// The call flow should resemble:
//
// VP8Io io;
// VP8InitIo(&io);
// io.data = data;
// io.data_size = size;
// /* customize io's functions (setup()/put()/teardown()) if needed. */
//
// VP8Decoder* dec = VP8New();
// int ok = VP8Decode(dec, &io);
// if (!ok) printf("Error: %s\n", VP8StatusMessage(dec));
// VP8Delete(dec);
// return ok;
// Input / Output
typedef struct VP8Io VP8Io;
typedef int (*VP8IoPutHook)(const VP8Io* io);
typedef int (*VP8IoSetupHook)(VP8Io* io);
typedef void (*VP8IoTeardownHook)(const VP8Io* io);
struct VP8Io {
// set by VP8GetHeaders()
int width, height; // picture dimensions, in pixels (invariable).
// These are the original, uncropped dimensions.
// The actual area passed to put() is stored
// in mb_w / mb_h fields.
// set before calling put()
int mb_y; // position of the current rows (in pixels)
int mb_w; // number of columns in the sample
int mb_h; // number of rows in the sample
const uint8_t* y, *u, *v; // rows to copy (in yuv420 format)
int y_stride; // row stride for luma
int uv_stride; // row stride for chroma
void* opaque; // user data
// called when fresh samples are available. Currently, samples are in
// YUV420 format, and can be up to width x 24 in size (depending on the
// in-loop filtering level, e.g.). Should return false in case of error
// or abort request. The actual size of the area to update is mb_w x mb_h
// in size, taking cropping into account.
VP8IoPutHook put;
// called just before starting to decode the blocks.
// Must return false in case of setup error, true otherwise. If false is
// returned, teardown() will NOT be called. But if the setup succeeded
// and true is returned, then teardown() will always be called afterward.
VP8IoSetupHook setup;
// Called just after block decoding is finished (or when an error occurred
// during put()). Is NOT called if setup() failed.
VP8IoTeardownHook teardown;
// this is a recommendation for the user-side yuv->rgb converter. This flag
// is set when calling setup() hook and can be overwritten by it. It then
// can be taken into consideration during the put() method.
int fancy_upsampling;
// Input buffer.
size_t data_size;
const uint8_t* data;
// If true, in-loop filtering will not be performed even if present in the
// bitstream. Switching off filtering may speed up decoding at the expense
// of more visible blocking. Note that output will also be non-compliant
// with the VP8 specifications.
int bypass_filtering;
// Cropping parameters.
int use_cropping;
int crop_left, crop_right, crop_top, crop_bottom;
// Scaling parameters.
int use_scaling;
int scaled_width, scaled_height;
// If non NULL, pointer to the alpha data (if present) corresponding to the
// start of the current row (That is: it is pre-offset by mb_y and takes
// cropping into account).
const uint8_t* a;
};
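// Illustrative sketch only (not part of the API): a minimal put() hook that
// copies the luma rows handed to it. MyFrame, frame->y and frame->stride are
// hypothetical user-side names.
//
//   static int MyPut(const VP8Io* io) {
//     MyFrame* const frame = (MyFrame*)io->opaque;
//     int j;
//     for (j = 0; j < io->mb_h; ++j) {
//       memcpy(frame->y + (io->mb_y + j) * frame->stride,
//              io->y + j * io->y_stride, io->mb_w);
//     }
//     return 1;   // non-zero means "keep decoding"
//   }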
// Internal, version-checked, entry point
int VP8InitIoInternal(VP8Io* const, int);
// Set the custom IO function pointers and user-data. The setter for IO hooks
// should be called before initiating incremental decoding. Returns true if
// WebPIDecoder object is successfully modified, false otherwise.
int WebPISetIOHooks(WebPIDecoder* const idec,
VP8IoPutHook put,
VP8IoSetupHook setup,
VP8IoTeardownHook teardown,
void* user_data);
// Main decoding object. This is an opaque structure.
typedef struct VP8Decoder VP8Decoder;
// Create a new decoder object.
VP8Decoder* VP8New(void);
// Must be called to make sure 'io' is initialized properly.
// Returns false in case of version mismatch. Upon such failure, no other
// decoding function should be called (VP8Decode, VP8GetHeaders, ...)
static WEBP_INLINE int VP8InitIo(VP8Io* const io) {
return VP8InitIoInternal(io, WEBP_DECODER_ABI_VERSION);
}
// Decode the VP8 frame header. Returns true if ok.
// Note: 'io->data' must be pointing to the start of the VP8 frame header.
int VP8GetHeaders(VP8Decoder* const dec, VP8Io* const io);
// Decode a picture. Will call VP8GetHeaders() if it wasn't done already.
// Returns false in case of error.
int VP8Decode(VP8Decoder* const dec, VP8Io* const io);
// Return current status of the decoder:
VP8StatusCode VP8Status(VP8Decoder* const dec);
// return readable string corresponding to the last status.
const char* VP8StatusMessage(VP8Decoder* const dec);
// Resets the decoder in its initial state, reclaiming memory.
// Not a mandatory call between calls to VP8Decode().
void VP8Clear(VP8Decoder* const dec);
// Destroy the decoder object.
void VP8Delete(VP8Decoder* const dec);
//------------------------------------------------------------------------------
// Miscellaneous VP8/VP8L bitstream probing functions.
// Returns true if the next 3 bytes in data contain the VP8 signature.
WEBP_EXTERN int VP8CheckSignature(const uint8_t* const data, size_t data_size);
// Validates the VP8 data-header and retrieves basic header information viz
// width and height. Returns 0 in case of formatting error. *width/*height
// can be passed NULL.
WEBP_EXTERN int VP8GetInfo(
const uint8_t* data,
size_t data_size, // data available so far
size_t chunk_size, // total data size expected in the chunk
int* const width, int* const height);
// Returns true if the next byte(s) in data is a VP8L signature.
WEBP_EXTERN int VP8LCheckSignature(const uint8_t* const data, size_t size);
// Validates the VP8L data-header and retrieves basic header information viz
// width, height and alpha. Returns 0 in case of formatting error.
// width/height/has_alpha can be passed NULL.
WEBP_EXTERN int VP8LGetInfo(
const uint8_t* data, size_t data_size, // data available so far
int* const width, int* const height, int* const has_alpha);
#ifdef __cplusplus
} // extern "C"
#endif
#endif // WEBP_DEC_VP8_DEC_H_
| {
"language": "C"
} |
/*-------------------------------------------------------------------------
*
* dynloader.h
* dynamic loader for HP-UX using the shared library mechanism
*
* Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
* $PostgreSQL$
*
* NOTES
* all functions are defined here -- it's impossible to trace the
* shl_* routines from the bundled HP-UX debugger.
*
*-------------------------------------------------------------------------
*/
/* System includes */
#include "fmgr.h"
extern void *pg_dlopen(char *filename);
extern PGFunction pg_dlsym(void *handle, char *funcname);
extern void pg_dlclose(void *handle);
extern char *pg_dlerror(void);
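/*
 * Illustrative sketch only (not part of this header): the expected call
 * sequence for these wrappers. The library path and symbol name are
 * hypothetical.
 *
 *  void *handle = pg_dlopen("/usr/lib/postgresql/mymodule.sl");
 *  if (handle == NULL)
 *      elog(ERROR, "could not load library: %s", pg_dlerror());
 *  PGFunction fn = pg_dlsym(handle, "my_c_function");
 *  if (fn == NULL)
 *      elog(ERROR, "could not find symbol: %s", pg_dlerror());
 *  ...
 *  pg_dlclose(handle);
 */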
| {
"language": "C"
} |
/*
* libmad - MPEG audio decoder library
* Copyright (C) 2000-2004 Underbit Technologies, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* $Id: huffman.h,v 1.11 2004/01/23 09:41:32 rob Exp $
*/
# ifndef LIBMAD_HUFFMAN_H
# define LIBMAD_HUFFMAN_H
union huffquad {
struct {
unsigned short final : 1;
unsigned short bits : 3;
unsigned short offset : 12;
} ptr;
struct {
unsigned short final : 1;
unsigned short hlen : 3;
unsigned short v : 1;
unsigned short w : 1;
unsigned short x : 1;
unsigned short y : 1;
} value;
unsigned short final : 1;
};
union huffpair {
struct {
unsigned short final : 1;
unsigned short bits : 3;
unsigned short offset : 12;
} ptr;
struct {
unsigned short final : 1;
unsigned short hlen : 3;
unsigned short x : 4;
unsigned short y : 4;
} value;
unsigned short final : 1;
};
struct hufftable {
union huffpair const *table;
unsigned short linbits;
unsigned short startbits;
};
extern union huffquad const *const mad_huff_quad_table[2];
extern struct hufftable const mad_huff_pair_table[32];
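/*
 * Illustrative sketch only (not part of this header): how a decoder walks
 * one of these tables. getbits(n) stands in for the caller's bit reader.
 * An entry with final == 0 is a pointer node: 'bits' more bits index into a
 * sub-table starting at 'offset'; final == 1 carries the decoded values.
 *
 *   union huffpair const *pair = &tab->table[getbits(tab->startbits)];
 *   while (!pair->final)
 *     pair = &tab->table[pair->ptr.offset + getbits(pair->ptr.bits)];
 *   x = pair->value.x;
 *   y = pair->value.y;
 */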
# endif
| {
"language": "C"
} |
/*
* Scilab ( http://www.scilab.org/ ) - This file is part of Scilab
* Copyright (C) 2009 - DIGITEO - Antoine ELIAS
*
* Copyright (C) 2012 - 2016 - Scilab Enterprises
*
* This file is hereby licensed under the terms of the GNU GPL v2.0,
* pursuant to article 5.3.4 of the CeCILL v.2.1.
* This file was originally licensed under the terms of the CeCILL v2.1,
* and continues to be available under such terms.
* For more information, see the COPYING file which you should have received
* along with this program.
*/
#ifndef __INTERNAL_DOUBLE_API__
#define __INTERNAL_DOUBLE_API__
//internal double functions
SciErr getCommonMatrixOfDouble(void* _pvCtx, int* _piAddress, char _cType, int _iComplex, int* _piRows, int* _piCols, double** _pdblReal, double** _pdblImg);
SciErr allocCommonMatrixOfDouble(void* _pvCtx, int _iVar, char _cType, int _iComplex, int _iRows, int _iCols, double** _pdblReal, double** _pdblImg);
SciErr createCommonNamedMatrixOfDouble(void* _pvCtx, const char* _pstName, int _iComplex, int _iRows, int _iCols, const double* _pdblReal, const double* _pdblImg);
SciErr readCommonNamedMatrixOfDouble(void* _pvCtx, const char* _pstName, int _iComplex, int* _piRows, int* _piCols, double* _pdblReal, double* _pdblImg);
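// Illustrative sketch only: the public accessors in api_double.c are thin
// wrappers over these "common" helpers, along the lines of
//
//   SciErr getMatrixOfDouble(void* _pvCtx, int* _piAddress,
//                            int* _piRows, int* _piCols, double** _pdblReal)
//   {
//       return getCommonMatrixOfDouble(_pvCtx, _piAddress, '$', 0,
//                                      _piRows, _piCols, _pdblReal, NULL);
//   }
//
// The '$' type character passed for _cType is an assumption made for the
// sketch.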
#endif /* __INTERNAL_DOUBLE_API__ */
| {
"language": "C"
} |
/* SPU2-X, A plugin for Emulating the Sound Processing Unit of the Playstation 2
* Developed and maintained by the Pcsx2 Development Team.
*
* Original portions from SPU2ghz are (c) 2008 by David Quintana [gigaherz]
*
* SPU2-X is free software: you can redistribute it and/or modify it under the terms
* of the GNU Lesser General Public License as published by the Free Software Found-
* ation, either version 3 of the License, or (at your option) any later version.
*
* SPU2-X is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with SPU2-X. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __LINUX_H__
#define __LINUX_H__
#include <assert.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <sys/soundcard.h>
#include <unistd.h>
#define SAMPLE_RATE 48000L
// Pull in from Alsa.cpp
extern int AlsaSetupSound();
extern void AlsaRemoveSound();
extern int AlsaSoundGetBytesBuffered();
extern void AlsaSoundFeedVoiceData(unsigned char *pSound, long lBytes);
extern int SetupSound();
extern void RemoveSound();
extern int SoundGetBytesBuffered();
extern void SoundFeedVoiceData(unsigned char *pSound, long lBytes);
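// Illustrative only (not part of this header): the expected lifetime of the
// sound backend picked at runtime. running, threshold, pcm_buf and pcm_bytes
// are hypothetical, and the 0-on-success convention is an assumption.
//
//   if (SetupSound() == 0) {              // assumed success convention
//       while (running) {
//           if (SoundGetBytesBuffered() < threshold)
//               SoundFeedVoiceData(pcm_buf, pcm_bytes);
//       }
//       RemoveSound();
//   }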
#endif // __LINUX_H__
| {
"language": "C"
} |
#include "cpu_features.h"
/* Include the BMI2-optimized version? */
#undef DISPATCH_BMI2
#if !defined(__BMI2__) && X86_CPU_FEATURES_ENABLED && \
COMPILER_SUPPORTS_BMI2_TARGET
# define FUNCNAME deflate_decompress_bmi2
# define ATTRIBUTES __attribute__((target("bmi2")))
# define DISPATCH 1
# define DISPATCH_BMI2 1
# include "../decompress_template.h"
#endif
#ifdef DISPATCH
static inline decompress_func_t
arch_select_decompress_func(void)
{
u32 features = get_cpu_features();
#ifdef DISPATCH_BMI2
if (features & X86_CPU_FEATURE_BMI2)
return deflate_decompress_bmi2;
#endif
return NULL;
}
#endif /* DISPATCH */
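/*
 * Illustrative sketch only (not from this file): a selector such as
 * arch_select_decompress_func() is normally consumed through a function
 * pointer that starts out at a one-shot dispatch stub and is replaced on the
 * first call. decompress_impl and deflate_decompress_default below are
 * assumed names for the sketch.
 *
 *   static decompress_func_t decompress_impl;
 *
 *   // executed on the first decompress request only
 *   decompress_func_t f = arch_select_decompress_func();
 *   if (f == NULL)
 *       f = deflate_decompress_default;   // portable fallback
 *   decompress_impl = f;                  // later calls go straight here
 */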
| {
"language": "C"
} |
#define VERSION "0.23"
/* ns83820.c by Benjamin LaHaise with contributions.
*
* Questions/comments/discussion to linux-ns83820@kvack.org.
*
* $Revision: 1.34.2.23 $
*
* Copyright 2001 Benjamin LaHaise.
* Copyright 2001, 2002 Red Hat.
*
* Mmmm, chocolate vanilla mocha...
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*
* ChangeLog
* =========
* 20010414 0.1 - created
* 20010622 0.2 - basic rx and tx.
* 20010711 0.3 - added duplex and link state detection support.
* 20010713 0.4 - zero copy, no hangs.
* 0.5 - 64 bit dma support (davem will hate me for this)
* - disable jumbo frames to avoid tx hangs
* - work around tx deadlocks on my 1.02 card via
* fiddling with TXCFG
* 20010810 0.6 - use pci dma api for ringbuffers, work on ia64
* 20010816 0.7 - misc cleanups
* 20010826 0.8 - fix critical zero copy bugs
* 0.9 - internal experiment
* 20010827 0.10 - fix ia64 unaligned access.
* 20010906 0.11 - accept all packets with checksum errors as
* otherwise fragments get lost
* - fix >> 32 bugs
* 0.12 - add statistics counters
* - add allmulti/promisc support
* 20011009 0.13 - hotplug support, other smaller pci api cleanups
* 20011204 0.13a - optical transceiver support added
* by Michael Clark <michael@metaparadigm.com>
* 20011205 0.13b - call register_netdev earlier in initialization
* suppress duplicate link status messages
* 20011117 0.14 - ethtool GDRVINFO, GLINK support from jgarzik
* 20011204 0.15 get ppc (big endian) working
* 20011218 0.16 various cleanups
* 20020310 0.17 speedups
* 20020610 0.18 - actually use the pci dma api for highmem
* - remove pci latency register fiddling
* 0.19 - better bist support
* - add ihr and reset_phy parameters
* - gmii bus probing
* - fix missed txok introduced during performance
* tuning
* 0.20 - fix stupid RFEN thinko. i am such a smurf.
 * 20040828 0.21 - add hardware vlan acceleration
* by Neil Horman <nhorman@redhat.com>
* 20050406 0.22 - improved DAC ifdefs from Andi Kleen
* - removal of dead code from Adrian Bunk
* - fix half duplex collision behaviour
* Driver Overview
* ===============
*
* This driver was originally written for the National Semiconductor
* 83820 chip, a 10/100/1000 Mbps 64 bit PCI ethernet NIC. Hopefully
* this code will turn out to be a) clean, b) correct, and c) fast.
* With that in mind, I'm aiming to split the code up as much as
 * reasonably possible. At present there are four major sections that
* break down into a) packet receive, b) packet transmit, c) link
* management, d) initialization and configuration. Where possible,
* these code paths are designed to run in parallel.
*
* This driver has been tested and found to work with the following
* cards (in no particular order):
*
* Cameo SOHO-GA2000T SOHO-GA2500T
* D-Link DGE-500T
* PureData PDP8023Z-TG
* SMC SMC9452TX SMC9462TX
* Netgear GA621
*
* Special thanks to SMC for providing hardware to test this driver on.
*
* Reports of success or failure would be greatly appreciated.
*/
//#define dprintk printk
#define dprintk(x...) do { } while (0)
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/init.h>
#include <linux/ip.h> /* for iph */
#include <linux/in.h> /* for IPPROTO_... */
#include <linux/compiler.h>
#include <linux/prefetch.h>
#include <linux/ethtool.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/if_vlan.h>
#include <linux/rtnetlink.h>
#include <linux/jiffies.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#define DRV_NAME "ns83820"
/* Global parameters. See module_param near the bottom. */
static int ihr = 2;
static int reset_phy = 0;
static int lnksts = 0; /* CFG_LNKSTS bit polarity */
/* Dprintk is used for more interesting debug events */
#undef Dprintk
#define Dprintk dprintk
/* tunables */
#define RX_BUF_SIZE 1500 /* 8192 */
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define NS83820_VLAN_ACCEL_SUPPORT
#endif
/* Must not exceed ~65000. */
#define NR_RX_DESC 64
#define NR_TX_DESC 128
/* not tunable */
#define REAL_RX_BUF_SIZE (RX_BUF_SIZE + 14) /* rx/tx mac addr + type */
#define MIN_TX_DESC_FREE 8
/* register defines */
#define CFGCS 0x04
#define CR_TXE 0x00000001
#define CR_TXD 0x00000002
/* Ramit : Here's a tip, don't do a RXD immediately followed by an RXE
* The Receive engine skips one descriptor and moves
* onto the next one!! */
#define CR_RXE 0x00000004
#define CR_RXD 0x00000008
#define CR_TXR 0x00000010
#define CR_RXR 0x00000020
#define CR_SWI 0x00000080
#define CR_RST 0x00000100
#define PTSCR_EEBIST_FAIL 0x00000001
#define PTSCR_EEBIST_EN 0x00000002
#define PTSCR_EELOAD_EN 0x00000004
#define PTSCR_RBIST_FAIL 0x000001b8
#define PTSCR_RBIST_DONE 0x00000200
#define PTSCR_RBIST_EN 0x00000400
#define PTSCR_RBIST_RST 0x00002000
#define MEAR_EEDI 0x00000001
#define MEAR_EEDO 0x00000002
#define MEAR_EECLK 0x00000004
#define MEAR_EESEL 0x00000008
#define MEAR_MDIO 0x00000010
#define MEAR_MDDIR 0x00000020
#define MEAR_MDC 0x00000040
#define ISR_TXDESC3 0x40000000
#define ISR_TXDESC2 0x20000000
#define ISR_TXDESC1 0x10000000
#define ISR_TXDESC0 0x08000000
#define ISR_RXDESC3 0x04000000
#define ISR_RXDESC2 0x02000000
#define ISR_RXDESC1 0x01000000
#define ISR_RXDESC0 0x00800000
#define ISR_TXRCMP 0x00400000
#define ISR_RXRCMP 0x00200000
#define ISR_DPERR 0x00100000
#define ISR_SSERR 0x00080000
#define ISR_RMABT 0x00040000
#define ISR_RTABT 0x00020000
#define ISR_RXSOVR 0x00010000
#define ISR_HIBINT 0x00008000
#define ISR_PHY 0x00004000
#define ISR_PME 0x00002000
#define ISR_SWI 0x00001000
#define ISR_MIB 0x00000800
#define ISR_TXURN 0x00000400
#define ISR_TXIDLE 0x00000200
#define ISR_TXERR 0x00000100
#define ISR_TXDESC 0x00000080
#define ISR_TXOK 0x00000040
#define ISR_RXORN 0x00000020
#define ISR_RXIDLE 0x00000010
#define ISR_RXEARLY 0x00000008
#define ISR_RXERR 0x00000004
#define ISR_RXDESC 0x00000002
#define ISR_RXOK 0x00000001
#define TXCFG_CSI 0x80000000
#define TXCFG_HBI 0x40000000
#define TXCFG_MLB 0x20000000
#define TXCFG_ATP 0x10000000
#define TXCFG_ECRETRY 0x00800000
#define TXCFG_BRST_DIS 0x00080000
#define TXCFG_MXDMA1024 0x00000000
#define TXCFG_MXDMA512 0x00700000
#define TXCFG_MXDMA256 0x00600000
#define TXCFG_MXDMA128 0x00500000
#define TXCFG_MXDMA64 0x00400000
#define TXCFG_MXDMA32 0x00300000
#define TXCFG_MXDMA16 0x00200000
#define TXCFG_MXDMA8 0x00100000
#define CFG_LNKSTS 0x80000000
#define CFG_SPDSTS 0x60000000
#define CFG_SPDSTS1 0x40000000
#define CFG_SPDSTS0 0x20000000
#define CFG_DUPSTS 0x10000000
#define CFG_TBI_EN 0x01000000
#define CFG_MODE_1000 0x00400000
/* Ramit : Don't ever use AUTO_1000, it never works and is buggy.
* Read the Phy response and then configure the MAC accordingly */
#define CFG_AUTO_1000 0x00200000
#define CFG_PINT_CTL 0x001c0000
#define CFG_PINT_DUPSTS 0x00100000
#define CFG_PINT_LNKSTS 0x00080000
#define CFG_PINT_SPDSTS 0x00040000
#define CFG_TMRTEST 0x00020000
#define CFG_MRM_DIS 0x00010000
#define CFG_MWI_DIS 0x00008000
#define CFG_T64ADDR 0x00004000
#define CFG_PCI64_DET 0x00002000
#define CFG_DATA64_EN 0x00001000
#define CFG_M64ADDR 0x00000800
#define CFG_PHY_RST 0x00000400
#define CFG_PHY_DIS 0x00000200
#define CFG_EXTSTS_EN 0x00000100
#define CFG_REQALG 0x00000080
#define CFG_SB 0x00000040
#define CFG_POW 0x00000020
#define CFG_EXD 0x00000010
#define CFG_PESEL 0x00000008
#define CFG_BROM_DIS 0x00000004
#define CFG_EXT_125 0x00000002
#define CFG_BEM 0x00000001
#define EXTSTS_UDPPKT 0x00200000
#define EXTSTS_TCPPKT 0x00080000
#define EXTSTS_IPPKT 0x00020000
#define EXTSTS_VPKT 0x00010000
#define EXTSTS_VTG_MASK 0x0000ffff
#define SPDSTS_POLARITY (CFG_SPDSTS1 | CFG_SPDSTS0 | CFG_DUPSTS | (lnksts ? CFG_LNKSTS : 0))
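/* Note on how this mask is used (derived from the code below, e.g.
 * phy_intr() and ns83820_get_link(), which always read the CFG register
 * as readl(dev->base + CFG) ^ SPDSTS_POLARITY): the XOR flips the
 * speed/duplex status bits - and, when the lnksts module parameter is
 * set, the link status bit - so the driver can treat them as active-high
 * regardless of the polarity the board reports them with.
 */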
#define MIBC_MIBS 0x00000008
#define MIBC_ACLR 0x00000004
#define MIBC_FRZ 0x00000002
#define MIBC_WRN 0x00000001
#define PCR_PSEN (1 << 31)
#define PCR_PS_MCAST (1 << 30)
#define PCR_PS_DA (1 << 29)
#define PCR_STHI_8 (3 << 23)
#define PCR_STLO_4 (1 << 23)
#define PCR_FFHI_8K (3 << 21)
#define PCR_FFLO_4K (1 << 21)
#define PCR_PAUSE_CNT 0xFFFE
#define RXCFG_AEP 0x80000000
#define RXCFG_ARP 0x40000000
#define RXCFG_STRIPCRC 0x20000000
#define RXCFG_RX_FD 0x10000000
#define RXCFG_ALP 0x08000000
#define RXCFG_AIRL 0x04000000
#define RXCFG_MXDMA512 0x00700000
#define RXCFG_DRTH 0x0000003e
#define RXCFG_DRTH0 0x00000002
#define RFCR_RFEN 0x80000000
#define RFCR_AAB 0x40000000
#define RFCR_AAM 0x20000000
#define RFCR_AAU 0x10000000
#define RFCR_APM 0x08000000
#define RFCR_APAT 0x07800000
#define RFCR_APAT3 0x04000000
#define RFCR_APAT2 0x02000000
#define RFCR_APAT1 0x01000000
#define RFCR_APAT0 0x00800000
#define RFCR_AARP 0x00400000
#define RFCR_MHEN 0x00200000
#define RFCR_UHEN 0x00100000
#define RFCR_ULM 0x00080000
#define VRCR_RUDPE 0x00000080
#define VRCR_RTCPE 0x00000040
#define VRCR_RIPE 0x00000020
#define VRCR_IPEN 0x00000010
#define VRCR_DUTF 0x00000008
#define VRCR_DVTF 0x00000004
#define VRCR_VTREN 0x00000002
#define VRCR_VTDEN 0x00000001
#define VTCR_PPCHK 0x00000008
#define VTCR_GCHK 0x00000004
#define VTCR_VPPTI 0x00000002
#define VTCR_VGTI 0x00000001
#define CR 0x00
#define CFG 0x04
#define MEAR 0x08
#define PTSCR 0x0c
#define ISR 0x10
#define IMR 0x14
#define IER 0x18
#define IHR 0x1c
#define TXDP 0x20
#define TXDP_HI 0x24
#define TXCFG 0x28
#define GPIOR 0x2c
#define RXDP 0x30
#define RXDP_HI 0x34
#define RXCFG 0x38
#define PQCR 0x3c
#define WCSR 0x40
#define PCR 0x44
#define RFCR 0x48
#define RFDR 0x4c
#define SRR 0x58
#define VRCR 0xbc
#define VTCR 0xc0
#define VDR 0xc4
#define CCSR 0xcc
#define TBICR 0xe0
#define TBISR 0xe4
#define TANAR 0xe8
#define TANLPAR 0xec
#define TANER 0xf0
#define TESR 0xf4
#define TBICR_MR_AN_ENABLE 0x00001000
#define TBICR_MR_RESTART_AN 0x00000200
#define TBISR_MR_LINK_STATUS 0x00000020
#define TBISR_MR_AN_COMPLETE 0x00000004
#define TANAR_PS2 0x00000100
#define TANAR_PS1 0x00000080
#define TANAR_HALF_DUP 0x00000040
#define TANAR_FULL_DUP 0x00000020
#define GPIOR_GP5_OE 0x00000200
#define GPIOR_GP4_OE 0x00000100
#define GPIOR_GP3_OE 0x00000080
#define GPIOR_GP2_OE 0x00000040
#define GPIOR_GP1_OE 0x00000020
#define GPIOR_GP3_OUT 0x00000004
#define GPIOR_GP1_OUT 0x00000001
#define LINK_AUTONEGOTIATE 0x01
#define LINK_DOWN 0x02
#define LINK_UP 0x04
#define HW_ADDR_LEN sizeof(dma_addr_t)
#define desc_addr_set(desc, addr) \
do { \
((desc)[0] = cpu_to_le32(addr)); \
if (HW_ADDR_LEN == 8) \
(desc)[1] = cpu_to_le32(((u64)addr) >> 32); \
} while(0)
#define desc_addr_get(desc) \
(le32_to_cpu((desc)[0]) | \
(HW_ADDR_LEN == 8 ? ((dma_addr_t)le32_to_cpu((desc)[1]))<<32 : 0))
#define DESC_LINK 0
#define DESC_BUFPTR (DESC_LINK + HW_ADDR_LEN/4)
#define DESC_CMDSTS (DESC_BUFPTR + HW_ADDR_LEN/4)
#define DESC_EXTSTS (DESC_CMDSTS + 4/4)
#define CMDSTS_OWN 0x80000000
#define CMDSTS_MORE 0x40000000
#define CMDSTS_INTR 0x20000000
#define CMDSTS_ERR 0x10000000
#define CMDSTS_OK 0x08000000
#define CMDSTS_RUNT 0x00200000
#define CMDSTS_LEN_MASK 0x0000ffff
#define CMDSTS_DEST_MASK 0x01800000
#define CMDSTS_DEST_SELF 0x00800000
#define CMDSTS_DEST_MULTI 0x01000000
#define DESC_SIZE 8 /* Should be cache line sized */
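/* For orientation only: with a 32 bit dma_addr_t (HW_ADDR_LEN == 4) the
 * DESC_* word offsets above describe a descriptor that could be pictured
 * as the hypothetical struct below. The driver never uses such a struct -
 * it indexes raw __le32 arrays with the DESC_* offsets - and with a 64 bit
 * dma_addr_t the link and bufptr fields each take two words, shifting
 * cmdsts/extsts up accordingly.
 */
#if 0
struct ns83820_desc32 {
__le32 link;	/* DESC_LINK: bus address of the next descriptor */
__le32 bufptr;	/* DESC_BUFPTR: bus address of the data buffer */
__le32 cmdsts;	/* DESC_CMDSTS: CMDSTS_OWN/OK/INTR plus the length */
__le32 extsts;	/* DESC_EXTSTS: checksum and VLAN status bits */
__le32 pad[4];	/* DESC_SIZE is 8 words, the rest is padding */
};
#endif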
struct rx_info {
spinlock_t lock;
int up;
unsigned long idle;
struct sk_buff *skbs[NR_RX_DESC];
__le32 *next_rx_desc;
u16 next_rx, next_empty;
__le32 *descs;
dma_addr_t phy_descs;
};
struct ns83820 {
struct net_device_stats stats;
u8 __iomem *base;
struct pci_dev *pci_dev;
struct net_device *ndev;
#ifdef NS83820_VLAN_ACCEL_SUPPORT
struct vlan_group *vlgrp;
#endif
struct rx_info rx_info;
struct tasklet_struct rx_tasklet;
unsigned ihr;
struct work_struct tq_refill;
/* protects everything below. irqsave when using. */
spinlock_t misc_lock;
u32 CFG_cache;
u32 MEAR_cache;
u32 IMR_cache;
unsigned linkstate;
spinlock_t tx_lock;
u16 tx_done_idx;
u16 tx_idx;
volatile u16 tx_free_idx; /* idx of free desc chain */
u16 tx_intr_idx;
atomic_t nr_tx_skbs;
struct sk_buff *tx_skbs[NR_TX_DESC];
char pad[16] __attribute__((aligned(16)));
__le32 *tx_descs;
dma_addr_t tx_phy_descs;
struct timer_list tx_watchdog;
};
static inline struct ns83820 *PRIV(struct net_device *dev)
{
return netdev_priv(dev);
}
#define __kick_rx(dev) writel(CR_RXE, dev->base + CR)
static inline void kick_rx(struct net_device *ndev)
{
struct ns83820 *dev = PRIV(ndev);
dprintk("kick_rx: maybe kicking\n");
if (test_and_clear_bit(0, &dev->rx_info.idle)) {
dprintk("actually kicking\n");
writel(dev->rx_info.phy_descs +
(4 * DESC_SIZE * dev->rx_info.next_rx),
dev->base + RXDP);
if (dev->rx_info.next_rx == dev->rx_info.next_empty)
printk(KERN_DEBUG "%s: uh-oh: next_rx == next_empty???\n",
ndev->name);
__kick_rx(dev);
}
}
//free = (tx_done_idx + NR_TX_DESC-2 - free_idx) % NR_TX_DESC
#define start_tx_okay(dev) \
(((NR_TX_DESC-2 + dev->tx_done_idx - dev->tx_free_idx) % NR_TX_DESC) > MIN_TX_DESC_FREE)
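/* Worked example of the expression above (pure arithmetic, using the
 * NR_TX_DESC == 128 and MIN_TX_DESC_FREE == 8 values defined earlier):
 * with tx_done_idx == 10 and tx_free_idx == 120 it evaluates to
 * (126 + 10 - 120) % 128 == 16 > 8, so transmission may proceed; with
 * tx_free_idx wrapped round to 0 it evaluates to (126 + 10) % 128 == 8,
 * which is not greater than MIN_TX_DESC_FREE, so the queue stays stopped.
 */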
#ifdef NS83820_VLAN_ACCEL_SUPPORT
static void ns83820_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
{
struct ns83820 *dev = PRIV(ndev);
spin_lock_irq(&dev->misc_lock);
spin_lock(&dev->tx_lock);
dev->vlgrp = grp;
spin_unlock(&dev->tx_lock);
spin_unlock_irq(&dev->misc_lock);
}
#endif
/* Packet Receiver
*
* The hardware supports linked lists of receive descriptors for
 * which ownership is transferred back and forth by means of an
* ownership bit. While the hardware does support the use of a
* ring for receive descriptors, we only make use of a chain in
* an attempt to reduce bus traffic under heavy load scenarios.
* This will also make bugs a bit more obvious. The current code
* only makes use of a single rx chain; I hope to implement
* priority based rx for version 1.0. Goal: even under overload
* conditions, still route realtime traffic with as low jitter as
* possible.
*/
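/* Reading of the CMDSTS_OWN handshake as implemented below (code
 * observation, not taken from chip documentation): ns83820_add_rx_skb()
 * hands a buffer to the chip by writing cmdsts with CMDSTS_OWN clear,
 * the chip sets CMDSTS_OWN together with its status bits once it has
 * filled the buffer, and rx_irq() reaps exactly those descriptors whose
 * CMDSTS_OWN bit has come back set. clear_rx_desc() parks a descriptor
 * by writing CMDSTS_OWN with a zero buffer, presumably so it is treated
 * as already consumed until a real skb is attached.
 */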
static inline void build_rx_desc(struct ns83820 *dev, __le32 *desc, dma_addr_t link, dma_addr_t buf, u32 cmdsts, u32 extsts)
{
desc_addr_set(desc + DESC_LINK, link);
desc_addr_set(desc + DESC_BUFPTR, buf);
desc[DESC_EXTSTS] = cpu_to_le32(extsts);
mb();
desc[DESC_CMDSTS] = cpu_to_le32(cmdsts);
}
#define nr_rx_empty(dev) ((NR_RX_DESC-2 + dev->rx_info.next_rx - dev->rx_info.next_empty) % NR_RX_DESC)
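/* As with the tx ring, the "-2" here presumably keeps a couple of
 * descriptors permanently unused so that next_rx == next_empty can only
 * mean "nothing refilled" and never "completely full"; see the
 * nr_rx_empty(dev) <= 2 guards in ns83820_add_rx_skb() and rx_refill().
 */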
static inline int ns83820_add_rx_skb(struct ns83820 *dev, struct sk_buff *skb)
{
unsigned next_empty;
u32 cmdsts;
__le32 *sg;
dma_addr_t buf;
next_empty = dev->rx_info.next_empty;
/* don't overrun last rx marker */
if (unlikely(nr_rx_empty(dev) <= 2)) {
kfree_skb(skb);
return 1;
}
#if 0
dprintk("next_empty[%d] nr_used[%d] next_rx[%d]\n",
dev->rx_info.next_empty,
dev->rx_info.nr_used,
dev->rx_info.next_rx
);
#endif
sg = dev->rx_info.descs + (next_empty * DESC_SIZE);
BUG_ON(NULL != dev->rx_info.skbs[next_empty]);
dev->rx_info.skbs[next_empty] = skb;
dev->rx_info.next_empty = (next_empty + 1) % NR_RX_DESC;
cmdsts = REAL_RX_BUF_SIZE | CMDSTS_INTR;
buf = pci_map_single(dev->pci_dev, skb->data,
REAL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
build_rx_desc(dev, sg, 0, buf, cmdsts, 0);
/* update link of previous rx */
if (likely(next_empty != dev->rx_info.next_rx))
dev->rx_info.descs[((NR_RX_DESC + next_empty - 1) % NR_RX_DESC) * DESC_SIZE] = cpu_to_le32(dev->rx_info.phy_descs + (next_empty * DESC_SIZE * 4));
return 0;
}
static inline int rx_refill(struct net_device *ndev, gfp_t gfp)
{
struct ns83820 *dev = PRIV(ndev);
unsigned i;
unsigned long flags = 0;
if (unlikely(nr_rx_empty(dev) <= 2))
return 0;
dprintk("rx_refill(%p)\n", ndev);
if (gfp == GFP_ATOMIC)
spin_lock_irqsave(&dev->rx_info.lock, flags);
for (i=0; i<NR_RX_DESC; i++) {
struct sk_buff *skb;
long res;
/* extra 16 bytes for alignment */
skb = __netdev_alloc_skb(ndev, REAL_RX_BUF_SIZE+16, gfp);
if (unlikely(!skb))
break;
skb_reserve(skb, skb->data - PTR_ALIGN(skb->data, 16));
if (gfp != GFP_ATOMIC)
spin_lock_irqsave(&dev->rx_info.lock, flags);
res = ns83820_add_rx_skb(dev, skb);
if (gfp != GFP_ATOMIC)
spin_unlock_irqrestore(&dev->rx_info.lock, flags);
if (res) {
i = 1;
break;
}
}
if (gfp == GFP_ATOMIC)
spin_unlock_irqrestore(&dev->rx_info.lock, flags);
return i ? 0 : -ENOMEM;
}
static void rx_refill_atomic(struct net_device *ndev)
{
rx_refill(ndev, GFP_ATOMIC);
}
/* REFILL */
static inline void queue_refill(struct work_struct *work)
{
struct ns83820 *dev = container_of(work, struct ns83820, tq_refill);
struct net_device *ndev = dev->ndev;
rx_refill(ndev, GFP_KERNEL);
if (dev->rx_info.up)
kick_rx(ndev);
}
static inline void clear_rx_desc(struct ns83820 *dev, unsigned i)
{
build_rx_desc(dev, dev->rx_info.descs + (DESC_SIZE * i), 0, 0, CMDSTS_OWN, 0);
}
static void phy_intr(struct net_device *ndev)
{
struct ns83820 *dev = PRIV(ndev);
static const char *speeds[] = { "10", "100", "1000", "1000(?)", "1000F" };
u32 cfg, new_cfg;
u32 tbisr, tanar, tanlpar;
int speed, fullduplex, newlinkstate;
cfg = readl(dev->base + CFG) ^ SPDSTS_POLARITY;
if (dev->CFG_cache & CFG_TBI_EN) {
/* we have an optical transceiver */
tbisr = readl(dev->base + TBISR);
tanar = readl(dev->base + TANAR);
tanlpar = readl(dev->base + TANLPAR);
dprintk("phy_intr: tbisr=%08x, tanar=%08x, tanlpar=%08x\n",
tbisr, tanar, tanlpar);
if ( (fullduplex = (tanlpar & TANAR_FULL_DUP) &&
(tanar & TANAR_FULL_DUP)) ) {
/* both of us are full duplex */
writel(readl(dev->base + TXCFG)
| TXCFG_CSI | TXCFG_HBI | TXCFG_ATP,
dev->base + TXCFG);
writel(readl(dev->base + RXCFG) | RXCFG_RX_FD,
dev->base + RXCFG);
/* Light up full duplex LED */
writel(readl(dev->base + GPIOR) | GPIOR_GP1_OUT,
dev->base + GPIOR);
} else if (((tanlpar & TANAR_HALF_DUP) &&
(tanar & TANAR_HALF_DUP)) ||
((tanlpar & TANAR_FULL_DUP) &&
(tanar & TANAR_HALF_DUP)) ||
((tanlpar & TANAR_HALF_DUP) &&
(tanar & TANAR_FULL_DUP))) {
/* one or both of us are half duplex */
writel((readl(dev->base + TXCFG)
& ~(TXCFG_CSI | TXCFG_HBI)) | TXCFG_ATP,
dev->base + TXCFG);
writel(readl(dev->base + RXCFG) & ~RXCFG_RX_FD,
dev->base + RXCFG);
/* Turn off full duplex LED */
writel(readl(dev->base + GPIOR) & ~GPIOR_GP1_OUT,
dev->base + GPIOR);
}
speed = 4; /* 1000F */
} else {
/* we have a copper transceiver */
new_cfg = dev->CFG_cache & ~(CFG_SB | CFG_MODE_1000 | CFG_SPDSTS);
if (cfg & CFG_SPDSTS1)
new_cfg |= CFG_MODE_1000;
else
new_cfg &= ~CFG_MODE_1000;
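/* Dividing by CFG_SPDSTS0 (1 << 29) just shifts the CFG_SPDSTS field
 * down, so this extracts the two speed status bits as an index 0-3 into
 * the speeds[] table above; the TBI branch forces index 4 instead. */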
speed = ((cfg / CFG_SPDSTS0) & 3);
fullduplex = (cfg & CFG_DUPSTS);
if (fullduplex) {
new_cfg |= CFG_SB;
writel(readl(dev->base + TXCFG)
| TXCFG_CSI | TXCFG_HBI,
dev->base + TXCFG);
writel(readl(dev->base + RXCFG) | RXCFG_RX_FD,
dev->base + RXCFG);
} else {
writel(readl(dev->base + TXCFG)
& ~(TXCFG_CSI | TXCFG_HBI),
dev->base + TXCFG);
writel(readl(dev->base + RXCFG) & ~(RXCFG_RX_FD),
dev->base + RXCFG);
}
if ((cfg & CFG_LNKSTS) &&
((new_cfg ^ dev->CFG_cache) != 0)) {
writel(new_cfg, dev->base + CFG);
dev->CFG_cache = new_cfg;
}
dev->CFG_cache &= ~CFG_SPDSTS;
dev->CFG_cache |= cfg & CFG_SPDSTS;
}
newlinkstate = (cfg & CFG_LNKSTS) ? LINK_UP : LINK_DOWN;
if (newlinkstate & LINK_UP &&
dev->linkstate != newlinkstate) {
netif_start_queue(ndev);
netif_wake_queue(ndev);
printk(KERN_INFO "%s: link now %s mbps, %s duplex and up.\n",
ndev->name,
speeds[speed],
fullduplex ? "full" : "half");
} else if (newlinkstate & LINK_DOWN &&
dev->linkstate != newlinkstate) {
netif_stop_queue(ndev);
printk(KERN_INFO "%s: link now down.\n", ndev->name);
}
dev->linkstate = newlinkstate;
}
static int ns83820_setup_rx(struct net_device *ndev)
{
struct ns83820 *dev = PRIV(ndev);
unsigned i;
int ret;
dprintk("ns83820_setup_rx(%p)\n", ndev);
dev->rx_info.idle = 1;
dev->rx_info.next_rx = 0;
dev->rx_info.next_rx_desc = dev->rx_info.descs;
dev->rx_info.next_empty = 0;
for (i=0; i<NR_RX_DESC; i++)
clear_rx_desc(dev, i);
writel(0, dev->base + RXDP_HI);
writel(dev->rx_info.phy_descs, dev->base + RXDP);
ret = rx_refill(ndev, GFP_KERNEL);
if (!ret) {
dprintk("starting receiver\n");
/* prevent the interrupt handler from stomping on us */
spin_lock_irq(&dev->rx_info.lock);
writel(0x0001, dev->base + CCSR);
writel(0, dev->base + RFCR);
writel(0x7fc00000, dev->base + RFCR);
writel(0xffc00000, dev->base + RFCR);
dev->rx_info.up = 1;
phy_intr(ndev);
/* Okay, let it rip */
spin_lock_irq(&dev->misc_lock);
dev->IMR_cache |= ISR_PHY;
dev->IMR_cache |= ISR_RXRCMP;
//dev->IMR_cache |= ISR_RXERR;
//dev->IMR_cache |= ISR_RXOK;
dev->IMR_cache |= ISR_RXORN;
dev->IMR_cache |= ISR_RXSOVR;
dev->IMR_cache |= ISR_RXDESC;
dev->IMR_cache |= ISR_RXIDLE;
dev->IMR_cache |= ISR_TXDESC;
dev->IMR_cache |= ISR_TXIDLE;
writel(dev->IMR_cache, dev->base + IMR);
writel(1, dev->base + IER);
spin_unlock(&dev->misc_lock);
kick_rx(ndev);
spin_unlock_irq(&dev->rx_info.lock);
}
return ret;
}
static void ns83820_cleanup_rx(struct ns83820 *dev)
{
unsigned i;
unsigned long flags;
dprintk("ns83820_cleanup_rx(%p)\n", dev);
/* disable receive interrupts */
spin_lock_irqsave(&dev->misc_lock, flags);
dev->IMR_cache &= ~(ISR_RXOK | ISR_RXDESC | ISR_RXERR | ISR_RXEARLY | ISR_RXIDLE);
writel(dev->IMR_cache, dev->base + IMR);
spin_unlock_irqrestore(&dev->misc_lock, flags);
/* synchronize with the interrupt handler and kill it */
dev->rx_info.up = 0;
synchronize_irq(dev->pci_dev->irq);
/* touch the pci bus... */
readl(dev->base + IMR);
/* assumes the transmitter is already disabled and reset */
writel(0, dev->base + RXDP_HI);
writel(0, dev->base + RXDP);
for (i=0; i<NR_RX_DESC; i++) {
struct sk_buff *skb = dev->rx_info.skbs[i];
dev->rx_info.skbs[i] = NULL;
clear_rx_desc(dev, i);
kfree_skb(skb);
}
}
static void ns83820_rx_kick(struct net_device *ndev)
{
struct ns83820 *dev = PRIV(ndev);
/*if (nr_rx_empty(dev) >= NR_RX_DESC/4)*/ {
if (dev->rx_info.up) {
rx_refill_atomic(ndev);
kick_rx(ndev);
}
}
if (dev->rx_info.up && nr_rx_empty(dev) > NR_RX_DESC*3/4)
schedule_work(&dev->tq_refill);
else
kick_rx(ndev);
if (dev->rx_info.idle)
printk(KERN_DEBUG "%s: BAD\n", ndev->name);
}
/* rx_irq
*
*/
static void rx_irq(struct net_device *ndev)
{
struct ns83820 *dev = PRIV(ndev);
struct rx_info *info = &dev->rx_info;
unsigned next_rx;
int rx_rc, len;
u32 cmdsts;
__le32 *desc;
unsigned long flags;
int nr = 0;
dprintk("rx_irq(%p)\n", ndev);
dprintk("rxdp: %08x, descs: %08lx next_rx[%d]: %p next_empty[%d]: %p\n",
readl(dev->base + RXDP),
(long)(dev->rx_info.phy_descs),
(int)dev->rx_info.next_rx,
(dev->rx_info.descs + (DESC_SIZE * dev->rx_info.next_rx)),
(int)dev->rx_info.next_empty,
(dev->rx_info.descs + (DESC_SIZE * dev->rx_info.next_empty))
);
spin_lock_irqsave(&info->lock, flags);
if (!info->up)
goto out;
dprintk("walking descs\n");
next_rx = info->next_rx;
desc = info->next_rx_desc;
while ((CMDSTS_OWN & (cmdsts = le32_to_cpu(desc[DESC_CMDSTS]))) &&
(cmdsts != CMDSTS_OWN)) {
struct sk_buff *skb;
u32 extsts = le32_to_cpu(desc[DESC_EXTSTS]);
dma_addr_t bufptr = desc_addr_get(desc + DESC_BUFPTR);
dprintk("cmdsts: %08x\n", cmdsts);
dprintk("link: %08x\n", cpu_to_le32(desc[DESC_LINK]));
dprintk("extsts: %08x\n", extsts);
skb = info->skbs[next_rx];
info->skbs[next_rx] = NULL;
info->next_rx = (next_rx + 1) % NR_RX_DESC;
mb();
clear_rx_desc(dev, next_rx);
pci_unmap_single(dev->pci_dev, bufptr,
RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
len = cmdsts & CMDSTS_LEN_MASK;
#ifdef NS83820_VLAN_ACCEL_SUPPORT
/* NH: As was mentioned below, this chip is kinda
* brain dead about vlan tag stripping. Frames
* that are 64 bytes with a vlan header appended
* like arp frames, or pings, are flagged as Runts
 * when the tag is stripped by the hardware. This
* also means that the OK bit in the descriptor
* is cleared when the frame comes in so we have
* to do a specific length check here to make sure
* the frame would have been ok, had we not stripped
* the tag.
*/
if (likely((CMDSTS_OK & cmdsts) ||
((cmdsts & CMDSTS_RUNT) && len >= 56))) {
#else
if (likely(CMDSTS_OK & cmdsts)) {
#endif
skb_put(skb, len);
if (unlikely(!skb))
goto netdev_mangle_me_harder_failed;
if (cmdsts & CMDSTS_DEST_MULTI)
dev->stats.multicast ++;
dev->stats.rx_packets ++;
dev->stats.rx_bytes += len;
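/* 0x002a0000 is EXTSTS_UDPPKT | EXTSTS_TCPPKT | EXTSTS_IPPKT; the
 * 0x00540000 mask appears to be the matching per-protocol checksum
 * error bits (each one bit above its *PKT flag), so this reads as
 * "recognised protocol and no checksum error reported". */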
if ((extsts & 0x002a0000) && !(extsts & 0x00540000)) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
} else {
skb->ip_summed = CHECKSUM_NONE;
}
skb->protocol = eth_type_trans(skb, ndev);
#ifdef NS83820_VLAN_ACCEL_SUPPORT
if(extsts & EXTSTS_VPKT) {
unsigned short tag;
tag = ntohs(extsts & EXTSTS_VTG_MASK);
rx_rc = vlan_hwaccel_rx(skb,dev->vlgrp,tag);
} else {
rx_rc = netif_rx(skb);
}
#else
rx_rc = netif_rx(skb);
#endif
if (NET_RX_DROP == rx_rc) {
netdev_mangle_me_harder_failed:
dev->stats.rx_dropped ++;
}
} else {
kfree_skb(skb);
}
nr++;
next_rx = info->next_rx;
desc = info->descs + (DESC_SIZE * next_rx);
}
info->next_rx = next_rx;
info->next_rx_desc = info->descs + (DESC_SIZE * next_rx);
out:
if (0 && !nr) {
Dprintk("dazed: cmdsts_f: %08x\n", cmdsts);
}
spin_unlock_irqrestore(&info->lock, flags);
}
static void rx_action(unsigned long _dev)
{
struct net_device *ndev = (void *)_dev;
struct ns83820 *dev = PRIV(ndev);
rx_irq(ndev);
writel(ihr, dev->base + IHR);
spin_lock_irq(&dev->misc_lock);
dev->IMR_cache |= ISR_RXDESC;
writel(dev->IMR_cache, dev->base + IMR);
spin_unlock_irq(&dev->misc_lock);
rx_irq(ndev);
ns83820_rx_kick(ndev);
}
/* Packet Transmit code
*/
static inline void kick_tx(struct ns83820 *dev)
{
dprintk("kick_tx(%p): tx_idx=%d free_idx=%d\n",
dev, dev->tx_idx, dev->tx_free_idx);
writel(CR_TXE, dev->base + CR);
}
/* No spinlock needed on the transmit irq path as the interrupt handler is
* serialized.
*/
static void do_tx_done(struct net_device *ndev)
{
struct ns83820 *dev = PRIV(ndev);
u32 cmdsts, tx_done_idx;
__le32 *desc;
dprintk("do_tx_done(%p)\n", ndev);
tx_done_idx = dev->tx_done_idx;
desc = dev->tx_descs + (tx_done_idx * DESC_SIZE);
dprintk("tx_done_idx=%d free_idx=%d cmdsts=%08x\n",
tx_done_idx, dev->tx_free_idx, le32_to_cpu(desc[DESC_CMDSTS]));
while ((tx_done_idx != dev->tx_free_idx) &&
!(CMDSTS_OWN & (cmdsts = le32_to_cpu(desc[DESC_CMDSTS]))) ) {
struct sk_buff *skb;
unsigned len;
dma_addr_t addr;
if (cmdsts & CMDSTS_ERR)
dev->stats.tx_errors ++;
if (cmdsts & CMDSTS_OK)
dev->stats.tx_packets ++;
if (cmdsts & CMDSTS_OK)
dev->stats.tx_bytes += cmdsts & 0xffff;
dprintk("tx_done_idx=%d free_idx=%d cmdsts=%08x\n",
tx_done_idx, dev->tx_free_idx, cmdsts);
skb = dev->tx_skbs[tx_done_idx];
dev->tx_skbs[tx_done_idx] = NULL;
dprintk("done(%p)\n", skb);
len = cmdsts & CMDSTS_LEN_MASK;
addr = desc_addr_get(desc + DESC_BUFPTR);
if (skb) {
pci_unmap_single(dev->pci_dev,
addr,
len,
PCI_DMA_TODEVICE);
dev_kfree_skb_irq(skb);
atomic_dec(&dev->nr_tx_skbs);
} else
pci_unmap_page(dev->pci_dev,
addr,
len,
PCI_DMA_TODEVICE);
tx_done_idx = (tx_done_idx + 1) % NR_TX_DESC;
dev->tx_done_idx = tx_done_idx;
desc[DESC_CMDSTS] = cpu_to_le32(0);
mb();
desc = dev->tx_descs + (tx_done_idx * DESC_SIZE);
}
/* Allow network stack to resume queueing packets after we've
* finished transmitting at least 1/4 of the packets in the queue.
*/
if (netif_queue_stopped(ndev) && start_tx_okay(dev)) {
dprintk("start_queue(%p)\n", ndev);
netif_start_queue(ndev);
netif_wake_queue(ndev);
}
}
static void ns83820_cleanup_tx(struct ns83820 *dev)
{
unsigned i;
for (i=0; i<NR_TX_DESC; i++) {
struct sk_buff *skb = dev->tx_skbs[i];
dev->tx_skbs[i] = NULL;
if (skb) {
__le32 *desc = dev->tx_descs + (i * DESC_SIZE);
pci_unmap_single(dev->pci_dev,
desc_addr_get(desc + DESC_BUFPTR),
le32_to_cpu(desc[DESC_CMDSTS]) & CMDSTS_LEN_MASK,
PCI_DMA_TODEVICE);
dev_kfree_skb_irq(skb);
atomic_dec(&dev->nr_tx_skbs);
}
}
memset(dev->tx_descs, 0, NR_TX_DESC * DESC_SIZE * 4);
}
/* transmit routine. This code relies on the network layer serializing
* its calls in, but will run happily in parallel with the interrupt
* handler. This code currently has provisions for fragmenting tx buffers
* while trying to track down a bug in either the zero copy code or
* the tx fifo (hence the MAX_FRAG_LEN).
*/
static netdev_tx_t ns83820_hard_start_xmit(struct sk_buff *skb,
struct net_device *ndev)
{
struct ns83820 *dev = PRIV(ndev);
u32 free_idx, cmdsts, extsts;
int nr_free, nr_frags;
unsigned tx_done_idx, last_idx;
dma_addr_t buf;
unsigned len;
skb_frag_t *frag;
int stopped = 0;
int do_intr = 0;
volatile __le32 *first_desc;
dprintk("ns83820_hard_start_xmit\n");
nr_frags = skb_shinfo(skb)->nr_frags;
again:
if (unlikely(dev->CFG_cache & CFG_LNKSTS)) {
netif_stop_queue(ndev);
if (unlikely(dev->CFG_cache & CFG_LNKSTS))
return NETDEV_TX_BUSY;
netif_start_queue(ndev);
}
last_idx = free_idx = dev->tx_free_idx;
tx_done_idx = dev->tx_done_idx;
nr_free = (tx_done_idx + NR_TX_DESC-2 - free_idx) % NR_TX_DESC;
nr_free -= 1;
if (nr_free <= nr_frags) {
dprintk("stop_queue - not enough(%p)\n", ndev);
netif_stop_queue(ndev);
/* Check again: we may have raced with a tx done irq */
if (dev->tx_done_idx != tx_done_idx) {
dprintk("restart queue(%p)\n", ndev);
netif_start_queue(ndev);
goto again;
}
return NETDEV_TX_BUSY;
}
if (free_idx == dev->tx_intr_idx) {
do_intr = 1;
dev->tx_intr_idx = (dev->tx_intr_idx + NR_TX_DESC/4) % NR_TX_DESC;
}
nr_free -= nr_frags;
if (nr_free < MIN_TX_DESC_FREE) {
dprintk("stop_queue - last entry(%p)\n", ndev);
netif_stop_queue(ndev);
stopped = 1;
}
frag = skb_shinfo(skb)->frags;
if (!nr_frags)
frag = NULL;
extsts = 0;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
extsts |= EXTSTS_IPPKT;
if (IPPROTO_TCP == ip_hdr(skb)->protocol)
extsts |= EXTSTS_TCPPKT;
else if (IPPROTO_UDP == ip_hdr(skb)->protocol)
extsts |= EXTSTS_UDPPKT;
}
#ifdef NS83820_VLAN_ACCEL_SUPPORT
if(vlan_tx_tag_present(skb)) {
/* fetch the vlan tag info out of the
 * ancillary data if the vlan code
* is using hw vlan acceleration
*/
short tag = vlan_tx_tag_get(skb);
extsts |= (EXTSTS_VPKT | htons(tag));
}
#endif
len = skb->len;
if (nr_frags)
len -= skb->data_len;
buf = pci_map_single(dev->pci_dev, skb->data, len, PCI_DMA_TODEVICE);
first_desc = dev->tx_descs + (free_idx * DESC_SIZE);
for (;;) {
volatile __le32 *desc = dev->tx_descs + (free_idx * DESC_SIZE);
dprintk("frag[%3u]: %4u @ 0x%08Lx\n", free_idx, len,
(unsigned long long)buf);
last_idx = free_idx;
free_idx = (free_idx + 1) % NR_TX_DESC;
desc[DESC_LINK] = cpu_to_le32(dev->tx_phy_descs + (free_idx * DESC_SIZE * 4));
desc_addr_set(desc + DESC_BUFPTR, buf);
desc[DESC_EXTSTS] = cpu_to_le32(extsts);
cmdsts = ((nr_frags) ? CMDSTS_MORE : do_intr ? CMDSTS_INTR : 0);
cmdsts |= (desc == first_desc) ? 0 : CMDSTS_OWN;
cmdsts |= len;
desc[DESC_CMDSTS] = cpu_to_le32(cmdsts);
if (!nr_frags)
break;
buf = pci_map_page(dev->pci_dev, frag->page,
frag->page_offset,
frag->size, PCI_DMA_TODEVICE);
dprintk("frag: buf=%08Lx page=%08lx offset=%08lx\n",
(long long)buf, (long) page_to_pfn(frag->page),
frag->page_offset);
len = frag->size;
frag++;
nr_frags--;
}
dprintk("done pkt\n");
spin_lock_irq(&dev->tx_lock);
dev->tx_skbs[last_idx] = skb;
first_desc[DESC_CMDSTS] |= cpu_to_le32(CMDSTS_OWN);
dev->tx_free_idx = free_idx;
atomic_inc(&dev->nr_tx_skbs);
spin_unlock_irq(&dev->tx_lock);
kick_tx(dev);
/* Check again: we may have raced with a tx done irq */
if (stopped && (dev->tx_done_idx != tx_done_idx) && start_tx_okay(dev))
netif_start_queue(ndev);
return NETDEV_TX_OK;
}
static void ns83820_update_stats(struct ns83820 *dev)
{
u8 __iomem *base = dev->base;
/* the DP83820 will freeze counters, so we need to read all of them */
dev->stats.rx_errors += readl(base + 0x60) & 0xffff;
dev->stats.rx_crc_errors += readl(base + 0x64) & 0xffff;
dev->stats.rx_missed_errors += readl(base + 0x68) & 0xffff;
dev->stats.rx_frame_errors += readl(base + 0x6c) & 0xffff;
/*dev->stats.rx_symbol_errors +=*/ readl(base + 0x70);
dev->stats.rx_length_errors += readl(base + 0x74) & 0xffff;
dev->stats.rx_length_errors += readl(base + 0x78) & 0xffff;
/*dev->stats.rx_badopcode_errors += */ readl(base + 0x7c);
/*dev->stats.rx_pause_count += */ readl(base + 0x80);
/*dev->stats.tx_pause_count += */ readl(base + 0x84);
dev->stats.tx_carrier_errors += readl(base + 0x88) & 0xff;
}
static struct net_device_stats *ns83820_get_stats(struct net_device *ndev)
{
struct ns83820 *dev = PRIV(ndev);
/* somewhat overkill */
spin_lock_irq(&dev->misc_lock);
ns83820_update_stats(dev);
spin_unlock_irq(&dev->misc_lock);
return &dev->stats;
}
/* Let ethtool retrieve info */
static int ns83820_get_settings(struct net_device *ndev,
struct ethtool_cmd *cmd)
{
struct ns83820 *dev = PRIV(ndev);
u32 cfg, tanar, tbicr;
int have_optical = 0;
int fullduplex = 0;
/*
* Here's the list of available ethtool commands from other drivers:
* cmd->advertising =
* cmd->speed =
* cmd->duplex =
* cmd->port = 0;
* cmd->phy_address =
* cmd->transceiver = 0;
* cmd->autoneg =
* cmd->maxtxpkt = 0;
* cmd->maxrxpkt = 0;
*/
/* read current configuration */
cfg = readl(dev->base + CFG) ^ SPDSTS_POLARITY;
tanar = readl(dev->base + TANAR);
tbicr = readl(dev->base + TBICR);
if (dev->CFG_cache & CFG_TBI_EN) {
/* we have an optical interface */
have_optical = 1;
fullduplex = (cfg & CFG_DUPSTS) ? 1 : 0;
} else {
/* We have copper */
fullduplex = (cfg & CFG_DUPSTS) ? 1 : 0;
}
cmd->supported = SUPPORTED_Autoneg;
/* we have optical interface */
if (dev->CFG_cache & CFG_TBI_EN) {
cmd->supported |= SUPPORTED_1000baseT_Half |
SUPPORTED_1000baseT_Full |
SUPPORTED_FIBRE;
cmd->port = PORT_FIBRE;
} /* TODO: else copper related support */
cmd->duplex = fullduplex ? DUPLEX_FULL : DUPLEX_HALF;
switch (cfg / CFG_SPDSTS0 & 3) {
case 2:
cmd->speed = SPEED_1000;
break;
case 1:
cmd->speed = SPEED_100;
break;
default:
cmd->speed = SPEED_10;
break;
}
cmd->autoneg = (tbicr & TBICR_MR_AN_ENABLE) ? 1: 0;
return 0;
}
/* Let ethtool change settings */
static int ns83820_set_settings(struct net_device *ndev,
struct ethtool_cmd *cmd)
{
struct ns83820 *dev = PRIV(ndev);
u32 cfg, tanar;
int have_optical = 0;
int fullduplex = 0;
/* read current configuration */
cfg = readl(dev->base + CFG) ^ SPDSTS_POLARITY;
tanar = readl(dev->base + TANAR);
if (dev->CFG_cache & CFG_TBI_EN) {
/* we have optical */
have_optical = 1;
fullduplex = (tanar & TANAR_FULL_DUP);
} else {
/* we have copper */
fullduplex = cfg & CFG_DUPSTS;
}
spin_lock_irq(&dev->misc_lock);
spin_lock(&dev->tx_lock);
/* Set duplex */
if (cmd->duplex != fullduplex) {
if (have_optical) {
/*set full duplex*/
if (cmd->duplex == DUPLEX_FULL) {
/* force full duplex */
writel(readl(dev->base + TXCFG)
| TXCFG_CSI | TXCFG_HBI | TXCFG_ATP,
dev->base + TXCFG);
writel(readl(dev->base + RXCFG) | RXCFG_RX_FD,
dev->base + RXCFG);
/* Light up full duplex LED */
writel(readl(dev->base + GPIOR) | GPIOR_GP1_OUT,
dev->base + GPIOR);
} else {
/*TODO: set half duplex */
}
} else {
/*we have copper*/
/* TODO: Set duplex for copper cards */
}
printk(KERN_INFO "%s: Duplex set via ethtool\n",
ndev->name);
}
/* Set autonegotiation */
if (1) {
if (cmd->autoneg == AUTONEG_ENABLE) {
/* restart auto negotiation */
writel(TBICR_MR_AN_ENABLE | TBICR_MR_RESTART_AN,
dev->base + TBICR);
writel(TBICR_MR_AN_ENABLE, dev->base + TBICR);
dev->linkstate = LINK_AUTONEGOTIATE;
printk(KERN_INFO "%s: autoneg enabled via ethtool\n",
ndev->name);
} else {
/* disable auto negotiation */
writel(0x00000000, dev->base + TBICR);
}
printk(KERN_INFO "%s: autoneg %s via ethtool\n", ndev->name,
cmd->autoneg ? "ENABLED" : "DISABLED");
}
phy_intr(ndev);
spin_unlock(&dev->tx_lock);
spin_unlock_irq(&dev->misc_lock);
return 0;
}
/* end ethtool get/set support -df */
static void ns83820_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info)
{
struct ns83820 *dev = PRIV(ndev);
strcpy(info->driver, "ns83820");
strcpy(info->version, VERSION);
strcpy(info->bus_info, pci_name(dev->pci_dev));
}
static u32 ns83820_get_link(struct net_device *ndev)
{
struct ns83820 *dev = PRIV(ndev);
u32 cfg = readl(dev->base + CFG) ^ SPDSTS_POLARITY;
return cfg & CFG_LNKSTS ? 1 : 0;
}
static const struct ethtool_ops ops = {
.get_settings = ns83820_get_settings,
.set_settings = ns83820_set_settings,
.get_drvinfo = ns83820_get_drvinfo,
.get_link = ns83820_get_link
};
/* this function is called in irq context from the ISR */
static void ns83820_mib_isr(struct ns83820 *dev)
{
unsigned long flags;
spin_lock_irqsave(&dev->misc_lock, flags);
ns83820_update_stats(dev);
spin_unlock_irqrestore(&dev->misc_lock, flags);
}
static void ns83820_do_isr(struct net_device *ndev, u32 isr);
static irqreturn_t ns83820_irq(int foo, void *data)
{
struct net_device *ndev = data;
struct ns83820 *dev = PRIV(ndev);
u32 isr;
dprintk("ns83820_irq(%p)\n", ndev);
dev->ihr = 0;
isr = readl(dev->base + ISR);
dprintk("irq: %08x\n", isr);
ns83820_do_isr(ndev, isr);
return IRQ_HANDLED;
}
static void ns83820_do_isr(struct net_device *ndev, u32 isr)
{
struct ns83820 *dev = PRIV(ndev);
unsigned long flags;
#ifdef DEBUG
if (isr & ~(ISR_PHY | ISR_RXDESC | ISR_RXEARLY | ISR_RXOK | ISR_RXERR | ISR_TXIDLE | ISR_TXOK | ISR_TXDESC))
Dprintk("odd isr? 0x%08x\n", isr);
#endif
if (ISR_RXIDLE & isr) {
dev->rx_info.idle = 1;
Dprintk("oh dear, we are idle\n");
ns83820_rx_kick(ndev);
}
if ((ISR_RXDESC | ISR_RXOK) & isr) {
prefetch(dev->rx_info.next_rx_desc);
spin_lock_irqsave(&dev->misc_lock, flags);
dev->IMR_cache &= ~(ISR_RXDESC | ISR_RXOK);
writel(dev->IMR_cache, dev->base + IMR);
spin_unlock_irqrestore(&dev->misc_lock, flags);
tasklet_schedule(&dev->rx_tasklet);
//rx_irq(ndev);
//writel(4, dev->base + IHR);
}
if ((ISR_RXIDLE | ISR_RXORN | ISR_RXDESC | ISR_RXOK | ISR_RXERR) & isr)
ns83820_rx_kick(ndev);
if (unlikely(ISR_RXSOVR & isr)) {
//printk("overrun: rxsovr\n");
dev->stats.rx_fifo_errors ++;
}
if (unlikely(ISR_RXORN & isr)) {
//printk("overrun: rxorn\n");
dev->stats.rx_fifo_errors ++;
}
if ((ISR_RXRCMP & isr) && dev->rx_info.up)
writel(CR_RXE, dev->base + CR);
if (ISR_TXIDLE & isr) {
u32 txdp;
txdp = readl(dev->base + TXDP);
dprintk("txdp: %08x\n", txdp);
txdp -= dev->tx_phy_descs;
dev->tx_idx = txdp / (DESC_SIZE * 4);
if (dev->tx_idx >= NR_TX_DESC) {
printk(KERN_ALERT "%s: BUG -- txdp out of range\n", ndev->name);
dev->tx_idx = 0;
}
/* There may have been a race between a pci originated read
* and the descriptor update from the cpu. Just in case,
* kick the transmitter if the hardware thinks it is on a
* different descriptor than we are.
*/
if (dev->tx_idx != dev->tx_free_idx)
kick_tx(dev);
}
/* Defer tx ring processing until more than a minimum amount of
* work has accumulated
*/
if ((ISR_TXDESC | ISR_TXIDLE | ISR_TXOK | ISR_TXERR) & isr) {
spin_lock_irqsave(&dev->tx_lock, flags);
do_tx_done(ndev);
spin_unlock_irqrestore(&dev->tx_lock, flags);
/* Disable TxOk if there are no outstanding tx packets.
*/
if ((dev->tx_done_idx == dev->tx_free_idx) &&
(dev->IMR_cache & ISR_TXOK)) {
spin_lock_irqsave(&dev->misc_lock, flags);
dev->IMR_cache &= ~ISR_TXOK;
writel(dev->IMR_cache, dev->base + IMR);
spin_unlock_irqrestore(&dev->misc_lock, flags);
}
}
/* The TxIdle interrupt can come in before the transmit has
* completed. Normally we reap packets off of the combination
* of TxDesc and TxIdle and leave TxOk disabled (since it
* occurs on every packet), but when no further irqs of this
* nature are expected, we must enable TxOk.
*/
if ((ISR_TXIDLE & isr) && (dev->tx_done_idx != dev->tx_free_idx)) {
spin_lock_irqsave(&dev->misc_lock, flags);
dev->IMR_cache |= ISR_TXOK;
writel(dev->IMR_cache, dev->base + IMR);
spin_unlock_irqrestore(&dev->misc_lock, flags);
}
/* MIB interrupt: one of the statistics counters is about to overflow */
if (unlikely(ISR_MIB & isr))
ns83820_mib_isr(dev);
/* PHY: Link up/down/negotiation state change */
if (unlikely(ISR_PHY & isr))
phy_intr(ndev);
#if 0 /* Still working on the interrupt mitigation strategy */
if (dev->ihr)
writel(dev->ihr, dev->base + IHR);
#endif
}
static void ns83820_do_reset(struct ns83820 *dev, u32 which)
{
Dprintk("resetting chip...\n");
writel(which, dev->base + CR);
do {
schedule();
} while (readl(dev->base + CR) & which);
Dprintk("okay!\n");
}
static int ns83820_stop(struct net_device *ndev)
{
struct ns83820 *dev = PRIV(ndev);
/* FIXME: protect against interrupt handler? */
del_timer_sync(&dev->tx_watchdog);
/* disable interrupts */
writel(0, dev->base + IMR);
writel(0, dev->base + IER);
readl(dev->base + IER);
dev->rx_info.up = 0;
synchronize_irq(dev->pci_dev->irq);
ns83820_do_reset(dev, CR_RST);
synchronize_irq(dev->pci_dev->irq);
spin_lock_irq(&dev->misc_lock);
dev->IMR_cache &= ~(ISR_TXURN | ISR_TXIDLE | ISR_TXERR | ISR_TXDESC | ISR_TXOK);
spin_unlock_irq(&dev->misc_lock);
ns83820_cleanup_rx(dev);
ns83820_cleanup_tx(dev);
return 0;
}
static void ns83820_tx_timeout(struct net_device *ndev)
{
struct ns83820 *dev = PRIV(ndev);
u32 tx_done_idx;
__le32 *desc;
unsigned long flags;
spin_lock_irqsave(&dev->tx_lock, flags);
tx_done_idx = dev->tx_done_idx;
desc = dev->tx_descs + (tx_done_idx * DESC_SIZE);
printk(KERN_INFO "%s: tx_timeout: tx_done_idx=%d free_idx=%d cmdsts=%08x\n",
ndev->name,
tx_done_idx, dev->tx_free_idx, le32_to_cpu(desc[DESC_CMDSTS]));
#if defined(DEBUG)
{
u32 isr;
isr = readl(dev->base + ISR);
printk("irq: %08x imr: %08x\n", isr, dev->IMR_cache);
ns83820_do_isr(ndev, isr);
}
#endif
do_tx_done(ndev);
tx_done_idx = dev->tx_done_idx;
desc = dev->tx_descs + (tx_done_idx * DESC_SIZE);
printk(KERN_INFO "%s: after: tx_done_idx=%d free_idx=%d cmdsts=%08x\n",
ndev->name,
tx_done_idx, dev->tx_free_idx, le32_to_cpu(desc[DESC_CMDSTS]));
spin_unlock_irqrestore(&dev->tx_lock, flags);
}
static void ns83820_tx_watch(unsigned long data)
{
struct net_device *ndev = (void *)data;
struct ns83820 *dev = PRIV(ndev);
#if defined(DEBUG)
printk("ns83820_tx_watch: %u %u %d\n",
dev->tx_done_idx, dev->tx_free_idx, atomic_read(&dev->nr_tx_skbs)
);
#endif
if (time_after(jiffies, dev_trans_start(ndev) + 1*HZ) &&
dev->tx_done_idx != dev->tx_free_idx) {
printk(KERN_DEBUG "%s: ns83820_tx_watch: %u %u %d\n",
ndev->name,
dev->tx_done_idx, dev->tx_free_idx,
atomic_read(&dev->nr_tx_skbs));
ns83820_tx_timeout(ndev);
}
mod_timer(&dev->tx_watchdog, jiffies + 2*HZ);
}
static int ns83820_open(struct net_device *ndev)
{
struct ns83820 *dev = PRIV(ndev);
unsigned i;
u32 desc;
int ret;
dprintk("ns83820_open\n");
writel(0, dev->base + PQCR);
ret = ns83820_setup_rx(ndev);
if (ret)
goto failed;
memset(dev->tx_descs, 0, 4 * NR_TX_DESC * DESC_SIZE);
for (i=0; i<NR_TX_DESC; i++) {
dev->tx_descs[(i * DESC_SIZE) + DESC_LINK]
= cpu_to_le32(
dev->tx_phy_descs
+ ((i+1) % NR_TX_DESC) * DESC_SIZE * 4);
}
dev->tx_idx = 0;
dev->tx_done_idx = 0;
desc = dev->tx_phy_descs;
writel(0, dev->base + TXDP_HI);
writel(desc, dev->base + TXDP);
init_timer(&dev->tx_watchdog);
dev->tx_watchdog.data = (unsigned long)ndev;
dev->tx_watchdog.function = ns83820_tx_watch;
mod_timer(&dev->tx_watchdog, jiffies + 2*HZ);
netif_start_queue(ndev); /* FIXME: wait for phy to come up */
return 0;
failed:
ns83820_stop(ndev);
return ret;
}
static void ns83820_getmac(struct ns83820 *dev, u8 *mac)
{
unsigned i;
for (i=0; i<3; i++) {
u32 data;
/* Read from the perfect match memory: this is loaded by
* the chip from the EEPROM via the EELOAD self test.
*/
writel(i*2, dev->base + RFCR);
data = readl(dev->base + RFDR);
*mac++ = data;
*mac++ = data >> 8;
}
}
static int ns83820_change_mtu(struct net_device *ndev, int new_mtu)
{
if (new_mtu > RX_BUF_SIZE)
return -EINVAL;
ndev->mtu = new_mtu;
return 0;
}
static void ns83820_set_multicast(struct net_device *ndev)
{
struct ns83820 *dev = PRIV(ndev);
u8 __iomem *rfcr = dev->base + RFCR;
u32 and_mask = 0xffffffff;
u32 or_mask = 0;
u32 val;
if (ndev->flags & IFF_PROMISC)
or_mask |= RFCR_AAU | RFCR_AAM;
else
and_mask &= ~(RFCR_AAU | RFCR_AAM);
if (ndev->flags & IFF_ALLMULTI || ndev->mc_count)
or_mask |= RFCR_AAM;
else
and_mask &= ~RFCR_AAM;
spin_lock_irq(&dev->misc_lock);
val = (readl(rfcr) & and_mask) | or_mask;
/* Ramit : RFCR Write Fix doc says RFEN must be 0 to modify other bits */
writel(val & ~RFCR_RFEN, rfcr);
writel(val, rfcr);
spin_unlock_irq(&dev->misc_lock);
}
static void ns83820_run_bist(struct net_device *ndev, const char *name, u32 enable, u32 done, u32 fail)
{
struct ns83820 *dev = PRIV(ndev);
int timed_out = 0;
unsigned long start;
u32 status;
int loops = 0;
dprintk("%s: start %s\n", ndev->name, name);
start = jiffies;
writel(enable, dev->base + PTSCR);
for (;;) {
loops++;
status = readl(dev->base + PTSCR);
if (!(status & enable))
break;
if (status & done)
break;
if (status & fail)
break;
if (time_after_eq(jiffies, start + HZ)) {
timed_out = 1;
break;
}
schedule_timeout_uninterruptible(1);
}
if (status & fail)
printk(KERN_INFO "%s: %s failed! (0x%08x & 0x%08x)\n",
ndev->name, name, status, fail);
else if (timed_out)
printk(KERN_INFO "%s: run_bist %s timed out! (%08x)\n",
ndev->name, name, status);
dprintk("%s: done %s in %d loops\n", ndev->name, name, loops);
}
#ifdef PHY_CODE_IS_FINISHED
static void ns83820_mii_write_bit(struct ns83820 *dev, int bit)
{
/* drive MDC low */
dev->MEAR_cache &= ~MEAR_MDC;
writel(dev->MEAR_cache, dev->base + MEAR);
readl(dev->base + MEAR);
/* enable output, set bit */
dev->MEAR_cache |= MEAR_MDDIR;
if (bit)
dev->MEAR_cache |= MEAR_MDIO;
else
dev->MEAR_cache &= ~MEAR_MDIO;
/* set the output bit */
writel(dev->MEAR_cache, dev->base + MEAR);
readl(dev->base + MEAR);
/* Wait. Max clock rate is 2.5MHz, this way we come in under 1MHz */
udelay(1);
/* drive MDC high causing the data bit to be latched */
dev->MEAR_cache |= MEAR_MDC;
writel(dev->MEAR_cache, dev->base + MEAR);
readl(dev->base + MEAR);
/* Wait again... */
udelay(1);
}
static int ns83820_mii_read_bit(struct ns83820 *dev)
{
int bit;
/* drive MDC low, disable output */
dev->MEAR_cache &= ~MEAR_MDC;
dev->MEAR_cache &= ~MEAR_MDDIR;
writel(dev->MEAR_cache, dev->base + MEAR);
readl(dev->base + MEAR);
/* Wait. Max clock rate is 2.5MHz, this way we come in under 1MHz */
udelay(1);
/* drive MDC high causing the data bit to be latched */
bit = (readl(dev->base + MEAR) & MEAR_MDIO) ? 1 : 0;
dev->MEAR_cache |= MEAR_MDC;
writel(dev->MEAR_cache, dev->base + MEAR);
/* Wait again... */
udelay(1);
return bit;
}
static unsigned ns83820_mii_read_reg(struct ns83820 *dev, unsigned phy, unsigned reg)
{
unsigned data = 0;
int i;
/* read some garbage so that we eventually sync up */
for (i=0; i<64; i++)
ns83820_mii_read_bit(dev);
ns83820_mii_write_bit(dev, 0); /* start */
ns83820_mii_write_bit(dev, 1);
ns83820_mii_write_bit(dev, 1); /* opcode read */
ns83820_mii_write_bit(dev, 0);
/* write out the phy address: 5 bits, msb first */
for (i=0; i<5; i++)
ns83820_mii_write_bit(dev, phy & (0x10 >> i));
/* write out the register address, 5 bits, msb first */
for (i=0; i<5; i++)
ns83820_mii_write_bit(dev, reg & (0x10 >> i));
ns83820_mii_read_bit(dev); /* turn around cycles */
ns83820_mii_read_bit(dev);
/* read in the register data, 16 bits msb first */
for (i=0; i<16; i++) {
data <<= 1;
data |= ns83820_mii_read_bit(dev);
}
return data;
}
static unsigned ns83820_mii_write_reg(struct ns83820 *dev, unsigned phy, unsigned reg, unsigned data)
{
int i;
/* read some garbage so that we eventually sync up */
for (i=0; i<64; i++)
ns83820_mii_read_bit(dev);
ns83820_mii_write_bit(dev, 0); /* start */
ns83820_mii_write_bit(dev, 1);
ns83820_mii_write_bit(dev, 0); /* opcode write */
ns83820_mii_write_bit(dev, 1);
/* write out the phy address: 5 bits, msb first */
for (i=0; i<5; i++)
ns83820_mii_write_bit(dev, phy & (0x10 >> i));
/* write out the register address, 5 bits, msb first */
for (i=0; i<5; i++)
ns83820_mii_write_bit(dev, reg & (0x10 >> i));
ns83820_mii_read_bit(dev); /* turn around cycles */
ns83820_mii_read_bit(dev);
/* write out the register data, 16 bits msb first */
for (i=0; i<16; i++)
ns83820_mii_write_bit(dev, (data >> (15 - i)) & 1);
return data;
}
static void ns83820_probe_phy(struct net_device *ndev)
{
struct ns83820 *dev = PRIV(ndev);
static int first;
int i;
#define MII_PHYIDR1 0x02
#define MII_PHYIDR2 0x03
#if 0
if (!first) {
unsigned tmp;
ns83820_mii_read_reg(dev, 1, 0x09);
ns83820_mii_write_reg(dev, 1, 0x10, 0x0d3e);
tmp = ns83820_mii_read_reg(dev, 1, 0x00);
ns83820_mii_write_reg(dev, 1, 0x00, tmp | 0x8000);
udelay(1300);
ns83820_mii_read_reg(dev, 1, 0x09);
}
#endif
first = 1;
for (i=1; i<2; i++) {
int j;
unsigned a, b;
a = ns83820_mii_read_reg(dev, i, MII_PHYIDR1);
b = ns83820_mii_read_reg(dev, i, MII_PHYIDR2);
//printk("%s: phy %d: 0x%04x 0x%04x\n",
// ndev->name, i, a, b);
for (j=0; j<0x16; j+=4) {
dprintk("%s: [0x%02x] %04x %04x %04x %04x\n",
ndev->name, j,
ns83820_mii_read_reg(dev, i, 0 + j),
ns83820_mii_read_reg(dev, i, 1 + j),
ns83820_mii_read_reg(dev, i, 2 + j),
ns83820_mii_read_reg(dev, i, 3 + j)
);
}
}
{
unsigned a, b;
/* read firmware version: memory addr is 0x8402 and 0x8403 */
ns83820_mii_write_reg(dev, 1, 0x16, 0x000d);
ns83820_mii_write_reg(dev, 1, 0x1e, 0x810e);
a = ns83820_mii_read_reg(dev, 1, 0x1d);
ns83820_mii_write_reg(dev, 1, 0x16, 0x000d);
ns83820_mii_write_reg(dev, 1, 0x1e, 0x810e);
b = ns83820_mii_read_reg(dev, 1, 0x1d);
dprintk("version: 0x%04x 0x%04x\n", a, b);
}
}
#endif
static const struct net_device_ops netdev_ops = {
.ndo_open = ns83820_open,
.ndo_stop = ns83820_stop,
.ndo_start_xmit = ns83820_hard_start_xmit,
.ndo_get_stats = ns83820_get_stats,
.ndo_change_mtu = ns83820_change_mtu,
.ndo_set_multicast_list = ns83820_set_multicast,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_tx_timeout = ns83820_tx_timeout,
#ifdef NS83820_VLAN_ACCEL_SUPPORT
.ndo_vlan_rx_register = ns83820_vlan_rx_register,
#endif
};
static int __devinit ns83820_init_one(struct pci_dev *pci_dev,
const struct pci_device_id *id)
{
struct net_device *ndev;
struct ns83820 *dev;
long addr;
int err;
int using_dac = 0;
/* See if we can set the dma mask early on; failure is fatal. */
if (sizeof(dma_addr_t) == 8 &&
!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(64))) {
using_dac = 1;
} else if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32))) {
using_dac = 0;
} else {
dev_warn(&pci_dev->dev, "pci_set_dma_mask failed!\n");
return -ENODEV;
}
ndev = alloc_etherdev(sizeof(struct ns83820));
dev = PRIV(ndev);
err = -ENOMEM;
if (!dev)
goto out;
dev->ndev = ndev;
spin_lock_init(&dev->rx_info.lock);
spin_lock_init(&dev->tx_lock);
spin_lock_init(&dev->misc_lock);
dev->pci_dev = pci_dev;
SET_NETDEV_DEV(ndev, &pci_dev->dev);
INIT_WORK(&dev->tq_refill, queue_refill);
tasklet_init(&dev->rx_tasklet, rx_action, (unsigned long)ndev);
err = pci_enable_device(pci_dev);
if (err) {
dev_info(&pci_dev->dev, "pci_enable_dev failed: %d\n", err);
goto out_free;
}
pci_set_master(pci_dev);
addr = pci_resource_start(pci_dev, 1);
dev->base = ioremap_nocache(addr, PAGE_SIZE);
dev->tx_descs = pci_alloc_consistent(pci_dev,
4 * DESC_SIZE * NR_TX_DESC, &dev->tx_phy_descs);
dev->rx_info.descs = pci_alloc_consistent(pci_dev,
4 * DESC_SIZE * NR_RX_DESC, &dev->rx_info.phy_descs);
err = -ENOMEM;
if (!dev->base || !dev->tx_descs || !dev->rx_info.descs)
goto out_disable;
dprintk("%p: %08lx %p: %08lx\n",
dev->tx_descs, (long)dev->tx_phy_descs,
dev->rx_info.descs, (long)dev->rx_info.phy_descs);
/* disable interrupts */
writel(0, dev->base + IMR);
writel(0, dev->base + IER);
readl(dev->base + IER);
dev->IMR_cache = 0;
err = request_irq(pci_dev->irq, ns83820_irq, IRQF_SHARED,
DRV_NAME, ndev);
if (err) {
dev_info(&pci_dev->dev, "unable to register irq %d, err %d\n",
pci_dev->irq, err);
goto out_disable;
}
/*
* FIXME: we are holding rtnl_lock() over obscenely long area only
* because some of the setup code uses dev->name. It's Wrong(tm) -
* we should be using driver-specific names for all that stuff.
* For now that will do, but we really need to come back and kill
* most of the dev_alloc_name() users later.
*/
rtnl_lock();
err = dev_alloc_name(ndev, ndev->name);
if (err < 0) {
dev_info(&pci_dev->dev, "unable to get netdev name: %d\n", err);
goto out_free_irq;
}
printk("%s: ns83820.c: 0x22c: %08x, subsystem: %04x:%04x\n",
ndev->name, le32_to_cpu(readl(dev->base + 0x22c)),
pci_dev->subsystem_vendor, pci_dev->subsystem_device);
ndev->netdev_ops = &netdev_ops;
SET_ETHTOOL_OPS(ndev, &ops);
ndev->watchdog_timeo = 5 * HZ;
pci_set_drvdata(pci_dev, ndev);
ns83820_do_reset(dev, CR_RST);
/* Must reset the ram bist before running it */
writel(PTSCR_RBIST_RST, dev->base + PTSCR);
ns83820_run_bist(ndev, "sram bist", PTSCR_RBIST_EN,
PTSCR_RBIST_DONE, PTSCR_RBIST_FAIL);
ns83820_run_bist(ndev, "eeprom bist", PTSCR_EEBIST_EN, 0,
PTSCR_EEBIST_FAIL);
ns83820_run_bist(ndev, "eeprom load", PTSCR_EELOAD_EN, 0, 0);
/* I love config registers */
dev->CFG_cache = readl(dev->base + CFG);
if ((dev->CFG_cache & CFG_PCI64_DET)) {
printk(KERN_INFO "%s: detected 64 bit PCI data bus.\n",
ndev->name);
/*dev->CFG_cache |= CFG_DATA64_EN;*/
if (!(dev->CFG_cache & CFG_DATA64_EN))
printk(KERN_INFO "%s: EEPROM did not enable 64 bit bus. Disabled.\n",
ndev->name);
} else
dev->CFG_cache &= ~(CFG_DATA64_EN);
dev->CFG_cache &= (CFG_TBI_EN | CFG_MRM_DIS | CFG_MWI_DIS |
CFG_T64ADDR | CFG_DATA64_EN | CFG_EXT_125 |
CFG_M64ADDR);
dev->CFG_cache |= CFG_PINT_DUPSTS | CFG_PINT_LNKSTS | CFG_PINT_SPDSTS |
CFG_EXTSTS_EN | CFG_EXD | CFG_PESEL;
dev->CFG_cache |= CFG_REQALG;
dev->CFG_cache |= CFG_POW;
dev->CFG_cache |= CFG_TMRTEST;
/* When compiled with 64 bit addressing, we must always enable
* the 64 bit descriptor format.
*/
if (sizeof(dma_addr_t) == 8)
dev->CFG_cache |= CFG_M64ADDR;
if (using_dac)
dev->CFG_cache |= CFG_T64ADDR;
/* Big endian mode does not seem to do what the docs suggest */
dev->CFG_cache &= ~CFG_BEM;
/* setup optical transceiver if we have one */
if (dev->CFG_cache & CFG_TBI_EN) {
printk(KERN_INFO "%s: enabling optical transceiver\n",
ndev->name);
writel(readl(dev->base + GPIOR) | 0x3e8, dev->base + GPIOR);
/* setup auto negotiation feature advertisement */
writel(readl(dev->base + TANAR)
| TANAR_HALF_DUP | TANAR_FULL_DUP,
dev->base + TANAR);
/* start auto negotiation */
writel(TBICR_MR_AN_ENABLE | TBICR_MR_RESTART_AN,
dev->base + TBICR);
writel(TBICR_MR_AN_ENABLE, dev->base + TBICR);
dev->linkstate = LINK_AUTONEGOTIATE;
dev->CFG_cache |= CFG_MODE_1000;
}
writel(dev->CFG_cache, dev->base + CFG);
dprintk("CFG: %08x\n", dev->CFG_cache);
if (reset_phy) {
printk(KERN_INFO "%s: resetting phy\n", ndev->name);
writel(dev->CFG_cache | CFG_PHY_RST, dev->base + CFG);
msleep(10);
writel(dev->CFG_cache, dev->base + CFG);
}
#if 0 /* Huh? This sets the PCI latency register. Should be done via
* the PCI layer. FIXME.
*/
if (readl(dev->base + SRR))
writel(readl(dev->base+0x20c) | 0xfe00, dev->base + 0x20c);
#endif
/* Note! The DMA burst size interacts with packet
* transmission, such that the largest packet that
* can be transmitted is 8192 - FLTH - burst size.
* If only the transmit fifo was larger...
*/
/* Ramit : 1024 DMA is not a good idea, it ends up banging
* some DELL and COMPAQ SMP systems */
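/* With the values programmed here - 512 byte bursts via TXCFG_MXDMA512
 * and, presumably, an FLTH of 1600 bytes from (1600 / 32) * 0x100 - the
 * note above would put the largest transmittable packet at roughly
 * 8192 - 1600 - 512 = 6080 bytes. */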
writel(TXCFG_CSI | TXCFG_HBI | TXCFG_ATP | TXCFG_MXDMA512
| ((1600 / 32) * 0x100),
dev->base + TXCFG);
/* Flush the interrupt holdoff timer */
writel(0x000, dev->base + IHR);
writel(0x100, dev->base + IHR);
writel(0x000, dev->base + IHR);
/* Set Rx to full duplex, don't accept runt, errored, long or length
* range errored packets. Use 512 byte DMA.
*/
/* Ramit : 1024 DMA is not a good idea, it ends up banging
* some DELL and COMPAQ SMP systems
 * Turn on ALP only if we are accepting Jumbo Packets */
writel(RXCFG_AEP | RXCFG_ARP | RXCFG_AIRL | RXCFG_RX_FD
| RXCFG_STRIPCRC
//| RXCFG_ALP
| (RXCFG_MXDMA512) | 0, dev->base + RXCFG);
/* Disable priority queueing */
writel(0, dev->base + PQCR);
/* Enable IP checksum validation and detection of VLAN headers.
* Note: do not set the reject options as at least the 0x102
* revision of the chip does not properly accept IP fragments
* at least for UDP.
*/
/* Ramit : Be sure to turn on RXCFG_ARP if VLAN's are enabled, since
 * the MAC calculates the packet size AFTER stripping the VLAN
 * header, and if a VLAN tagged packet of 64 bytes is received (like
 * a ping with a VLAN header) then the card strips the 4 byte VLAN
 * tag and then checks the packet size, so if RXCFG_ARP is not enabled,
 * it discards it! These guys......
* also turn on tag stripping if hardware acceleration is enabled
*/
#ifdef NS83820_VLAN_ACCEL_SUPPORT
#define VRCR_INIT_VALUE (VRCR_IPEN|VRCR_VTDEN|VRCR_VTREN)
#else
#define VRCR_INIT_VALUE (VRCR_IPEN|VRCR_VTDEN)
#endif
writel(VRCR_INIT_VALUE, dev->base + VRCR);
/* Enable per-packet TCP/UDP/IP checksumming
* and per packet vlan tag insertion if
* vlan hardware acceleration is enabled
*/
#ifdef NS83820_VLAN_ACCEL_SUPPORT
#define VTCR_INIT_VALUE (VTCR_PPCHK|VTCR_VPPTI)
#else
#define VTCR_INIT_VALUE VTCR_PPCHK
#endif
writel(VTCR_INIT_VALUE, dev->base + VTCR);
/* Ramit : Enable async and sync pause frames */
/* writel(0, dev->base + PCR); */
writel((PCR_PS_MCAST | PCR_PS_DA | PCR_PSEN | PCR_FFLO_4K |
PCR_FFHI_8K | PCR_STLO_4 | PCR_STHI_8 | PCR_PAUSE_CNT),
dev->base + PCR);
/* Disable Wake On Lan */
writel(0, dev->base + WCSR);
ns83820_getmac(dev, ndev->dev_addr);
/* Yes, we support dumb IP checksum on transmit */
ndev->features |= NETIF_F_SG;
ndev->features |= NETIF_F_IP_CSUM;
#ifdef NS83820_VLAN_ACCEL_SUPPORT
/* We also support hardware vlan acceleration */
ndev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
if (using_dac) {
printk(KERN_INFO "%s: using 64 bit addressing.\n",
ndev->name);
ndev->features |= NETIF_F_HIGHDMA;
}
printk(KERN_INFO "%s: ns83820 v" VERSION ": DP83820 v%u.%u: %pM io=0x%08lx irq=%d f=%s\n",
ndev->name,
(unsigned)readl(dev->base + SRR) >> 8,
(unsigned)readl(dev->base + SRR) & 0xff,
ndev->dev_addr, addr, pci_dev->irq,
(ndev->features & NETIF_F_HIGHDMA) ? "h,sg" : "sg"
);
#ifdef PHY_CODE_IS_FINISHED
ns83820_probe_phy(ndev);
#endif
err = register_netdevice(ndev);
if (err) {
printk(KERN_INFO "ns83820: unable to register netdev: %d\n", err);
goto out_cleanup;
}
rtnl_unlock();
return 0;
out_cleanup:
writel(0, dev->base + IMR); /* paranoia */
writel(0, dev->base + IER);
readl(dev->base + IER);
out_free_irq:
rtnl_unlock();
free_irq(pci_dev->irq, ndev);
out_disable:
if (dev->base)
iounmap(dev->base);
pci_free_consistent(pci_dev, 4 * DESC_SIZE * NR_TX_DESC, dev->tx_descs, dev->tx_phy_descs);
pci_free_consistent(pci_dev, 4 * DESC_SIZE * NR_RX_DESC, dev->rx_info.descs, dev->rx_info.phy_descs);
pci_disable_device(pci_dev);
out_free:
free_netdev(ndev);
pci_set_drvdata(pci_dev, NULL);
out:
return err;
}
static void __devexit ns83820_remove_one(struct pci_dev *pci_dev)
{
struct net_device *ndev = pci_get_drvdata(pci_dev);
struct ns83820 *dev = PRIV(ndev); /* ok even if NULL */
if (!ndev) /* paranoia */
return;
writel(0, dev->base + IMR); /* paranoia */
writel(0, dev->base + IER);
readl(dev->base + IER);
unregister_netdev(ndev);
free_irq(dev->pci_dev->irq, ndev);
iounmap(dev->base);
pci_free_consistent(dev->pci_dev, 4 * DESC_SIZE * NR_TX_DESC,
dev->tx_descs, dev->tx_phy_descs);
pci_free_consistent(dev->pci_dev, 4 * DESC_SIZE * NR_RX_DESC,
dev->rx_info.descs, dev->rx_info.phy_descs);
pci_disable_device(dev->pci_dev);
free_netdev(ndev);
pci_set_drvdata(pci_dev, NULL);
}
static struct pci_device_id ns83820_pci_tbl[] = {
{ 0x100b, 0x0022, PCI_ANY_ID, PCI_ANY_ID, 0, .driver_data = 0, },
{ 0, },
};
static struct pci_driver driver = {
.name = "ns83820",
.id_table = ns83820_pci_tbl,
.probe = ns83820_init_one,
.remove = __devexit_p(ns83820_remove_one),
#if 0 /* FIXME: implement */
.suspend = ,
.resume = ,
#endif
};
static int __init ns83820_init(void)
{
printk(KERN_INFO "ns83820.c: National Semiconductor DP83820 10/100/1000 driver.\n");
return pci_register_driver(&driver);
}
static void __exit ns83820_exit(void)
{
pci_unregister_driver(&driver);
}
MODULE_AUTHOR("Benjamin LaHaise <bcrl@kvack.org>");
MODULE_DESCRIPTION("National Semiconductor DP83820 10/100/1000 driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, ns83820_pci_tbl);
module_param(lnksts, int, 0);
MODULE_PARM_DESC(lnksts, "Polarity of LNKSTS bit");
module_param(ihr, int, 0);
MODULE_PARM_DESC(ihr, "Time in 100 us increments to delay interrupts (range 0-127)");
module_param(reset_phy, int, 0);
MODULE_PARM_DESC(reset_phy, "Set to 1 to reset the PHY on startup");
module_init(ns83820_init);
module_exit(ns83820_exit);
| {
"language": "C"
} |
/*
* Copyright(c) 1997-2001 id Software, Inc.
* Copyright(c) 2002 The Quakeforge Project.
* Copyright(c) 2006 Quetoo.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#pragma once
vec_t R_DistanceToSurface(const vec3_t p, const r_bsp_surface_t *surf);
const r_bsp_leaf_t *R_LeafForPoint(const vec3_t p, const r_bsp_model_t *bsp);
_Bool R_LeafVisible(const r_bsp_leaf_t *leaf);
_Bool R_LeafHearable(const r_bsp_leaf_t *leaf);
_Bool R_CullBox(const vec3_t mins, const vec3_t maxs);
_Bool R_CullSphere(const vec3_t point, const vec_t radius);
#ifdef __R_LOCAL_H__
_Bool R_CullBspInlineModel(const r_entity_t *e);
void R_DrawBspInlineModels(const r_entities_t *ents);
void R_AddBspInlineModelFlares(const r_entities_t *ents);
void R_DrawBspLeafs(void);
void R_DrawBspNormals(void);
void R_MarkBspSurfaces(void);
void R_UpdateVis(void);
#endif /* __R_LOCAL_H__ */
| {
"language": "C"
} |
/*
+----------------------------------------------------------------------+
| Copyright (c) The PHP Group |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| license@php.net so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
| Authors: Wez Furlong <wez@thebrainroom.com> |
| Sara Golemon <pollita@php.net> |
+----------------------------------------------------------------------+
*/
#include "php.h"
#include "php_globals.h"
#include "ext/standard/file.h"
#include "ext/standard/flock_compat.h"
#ifdef HAVE_SYS_FILE_H
#include <sys/file.h>
#endif
#include <stddef.h>
#if HAVE_UTIME
# ifdef PHP_WIN32
# include <sys/utime.h>
# else
# include <utime.h>
# endif
#endif
static int le_protocols;
struct php_user_stream_wrapper {
char * protoname;
zend_class_entry *ce;
php_stream_wrapper wrapper;
};
static php_stream *user_wrapper_opener(php_stream_wrapper *wrapper, const char *filename, const char *mode, int options, zend_string **opened_path, php_stream_context *context STREAMS_DC);
static int user_wrapper_stat_url(php_stream_wrapper *wrapper, const char *url, int flags, php_stream_statbuf *ssb, php_stream_context *context);
static int user_wrapper_unlink(php_stream_wrapper *wrapper, const char *url, int options, php_stream_context *context);
static int user_wrapper_rename(php_stream_wrapper *wrapper, const char *url_from, const char *url_to, int options, php_stream_context *context);
static int user_wrapper_mkdir(php_stream_wrapper *wrapper, const char *url, int mode, int options, php_stream_context *context);
static int user_wrapper_rmdir(php_stream_wrapper *wrapper, const char *url, int options, php_stream_context *context);
static int user_wrapper_metadata(php_stream_wrapper *wrapper, const char *url, int option, void *value, php_stream_context *context);
static php_stream *user_wrapper_opendir(php_stream_wrapper *wrapper, const char *filename, const char *mode,
int options, zend_string **opened_path, php_stream_context *context STREAMS_DC);
static const php_stream_wrapper_ops user_stream_wops = {
user_wrapper_opener,
NULL, /* close - the streams themselves know how */
NULL, /* stat - the streams themselves know how */
user_wrapper_stat_url,
user_wrapper_opendir,
"user-space",
user_wrapper_unlink,
user_wrapper_rename,
user_wrapper_mkdir,
user_wrapper_rmdir,
user_wrapper_metadata
};
static void stream_wrapper_dtor(zend_resource *rsrc)
{
struct php_user_stream_wrapper * uwrap = (struct php_user_stream_wrapper*)rsrc->ptr;
efree(uwrap->protoname);
efree(uwrap);
}
PHP_MINIT_FUNCTION(user_streams)
{
le_protocols = zend_register_list_destructors_ex(stream_wrapper_dtor, NULL, "stream factory", 0);
if (le_protocols == FAILURE)
return FAILURE;
REGISTER_LONG_CONSTANT("STREAM_USE_PATH", USE_PATH, CONST_CS|CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("STREAM_IGNORE_URL", IGNORE_URL, CONST_CS|CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("STREAM_REPORT_ERRORS", REPORT_ERRORS, CONST_CS|CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("STREAM_MUST_SEEK", STREAM_MUST_SEEK, CONST_CS|CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("STREAM_URL_STAT_LINK", PHP_STREAM_URL_STAT_LINK, CONST_CS|CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("STREAM_URL_STAT_QUIET", PHP_STREAM_URL_STAT_QUIET, CONST_CS|CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("STREAM_MKDIR_RECURSIVE", PHP_STREAM_MKDIR_RECURSIVE, CONST_CS|CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("STREAM_IS_URL", PHP_STREAM_IS_URL, CONST_CS|CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("STREAM_OPTION_BLOCKING", PHP_STREAM_OPTION_BLOCKING, CONST_CS|CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("STREAM_OPTION_READ_TIMEOUT", PHP_STREAM_OPTION_READ_TIMEOUT, CONST_CS|CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("STREAM_OPTION_READ_BUFFER", PHP_STREAM_OPTION_READ_BUFFER, CONST_CS|CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("STREAM_OPTION_WRITE_BUFFER", PHP_STREAM_OPTION_WRITE_BUFFER, CONST_CS|CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("STREAM_BUFFER_NONE", PHP_STREAM_BUFFER_NONE, CONST_CS|CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("STREAM_BUFFER_LINE", PHP_STREAM_BUFFER_LINE, CONST_CS|CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("STREAM_BUFFER_FULL", PHP_STREAM_BUFFER_FULL, CONST_CS|CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("STREAM_CAST_AS_STREAM", PHP_STREAM_AS_STDIO, CONST_CS|CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("STREAM_CAST_FOR_SELECT", PHP_STREAM_AS_FD_FOR_SELECT, CONST_CS|CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("STREAM_META_TOUCH", PHP_STREAM_META_TOUCH, CONST_CS|CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("STREAM_META_OWNER", PHP_STREAM_META_OWNER, CONST_CS|CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("STREAM_META_OWNER_NAME", PHP_STREAM_META_OWNER_NAME, CONST_CS|CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("STREAM_META_GROUP", PHP_STREAM_META_GROUP, CONST_CS|CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("STREAM_META_GROUP_NAME", PHP_STREAM_META_GROUP_NAME, CONST_CS|CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("STREAM_META_ACCESS", PHP_STREAM_META_ACCESS, CONST_CS|CONST_PERSISTENT);
return SUCCESS;
}
struct _php_userstream_data {
struct php_user_stream_wrapper * wrapper;
zval object;
};
typedef struct _php_userstream_data php_userstream_data_t;
/* names of methods */
#define USERSTREAM_OPEN "stream_open"
#define USERSTREAM_CLOSE "stream_close"
#define USERSTREAM_READ "stream_read"
#define USERSTREAM_WRITE "stream_write"
#define USERSTREAM_FLUSH "stream_flush"
#define USERSTREAM_SEEK "stream_seek"
#define USERSTREAM_TELL "stream_tell"
#define USERSTREAM_EOF "stream_eof"
#define USERSTREAM_STAT "stream_stat"
#define USERSTREAM_STATURL "url_stat"
#define USERSTREAM_UNLINK "unlink"
#define USERSTREAM_RENAME "rename"
#define USERSTREAM_MKDIR "mkdir"
#define USERSTREAM_RMDIR "rmdir"
#define USERSTREAM_DIR_OPEN "dir_opendir"
#define USERSTREAM_DIR_READ "dir_readdir"
#define USERSTREAM_DIR_REWIND "dir_rewinddir"
#define USERSTREAM_DIR_CLOSE "dir_closedir"
#define USERSTREAM_LOCK "stream_lock"
#define USERSTREAM_CAST "stream_cast"
#define USERSTREAM_SET_OPTION "stream_set_option"
#define USERSTREAM_TRUNCATE "stream_truncate"
#define USERSTREAM_METADATA "stream_metadata"
/* {{{ class should have methods like these:
function stream_open($path, $mode, $options, &$opened_path)
{
return true/false;
}
function stream_read($count)
{
return false on error;
else return string;
}
function stream_write($data)
{
return false on error;
else return count written;
}
function stream_close()
{
}
function stream_flush()
{
return true/false;
}
function stream_seek($offset, $whence)
{
return true/false;
}
function stream_tell()
{
return (int)$position;
}
function stream_eof()
{
return true/false;
}
function stream_stat()
{
return array( just like that returned by fstat() );
}
function stream_cast($castas)
{
if ($castas == STREAM_CAST_FOR_SELECT) {
return $this->underlying_stream;
}
return false;
}
function stream_set_option($option, $arg1, $arg2)
{
switch($option) {
case STREAM_OPTION_BLOCKING:
$blocking = $arg1;
...
case STREAM_OPTION_READ_TIMEOUT:
$sec = $arg1;
$usec = $arg2;
...
case STREAM_OPTION_WRITE_BUFFER:
$mode = $arg1;
$size = $arg2;
...
default:
return false;
}
}
function url_stat(string $url, int $flags)
{
return array( just like that returned by stat() );
}
function unlink(string $url)
{
return true / false;
}
function rename(string $from, string $to)
{
return true / false;
}
function mkdir($dir, $mode, $options)
{
return true / false;
}
function rmdir($dir, $options)
{
return true / false;
}
function dir_opendir(string $url, int $options)
{
return true / false;
}
function dir_readdir()
{
return string next filename in dir ;
}
function dir_closedir()
{
release dir related resources;
}
function dir_rewinddir()
{
reset to start of dir list;
}
function stream_lock($operation)
{
return true / false;
}
function stream_truncate($new_size)
{
return true / false;
}
}}} **/
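/* For illustration, a minimal userland wrapper class and its registration
   could look like this (a sketch, not taken from the PHP sources or tests;
   the class name and the "mem" protocol are made up, and optional methods
   such as stream_seek/stream_stat are omitted):

class MemoryStream {
	private $data = '';
	private $pos = 0;

	function stream_open($path, $mode, $options, &$opened_path) {
		return true;
	}
	function stream_write($data) {
		$this->data .= $data;
		return strlen($data);
	}
	function stream_read($count) {
		$chunk = substr($this->data, $this->pos, $count);
		$this->pos += strlen($chunk);
		return $chunk;
	}
	function stream_eof() {
		return $this->pos >= strlen($this->data);
	}
	function stream_close() {
	}
}

stream_wrapper_register("mem", "MemoryStream");
file_put_contents("mem://buffer", "hello");
*/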
static void user_stream_create_object(struct php_user_stream_wrapper *uwrap, php_stream_context *context, zval *object)
{
if (uwrap->ce->ce_flags & (ZEND_ACC_INTERFACE|ZEND_ACC_TRAIT|ZEND_ACC_IMPLICIT_ABSTRACT_CLASS|ZEND_ACC_EXPLICIT_ABSTRACT_CLASS)) {
ZVAL_UNDEF(object);
return;
}
/* create an instance of our class */
if (object_init_ex(object, uwrap->ce) == FAILURE) {
ZVAL_UNDEF(object);
return;
}
if (context) {
add_property_resource(object, "context", context->res);
GC_ADDREF(context->res);
} else {
add_property_null(object, "context");
}
if (uwrap->ce->constructor) {
zend_call_known_instance_method_with_0_params(
uwrap->ce->constructor, Z_OBJ_P(object), NULL);
}
}
static php_stream *user_wrapper_opener(php_stream_wrapper *wrapper, const char *filename, const char *mode,
int options, zend_string **opened_path, php_stream_context *context STREAMS_DC)
{
struct php_user_stream_wrapper *uwrap = (struct php_user_stream_wrapper*)wrapper->abstract;
php_userstream_data_t *us;
zval zretval, zfuncname;
zval args[4];
int call_result;
php_stream *stream = NULL;
zend_bool old_in_user_include;
/* Try to catch bad usage without preventing flexibility */
if (FG(user_stream_current_filename) != NULL && strcmp(filename, FG(user_stream_current_filename)) == 0) {
php_stream_wrapper_log_error(wrapper, options, "infinite recursion prevented");
return NULL;
}
FG(user_stream_current_filename) = filename;
/* if the user stream was registered as local and we are in include context,
we add allow_url_include restrictions to allow_url_fopen ones */
/* we need only is_url == 0 here since if is_url == 1 and remote wrappers
were restricted we wouldn't get here */
old_in_user_include = PG(in_user_include);
if(uwrap->wrapper.is_url == 0 &&
(options & STREAM_OPEN_FOR_INCLUDE) &&
!PG(allow_url_include)) {
PG(in_user_include) = 1;
}
us = emalloc(sizeof(*us));
us->wrapper = uwrap;
user_stream_create_object(uwrap, context, &us->object);
if (Z_TYPE(us->object) == IS_UNDEF) {
FG(user_stream_current_filename) = NULL;
PG(in_user_include) = old_in_user_include;
efree(us);
return NULL;
}
/* call its stream_open method - set up params first */
ZVAL_STRING(&args[0], filename);
ZVAL_STRING(&args[1], mode);
ZVAL_LONG(&args[2], options);
ZVAL_NEW_REF(&args[3], &EG(uninitialized_zval));
ZVAL_STRING(&zfuncname, USERSTREAM_OPEN);
zend_try {
call_result = call_user_function(NULL,
Z_ISUNDEF(us->object)? NULL : &us->object,
&zfuncname,
&zretval,
4, args);
} zend_catch {
FG(user_stream_current_filename) = NULL;
zend_bailout();
} zend_end_try();
if (call_result == SUCCESS && Z_TYPE(zretval) != IS_UNDEF && zval_is_true(&zretval)) {
/* the stream is now open! */
stream = php_stream_alloc_rel(&php_stream_userspace_ops, us, 0, mode);
/* if the opened path is set, copy it out */
if (Z_ISREF(args[3]) && Z_TYPE_P(Z_REFVAL(args[3])) == IS_STRING && opened_path) {
*opened_path = zend_string_copy(Z_STR_P(Z_REFVAL(args[3])));
}
/* set wrapper data to be a reference to our object */
ZVAL_COPY(&stream->wrapperdata, &us->object);
} else {
php_stream_wrapper_log_error(wrapper, options, "\"%s::" USERSTREAM_OPEN "\" call failed",
ZSTR_VAL(us->wrapper->ce->name));
}
/* destroy everything else */
if (stream == NULL) {
zval_ptr_dtor(&us->object);
ZVAL_UNDEF(&us->object);
efree(us);
}
zval_ptr_dtor(&zretval);
zval_ptr_dtor(&zfuncname);
zval_ptr_dtor(&args[3]);
zval_ptr_dtor(&args[2]);
zval_ptr_dtor(&args[1]);
zval_ptr_dtor(&args[0]);
FG(user_stream_current_filename) = NULL;
PG(in_user_include) = old_in_user_include;
return stream;
}
static php_stream *user_wrapper_opendir(php_stream_wrapper *wrapper, const char *filename, const char *mode,
int options, zend_string **opened_path, php_stream_context *context STREAMS_DC)
{
struct php_user_stream_wrapper *uwrap = (struct php_user_stream_wrapper*)wrapper->abstract;
php_userstream_data_t *us;
zval zretval, zfuncname;
zval args[2];
int call_result;
php_stream *stream = NULL;
/* Try to catch bad usage without preventing flexibility */
if (FG(user_stream_current_filename) != NULL && strcmp(filename, FG(user_stream_current_filename)) == 0) {
php_stream_wrapper_log_error(wrapper, options, "infinite recursion prevented");
return NULL;
}
FG(user_stream_current_filename) = filename;
us = emalloc(sizeof(*us));
us->wrapper = uwrap;
user_stream_create_object(uwrap, context, &us->object);
if (Z_TYPE(us->object) == IS_UNDEF) {
FG(user_stream_current_filename) = NULL;
efree(us);
return NULL;
}
/* call its dir_open method - set up params first */
ZVAL_STRING(&args[0], filename);
ZVAL_LONG(&args[1], options);
ZVAL_STRING(&zfuncname, USERSTREAM_DIR_OPEN);
call_result = call_user_function(NULL,
Z_ISUNDEF(us->object)? NULL : &us->object,
&zfuncname,
&zretval,
2, args);
if (call_result == SUCCESS && Z_TYPE(zretval) != IS_UNDEF && zval_is_true(&zretval)) {
/* the stream is now open! */
stream = php_stream_alloc_rel(&php_stream_userspace_dir_ops, us, 0, mode);
/* set wrapper data to be a reference to our object */
ZVAL_COPY(&stream->wrapperdata, &us->object);
} else {
php_stream_wrapper_log_error(wrapper, options, "\"%s::" USERSTREAM_DIR_OPEN "\" call failed",
ZSTR_VAL(us->wrapper->ce->name));
}
/* destroy everything else */
if (stream == NULL) {
zval_ptr_dtor(&us->object);
ZVAL_UNDEF(&us->object);
efree(us);
}
zval_ptr_dtor(&zretval);
zval_ptr_dtor(&zfuncname);
zval_ptr_dtor(&args[1]);
zval_ptr_dtor(&args[0]);
FG(user_stream_current_filename) = NULL;
return stream;
}
/* {{{ Registers a custom URL protocol handler class */
PHP_FUNCTION(stream_wrapper_register)
{
zend_string *protocol;
struct php_user_stream_wrapper *uwrap;
zend_class_entry *ce = NULL;
zend_resource *rsrc;
zend_long flags = 0;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "SC|l", &protocol, &ce, &flags) == FAILURE) {
RETURN_THROWS();
}
uwrap = (struct php_user_stream_wrapper *)ecalloc(1, sizeof(*uwrap));
uwrap->ce = ce;
uwrap->protoname = estrndup(ZSTR_VAL(protocol), ZSTR_LEN(protocol));
uwrap->wrapper.wops = &user_stream_wops;
uwrap->wrapper.abstract = uwrap;
uwrap->wrapper.is_url = ((flags & PHP_STREAM_IS_URL) != 0);
rsrc = zend_register_resource(uwrap, le_protocols);
if (php_register_url_stream_wrapper_volatile(protocol, &uwrap->wrapper) == SUCCESS) {
RETURN_TRUE;
}
/* We failed. But why? */
if (zend_hash_exists(php_stream_get_url_stream_wrappers_hash(), protocol)) {
php_error_docref(NULL, E_WARNING, "Protocol %s:// is already defined.", ZSTR_VAL(protocol));
} else {
/* Hash doesn't exist so it must have been an invalid protocol scheme */
php_error_docref(NULL, E_WARNING, "Invalid protocol scheme specified. Unable to register wrapper class %s to %s://", ZSTR_VAL(uwrap->ce->name), ZSTR_VAL(protocol));
}
zend_list_delete(rsrc);
RETURN_FALSE;
}
/* }}} */
/* {{{ Unregister a wrapper for the life of the current request. */
PHP_FUNCTION(stream_wrapper_unregister)
{
zend_string *protocol;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "S", &protocol) == FAILURE) {
RETURN_THROWS();
}
if (php_unregister_url_stream_wrapper_volatile(protocol) == FAILURE) {
/* We failed */
php_error_docref(NULL, E_WARNING, "Unable to unregister protocol %s://", ZSTR_VAL(protocol));
RETURN_FALSE;
}
RETURN_TRUE;
}
/* }}} */
/* {{{ Restore the original protocol handler, overriding if necessary */
PHP_FUNCTION(stream_wrapper_restore)
{
zend_string *protocol;
php_stream_wrapper *wrapper;
HashTable *global_wrapper_hash, *wrapper_hash;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "S", &protocol) == FAILURE) {
RETURN_THROWS();
}
global_wrapper_hash = php_stream_get_url_stream_wrappers_hash_global();
if ((wrapper = zend_hash_find_ptr(global_wrapper_hash, protocol)) == NULL) {
php_error_docref(NULL, E_WARNING, "%s:// never existed, nothing to restore", ZSTR_VAL(protocol));
RETURN_FALSE;
}
wrapper_hash = php_stream_get_url_stream_wrappers_hash();
if (wrapper_hash == global_wrapper_hash || zend_hash_find_ptr(wrapper_hash, protocol) == wrapper) {
php_error_docref(NULL, E_NOTICE, "%s:// was never changed, nothing to restore", ZSTR_VAL(protocol));
RETURN_TRUE;
}
/* A failure here could be okay given that the protocol might have been merely unregistered */
php_unregister_url_stream_wrapper_volatile(protocol);
if (php_register_url_stream_wrapper_volatile(protocol, wrapper) == FAILURE) {
php_error_docref(NULL, E_WARNING, "Unable to restore original %s:// wrapper", ZSTR_VAL(protocol));
RETURN_FALSE;
}
RETURN_TRUE;
}
/* }}} */
static ssize_t php_userstreamop_write(php_stream *stream, const char *buf, size_t count)
{
zval func_name;
zval retval;
int call_result;
php_userstream_data_t *us = (php_userstream_data_t *)stream->abstract;
zval args[1];
ssize_t didwrite;
assert(us != NULL);
ZVAL_STRINGL(&func_name, USERSTREAM_WRITE, sizeof(USERSTREAM_WRITE)-1);
ZVAL_STRINGL(&args[0], (char*)buf, count);
call_result = call_user_function(NULL,
Z_ISUNDEF(us->object)? NULL : &us->object,
&func_name,
&retval,
1, args);
zval_ptr_dtor(&args[0]);
zval_ptr_dtor(&func_name);
if (EG(exception)) {
return -1;
}
if (call_result == SUCCESS && Z_TYPE(retval) != IS_UNDEF) {
if (Z_TYPE(retval) == IS_FALSE) {
didwrite = -1;
} else {
convert_to_long(&retval);
didwrite = Z_LVAL(retval);
}
} else {
php_error_docref(NULL, E_WARNING, "%s::" USERSTREAM_WRITE " is not implemented!",
ZSTR_VAL(us->wrapper->ce->name));
didwrite = -1;
}
/* don't allow strange buffer overruns due to bogus return */
if (didwrite > 0 && didwrite > count) {
php_error_docref(NULL, E_WARNING, "%s::" USERSTREAM_WRITE " wrote " ZEND_LONG_FMT " bytes more data than requested (" ZEND_LONG_FMT " written, " ZEND_LONG_FMT " max)",
ZSTR_VAL(us->wrapper->ce->name),
(zend_long)(didwrite - count), (zend_long)didwrite, (zend_long)count);
didwrite = count;
}
zval_ptr_dtor(&retval);
return didwrite;
}
static ssize_t php_userstreamop_read(php_stream *stream, char *buf, size_t count)
{
zval func_name;
zval retval;
zval args[1];
int call_result;
size_t didread = 0;
php_userstream_data_t *us = (php_userstream_data_t *)stream->abstract;
assert(us != NULL);
ZVAL_STRINGL(&func_name, USERSTREAM_READ, sizeof(USERSTREAM_READ)-1);
ZVAL_LONG(&args[0], count);
call_result = call_user_function(NULL,
Z_ISUNDEF(us->object)? NULL : &us->object,
&func_name,
&retval,
1, args);
zval_ptr_dtor(&args[0]);
zval_ptr_dtor(&func_name);
if (EG(exception)) {
return -1;
}
if (call_result == FAILURE) {
php_error_docref(NULL, E_WARNING, "%s::" USERSTREAM_READ " is not implemented!",
ZSTR_VAL(us->wrapper->ce->name));
return -1;
}
if (Z_TYPE(retval) == IS_FALSE) {
return -1;
}
if (!try_convert_to_string(&retval)) {
return -1;
}
didread = Z_STRLEN(retval);
if (didread > 0) {
if (didread > count) {
php_error_docref(NULL, E_WARNING, "%s::" USERSTREAM_READ " - read " ZEND_LONG_FMT " bytes more data than requested (" ZEND_LONG_FMT " read, " ZEND_LONG_FMT " max) - excess data will be lost",
ZSTR_VAL(us->wrapper->ce->name), (zend_long)(didread - count), (zend_long)didread, (zend_long)count);
didread = count;
}
memcpy(buf, Z_STRVAL(retval), didread);
}
zval_ptr_dtor(&retval);
ZVAL_UNDEF(&retval);
/* since the user stream has no way of setting the eof flag directly, we need to ask it if we hit eof */
ZVAL_STRINGL(&func_name, USERSTREAM_EOF, sizeof(USERSTREAM_EOF)-1);
call_result = call_user_function(NULL,
Z_ISUNDEF(us->object)? NULL : &us->object,
&func_name,
&retval,
0, NULL);
zval_ptr_dtor(&func_name);
if (EG(exception)) {
stream->eof = 1;
return -1;
}
if (call_result == SUCCESS && Z_TYPE(retval) != IS_UNDEF && zval_is_true(&retval)) {
stream->eof = 1;
} else if (call_result == FAILURE) {
php_error_docref(NULL, E_WARNING,
"%s::" USERSTREAM_EOF " is not implemented! Assuming EOF",
ZSTR_VAL(us->wrapper->ce->name));
stream->eof = 1;
}
zval_ptr_dtor(&retval);
return didread;
}
static int php_userstreamop_close(php_stream *stream, int close_handle)
{
zval func_name;
zval retval;
php_userstream_data_t *us = (php_userstream_data_t *)stream->abstract;
assert(us != NULL);
ZVAL_STRINGL(&func_name, USERSTREAM_CLOSE, sizeof(USERSTREAM_CLOSE)-1);
call_user_function(NULL,
Z_ISUNDEF(us->object)? NULL : &us->object,
&func_name,
&retval,
0, NULL);
zval_ptr_dtor(&retval);
zval_ptr_dtor(&func_name);
zval_ptr_dtor(&us->object);
ZVAL_UNDEF(&us->object);
efree(us);
return 0;
}
static int php_userstreamop_flush(php_stream *stream)
{
zval func_name;
zval retval;
int call_result;
php_userstream_data_t *us = (php_userstream_data_t *)stream->abstract;
assert(us != NULL);
ZVAL_STRINGL(&func_name, USERSTREAM_FLUSH, sizeof(USERSTREAM_FLUSH)-1);
call_result = call_user_function(NULL,
Z_ISUNDEF(us->object)? NULL : &us->object,
&func_name,
&retval,
0, NULL);
if (call_result == SUCCESS && Z_TYPE(retval) != IS_UNDEF && zval_is_true(&retval))
call_result = 0;
else
call_result = -1;
zval_ptr_dtor(&retval);
zval_ptr_dtor(&func_name);
return call_result;
}
static int php_userstreamop_seek(php_stream *stream, zend_off_t offset, int whence, zend_off_t *newoffs)
{
zval func_name;
zval retval;
int call_result, ret;
php_userstream_data_t *us = (php_userstream_data_t *)stream->abstract;
zval args[2];
assert(us != NULL);
ZVAL_STRINGL(&func_name, USERSTREAM_SEEK, sizeof(USERSTREAM_SEEK)-1);
ZVAL_LONG(&args[0], offset);
ZVAL_LONG(&args[1], whence);
call_result = call_user_function(NULL,
Z_ISUNDEF(us->object)? NULL : &us->object,
&func_name,
&retval,
2, args);
zval_ptr_dtor(&args[0]);
zval_ptr_dtor(&args[1]);
zval_ptr_dtor(&func_name);
if (call_result == FAILURE) {
/* stream_seek is not implemented, so disable seeks for this stream */
stream->flags |= PHP_STREAM_FLAG_NO_SEEK;
/* there should be no retval to clean up */
zval_ptr_dtor(&retval);
return -1;
} else if (call_result == SUCCESS && Z_TYPE(retval) != IS_UNDEF && zval_is_true(&retval)) {
ret = 0;
} else {
ret = -1;
}
zval_ptr_dtor(&retval);
ZVAL_UNDEF(&retval);
if (ret) {
return ret;
}
/* now determine where we are */
ZVAL_STRINGL(&func_name, USERSTREAM_TELL, sizeof(USERSTREAM_TELL)-1);
call_result = call_user_function(NULL,
Z_ISUNDEF(us->object)? NULL : &us->object,
&func_name,
&retval,
0, NULL);
if (call_result == SUCCESS && Z_TYPE(retval) == IS_LONG) {
*newoffs = Z_LVAL(retval);
ret = 0;
} else if (call_result == FAILURE) {
php_error_docref(NULL, E_WARNING, "%s::" USERSTREAM_TELL " is not implemented!", ZSTR_VAL(us->wrapper->ce->name));
ret = -1;
} else {
ret = -1;
}
zval_ptr_dtor(&retval);
zval_ptr_dtor(&func_name);
return ret;
}
/* parse the return value from one of the stat functions and store the
* relevant fields into the statbuf provided */
static int statbuf_from_array(zval *array, php_stream_statbuf *ssb)
{
zval *elem;
#define STAT_PROP_ENTRY_EX(name, name2) \
if (NULL != (elem = zend_hash_str_find(Z_ARRVAL_P(array), #name, sizeof(#name)-1))) { \
ssb->sb.st_##name2 = zval_get_long(elem); \
}
#define STAT_PROP_ENTRY(name) STAT_PROP_ENTRY_EX(name,name)
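/* For illustration, STAT_PROP_ENTRY(size) expands (roughly) to:
 *
 *   if (NULL != (elem = zend_hash_str_find(Z_ARRVAL_P(array), "size", sizeof("size")-1))) {
 *       ssb->sb.st_size = zval_get_long(elem);
 *   }
 *
 * so each entry copies one key of the userland stat array into the
 * matching st_* field, while keys that are absent stay zeroed by the
 * memset() below.
 */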
memset(ssb, 0, sizeof(php_stream_statbuf));
STAT_PROP_ENTRY(dev);
STAT_PROP_ENTRY(ino);
STAT_PROP_ENTRY(mode);
STAT_PROP_ENTRY(nlink);
STAT_PROP_ENTRY(uid);
STAT_PROP_ENTRY(gid);
#if HAVE_STRUCT_STAT_ST_RDEV
STAT_PROP_ENTRY(rdev);
#endif
STAT_PROP_ENTRY(size);
STAT_PROP_ENTRY(atime);
STAT_PROP_ENTRY(mtime);
STAT_PROP_ENTRY(ctime);
#ifdef HAVE_STRUCT_STAT_ST_BLKSIZE
STAT_PROP_ENTRY(blksize);
#endif
#ifdef HAVE_STRUCT_STAT_ST_BLOCKS
STAT_PROP_ENTRY(blocks);
#endif
#undef STAT_PROP_ENTRY
#undef STAT_PROP_ENTRY_EX
return SUCCESS;
}
static int php_userstreamop_stat(php_stream *stream, php_stream_statbuf *ssb)
{
zval func_name;
zval retval;
int call_result;
php_userstream_data_t *us = (php_userstream_data_t *)stream->abstract;
int ret = -1;
ZVAL_STRINGL(&func_name, USERSTREAM_STAT, sizeof(USERSTREAM_STAT)-1);
call_result = call_user_function(NULL,
Z_ISUNDEF(us->object)? NULL : &us->object,
&func_name,
&retval,
0, NULL);
if (call_result == SUCCESS && Z_TYPE(retval) == IS_ARRAY) {
if (SUCCESS == statbuf_from_array(&retval, ssb))
ret = 0;
} else {
if (call_result == FAILURE) {
php_error_docref(NULL, E_WARNING, "%s::" USERSTREAM_STAT " is not implemented!",
ZSTR_VAL(us->wrapper->ce->name));
}
}
zval_ptr_dtor(&retval);
zval_ptr_dtor(&func_name);
return ret;
}
static int php_userstreamop_set_option(php_stream *stream, int option, int value, void *ptrparam) {
zval func_name;
zval retval;
int call_result;
php_userstream_data_t *us = (php_userstream_data_t *)stream->abstract;
int ret = PHP_STREAM_OPTION_RETURN_NOTIMPL;
zval args[3];
switch (option) {
case PHP_STREAM_OPTION_CHECK_LIVENESS:
ZVAL_STRINGL(&func_name, USERSTREAM_EOF, sizeof(USERSTREAM_EOF)-1);
call_result = call_user_function(NULL, Z_ISUNDEF(us->object)? NULL : &us->object, &func_name, &retval, 0, NULL);
if (call_result == SUCCESS && (Z_TYPE(retval) == IS_FALSE || Z_TYPE(retval) == IS_TRUE)) {
ret = zval_is_true(&retval) ? PHP_STREAM_OPTION_RETURN_ERR : PHP_STREAM_OPTION_RETURN_OK;
} else {
ret = PHP_STREAM_OPTION_RETURN_ERR;
php_error_docref(NULL, E_WARNING,
"%s::" USERSTREAM_EOF " is not implemented! Assuming EOF",
ZSTR_VAL(us->wrapper->ce->name));
}
zval_ptr_dtor(&retval);
zval_ptr_dtor(&func_name);
break;
case PHP_STREAM_OPTION_LOCKING:
ZVAL_LONG(&args[0], 0);
if (value & LOCK_NB) {
Z_LVAL_P(&args[0]) |= PHP_LOCK_NB;
}
switch(value & ~LOCK_NB) {
case LOCK_SH:
Z_LVAL_P(&args[0]) |= PHP_LOCK_SH;
break;
case LOCK_EX:
Z_LVAL_P(&args[0]) |= PHP_LOCK_EX;
break;
case LOCK_UN:
Z_LVAL_P(&args[0]) |= PHP_LOCK_UN;
break;
}
/* TODO wouldblock */
ZVAL_STRINGL(&func_name, USERSTREAM_LOCK, sizeof(USERSTREAM_LOCK)-1);
call_result = call_user_function(NULL,
Z_ISUNDEF(us->object)? NULL : &us->object,
&func_name,
&retval,
1, args);
if (call_result == SUCCESS && (Z_TYPE(retval) == IS_FALSE || Z_TYPE(retval) == IS_TRUE)) {
ret = (Z_TYPE(retval) == IS_FALSE);
} else if (call_result == FAILURE) {
if (value == 0) {
/* lock support test (TODO: more checks) */
ret = PHP_STREAM_OPTION_RETURN_OK;
} else {
php_error_docref(NULL, E_WARNING, "%s::" USERSTREAM_LOCK " is not implemented!",
ZSTR_VAL(us->wrapper->ce->name));
ret = PHP_STREAM_OPTION_RETURN_ERR;
}
}
zval_ptr_dtor(&retval);
zval_ptr_dtor(&func_name);
zval_ptr_dtor(&args[0]);
break;
case PHP_STREAM_OPTION_TRUNCATE_API:
ZVAL_STRINGL(&func_name, USERSTREAM_TRUNCATE, sizeof(USERSTREAM_TRUNCATE)-1);
switch (value) {
case PHP_STREAM_TRUNCATE_SUPPORTED:
if (zend_is_callable_ex(&func_name,
Z_ISUNDEF(us->object)? NULL : Z_OBJ(us->object),
0, NULL, NULL, NULL))
ret = PHP_STREAM_OPTION_RETURN_OK;
else
ret = PHP_STREAM_OPTION_RETURN_ERR;
break;
case PHP_STREAM_TRUNCATE_SET_SIZE: {
ptrdiff_t new_size = *(ptrdiff_t*) ptrparam;
if (new_size >= 0 && new_size <= (ptrdiff_t)LONG_MAX) {
ZVAL_LONG(&args[0], (zend_long)new_size);
call_result = call_user_function(NULL,
Z_ISUNDEF(us->object)? NULL : &us->object,
&func_name,
&retval,
1, args);
if (call_result == SUCCESS && Z_TYPE(retval) != IS_UNDEF) {
if (Z_TYPE(retval) == IS_FALSE || Z_TYPE(retval) == IS_TRUE) {
ret = (Z_TYPE(retval) == IS_TRUE) ? PHP_STREAM_OPTION_RETURN_OK :
PHP_STREAM_OPTION_RETURN_ERR;
} else {
php_error_docref(NULL, E_WARNING,
"%s::" USERSTREAM_TRUNCATE " did not return a boolean!",
ZSTR_VAL(us->wrapper->ce->name));
}
} else {
php_error_docref(NULL, E_WARNING,
"%s::" USERSTREAM_TRUNCATE " is not implemented!",
ZSTR_VAL(us->wrapper->ce->name));
}
zval_ptr_dtor(&retval);
zval_ptr_dtor(&args[0]);
} else { /* bad new size */
ret = PHP_STREAM_OPTION_RETURN_ERR;
}
break;
}
}
zval_ptr_dtor(&func_name);
break;
case PHP_STREAM_OPTION_READ_BUFFER:
case PHP_STREAM_OPTION_WRITE_BUFFER:
case PHP_STREAM_OPTION_READ_TIMEOUT:
case PHP_STREAM_OPTION_BLOCKING: {
ZVAL_STRINGL(&func_name, USERSTREAM_SET_OPTION, sizeof(USERSTREAM_SET_OPTION)-1);
ZVAL_LONG(&args[0], option);
ZVAL_NULL(&args[1]);
ZVAL_NULL(&args[2]);
switch(option) {
case PHP_STREAM_OPTION_READ_BUFFER:
case PHP_STREAM_OPTION_WRITE_BUFFER:
ZVAL_LONG(&args[1], value);
if (ptrparam) {
ZVAL_LONG(&args[2], *(long *)ptrparam);
} else {
ZVAL_LONG(&args[2], BUFSIZ);
}
break;
case PHP_STREAM_OPTION_READ_TIMEOUT: {
struct timeval tv = *(struct timeval*)ptrparam;
ZVAL_LONG(&args[1], tv.tv_sec);
ZVAL_LONG(&args[2], tv.tv_usec);
break;
}
case PHP_STREAM_OPTION_BLOCKING:
ZVAL_LONG(&args[1], value);
break;
default:
break;
}
call_result = call_user_function(NULL,
Z_ISUNDEF(us->object)? NULL : &us->object,
&func_name,
&retval,
3, args);
if (call_result == FAILURE) {
php_error_docref(NULL, E_WARNING, "%s::" USERSTREAM_SET_OPTION " is not implemented!",
ZSTR_VAL(us->wrapper->ce->name));
ret = PHP_STREAM_OPTION_RETURN_ERR;
} else if (zend_is_true(&retval)) {
ret = PHP_STREAM_OPTION_RETURN_OK;
} else {
ret = PHP_STREAM_OPTION_RETURN_ERR;
}
zval_ptr_dtor(&retval);
zval_ptr_dtor(&args[2]);
zval_ptr_dtor(&args[1]);
zval_ptr_dtor(&args[0]);
zval_ptr_dtor(&func_name);
break;
}
}
return ret;
}
static int user_wrapper_unlink(php_stream_wrapper *wrapper, const char *url, int options, php_stream_context *context)
{
struct php_user_stream_wrapper *uwrap = (struct php_user_stream_wrapper*)wrapper->abstract;
zval zfuncname, zretval;
zval args[1];
int call_result;
zval object;
int ret = 0;
/* create an instance of our class */
user_stream_create_object(uwrap, context, &object);
if (Z_TYPE(object) == IS_UNDEF) {
return ret;
}
/* call the unlink method */
ZVAL_STRING(&args[0], url);
ZVAL_STRING(&zfuncname, USERSTREAM_UNLINK);
call_result = call_user_function(NULL,
&object,
&zfuncname,
&zretval,
1, args);
if (call_result == SUCCESS && (Z_TYPE(zretval) == IS_FALSE || Z_TYPE(zretval) == IS_TRUE)) {
ret = (Z_TYPE(zretval) == IS_TRUE);
} else if (call_result == FAILURE) {
php_error_docref(NULL, E_WARNING, "%s::" USERSTREAM_UNLINK " is not implemented!", ZSTR_VAL(uwrap->ce->name));
}
/* clean up */
zval_ptr_dtor(&object);
zval_ptr_dtor(&zretval);
zval_ptr_dtor(&zfuncname);
zval_ptr_dtor(&args[0]);
return ret;
}
static int user_wrapper_rename(php_stream_wrapper *wrapper, const char *url_from, const char *url_to,
int options, php_stream_context *context)
{
struct php_user_stream_wrapper *uwrap = (struct php_user_stream_wrapper*)wrapper->abstract;
zval zfuncname, zretval;
zval args[2];
int call_result;
zval object;
int ret = 0;
/* create an instance of our class */
user_stream_create_object(uwrap, context, &object);
if (Z_TYPE(object) == IS_UNDEF) {
return ret;
}
/* call the rename method */
ZVAL_STRING(&args[0], url_from);
ZVAL_STRING(&args[1], url_to);
ZVAL_STRING(&zfuncname, USERSTREAM_RENAME);
call_result = call_user_function(NULL,
&object,
&zfuncname,
&zretval,
2, args);
if (call_result == SUCCESS && (Z_TYPE(zretval) == IS_FALSE || Z_TYPE(zretval) == IS_TRUE)) {
ret = (Z_TYPE(zretval) == IS_TRUE);
} else if (call_result == FAILURE) {
php_error_docref(NULL, E_WARNING, "%s::" USERSTREAM_RENAME " is not implemented!", ZSTR_VAL(uwrap->ce->name));
}
/* clean up */
zval_ptr_dtor(&object);
zval_ptr_dtor(&zretval);
zval_ptr_dtor(&zfuncname);
zval_ptr_dtor(&args[1]);
zval_ptr_dtor(&args[0]);
return ret;
}
static int user_wrapper_mkdir(php_stream_wrapper *wrapper, const char *url, int mode,
int options, php_stream_context *context)
{
struct php_user_stream_wrapper *uwrap = (struct php_user_stream_wrapper*)wrapper->abstract;
zval zfuncname, zretval;
zval args[3];
int call_result;
zval object;
int ret = 0;
/* create an instance of our class */
user_stream_create_object(uwrap, context, &object);
if (Z_TYPE(object) == IS_UNDEF) {
return ret;
}
/* call the mkdir method */
ZVAL_STRING(&args[0], url);
ZVAL_LONG(&args[1], mode);
ZVAL_LONG(&args[2], options);
ZVAL_STRING(&zfuncname, USERSTREAM_MKDIR);
call_result = call_user_function(NULL,
&object,
&zfuncname,
&zretval,
3, args);
if (call_result == SUCCESS && (Z_TYPE(zretval) == IS_FALSE || Z_TYPE(zretval) == IS_TRUE)) {
ret = (Z_TYPE(zretval) == IS_TRUE);
} else if (call_result == FAILURE) {
php_error_docref(NULL, E_WARNING, "%s::" USERSTREAM_MKDIR " is not implemented!", ZSTR_VAL(uwrap->ce->name));
}
/* clean up */
zval_ptr_dtor(&object);
zval_ptr_dtor(&zretval);
zval_ptr_dtor(&zfuncname);
zval_ptr_dtor(&args[2]);
zval_ptr_dtor(&args[1]);
zval_ptr_dtor(&args[0]);
return ret;
}
static int user_wrapper_rmdir(php_stream_wrapper *wrapper, const char *url,
int options, php_stream_context *context)
{
struct php_user_stream_wrapper *uwrap = (struct php_user_stream_wrapper*)wrapper->abstract;
zval zfuncname, zretval;
zval args[2];
int call_result;
zval object;
int ret = 0;
/* create an instance of our class */
user_stream_create_object(uwrap, context, &object);
if (Z_TYPE(object) == IS_UNDEF) {
return ret;
}
/* call the rmdir method */
ZVAL_STRING(&args[0], url);
ZVAL_LONG(&args[1], options);
ZVAL_STRING(&zfuncname, USERSTREAM_RMDIR);
call_result = call_user_function(NULL,
&object,
&zfuncname,
&zretval,
2, args);
if (call_result == SUCCESS && (Z_TYPE(zretval) == IS_FALSE || Z_TYPE(zretval) == IS_TRUE)) {
ret = (Z_TYPE(zretval) == IS_TRUE);
} else if (call_result == FAILURE) {
php_error_docref(NULL, E_WARNING, "%s::" USERSTREAM_RMDIR " is not implemented!", ZSTR_VAL(uwrap->ce->name));
}
/* clean up */
zval_ptr_dtor(&object);
zval_ptr_dtor(&zretval);
zval_ptr_dtor(&zfuncname);
zval_ptr_dtor(&args[1]);
zval_ptr_dtor(&args[0]);
return ret;
}
static int user_wrapper_metadata(php_stream_wrapper *wrapper, const char *url, int option,
void *value, php_stream_context *context)
{
struct php_user_stream_wrapper *uwrap = (struct php_user_stream_wrapper*)wrapper->abstract;
zval zfuncname, zretval;
zval args[3];
int call_result;
zval object;
int ret = 0;
switch(option) {
case PHP_STREAM_META_TOUCH:
array_init(&args[2]);
if(value) {
struct utimbuf *newtime = (struct utimbuf *)value;
add_index_long(&args[2], 0, newtime->modtime);
add_index_long(&args[2], 1, newtime->actime);
}
break;
case PHP_STREAM_META_GROUP:
case PHP_STREAM_META_OWNER:
case PHP_STREAM_META_ACCESS:
ZVAL_LONG(&args[2], *(long *)value);
break;
case PHP_STREAM_META_GROUP_NAME:
case PHP_STREAM_META_OWNER_NAME:
ZVAL_STRING(&args[2], value);
break;
default:
php_error_docref(NULL, E_WARNING, "Unknown option %d for " USERSTREAM_METADATA, option);
zval_ptr_dtor(&args[2]);
return ret;
}
/* create an instance of our class */
user_stream_create_object(uwrap, context, &object);
if (Z_TYPE(object) == IS_UNDEF) {
zval_ptr_dtor(&args[2]);
return ret;
}
/* call the metadata method */
ZVAL_STRING(&args[0], url);
ZVAL_LONG(&args[1], option);
ZVAL_STRING(&zfuncname, USERSTREAM_METADATA);
call_result = call_user_function(NULL,
&object,
&zfuncname,
&zretval,
3, args);
if (call_result == SUCCESS && (Z_TYPE(zretval) == IS_FALSE || Z_TYPE(zretval) == IS_TRUE)) {
ret = Z_TYPE(zretval) == IS_TRUE;
} else if (call_result == FAILURE) {
php_error_docref(NULL, E_WARNING, "%s::" USERSTREAM_METADATA " is not implemented!", ZSTR_VAL(uwrap->ce->name));
}
/* clean up */
zval_ptr_dtor(&object);
zval_ptr_dtor(&zretval);
zval_ptr_dtor(&zfuncname);
zval_ptr_dtor(&args[0]);
zval_ptr_dtor(&args[1]);
zval_ptr_dtor(&args[2]);
return ret;
}
static int user_wrapper_stat_url(php_stream_wrapper *wrapper, const char *url, int flags,
php_stream_statbuf *ssb, php_stream_context *context)
{
struct php_user_stream_wrapper *uwrap = (struct php_user_stream_wrapper*)wrapper->abstract;
zval zfuncname, zretval;
zval args[2];
int call_result;
zval object;
int ret = -1;
/* create an instance of our class */
user_stream_create_object(uwrap, context, &object);
if (Z_TYPE(object) == IS_UNDEF) {
return ret;
}
/* call its stat_url method - set up params first */
ZVAL_STRING(&args[0], url);
ZVAL_LONG(&args[1], flags);
ZVAL_STRING(&zfuncname, USERSTREAM_STATURL);
call_result = call_user_function(NULL,
&object,
&zfuncname,
&zretval,
2, args);
if (call_result == SUCCESS && Z_TYPE(zretval) == IS_ARRAY) {
/* We got the info we needed */
if (SUCCESS == statbuf_from_array(&zretval, ssb))
ret = 0;
} else {
if (call_result == FAILURE) {
php_error_docref(NULL, E_WARNING, "%s::" USERSTREAM_STATURL " is not implemented!",
ZSTR_VAL(uwrap->ce->name));
}
}
/* clean up */
zval_ptr_dtor(&object);
zval_ptr_dtor(&zretval);
zval_ptr_dtor(&zfuncname);
zval_ptr_dtor(&args[1]);
zval_ptr_dtor(&args[0]);
return ret;
}
static ssize_t php_userstreamop_readdir(php_stream *stream, char *buf, size_t count)
{
zval func_name;
zval retval;
int call_result;
size_t didread = 0;
php_userstream_data_t *us = (php_userstream_data_t *)stream->abstract;
php_stream_dirent *ent = (php_stream_dirent*)buf;
/* avoid problems if someone mis-uses the stream */
if (count != sizeof(php_stream_dirent))
return -1;
ZVAL_STRINGL(&func_name, USERSTREAM_DIR_READ, sizeof(USERSTREAM_DIR_READ)-1);
call_result = call_user_function(NULL,
Z_ISUNDEF(us->object)? NULL : &us->object,
&func_name,
&retval,
0, NULL);
if (call_result == SUCCESS && Z_TYPE(retval) != IS_FALSE && Z_TYPE(retval) != IS_TRUE) {
convert_to_string(&retval);
PHP_STRLCPY(ent->d_name, Z_STRVAL(retval), sizeof(ent->d_name), Z_STRLEN(retval));
didread = sizeof(php_stream_dirent);
} else if (call_result == FAILURE) {
php_error_docref(NULL, E_WARNING, "%s::" USERSTREAM_DIR_READ " is not implemented!",
ZSTR_VAL(us->wrapper->ce->name));
}
zval_ptr_dtor(&retval);
zval_ptr_dtor(&func_name);
return didread;
}
static int php_userstreamop_closedir(php_stream *stream, int close_handle)
{
zval func_name;
zval retval;
php_userstream_data_t *us = (php_userstream_data_t *)stream->abstract;
assert(us != NULL);
ZVAL_STRINGL(&func_name, USERSTREAM_DIR_CLOSE, sizeof(USERSTREAM_DIR_CLOSE)-1);
call_user_function(NULL,
Z_ISUNDEF(us->object)? NULL : &us->object,
&func_name,
&retval,
0, NULL);
zval_ptr_dtor(&retval);
zval_ptr_dtor(&func_name);
zval_ptr_dtor(&us->object);
ZVAL_UNDEF(&us->object);
efree(us);
return 0;
}
static int php_userstreamop_rewinddir(php_stream *stream, zend_off_t offset, int whence, zend_off_t *newoffs)
{
zval func_name;
zval retval;
php_userstream_data_t *us = (php_userstream_data_t *)stream->abstract;
ZVAL_STRINGL(&func_name, USERSTREAM_DIR_REWIND, sizeof(USERSTREAM_DIR_REWIND)-1);
call_user_function(NULL,
Z_ISUNDEF(us->object)? NULL : &us->object,
&func_name,
&retval,
0, NULL);
zval_ptr_dtor(&retval);
zval_ptr_dtor(&func_name);
return 0;
}
static int php_userstreamop_cast(php_stream *stream, int castas, void **retptr)
{
php_userstream_data_t *us = (php_userstream_data_t *)stream->abstract;
zval func_name;
zval retval;
zval args[1];
php_stream * intstream = NULL;
int call_result;
int ret = FAILURE;
ZVAL_STRINGL(&func_name, USERSTREAM_CAST, sizeof(USERSTREAM_CAST)-1);
switch(castas) {
case PHP_STREAM_AS_FD_FOR_SELECT:
ZVAL_LONG(&args[0], PHP_STREAM_AS_FD_FOR_SELECT);
break;
default:
ZVAL_LONG(&args[0], PHP_STREAM_AS_STDIO);
break;
}
call_result = call_user_function(NULL,
Z_ISUNDEF(us->object)? NULL : &us->object,
&func_name,
&retval,
1, args);
do {
if (call_result == FAILURE) {
php_error_docref(NULL, E_WARNING, "%s::" USERSTREAM_CAST " is not implemented!",
ZSTR_VAL(us->wrapper->ce->name));
break;
}
if (!zend_is_true(&retval)) {
break;
}
php_stream_from_zval_no_verify(intstream, &retval);
if (!intstream) {
php_error_docref(NULL, E_WARNING, "%s::" USERSTREAM_CAST " must return a stream resource",
ZSTR_VAL(us->wrapper->ce->name));
break;
}
if (intstream == stream) {
php_error_docref(NULL, E_WARNING, "%s::" USERSTREAM_CAST " must not return itself",
ZSTR_VAL(us->wrapper->ce->name));
intstream = NULL;
break;
}
ret = php_stream_cast(intstream, castas, retptr, 1);
} while (0);
zval_ptr_dtor(&retval);
zval_ptr_dtor(&func_name);
zval_ptr_dtor(&args[0]);
return ret;
}
const php_stream_ops php_stream_userspace_ops = {
php_userstreamop_write, php_userstreamop_read,
php_userstreamop_close, php_userstreamop_flush,
"user-space",
php_userstreamop_seek,
php_userstreamop_cast,
php_userstreamop_stat,
php_userstreamop_set_option,
};
const php_stream_ops php_stream_userspace_dir_ops = {
NULL, /* write */
php_userstreamop_readdir,
php_userstreamop_closedir,
NULL, /* flush */
"user-space-dir",
php_userstreamop_rewinddir,
NULL, /* cast */
NULL, /* stat */
NULL /* set_option */
};
| {
"language": "C"
} |
/* Audio Library for Teensy 3.X
* Copyright (c) 2015, Hedde Bosman
*
* Development of this audio library was funded by PJRC.COM, LLC by sales of
* Teensy and Audio Adaptor boards. Please support PJRC's efforts to develop
* open source software by purchasing Teensy or other PJRC products.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice, development funding notice, and this permission
* notice shall be included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <Arduino.h>
#include "effect_midside.h"
void AudioEffectMidSide::update(void)
{
audio_block_t *blocka, *blockb;
uint32_t *pa, *pb, *end;
uint32_t a12, a34; //, a56, a78;
uint32_t b12, b34; //, b56, b78;
blocka = receiveWritable(0); // left (encoding) or mid (decoding)
blockb = receiveWritable(1); // right (encoding) or side (decoding)
if (!blocka || !blockb) {
if (blocka) release(blocka); // maybe an extra if statement here but if one
if (blockb) release(blockb); // of the blocks is NULL then it's trouble anyway
return;
}
#if defined(__ARM_ARCH_7EM__)
pa = (uint32_t *)(blocka->data);
pb = (uint32_t *)(blockb->data);
end = pa + AUDIO_BLOCK_SAMPLES/2;
if (encoding) {
while (pa < end) {
// mid[i] = (blocka[i] + blockb[i])/2; // div2 to prevent overflows
// side[i] = (blocka[i] - blockb[i])/2; // div2 to prevent overflows
// L[i] = (mid[i] + side[i])/2;
// R[i] = (mid[i] - side[i])/2;
a12 = signed_halving_add_16_and_16(*pa, *pb); // mid12
a34 = signed_halving_add_16_and_16(*(pa+1), *(pb+1)); //mid34
b12 = signed_halving_subtract_16_and_16(*pa, *pb); // side12
b34 = signed_halving_subtract_16_and_16(*(pa+1), *(pb+1)); // side34
*pa++ = a12;
*pa++ = a34;
*pb++ = b12;
*pb++ = b34;
}
} else {
while (pa < end) {
// L[i] = mid[i] + side[i]);
// R[i] = mid[i] - side[i]);
// Because the /2 has already been applied in the encoding,
// we shouldn't have to add it here.
// However... because of the possibility that the user has
// modified mid or side such that
// it could overflow, we have to:
// a) preventively do a (x/2+y/2)*2 again, causing bit reduction
// or b) perform saturation or something.
// While (b) could produce artifacts if saturated, I'll go for
// that option to preserve precision
// QADD16 and QSUB16 perform saturating add/sub
a12 = signed_add_16_and_16(*pa, *pb); // left12
a34 = signed_add_16_and_16(*(pa+1), *(pb+1)); // left34
b12 = signed_subtract_16_and_16(*pa, *pb); // right12
b34 = signed_subtract_16_and_16(*(pa+1), *(pb+1)); // right34
*pa++ = a12;
*pa++ = a34;
*pb++ = b12;
*pb++ = b34;
}
}
transmit(blocka, 0); // mid (encoding) or left (decoding)
transmit(blockb, 1); // side (encoding) or right (decoding)
#endif
release(blocka);
release(blockb);
}
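// For reference, a plain scalar sketch of the encode step above (an
// illustration only, not part of the library; the function name is made
// up). It mirrors the halved sum/difference without the packed halving
// add/subtract helpers, and its round-toward-zero division differs
// slightly from the flooring behaviour of those intrinsics.
static inline void midside_encode_scalar(const int16_t *left, const int16_t *right,
					 int16_t *mid, int16_t *side, int len)
{
	for (int i = 0; i < len; i++) {
		mid[i]  = (int16_t)(((int32_t)left[i] + (int32_t)right[i]) / 2);
		side[i] = (int16_t)(((int32_t)left[i] - (int32_t)right[i]) / 2);
	}
}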
| {
"language": "C"
} |
/*------------------------------------------------------------------------------
Copyright (c) 2000 Tyrell Corporation. All rights reserved.
Tyrell Config
File : DarkIceConfig.h
Version : $Revision$
Author : $Author$
Location : $Source$
Copyright notice:
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
------------------------------------------------------------------------------*/
#ifndef CONFIG_H
#define CONFIG_H
#ifndef __cplusplus
#error This is a C++ include file
#endif
/* ============================================================ include files */
#include <map>
#include <string>
#include <iostream>
#include "Referable.h"
#include "ConfigSection.h"
/* ================================================================ constants */
/* =================================================================== macros */
/* =============================================================== data types */
/**
* A configuration file representation. The file has the following syntax:
*
* <pre>
* [section1]
* # this is a whole line comment
* key = value
* an ugly key name = long value # this end is a comment too
*
* [section2]
* # this is a whole line comment in section 2
* key = value
* an ugly key name = long value # this end is a comment too
* </pre>
*
* Empty lines are also ignored and all white space is removed
* from the front and end of keys / values.
*
* Known problem: you can't use '#' in any part of a key / value pair
*
* @author $Author$
* @version $Revision$
*/
class Config : public virtual Referable
{
private:
/**
* Type declaration of the hash table type.
*/
typedef std::map<std::string, ConfigSection> TableType;
/**
* Hash table holding the configuration sections.
*
* @see ConfigSection
*/
TableType table;
/**
* Name of the configuration section currently being read.
*/
std::string currentSection;
protected:
public:
/**
* Default constructor.
*
* @exception Exception
*/
inline
Config ( void ) throw ( Exception )
{
}
/**
* Constructor based on an input stream.
*
* @param is configuration will be read from this input stream
* until end of stream is reached.
* @exception Exception
*/
inline
Config ( std::istream & is ) throw ( Exception )
{
read( is );
}
/**
* Destructor.
*
* @exception Exception
*/
inline virtual
~Config ( void ) throw ( Exception )
{
}
/* TODO
inline
Config ( const Config & di ) throw ( Exception )
{
}
inline Config &
operator= ( const Config * di ) throw ( Exception )
{
}
*/
/**
* Delete the configuration information stored in the object.
* Resets the object to a clean state.
*
* @exception Exception
*/
inline virtual void
reset ( void ) throw ( Exception )
{
table.clear();
currentSection = "";
}
/**
* Read a line of configuration information.
*
* @param line the line to read.
* @return true if the line was correct, false otherwise.
* @exception Exception
*/
virtual bool
addLine ( const char * line ) throw ( Exception );
/**
* Read configuration information from an input stream until
* end of stream is reached.
*
* @param is the input stream to read from
* @exception Exception
*/
virtual void
read ( std::istream & is ) throw ( Exception );
/**
* Get a ConfigSection by name.
*
* @param key the name of the ConfigSection
* @return the ConfigSection requested, or NULL if it doesn't exists.
* @exception Exception
*/
virtual const ConfigSection *
get ( const char * key ) const throw ( Exception );
};
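/* A usage sketch (illustration only, not part of this header; the file
   name is made up and <fstream> is assumed to be included):

       std::ifstream file( "darkice.cfg");
       Config config( file);
       const ConfigSection * section = config.get( "section1");
       if ( section ) {
           // query individual keys through the ConfigSection interface
       }
*/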
/* ================================================= external data structures */
/* ====================================================== function prototypes */
#endif /* CONFIG_H */
/*------------------------------------------------------------------------------
$Source$
$Log$
Revision 1.2 2005/04/14 11:53:17 darkeye
fixed API documentation issues
Revision 1.1 2005/04/04 08:36:17 darkeye
commited changes to enable Jack support
thanks to Nicholas J. Humfrey, njh@ecs.soton.ac.uk
Revision 1.5 2002/05/28 12:35:41 darkeye
code cleanup: compiles under gcc-c++ 3.1, using -pedantic option
Revision 1.4 2001/09/05 20:11:15 darkeye
removed dependency on locally stored SGI STL header files
now compiler-supplied C++ library STL header files are used
compiles under GNU C++ 3
hash_map (an SGI extension to STL) replaced with map
std:: namespace prefix added to all STL class references
Revision 1.3 2000/11/13 18:46:50 darkeye
added kdoc-style documentation comments
Revision 1.2 2000/11/09 22:07:19 darkeye
added constructor with istream
Revision 1.1 2000/11/08 17:29:50 darkeye
added configuration file reader
Revision 1.1.1.1 2000/11/05 10:05:50 darkeye
initial version
------------------------------------------------------------------------------*/
| {
"language": "C"
} |
/*
* EFI Variables - efivars.c
*
* Copyright (C) 2001,2003,2004 Dell <Matt_Domsch@dell.com>
* Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com>
*
* This code takes all variables accessible from EFI runtime and
* exports them via sysfs
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Changelog:
*
* 17 May 2004 - Matt Domsch <Matt_Domsch@dell.com>
* remove check for efi_enabled in exit
* add MODULE_VERSION
*
* 26 Apr 2004 - Matt Domsch <Matt_Domsch@dell.com>
* minor bug fixes
*
* 21 Apr 2004 - Matt Tolentino <matthew.e.tolentino@intel.com>
* converted driver to export variable information via sysfs
* and moved to drivers/firmware directory
* bumped revision number to v0.07 to reflect conversion & move
*
* 10 Dec 2002 - Matt Domsch <Matt_Domsch@dell.com>
* fix locking per Peter Chubb's findings
*
* 25 Mar 2002 - Matt Domsch <Matt_Domsch@dell.com>
* move uuid_unparse() to include/asm-ia64/efi.h:efi_guid_unparse()
*
* 12 Feb 2002 - Matt Domsch <Matt_Domsch@dell.com>
* use list_for_each_safe when deleting vars.
* remove ifdef CONFIG_SMP around include <linux/smp.h>
* v0.04 release to linux-ia64@linuxia64.org
*
* 20 April 2001 - Matt Domsch <Matt_Domsch@dell.com>
* Moved vars from /proc/efi to /proc/efi/vars, and made
* efi.c own the /proc/efi directory.
* v0.03 release to linux-ia64@linuxia64.org
*
* 26 March 2001 - Matt Domsch <Matt_Domsch@dell.com>
* At the request of Stephane, moved ownership of /proc/efi
* to efi.c, and now efivars lives under /proc/efi/vars.
*
* 12 March 2001 - Matt Domsch <Matt_Domsch@dell.com>
* Feedback received from Stephane Eranian incorporated.
* efivar_write() checks copy_from_user() return value.
* efivar_read/write() returns proper errno.
* v0.02 release to linux-ia64@linuxia64.org
*
* 26 February 2001 - Matt Domsch <Matt_Domsch@dell.com>
* v0.01 release to linux-ia64@linuxia64.org
*/
#include <linux/capability.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/smp.h>
#include <linux/efi.h>
#include <linux/sysfs.h>
#include <linux/kobject.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/pstore.h>
#include <asm/uaccess.h>
#define EFIVARS_VERSION "0.08"
#define EFIVARS_DATE "2004-May-17"
MODULE_AUTHOR("Matt Domsch <Matt_Domsch@Dell.com>");
MODULE_DESCRIPTION("sysfs interface to EFI Variables");
MODULE_LICENSE("GPL");
MODULE_VERSION(EFIVARS_VERSION);
#define DUMP_NAME_LEN 52
/*
* The maximum size of VariableName + Data = 1024
* Therefore, it's reasonable to save that much
* space in each part of the structure,
* and we use a page for reading/writing.
*/
struct efi_variable {
efi_char16_t VariableName[1024/sizeof(efi_char16_t)];
efi_guid_t VendorGuid;
unsigned long DataSize;
__u8 Data[1024];
efi_status_t Status;
__u32 Attributes;
} __attribute__((packed));
struct efivar_entry {
struct efivars *efivars;
struct efi_variable var;
struct list_head list;
struct kobject kobj;
};
struct efivar_attribute {
struct attribute attr;
ssize_t (*show) (struct efivar_entry *entry, char *buf);
ssize_t (*store)(struct efivar_entry *entry, const char *buf, size_t count);
};
#define PSTORE_EFI_ATTRIBUTES \
(EFI_VARIABLE_NON_VOLATILE | \
EFI_VARIABLE_BOOTSERVICE_ACCESS | \
EFI_VARIABLE_RUNTIME_ACCESS)
#define EFIVAR_ATTR(_name, _mode, _show, _store) \
struct efivar_attribute efivar_attr_##_name = { \
.attr = {.name = __stringify(_name), .mode = _mode}, \
.show = _show, \
.store = _store, \
};
#define to_efivar_attr(_attr) container_of(_attr, struct efivar_attribute, attr)
#define to_efivar_entry(obj) container_of(obj, struct efivar_entry, kobj)
/*
* Prototype for sysfs creation function
*/
static int
efivar_create_sysfs_entry(struct efivars *efivars,
unsigned long variable_name_size,
efi_char16_t *variable_name,
efi_guid_t *vendor_guid);
/* Return the number of unicode characters in data */
static unsigned long
utf16_strnlen(efi_char16_t *s, size_t maxlength)
{
unsigned long length = 0;
while (*s++ != 0 && length < maxlength)
length++;
return length;
}
static inline unsigned long
utf16_strlen(efi_char16_t *s)
{
return utf16_strnlen(s, ~0UL);
}
/*
* Return the length of this string, in bytes
* Note: this is NOT the same as the number of unicode characters
*/
static inline unsigned long
utf16_strsize(efi_char16_t *data, unsigned long maxlength)
{
return utf16_strnlen(data, maxlength/sizeof(efi_char16_t)) * sizeof(efi_char16_t);
}
static inline int
utf16_strncmp(const efi_char16_t *a, const efi_char16_t *b, size_t len)
{
while (1) {
if (len == 0)
return 0;
if (*a < *b)
return -1;
if (*a > *b)
return 1;
if (*a == 0) /* implies *b == 0 */
return 0;
a++;
b++;
len--;
}
}
static bool
validate_device_path(struct efi_variable *var, int match, u8 *buffer,
unsigned long len)
{
struct efi_generic_dev_path *node;
int offset = 0;
node = (struct efi_generic_dev_path *)buffer;
if (len < sizeof(*node))
return false;
while (offset <= len - sizeof(*node) &&
node->length >= sizeof(*node) &&
node->length <= len - offset) {
offset += node->length;
if ((node->type == EFI_DEV_END_PATH ||
node->type == EFI_DEV_END_PATH2) &&
node->sub_type == EFI_DEV_END_ENTIRE)
return true;
node = (struct efi_generic_dev_path *)(buffer + offset);
}
/*
* If we're here then either node->length pointed past the end
* of the buffer or we reached the end of the buffer without
* finding a device path end node.
*/
return false;
}
static bool
validate_boot_order(struct efi_variable *var, int match, u8 *buffer,
unsigned long len)
{
/* An array of 16-bit integers */
if ((len % 2) != 0)
return false;
return true;
}
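/*
 * For reference (per the UEFI spec), a Boot#### / Driver#### load option
 * variable is laid out roughly as:
 *
 *   UINT32 Attributes;
 *   UINT16 FilePathListLength;
 *   CHAR16 Description[];            (NUL-terminated)
 *   EFI_DEVICE_PATH FilePathList[];
 *   UINT8 OptionalData[];
 *
 * which is why validate_load_option() below reads the filepath length
 * from bytes 4-5 and starts scanning the description at byte 6.
 */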
static bool
validate_load_option(struct efi_variable *var, int match, u8 *buffer,
unsigned long len)
{
u16 filepathlength;
int i, desclength = 0, namelen;
namelen = utf16_strnlen(var->VariableName, sizeof(var->VariableName));
/* Either "Boot" or "Driver" followed by four digits of hex */
for (i = match; i < match+4; i++) {
if (var->VariableName[i] > 127 ||
hex_to_bin(var->VariableName[i] & 0xff) < 0)
return true;
}
/* Reject it if there are 4 digits of hex and then further content */
if (namelen > match + 4)
return false;
/* A valid entry must be at least 8 bytes */
if (len < 8)
return false;
filepathlength = buffer[4] | buffer[5] << 8;
/*
* There's no stored length for the description, so it has to be
* found by hand
*/
desclength = utf16_strsize((efi_char16_t *)(buffer + 6), len - 6) + 2;
/* Each boot entry must have a descriptor */
if (!desclength)
return false;
/*
* If the sum of the length of the description, the claimed filepath
* length and the original header are greater than the length of the
* variable, it's malformed
*/
if ((desclength + filepathlength + 6) > len)
return false;
/*
* And, finally, check the filepath
*/
return validate_device_path(var, match, buffer + desclength + 6,
filepathlength);
}
static bool
validate_uint16(struct efi_variable *var, int match, u8 *buffer,
unsigned long len)
{
/* A single 16-bit integer */
if (len != 2)
return false;
return true;
}
static bool
validate_ascii_string(struct efi_variable *var, int match, u8 *buffer,
unsigned long len)
{
int i;
for (i = 0; i < len; i++) {
if (buffer[i] > 127)
return false;
if (buffer[i] == 0)
return true;
}
return false;
}
struct variable_validate {
char *name;
bool (*validate)(struct efi_variable *var, int match, u8 *data,
unsigned long len);
};
static const struct variable_validate variable_validate[] = {
{ "BootNext", validate_uint16 },
{ "BootOrder", validate_boot_order },
{ "DriverOrder", validate_boot_order },
{ "Boot*", validate_load_option },
{ "Driver*", validate_load_option },
{ "ConIn", validate_device_path },
{ "ConInDev", validate_device_path },
{ "ConOut", validate_device_path },
{ "ConOutDev", validate_device_path },
{ "ErrOut", validate_device_path },
{ "ErrOutDev", validate_device_path },
{ "Timeout", validate_uint16 },
{ "Lang", validate_ascii_string },
{ "PlatformLang", validate_ascii_string },
{ "", NULL },
};
static bool
validate_var(struct efi_variable *var, u8 *data, unsigned long len)
{
int i;
u16 *unicode_name = var->VariableName;
for (i = 0; variable_validate[i].validate != NULL; i++) {
const char *name = variable_validate[i].name;
int match;
for (match = 0; ; match++) {
char c = name[match];
u16 u = unicode_name[match];
/* All special variables are plain ascii */
if (u > 127)
return true;
/* Wildcard in the matching name means we've matched */
if (c == '*')
return variable_validate[i].validate(var,
match, data, len);
/* Case sensitive match */
if (c != u)
break;
/* Reached the end of the string while matching */
if (!c)
return variable_validate[i].validate(var,
match, data, len);
}
}
return true;
}
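/*
 * Example (illustrative): a variable named "Boot0001" walks the table past
 * "BootNext" and "BootOrder" until it hits the "Boot*" entry; the '*'
 * matches at index 4, so validate_load_option() is called with match == 4
 * and then expects characters 4..7 to be hex digits.
 */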
static efi_status_t
get_var_data_locked(struct efivars *efivars, struct efi_variable *var)
{
efi_status_t status;
var->DataSize = 1024;
status = efivars->ops->get_variable(var->VariableName,
&var->VendorGuid,
&var->Attributes,
&var->DataSize,
var->Data);
return status;
}
static efi_status_t
get_var_data(struct efivars *efivars, struct efi_variable *var)
{
efi_status_t status;
spin_lock(&efivars->lock);
status = get_var_data_locked(efivars, var);
spin_unlock(&efivars->lock);
if (status != EFI_SUCCESS) {
printk(KERN_WARNING "efivars: get_variable() failed 0x%lx!\n",
status);
}
return status;
}
static ssize_t
efivar_guid_read(struct efivar_entry *entry, char *buf)
{
struct efi_variable *var = &entry->var;
char *str = buf;
if (!entry || !buf)
return 0;
efi_guid_unparse(&var->VendorGuid, str);
str += strlen(str);
str += sprintf(str, "\n");
return str - buf;
}
static ssize_t
efivar_attr_read(struct efivar_entry *entry, char *buf)
{
struct efi_variable *var = &entry->var;
char *str = buf;
efi_status_t status;
if (!entry || !buf)
return -EINVAL;
status = get_var_data(entry->efivars, var);
if (status != EFI_SUCCESS)
return -EIO;
if (var->Attributes & 0x1)
str += sprintf(str, "EFI_VARIABLE_NON_VOLATILE\n");
if (var->Attributes & 0x2)
str += sprintf(str, "EFI_VARIABLE_BOOTSERVICE_ACCESS\n");
if (var->Attributes & 0x4)
str += sprintf(str, "EFI_VARIABLE_RUNTIME_ACCESS\n");
return str - buf;
}
static ssize_t
efivar_size_read(struct efivar_entry *entry, char *buf)
{
struct efi_variable *var = &entry->var;
char *str = buf;
efi_status_t status;
if (!entry || !buf)
return -EINVAL;
status = get_var_data(entry->efivars, var);
if (status != EFI_SUCCESS)
return -EIO;
str += sprintf(str, "0x%lx\n", var->DataSize);
return str - buf;
}
static ssize_t
efivar_data_read(struct efivar_entry *entry, char *buf)
{
struct efi_variable *var = &entry->var;
efi_status_t status;
if (!entry || !buf)
return -EINVAL;
status = get_var_data(entry->efivars, var);
if (status != EFI_SUCCESS)
return -EIO;
memcpy(buf, var->Data, var->DataSize);
return var->DataSize;
}
/*
* We allow each variable to be edited via rewriting the
* entire efi variable structure.
*/
static ssize_t
efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count)
{
struct efi_variable *new_var, *var = &entry->var;
struct efivars *efivars = entry->efivars;
efi_status_t status = EFI_NOT_FOUND;
if (count != sizeof(struct efi_variable))
return -EINVAL;
new_var = (struct efi_variable *)buf;
/*
* If only updating the variable data, then the name
* and guid should remain the same
*/
if (memcmp(new_var->VariableName, var->VariableName, sizeof(var->VariableName)) ||
efi_guidcmp(new_var->VendorGuid, var->VendorGuid)) {
printk(KERN_ERR "efivars: Cannot edit the wrong variable!\n");
return -EINVAL;
}
if ((new_var->DataSize <= 0) || (new_var->Attributes == 0)){
printk(KERN_ERR "efivars: DataSize & Attributes must be valid!\n");
return -EINVAL;
}
if ((new_var->Attributes & ~EFI_VARIABLE_MASK) != 0 ||
validate_var(new_var, new_var->Data, new_var->DataSize) == false) {
printk(KERN_ERR "efivars: Malformed variable content\n");
return -EINVAL;
}
spin_lock(&efivars->lock);
status = efivars->ops->set_variable(new_var->VariableName,
&new_var->VendorGuid,
new_var->Attributes,
new_var->DataSize,
new_var->Data);
spin_unlock(&efivars->lock);
if (status != EFI_SUCCESS) {
printk(KERN_WARNING "efivars: set_variable() failed: status=%lx\n",
status);
return -EIO;
}
memcpy(&entry->var, new_var, count);
return count;
}
static ssize_t
efivar_show_raw(struct efivar_entry *entry, char *buf)
{
struct efi_variable *var = &entry->var;
efi_status_t status;
if (!entry || !buf)
return 0;
status = get_var_data(entry->efivars, var);
if (status != EFI_SUCCESS)
return -EIO;
memcpy(buf, var, sizeof(*var));
return sizeof(*var);
}
/*
* Generic read/write functions that call the specific functions of
* the attributes...
*/
static ssize_t efivar_attr_show(struct kobject *kobj, struct attribute *attr,
char *buf)
{
struct efivar_entry *var = to_efivar_entry(kobj);
struct efivar_attribute *efivar_attr = to_efivar_attr(attr);
ssize_t ret = -EIO;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (efivar_attr->show) {
ret = efivar_attr->show(var, buf);
}
return ret;
}
static ssize_t efivar_attr_store(struct kobject *kobj, struct attribute *attr,
const char *buf, size_t count)
{
struct efivar_entry *var = to_efivar_entry(kobj);
struct efivar_attribute *efivar_attr = to_efivar_attr(attr);
ssize_t ret = -EIO;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (efivar_attr->store)
ret = efivar_attr->store(var, buf, count);
return ret;
}
static const struct sysfs_ops efivar_attr_ops = {
.show = efivar_attr_show,
.store = efivar_attr_store,
};
static void efivar_release(struct kobject *kobj)
{
struct efivar_entry *var = container_of(kobj, struct efivar_entry, kobj);
kfree(var);
}
static EFIVAR_ATTR(guid, 0400, efivar_guid_read, NULL);
static EFIVAR_ATTR(attributes, 0400, efivar_attr_read, NULL);
static EFIVAR_ATTR(size, 0400, efivar_size_read, NULL);
static EFIVAR_ATTR(data, 0400, efivar_data_read, NULL);
static EFIVAR_ATTR(raw_var, 0600, efivar_show_raw, efivar_store_raw);
static struct attribute *def_attrs[] = {
&efivar_attr_guid.attr,
&efivar_attr_size.attr,
&efivar_attr_attributes.attr,
&efivar_attr_data.attr,
&efivar_attr_raw_var.attr,
NULL,
};
static struct kobj_type efivar_ktype = {
.release = efivar_release,
.sysfs_ops = &efivar_attr_ops,
.default_attrs = def_attrs,
};
static struct pstore_info efi_pstore_info;
static inline void
efivar_unregister(struct efivar_entry *var)
{
kobject_put(&var->kobj);
}
#ifdef CONFIG_PSTORE
static int efi_pstore_open(struct pstore_info *psi)
{
struct efivars *efivars = psi->data;
spin_lock(&efivars->lock);
efivars->walk_entry = list_first_entry(&efivars->list,
struct efivar_entry, list);
return 0;
}
static int efi_pstore_close(struct pstore_info *psi)
{
struct efivars *efivars = psi->data;
spin_unlock(&efivars->lock);
return 0;
}
static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
struct timespec *timespec,
char **buf, struct pstore_info *psi)
{
efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
struct efivars *efivars = psi->data;
char name[DUMP_NAME_LEN];
int i;
unsigned int part, size;
unsigned long time;
while (&efivars->walk_entry->list != &efivars->list) {
if (!efi_guidcmp(efivars->walk_entry->var.VendorGuid,
vendor)) {
for (i = 0; i < DUMP_NAME_LEN; i++) {
name[i] = efivars->walk_entry->var.VariableName[i];
}
if (sscanf(name, "dump-type%u-%u-%lu", type, &part, &time) == 3) {
*id = part;
timespec->tv_sec = time;
timespec->tv_nsec = 0;
get_var_data_locked(efivars, &efivars->walk_entry->var);
size = efivars->walk_entry->var.DataSize;
*buf = kmalloc(size, GFP_KERNEL);
if (*buf == NULL)
return -ENOMEM;
memcpy(*buf, efivars->walk_entry->var.Data,
size);
efivars->walk_entry = list_entry(efivars->walk_entry->list.next,
struct efivar_entry, list);
return size;
}
}
efivars->walk_entry = list_entry(efivars->walk_entry->list.next,
struct efivar_entry, list);
}
return 0;
}
static int efi_pstore_write(enum pstore_type_id type,
enum kmsg_dump_reason reason, u64 *id,
unsigned int part, size_t size, struct pstore_info *psi)
{
char name[DUMP_NAME_LEN];
char stub_name[DUMP_NAME_LEN];
efi_char16_t efi_name[DUMP_NAME_LEN];
efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
struct efivars *efivars = psi->data;
struct efivar_entry *entry, *found = NULL;
int i, ret = 0;
sprintf(stub_name, "dump-type%u-%u-", type, part);
sprintf(name, "%s%lu", stub_name, get_seconds());
spin_lock(&efivars->lock);
for (i = 0; i < DUMP_NAME_LEN; i++)
efi_name[i] = stub_name[i];
/*
* Clean up any entries with the same name
*/
list_for_each_entry(entry, &efivars->list, list) {
get_var_data_locked(efivars, &entry->var);
if (efi_guidcmp(entry->var.VendorGuid, vendor))
continue;
if (utf16_strncmp(entry->var.VariableName, efi_name,
utf16_strlen(efi_name)))
continue;
/* Needs to be a prefix */
if (entry->var.VariableName[utf16_strlen(efi_name)] == 0)
continue;
/* found */
found = entry;
efivars->ops->set_variable(entry->var.VariableName,
&entry->var.VendorGuid,
PSTORE_EFI_ATTRIBUTES,
0, NULL);
}
if (found)
list_del(&found->list);
for (i = 0; i < DUMP_NAME_LEN; i++)
efi_name[i] = name[i];
efivars->ops->set_variable(efi_name, &vendor, PSTORE_EFI_ATTRIBUTES,
size, psi->buf);
spin_unlock(&efivars->lock);
if (found)
efivar_unregister(found);
if (size)
ret = efivar_create_sysfs_entry(efivars,
utf16_strsize(efi_name,
DUMP_NAME_LEN * 2),
efi_name, &vendor);
*id = part;
return ret;
}
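/*
 * Example (illustrative): a dump written for type 2, part 1 at, say,
 * get_seconds() == 1318820016 is stored under the variable name
 * "dump-type2-1-1318820016", which efi_pstore_read() parses back with the
 * matching sscanf() format above.
 */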
static int efi_pstore_erase(enum pstore_type_id type, u64 id,
struct pstore_info *psi)
{
efi_pstore_write(type, 0, &id, (unsigned int)id, 0, psi);
return 0;
}
#else
static int efi_pstore_open(struct pstore_info *psi)
{
return 0;
}
static int efi_pstore_close(struct pstore_info *psi)
{
return 0;
}
static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
struct timespec *timespec,
char **buf, struct pstore_info *psi)
{
return -1;
}
static int efi_pstore_write(enum pstore_type_id type,
enum kmsg_dump_reason reason, u64 *id,
unsigned int part, size_t size, struct pstore_info *psi)
{
return 0;
}
static int efi_pstore_erase(enum pstore_type_id type, u64 id,
struct pstore_info *psi)
{
return 0;
}
#endif
static struct pstore_info efi_pstore_info = {
.owner = THIS_MODULE,
.name = "efi",
.open = efi_pstore_open,
.close = efi_pstore_close,
.read = efi_pstore_read,
.write = efi_pstore_write,
.erase = efi_pstore_erase,
};
static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t count)
{
struct efi_variable *new_var = (struct efi_variable *)buf;
struct efivars *efivars = bin_attr->private;
struct efivar_entry *search_efivar, *n;
unsigned long strsize1, strsize2;
efi_status_t status = EFI_NOT_FOUND;
int found = 0;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if ((new_var->Attributes & ~EFI_VARIABLE_MASK) != 0 ||
validate_var(new_var, new_var->Data, new_var->DataSize) == false) {
printk(KERN_ERR "efivars: Malformed variable content\n");
return -EINVAL;
}
spin_lock(&efivars->lock);
/*
* Does this variable already exist?
*/
list_for_each_entry_safe(search_efivar, n, &efivars->list, list) {
strsize1 = utf16_strsize(search_efivar->var.VariableName, 1024);
strsize2 = utf16_strsize(new_var->VariableName, 1024);
if (strsize1 == strsize2 &&
!memcmp(&(search_efivar->var.VariableName),
new_var->VariableName, strsize1) &&
!efi_guidcmp(search_efivar->var.VendorGuid,
new_var->VendorGuid)) {
found = 1;
break;
}
}
if (found) {
spin_unlock(&efivars->lock);
return -EINVAL;
}
/* now *really* create the variable via EFI */
status = efivars->ops->set_variable(new_var->VariableName,
&new_var->VendorGuid,
new_var->Attributes,
new_var->DataSize,
new_var->Data);
if (status != EFI_SUCCESS) {
printk(KERN_WARNING "efivars: set_variable() failed: status=%lx\n",
status);
spin_unlock(&efivars->lock);
return -EIO;
}
spin_unlock(&efivars->lock);
/* Create the entry in sysfs. Locking is not required here */
status = efivar_create_sysfs_entry(efivars,
utf16_strsize(new_var->VariableName,
1024),
new_var->VariableName,
&new_var->VendorGuid);
if (status) {
printk(KERN_WARNING "efivars: variable created, but sysfs entry wasn't.\n");
}
return count;
}
static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t count)
{
struct efi_variable *del_var = (struct efi_variable *)buf;
struct efivars *efivars = bin_attr->private;
struct efivar_entry *search_efivar, *n;
unsigned long strsize1, strsize2;
efi_status_t status = EFI_NOT_FOUND;
int found = 0;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
spin_lock(&efivars->lock);
/*
* Does this variable already exist?
*/
list_for_each_entry_safe(search_efivar, n, &efivars->list, list) {
strsize1 = utf16_strsize(search_efivar->var.VariableName, 1024);
strsize2 = utf16_strsize(del_var->VariableName, 1024);
if (strsize1 == strsize2 &&
!memcmp(&(search_efivar->var.VariableName),
del_var->VariableName, strsize1) &&
!efi_guidcmp(search_efivar->var.VendorGuid,
del_var->VendorGuid)) {
found = 1;
break;
}
}
if (!found) {
spin_unlock(&efivars->lock);
return -EINVAL;
}
/* force the Attributes/DataSize to 0 to ensure deletion */
del_var->Attributes = 0;
del_var->DataSize = 0;
status = efivars->ops->set_variable(del_var->VariableName,
&del_var->VendorGuid,
del_var->Attributes,
del_var->DataSize,
del_var->Data);
if (status != EFI_SUCCESS) {
printk(KERN_WARNING "efivars: set_variable() failed: status=%lx\n",
status);
spin_unlock(&efivars->lock);
return -EIO;
}
list_del(&search_efivar->list);
/* We need to release this lock before unregistering. */
spin_unlock(&efivars->lock);
efivar_unregister(search_efivar);
/* It's dead Jim.... */
return count;
}
/*
* Let's not leave out systab information that snuck into
* the efivars driver
*/
static ssize_t systab_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
char *str = buf;
if (!kobj || !buf)
return -EINVAL;
if (efi.mps != EFI_INVALID_TABLE_ADDR)
str += sprintf(str, "MPS=0x%lx\n", efi.mps);
if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
str += sprintf(str, "ACPI20=0x%lx\n", efi.acpi20);
if (efi.acpi != EFI_INVALID_TABLE_ADDR)
str += sprintf(str, "ACPI=0x%lx\n", efi.acpi);
if (efi.smbios != EFI_INVALID_TABLE_ADDR)
str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios);
if (efi.hcdp != EFI_INVALID_TABLE_ADDR)
str += sprintf(str, "HCDP=0x%lx\n", efi.hcdp);
if (efi.boot_info != EFI_INVALID_TABLE_ADDR)
str += sprintf(str, "BOOTINFO=0x%lx\n", efi.boot_info);
if (efi.uga != EFI_INVALID_TABLE_ADDR)
str += sprintf(str, "UGA=0x%lx\n", efi.uga);
return str - buf;
}
static struct kobj_attribute efi_attr_systab =
__ATTR(systab, 0400, systab_show, NULL);
static struct attribute *efi_subsys_attrs[] = {
&efi_attr_systab.attr,
NULL, /* maybe more in the future? */
};
static struct attribute_group efi_subsys_attr_group = {
.attrs = efi_subsys_attrs,
};
static struct kobject *efi_kobj;
/*
* efivar_create_sysfs_entry()
* Requires:
* variable_name_size = number of bytes required to hold
* variable_name (not counting the NULL
* character at the end).
* efivars->lock is not held on entry or exit.
* Returns 1 on failure, 0 on success
*/
static int
efivar_create_sysfs_entry(struct efivars *efivars,
unsigned long variable_name_size,
efi_char16_t *variable_name,
efi_guid_t *vendor_guid)
{
int i, short_name_size = variable_name_size / sizeof(efi_char16_t) + 38;
char *short_name;
struct efivar_entry *new_efivar;
short_name = kzalloc(short_name_size + 1, GFP_KERNEL);
new_efivar = kzalloc(sizeof(struct efivar_entry), GFP_KERNEL);
if (!short_name || !new_efivar) {
kfree(short_name);
kfree(new_efivar);
return 1;
}
new_efivar->efivars = efivars;
memcpy(new_efivar->var.VariableName, variable_name,
variable_name_size);
memcpy(&(new_efivar->var.VendorGuid), vendor_guid, sizeof(efi_guid_t));
/* Convert Unicode to normal chars (assume top bits are 0),
ala UTF-8 */
for (i=0; i < (int)(variable_name_size / sizeof(efi_char16_t)); i++) {
short_name[i] = variable_name[i] & 0xFF;
}
/* This is ugly, but necessary to separate one vendor's
private variables from another's. */
*(short_name + strlen(short_name)) = '-';
efi_guid_unparse(vendor_guid, short_name + strlen(short_name));
new_efivar->kobj.kset = efivars->kset;
i = kobject_init_and_add(&new_efivar->kobj, &efivar_ktype, NULL,
"%s", short_name);
if (i) {
kfree(short_name);
kfree(new_efivar);
return 1;
}
kobject_uevent(&new_efivar->kobj, KOBJ_ADD);
kfree(short_name);
short_name = NULL;
spin_lock(&efivars->lock);
list_add(&new_efivar->list, &efivars->list);
spin_unlock(&efivars->lock);
return 0;
}
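/*
 * Example (illustrative): a variable named "BootOrder" under the EFI global
 * variable GUID shows up as a sysfs directory named, for instance,
 * "BootOrder-8be4df61-93ca-11d2-aa0d-00e098032b8c" beneath the vars kset.
 */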
static int
create_efivars_bin_attributes(struct efivars *efivars)
{
struct bin_attribute *attr;
int error;
/* new_var */
attr = kzalloc(sizeof(*attr), GFP_KERNEL);
if (!attr)
return -ENOMEM;
attr->attr.name = "new_var";
attr->attr.mode = 0200;
attr->write = efivar_create;
attr->private = efivars;
efivars->new_var = attr;
/* del_var */
attr = kzalloc(sizeof(*attr), GFP_KERNEL);
if (!attr) {
error = -ENOMEM;
goto out_free;
}
attr->attr.name = "del_var";
attr->attr.mode = 0200;
attr->write = efivar_delete;
attr->private = efivars;
efivars->del_var = attr;
sysfs_bin_attr_init(efivars->new_var);
sysfs_bin_attr_init(efivars->del_var);
/* Register */
error = sysfs_create_bin_file(&efivars->kset->kobj,
efivars->new_var);
if (error) {
printk(KERN_ERR "efivars: unable to create new_var sysfs file"
" due to error %d\n", error);
goto out_free;
}
error = sysfs_create_bin_file(&efivars->kset->kobj,
efivars->del_var);
if (error) {
printk(KERN_ERR "efivars: unable to create del_var sysfs file"
" due to error %d\n", error);
sysfs_remove_bin_file(&efivars->kset->kobj,
efivars->new_var);
goto out_free;
}
return 0;
out_free:
kfree(efivars->del_var);
efivars->del_var = NULL;
kfree(efivars->new_var);
efivars->new_var = NULL;
return error;
}
void unregister_efivars(struct efivars *efivars)
{
struct efivar_entry *entry, *n;
list_for_each_entry_safe(entry, n, &efivars->list, list) {
spin_lock(&efivars->lock);
list_del(&entry->list);
spin_unlock(&efivars->lock);
efivar_unregister(entry);
}
if (efivars->new_var)
sysfs_remove_bin_file(&efivars->kset->kobj, efivars->new_var);
if (efivars->del_var)
sysfs_remove_bin_file(&efivars->kset->kobj, efivars->del_var);
kfree(efivars->new_var);
kfree(efivars->del_var);
kset_unregister(efivars->kset);
}
EXPORT_SYMBOL_GPL(unregister_efivars);
int register_efivars(struct efivars *efivars,
const struct efivar_operations *ops,
struct kobject *parent_kobj)
{
efi_status_t status = EFI_NOT_FOUND;
efi_guid_t vendor_guid;
efi_char16_t *variable_name;
unsigned long variable_name_size = 1024;
int error = 0;
variable_name = kzalloc(variable_name_size, GFP_KERNEL);
if (!variable_name) {
printk(KERN_ERR "efivars: Memory allocation failed.\n");
return -ENOMEM;
}
spin_lock_init(&efivars->lock);
INIT_LIST_HEAD(&efivars->list);
efivars->ops = ops;
efivars->kset = kset_create_and_add("vars", NULL, parent_kobj);
if (!efivars->kset) {
printk(KERN_ERR "efivars: Subsystem registration failed.\n");
error = -ENOMEM;
goto out;
}
/*
* Per EFI spec, the maximum storage allocated for both
* the variable name and variable data is 1024 bytes.
*/
do {
variable_name_size = 1024;
status = ops->get_next_variable(&variable_name_size,
variable_name,
&vendor_guid);
switch (status) {
case EFI_SUCCESS:
efivar_create_sysfs_entry(efivars,
variable_name_size,
variable_name,
&vendor_guid);
break;
case EFI_NOT_FOUND:
break;
default:
printk(KERN_WARNING "efivars: get_next_variable: status=%lx\n",
status);
status = EFI_NOT_FOUND;
break;
}
} while (status != EFI_NOT_FOUND);
error = create_efivars_bin_attributes(efivars);
if (error)
unregister_efivars(efivars);
efivars->efi_pstore_info = efi_pstore_info;
efivars->efi_pstore_info.buf = kmalloc(4096, GFP_KERNEL);
if (efivars->efi_pstore_info.buf) {
efivars->efi_pstore_info.bufsize = 1024;
efivars->efi_pstore_info.data = efivars;
spin_lock_init(&efivars->efi_pstore_info.buf_lock);
pstore_register(&efivars->efi_pstore_info);
}
out:
kfree(variable_name);
return error;
}
EXPORT_SYMBOL_GPL(register_efivars);
static struct efivars __efivars;
static struct efivar_operations ops;
/*
* For now we register the efi subsystem with the firmware subsystem
* and the vars subsystem with the efi subsystem. In the future, it
* might make sense to split off the efi subsystem into its own
* driver, but for now only efivars will register with it, so just
* include it here.
*/
static int __init
efivars_init(void)
{
int error = 0;
printk(KERN_INFO "EFI Variables Facility v%s %s\n", EFIVARS_VERSION,
EFIVARS_DATE);
if (!efi_enabled)
return 0;
/* For now we'll register the efi directory at /sys/firmware/efi */
efi_kobj = kobject_create_and_add("efi", firmware_kobj);
if (!efi_kobj) {
printk(KERN_ERR "efivars: Firmware registration failed.\n");
return -ENOMEM;
}
ops.get_variable = efi.get_variable;
ops.set_variable = efi.set_variable;
ops.get_next_variable = efi.get_next_variable;
error = register_efivars(&__efivars, &ops, efi_kobj);
if (error)
goto err_put;
/* Don't forget the systab entry */
error = sysfs_create_group(efi_kobj, &efi_subsys_attr_group);
if (error) {
printk(KERN_ERR
"efivars: Sysfs attribute export failed with error %d.\n",
error);
goto err_unregister;
}
return 0;
err_unregister:
unregister_efivars(&__efivars);
err_put:
kobject_put(efi_kobj);
return error;
}
static void __exit
efivars_exit(void)
{
if (efi_enabled) {
unregister_efivars(&__efivars);
kobject_put(efi_kobj);
}
}
module_init(efivars_init);
module_exit(efivars_exit);
| {
"language": "C"
} |
/*
* DTLS cookie callbacks implementation
*
* Copyright (C) 2006-2015, ARM Limited, All Rights Reserved
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* This file is part of mbed TLS (https://tls.mbed.org)
*/
/*
* These session callbacks use a simple chained list
* to store and retrieve the session information.
*/
#if !defined(MBEDTLS_CONFIG_FILE)
#include "mbedtls/config.h"
#else
#include MBEDTLS_CONFIG_FILE
#endif
#if defined(MBEDTLS_SSL_COOKIE_C)
#if defined(MBEDTLS_PLATFORM_C)
#include "mbedtls/platform.h"
#else
#define mbedtls_calloc calloc
#define mbedtls_free free
#endif
#include "mbedtls/ssl_cookie.h"
#include "mbedtls/ssl_internal.h"
#include <string.h>
/* Implementation that should never be optimized out by the compiler */
static void mbedtls_zeroize( void *v, size_t n ) {
volatile unsigned char *p = v; while( n-- ) *p++ = 0;
}
/*
* If DTLS is in use, then at least one of SHA-1, SHA-256, SHA-512 is
* available. Try SHA-256 first, 512 wastes resources since we need to stay
* with max 32 bytes of cookie for DTLS 1.0
*/
#if defined(MBEDTLS_SHA256_C)
#define COOKIE_MD MBEDTLS_MD_SHA224
#define COOKIE_MD_OUTLEN 32
#define COOKIE_HMAC_LEN 28
#elif defined(MBEDTLS_SHA512_C)
#define COOKIE_MD MBEDTLS_MD_SHA384
#define COOKIE_MD_OUTLEN 48
#define COOKIE_HMAC_LEN 28
#elif defined(MBEDTLS_SHA1_C)
#define COOKIE_MD MBEDTLS_MD_SHA1
#define COOKIE_MD_OUTLEN 20
#define COOKIE_HMAC_LEN 20
#else
#error "DTLS hello verify needs SHA-1 or SHA-2"
#endif
/*
* Cookies are formed of a 4-byte timestamp (or serial number) and
* an HMAC of the timestamp and client ID.
*/
#define COOKIE_LEN ( 4 + COOKIE_HMAC_LEN )
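/*
 * With the SHA-256 module selected above this gives COOKIE_LEN == 4 + 28
 * == 32 bytes: bytes 0..3 carry the big-endian timestamp (or serial) and
 * bytes 4..31 carry the truncated HMAC.
 */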
void mbedtls_ssl_cookie_init( mbedtls_ssl_cookie_ctx *ctx )
{
mbedtls_md_init( &ctx->hmac_ctx );
#if !defined(MBEDTLS_HAVE_TIME)
ctx->serial = 0;
#endif
ctx->timeout = MBEDTLS_SSL_COOKIE_TIMEOUT;
#if defined(MBEDTLS_THREADING_C)
mbedtls_mutex_init( &ctx->mutex );
#endif
}
void mbedtls_ssl_cookie_set_timeout( mbedtls_ssl_cookie_ctx *ctx, unsigned long delay )
{
ctx->timeout = delay;
}
void mbedtls_ssl_cookie_free( mbedtls_ssl_cookie_ctx *ctx )
{
mbedtls_md_free( &ctx->hmac_ctx );
#if defined(MBEDTLS_THREADING_C)
mbedtls_mutex_free( &ctx->mutex );
#endif
mbedtls_zeroize( ctx, sizeof( mbedtls_ssl_cookie_ctx ) );
}
int mbedtls_ssl_cookie_setup( mbedtls_ssl_cookie_ctx *ctx,
int (*f_rng)(void *, unsigned char *, size_t),
void *p_rng )
{
int ret;
unsigned char key[COOKIE_MD_OUTLEN];
if( ( ret = f_rng( p_rng, key, sizeof( key ) ) ) != 0 )
return( ret );
ret = mbedtls_md_setup( &ctx->hmac_ctx, mbedtls_md_info_from_type( COOKIE_MD ), 1 );
if( ret != 0 )
return( ret );
ret = mbedtls_md_hmac_starts( &ctx->hmac_ctx, key, sizeof( key ) );
if( ret != 0 )
return( ret );
mbedtls_zeroize( key, sizeof( key ) );
return( 0 );
}
/*
* Generate the HMAC part of a cookie
*/
static int ssl_cookie_hmac( mbedtls_md_context_t *hmac_ctx,
const unsigned char time[4],
unsigned char **p, unsigned char *end,
const unsigned char *cli_id, size_t cli_id_len )
{
unsigned char hmac_out[COOKIE_MD_OUTLEN];
if( (size_t)( end - *p ) < COOKIE_HMAC_LEN )
return( MBEDTLS_ERR_SSL_BUFFER_TOO_SMALL );
if( mbedtls_md_hmac_reset( hmac_ctx ) != 0 ||
mbedtls_md_hmac_update( hmac_ctx, time, 4 ) != 0 ||
mbedtls_md_hmac_update( hmac_ctx, cli_id, cli_id_len ) != 0 ||
mbedtls_md_hmac_finish( hmac_ctx, hmac_out ) != 0 )
{
return( MBEDTLS_ERR_SSL_INTERNAL_ERROR );
}
memcpy( *p, hmac_out, COOKIE_HMAC_LEN );
*p += COOKIE_HMAC_LEN;
return( 0 );
}
/*
* Generate cookie for DTLS ClientHello verification
*/
int mbedtls_ssl_cookie_write( void *p_ctx,
unsigned char **p, unsigned char *end,
const unsigned char *cli_id, size_t cli_id_len )
{
int ret;
mbedtls_ssl_cookie_ctx *ctx = (mbedtls_ssl_cookie_ctx *) p_ctx;
unsigned long t;
if( ctx == NULL || cli_id == NULL )
return( MBEDTLS_ERR_SSL_BAD_INPUT_DATA );
if( (size_t)( end - *p ) < COOKIE_LEN )
return( MBEDTLS_ERR_SSL_BUFFER_TOO_SMALL );
#if defined(MBEDTLS_HAVE_TIME)
t = (unsigned long) mbedtls_time( NULL );
#else
t = ctx->serial++;
#endif
(*p)[0] = (unsigned char)( t >> 24 );
(*p)[1] = (unsigned char)( t >> 16 );
(*p)[2] = (unsigned char)( t >> 8 );
(*p)[3] = (unsigned char)( t );
*p += 4;
#if defined(MBEDTLS_THREADING_C)
if( ( ret = mbedtls_mutex_lock( &ctx->mutex ) ) != 0 )
return( MBEDTLS_ERR_SSL_INTERNAL_ERROR + ret );
#endif
ret = ssl_cookie_hmac( &ctx->hmac_ctx, *p - 4,
p, end, cli_id, cli_id_len );
#if defined(MBEDTLS_THREADING_C)
if( mbedtls_mutex_unlock( &ctx->mutex ) != 0 )
return( MBEDTLS_ERR_SSL_INTERNAL_ERROR +
MBEDTLS_ERR_THREADING_MUTEX_ERROR );
#endif
return( ret );
}
/*
* Check a cookie
*/
int mbedtls_ssl_cookie_check( void *p_ctx,
const unsigned char *cookie, size_t cookie_len,
const unsigned char *cli_id, size_t cli_id_len )
{
unsigned char ref_hmac[COOKIE_HMAC_LEN];
int ret = 0;
unsigned char *p = ref_hmac;
mbedtls_ssl_cookie_ctx *ctx = (mbedtls_ssl_cookie_ctx *) p_ctx;
unsigned long cur_time, cookie_time;
if( ctx == NULL || cli_id == NULL )
return( MBEDTLS_ERR_SSL_BAD_INPUT_DATA );
if( cookie_len != COOKIE_LEN )
return( -1 );
#if defined(MBEDTLS_THREADING_C)
if( ( ret = mbedtls_mutex_lock( &ctx->mutex ) ) != 0 )
return( MBEDTLS_ERR_SSL_INTERNAL_ERROR + ret );
#endif
if( ssl_cookie_hmac( &ctx->hmac_ctx, cookie,
&p, p + sizeof( ref_hmac ),
cli_id, cli_id_len ) != 0 )
ret = -1;
#if defined(MBEDTLS_THREADING_C)
if( mbedtls_mutex_unlock( &ctx->mutex ) != 0 )
return( MBEDTLS_ERR_SSL_INTERNAL_ERROR +
MBEDTLS_ERR_THREADING_MUTEX_ERROR );
#endif
if( ret != 0 )
return( ret );
if( mbedtls_ssl_safer_memcmp( cookie + 4, ref_hmac, sizeof( ref_hmac ) ) != 0 )
return( -1 );
#if defined(MBEDTLS_HAVE_TIME)
cur_time = (unsigned long) mbedtls_time( NULL );
#else
cur_time = ctx->serial;
#endif
cookie_time = ( (unsigned long) cookie[0] << 24 ) |
( (unsigned long) cookie[1] << 16 ) |
( (unsigned long) cookie[2] << 8 ) |
( (unsigned long) cookie[3] );
if( ctx->timeout != 0 && cur_time - cookie_time > ctx->timeout )
return( -1 );
return( 0 );
}
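#if 0
/*
 * Illustrative usage sketch only (never compiled): a DTLS server would
 * typically wire this module into its SSL configuration as shown below.
 * "conf" and "ctr_drbg" are assumed to come from the usual mbed TLS server
 * setup and are not defined in this file.
 */
mbedtls_ssl_cookie_ctx cookie_ctx;
mbedtls_ssl_cookie_init( &cookie_ctx );
mbedtls_ssl_cookie_setup( &cookie_ctx, mbedtls_ctr_drbg_random, &ctr_drbg );
mbedtls_ssl_conf_dtls_cookies( &conf, mbedtls_ssl_cookie_write,
                               mbedtls_ssl_cookie_check, &cookie_ctx );
/* ... serve connections, then on shutdown ... */
mbedtls_ssl_cookie_free( &cookie_ctx );
#endif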
#endif /* MBEDTLS_SSL_COOKIE_C */
| {
"language": "C"
} |
/*
*******************************************************************************
*
* Copyright (C) 2005-2010, International Business Machines
* Corporation and others. All Rights Reserved.
*
*******************************************************************************
* file name: writesrc.h
* encoding: US-ASCII
* tab size: 8 (not used)
* indentation:4
*
* created on: 2005apr23
* created by: Markus W. Scherer
*
* Helper functions for writing source code for data.
*/
#ifndef __WRITESRC_H__
#define __WRITESRC_H__
#include <stdio.h>
#include "unicode/utypes.h"
#include "utrie2.h"
/**
* Create a source text file and write a header comment with the ICU copyright.
* Writes a C/Java-style comment.
*/
U_CAPI FILE * U_EXPORT2
usrc_create(const char *path, const char *filename);
/**
* Create a source text file and write a header comment with the ICU copyright.
* Writes the comment with # lines, as used in scripts and text data.
*/
U_CAPI FILE * U_EXPORT2
usrc_createTextData(const char *path, const char *filename);
/**
* Write the contents of an array of 8/16/32-bit words.
* The prefix and postfix are optional (can be NULL) and are written first/last.
* The prefix may contain a %ld or similar field for the array length.
* The {} and declaration etc. need to be included in prefix/postfix or
* printed before and after the array contents.
*/
U_CAPI void U_EXPORT2
usrc_writeArray(FILE *f,
const char *prefix,
const void *p, int32_t width, int32_t length,
const char *postfix);
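/*
 * Illustrative call (the array and variable names are made up for the
 * example): writes a 16-bit array as C source, substituting its length
 * into the prefix.
 *
 *   usrc_writeArray(f,
 *       "static const uint16_t norm2_index[%ld]={\n",
 *       indexArray, 16, indexLength,
 *       "\n};\n\n");
 */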
/**
* Calls usrc_writeArray() for the index and data arrays of a frozen UTrie2.
* Only the index array is written for a 16-bit UTrie2. In this case, dataPrefix
* is ignored and can be NULL.
*/
U_CAPI void U_EXPORT2
usrc_writeUTrie2Arrays(FILE *f,
const char *indexPrefix, const char *dataPrefix,
const UTrie2 *pTrie,
const char *postfix);
/**
* Writes the UTrie2 struct values.
* The {} and declaration etc. need to be included in prefix/postfix or
* printed before and after the array contents.
*/
U_CAPI void U_EXPORT2
usrc_writeUTrie2Struct(FILE *f,
const char *prefix,
const UTrie2 *pTrie,
const char *indexName, const char *dataName,
const char *postfix);
#endif
| {
"language": "C"
} |
/*
* Copyright (C) 2017-2020 Yannick Jadoul
*
* This file is part of Parselmouth.
*
* Parselmouth is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Parselmouth is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Parselmouth. If not, see <http://www.gnu.org/licenses/>
*/
#include "Parselmouth.h"
#include "utils/SignatureCast.h"
#include "utils/pybind11/NumericPredicates.h"
#include <pybind11/numpy.h>
#include <praat/dwtools/MFCC.h>
#include <praat/dwtools/Spectrogram_extensions.h>
namespace py = pybind11;
using namespace py::literals;
namespace parselmouth {
PRAAT_CLASS_BINDING(MFCC) {
using signature_cast_placeholder::_;
// TODO Constructor from Sound? Other constructors?
// TODO To MelSpectrogram..., To TableOfReal... (? -> pandas?),
/*def("to_mel_spectrogram", // TODO Uncomment once we have a MelSpectrogram!
&MFCC_to_MelSpectrogram,
"from_coefficient"_a = 0, "to_coefficient"_a = 0, "include_c0"_a = true);*/
def("to_matrix_features",
args_cast<_, Positive<_>, _>(MFCC_to_Matrix_features),
"window_length"_a = 0.025, "include_energy"_a = false);
def("extract_features",
args_cast<_, Positive<_>, _>(MFCC_to_Matrix_features),
"window_length"_a = 0.025, "include_energy"_a = false);
def("to_sound",
&MFCC_to_Sound);
def("cross_correlate",
&MFCCs_convolve,
"other"_a.none(false), "scaling"_a = kSounds_convolve_scaling::PEAK_099, "signal_outside_time_domain"_a = kSounds_convolve_signalOutsideTimeDomain::ZERO);
def("convolve",
&MFCCs_convolve,
"other"_a.none(false), "scaling"_a = kSounds_convolve_scaling::PEAK_099, "signal_outside_time_domain"_a = kSounds_convolve_signalOutsideTimeDomain::ZERO);
}
} // namespace parselmouth
| {
"language": "C"
} |
// Copyright (c) Open Enclave SDK contributors.
// Licensed under the MIT License.
#include <openenclave/bits/sgx/sgxtypes.h>
#include <openenclave/internal/crypto/sha.h>
#include <openenclave/internal/elf.h>
#include <openenclave/internal/hexdump.h>
#include <openenclave/internal/types.h>
#include <stdio.h>
#include <string.h>
#include "oe_err.h"
#include "oeinfo.h"
static bool verbose_opt = false;
static void _dump_entry_point(const elf64_t* elf)
{
elf64_sym_t sym;
const char* name;
if (elf64_find_dynamic_symbol_by_address(
elf, elf64_get_header(elf)->e_entry, STT_FUNC, &sym) != 0)
{
oe_err("Cannot find entry point symbol");
return;
}
if (!(name = elf64_get_string_from_dynstr(elf, sym.st_name)))
{
oe_err("Cannot resolve entry point name");
return;
}
if (strcmp(name, "_start") != 0)
{
oe_err("Invalid entry point name: %s", name);
return;
}
printf("=== Entry point: \n");
printf("name=%s\n", name);
printf("address=%#016llx\n", OE_LLX(sym.st_value));
printf("\n");
}
/* The provided public_key_modulus must be in little-endian
* format for this function, which is the format used in the
* sgx_sigstruct_t.modulus field.
*/
static void _dump_mrsigner(
const uint8_t* public_key_modulus,
size_t public_key_modulus_size)
{
OE_SHA256 mrsigner = {0};
/* Check if modulus value is not set */
size_t i = 0;
while (i < public_key_modulus_size && public_key_modulus[i] == 0)
i++;
if (public_key_modulus_size > i)
oe_sha256(public_key_modulus, public_key_modulus_size, &mrsigner);
oe_hex_dump(mrsigner.buf, sizeof(mrsigner.buf));
}
static void _dump_enclave_properties(const oe_sgx_enclave_properties_t* props)
{
const sgx_sigstruct_t* sigstruct;
printf("=== SGX Enclave Properties:\n");
printf("product_id=%u\n", props->config.product_id);
printf("security_version=%u\n", props->config.security_version);
bool debug = props->config.attributes & OE_SGX_FLAGS_DEBUG;
printf("debug=%u\n", debug);
printf("xfrm=%#016llx\n", OE_LLX(props->config.xfrm));
printf(
"num_heap_pages=%llu\n",
OE_LLU(props->header.size_settings.num_heap_pages));
printf(
"num_stack_pages=%llu\n",
OE_LLU(props->header.size_settings.num_stack_pages));
printf("num_tcs=%llu\n", OE_LLU(props->header.size_settings.num_tcs));
sigstruct = (const sgx_sigstruct_t*)props->sigstruct;
printf("mrenclave=");
oe_hex_dump(sigstruct->enclavehash, sizeof(sigstruct->enclavehash));
printf("mrsigner=");
_dump_mrsigner(sigstruct->modulus, sizeof(sigstruct->modulus));
printf("signature=");
oe_hex_dump(sigstruct->signature, sizeof(sigstruct->signature));
printf("\n");
if (verbose_opt)
__sgx_dump_sigstruct(sigstruct);
}
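/*
 * For reference, the output of _dump_enclave_properties() looks roughly
 * like the following (the values here are placeholders, not measurements):
 *
 *   === SGX Enclave Properties:
 *   product_id=1
 *   security_version=1
 *   debug=1
 *   xfrm=0x0000000000000003
 *   num_heap_pages=1024
 *   num_stack_pages=1024
 *   num_tcs=2
 *   mrenclave=...
 *   mrsigner=...
 *   signature=...
 */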
int oedump(const char* enc_bin)
{
int ret = 1;
elf64_t elf;
oe_sgx_enclave_properties_t props;
/* Load the ELF-64 object */
if (elf64_load(enc_bin, &elf) != 0)
{
oe_err("Failed to load %s as ELF64", enc_bin);
goto done;
}
/* Load the SGX enclave properties */
if (oe_read_oeinfo_sgx(enc_bin, &props) != OE_OK)
{
oe_err(
"Failed to load SGX enclave properties from %s section",
OE_INFO_SECTION_NAME);
}
printf("\n");
/* Dump the entry point */
_dump_entry_point(&elf);
/* Dump the signature section */
_dump_enclave_properties(&props);
oe_print_err_count();
ret = 0;
done:
elf64_unload(&elf);
return ret;
}
| {
"language": "C"
} |
/*
Drawpile - a collaborative drawing program.
Copyright (C) 2013-2015 Calle Laakkonen
Drawpile is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Drawpile is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Drawpile. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef SELECTIONITEM_H
#define SELECTIONITEM_H
#include "canvas/selection.h"
#include <QGraphicsObject>
namespace drawingboard {
class SelectionItem : public QGraphicsObject
{
public:
enum { Type= UserType + 11 };
SelectionItem(canvas::Selection *selection, QGraphicsItem *parent=0);
QRectF boundingRect() const;
int type() const { return Type; }
void marchingAnts();
private slots:
void onShapeChanged();
void onAdjustmentModeChanged();
protected:
void paint(QPainter *painter, const QStyleOptionGraphicsItem *options, QWidget *);
private:
QPolygonF m_shape;
canvas::Selection *m_selection;
qreal m_marchingants;
};
}
#endif // SELECTIONITEM_H
| {
"language": "C"
} |
// Copyright 2015-2016 Kevin B. Hendricks, Stratford Ontario All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef GUMBO_EDIT_H_
#define GUMBO_EDIT_H_
#include "gumbo.h"
#ifdef __cplusplus
extern "C" {
#endif
// See gumbo.h for:
// void gumbo_create_node(void);
// void gumbo_destroy_node(GumboNode* node)
// create and initialize a completely new output tree
GumboOutput* gumbo_new_output_init(void);
// Creates an text node of specified type and returns it.
// Types are GUMBO_NODE_TEXT, GUMBO_NODE_WHITESPACE, GUMBO_NODE_CDATA, and GUMBO_NODE_COMMENT
// No entities are allowed (replace them with their utf-8 character equivalents)
// Note: CDATA and COMMENTS text should NOT include their respective delimiters
// ie. No <-- --> and not CDATA[[ and ]]
// Note: Use gumbo_destroy_node(GumboNode * node) to properly destroy the node if outside
// the final output tree
GumboNode* gumbo_create_text_node(GumboNodeType type, const char * text);
// Creates an element node with the tag (enum) in the specified namespace and returns it.
// Since no original text exists, any created element tag must already exist in the tag_enum.h
// This is why we have expanded the set of recognized tags to include all svg and mathml tags
// Note: Use gumbo_destroy_node(GumboNode * node) to properly destroy the node if outside
// the final output tree
GumboNode* gumbo_create_element_node(GumboTag tag, GumboNamespaceEnum gns);
// Creates an template node and returns it.
// Note: Use gumbo_destroy_node(GumboNode * node) to properly destroy the node if outside
// the final output tree.
GumboNode* gumbo_create_template_node(void);
// Appends a node to the end of its parent, setting the "parent" and
// "index_within_parent" fields appropriately.
void gumbo_append_node(GumboNode* parent, GumboNode* node);
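// Example (illustrative only): building a <p>hello</p> fragment by hand.
// GUMBO_TAG_P, GUMBO_NAMESPACE_HTML and GUMBO_NODE_TEXT are assumed to come
// from gumbo.h.
//
//   GumboNode* p = gumbo_create_element_node(GUMBO_TAG_P, GUMBO_NAMESPACE_HTML);
//   GumboNode* text = gumbo_create_text_node(GUMBO_NODE_TEXT, "hello");
//   gumbo_append_node(p, text);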
// Inserts a node at the specified index in the specified parent,
// updating the "parent" and "index_within_parent" fields of it and all its siblings.
// If the index is -1, this simply calls gumbo_append_node.
void gumbo_insert_node(GumboNode* node, GumboNode* target_parent, int target_index);
// removes a node from its parent but does not destroy it
// Note: Use gumbo_destroy_node(GumboNode * node) to properly destroy the node if outside
// the final output tree.
void gumbo_remove_from_parent(GumboNode* node);
// Clones attributes, tags, etc. of a node, but does not copy the content (its children).
// The clone shares no structure with the original node: all owned strings and
// values are fresh copies.
// Note: Use gumbo_destroy_node(GumboNode * node) to properly destroy the node if outside
// the output tree.
GumboNode* clone_element_node(const GumboNode* node);
// interface from attribute.h
void gumbo_attribute_set_value(GumboAttribute *attr, const char *value);
void gumbo_destroy_attribute(GumboAttribute* attribute);
void gumbo_element_set_attribute(GumboElement *element, const char *name, const char *value);
void gumbo_element_remove_attribute_at(GumboElement *element, unsigned int pos);
void gumbo_element_remove_attribute(GumboElement *element, GumboAttribute *attr);
// interface from vector.h
// Initializes a new GumboVector with the specified initial capacity.
void gumbo_vector_init(size_t initial_capacity, GumboVector* vector);
// Frees the memory used by an GumboVector. Does not free the contained pointers.
void gumbo_vector_destroy(GumboVector* vector);
// Adds a new element to an GumboVector.
void gumbo_vector_add(void* element, GumboVector* vector);
// Removes and returns the element most recently added to the GumboVector.
// Ownership is transferred to caller. Capacity is unchanged. If the vector is
// empty, NULL is returned.
void* gumbo_vector_pop(GumboVector* vector);
// Inserts an element at a specific index. This is potentially O(N) time, but
// is necessary for some of the spec's behavior.
void gumbo_vector_insert_at(void* element, int index, GumboVector* vector);
// Removes an element from the vector, or does nothing if the element is not in the vector.
void gumbo_vector_remove(const void* element, GumboVector* vector);
// Removes and returns an element at a specific index. Note that this is
// potentially O(N) time and should be used sparingly.
void* gumbo_vector_remove_at(int index, GumboVector* vector);
int gumbo_vector_index_of(GumboVector* vector, const void* element);
void gumbo_vector_splice(int where, int n_to_remove, void **data, int n_to_insert, GumboVector* vector);
#ifdef __cplusplus
}
#endif
#endif // GUMBO_EDIT_H_
| {
"language": "C"
} |
#ifndef FM_H
#define FM_H
/////////////////////////
// OS-independent includes
/////////////////////////
#include <mex.h>
#include <engine.h>
#include <math.h>
#include <stdio.h>
#include <string.h>
#include <vector>
#include <algorithm>
//#include "fheap/fib.h"
//#include "fheap/fibpriv.h"
/////////////////////////
// numerical macros
/////////////////////////
#undef MIN
#undef MAX
#define MIN(a, b) ((a) < (b) ? (a) : (b)) //!< Returns the min value between a and b
#define MINMIN(a,b,c) ((a) < (b) ? MIN(a,c) : MIN(b,c))
#define MAX(a, b) ((a) > (b) ? (a) : (b)) //!< Returns the max value between a and b
#define MAXMAX(a,b,c) ((a) > (b) ? MAX (a,c) : MAX (b,c))
#define SCALE_01(x,rMin,rMax) ((x-rMin)/(rMax-rMin))
#define ABS(a) ((a) > 0 ? (a) : -(a)) //!< Returns the absolute value a
#define SIGN(a) ((a) > 0 ? 1 : -1) //!< Returns the sign of a
#define SQR(x) ((x)*(x)) //!< Returns x square
#define CUBE(x) ((x)*(x)*(x)) //!< Returns x cube
#define CLAMP_01(x) if( (x)<0 ) x=0; if( (x)>1 ) x=1
#define CLAMP(x, a,b) if( (x)<a ) x=a; if( (x)>b ) x=b
#define SWAP(x,y) x^=y; y^=x; x^=y
#define ORDER(x,y) if(x>y){ SWAP(x,y); }
/////////////////////////
// generic macros
/////////////////////////
/** a random number in [0-1] */
#define RAND ((double) (rand()%10000))/10000
/** a random number in [a,b] */
#define RAND_RANGE(a,b) (a)+((b)-(a))*((double) (rand()%10000))/10000
/** delete a single pointer */
#define DELETE(p) {if (p!=NULL) delete p; p=NULL;}
/** delete an array pointer */
#define DELETEARRAY(p) {if (p!=NULL) delete [] p; p=NULL;}
/////////////////////////
// some constants
/////////////////////////
#define TRUE true
#define FALSE false
/** to make aproximate computations (derivation, float comparaisons ...) */
#define EPSILON 1e-6
/** very big number */
#define INFINITE 1e9
/////////////////////////
// numerical constants
/////////////////////////
/** pi */
#define PI 3.1415926535897932384626433832795028841971693993751f
/** pi/2 */
#define HALFPI 1.57079632679489661923f
/** 2*pi */
#define TWOPI 6.28318530717958647692f
/** 1/pi */
#define GW_INVPI 0.31830988618379067154f
/** 180/pi */
#define RADTODEG(x) (x)*57.2957795130823208768f
/** pi/180 */
#define DEGTORAD(x) (x)*0.01745329251994329577f
/** e */
#define EXP 2.71828182845904523536f
/** 1/log10(2) */
#define ILOG2 3.32192809488736234787f
/** 1/3 */
#define INV3 0.33333333333333333333f
/** 1/6 */
#define INV6 0.16666666666666666666f
/** 1/7 */
#define INV7 0.14285714285714285714f
/** 1/9 */
#define INV9 0.11111111111111111111f
/** 1/255 */
#define INV255 0.00392156862745098039f
/** sqrt(2) */
#define SQRT2 1.41421356237f
//================================================================
typedef std::vector<int> LISTofINT;
typedef std::vector<short> LISTofSHORT;
typedef std::vector<float> LISTofFLOAT;
typedef std::vector<bool> LISTofBOOL;
typedef std::vector<LISTofINT> LISTofLISTofINT;
typedef std::vector<LISTofBOOL> LISTofLISTofBOOL;
class MIN_PATH
{
public:
int* V;
float U;
LISTofINT points;
};
typedef std::vector<MIN_PATH> LISTofMIN_PATH;
class KEYPOINT
{
public:
int point;
int V;
float U;
};
typedef std::vector<KEYPOINT> LISTofKEYPOINT;
//================================================================
inline int log2(int n)
{
int x = 0;
while (n > 1)
{
x++;
n /= 2;
}
return x;
}
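// For example, log2(8) == 3 and log2(7) == 2: this computes the integer
// floor of the base-2 logarithm by repeated halving.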
inline int cmp(float a, float b)
{
if (a < b)
return -1;
if (a == b)
return 0;
return 1;
}
inline int sign(int v)
{
return v > 0 ? 1 : (v < 0 ? -1 : 0);
}
#endif // #ifndef FM_H
| {
"language": "C"
} |
/* SoX Resampler Library Copyright (c) 2007-16 robs@users.sourceforge.net
* Licence for this file: LGPL v2.1 See LICENCE for details. */
#if !defined soxr_internal_included
#define soxr_internal_included
#include "std-types.h"
#undef min
#undef max
#define min(a, b) ((a) <= (b) ? (a) : (b))
#define max(a, b) ((a) >= (b) ? (a) : (b))
#define range_limit(x, lower, upper) (min(max(x, lower), upper))
#define linear_to_dB(x) (log10(x) * 20)
#define array_length(a) (sizeof(a)/sizeof(a[0]))
#if !defined AL
#define AL(a) array_length(a)
#endif
#define iAL(a) (int)AL(a)
#define sqr(a) ((a) * (a))
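/* For instance (illustrative): range_limit(300, 0, 255) evaluates to 255,
 * and linear_to_dB(.5) is log10(.5) * 20, roughly -6.02 dB. */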
#if defined __GNUC__
#define UNUSED __attribute__ ((unused))
#else
#define UNUSED
#endif
#if !WITH_DEV_TRACE
#ifdef __GNUC__
void lsx_dummy(char const *, ...);
#else
static __inline void lsx_dummy(char const * x, ...) {}
#endif
#define lsx_debug if(0) lsx_dummy
#define lsx_debug_more lsx_debug
#else
extern int _soxr_trace_level;
void _soxr_trace(char const * fmt, ...);
#define lsx_debug if (_soxr_trace_level > 0) _soxr_trace
#define lsx_debug_more if (_soxr_trace_level > 1) _soxr_trace
#endif
/* soxr_quality_spec_t.flags: */
#define SOXR_ROLLOFF_LSR2Q 3u /* Reserved for internal use. */
#define SOXR_ROLLOFF_MASK 3u /* For masking these bits. */
#define SOXR_MAINTAIN_3DB_PT 4u /* Reserved for internal use. */
#define SOXR_PROMOTE_TO_LQ 64u /* Reserved for internal use. */
/* soxr_runtime_spec_t.flags: */
#define SOXR_STRICT_BUFFERING 4u /* Reserved for future use. */
#define SOXR_NOSMALLINTOPT 8u /* For test purposes only. */
/* soxr_quality_spec recipe: */
#define SOXR_PRECISIONQ 11 /* Quality specified by the precision parameter. */
#define SOXR_PHASE_MASK 0x30 /* For masking these bits. */
/* soxr_quality_spec flags: */
#define RESET_ON_CLEAR (1u<<31)
#endif
| {
"language": "C"
} |
/***
Copyright (c) 2013 CommonsWare, LLC
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.commonsware.cwac.camera;
import android.graphics.Bitmap;
import android.hardware.Camera;
import android.media.MediaRecorder;
/**
* Specification of a CameraHost, which is the primary way
* by which an app will interact with the library. This
* allows for a single code base supporting those using
* CameraView directly, CameraFragment, the CameraFragment
* for the Android Support package's backport of fragments,
* and who knows what else in the future.
*
* A concrete implementation of this class,
* SimpleCameraHost, provides reasonable defaults for all of
* the functionality. Hence, you can either extend
* SimpleCameraHost and override where needed, or implement
* your own CameraHost from scratch.
*/
public interface CameraHost extends Camera.AutoFocusCallback {
/**
* Indication of what purpose we plan to put the camera
* towards. If your use of the camera is single-purpose,
* return STILL_ONLY (for photos) or VIDEO_ONLY (for
* videos). If you support both (all the time or via some
* sort of user-selectable mode), use ANY. NONE indicates
* that something else should be making this decision
* (for internal use only).
*/
public enum RecordingHint {
STILL_ONLY, VIDEO_ONLY, ANY, NONE
}
/**
* Indication of why we were unable to open up a camera.
* NO_CAMERAS_REPORTED will be used if getCameraId()
* returns a negative number. Exceptions raised when the
* camera is opened will return UNKNOWN.
*/
public enum FailureReason {
NO_CAMERAS_REPORTED(1), UNKNOWN(2);
int value;
private FailureReason(int value) {
this.value=value;
}
}
/**
* Implement this to configure the Camera.Parameters just
* prior to taking a photo.
*
* @param parameters
* the Camera.Parameters to be modified
* @return the Camera.Parameters that was passed in
*/
Camera.Parameters adjustPictureParameters(PictureTransaction xact, Camera.Parameters parameters);
/**
* Implement this to configure the Camera.Parameters for
* the purposes of the preview. Note that you will have
* another chance to configure the Camera.Parameters for a
* specific photo via adjustPictureParameters().
*
* @param parameters
* the Camera.Parameters to be modified
* @return the Camera.Parameters that was passed in
*/
Camera.Parameters adjustPreviewParameters(Camera.Parameters parameters);
/**
* This will be called by the library to let you know that
* auto-focus is available for your use, so you can update
* your UI accordingly.
*/
void autoFocusAvailable();
/**
* This will be called by the library to let you know that
* auto-focus is not available for your use, so you can
* update your UI accordingly.
*/
void autoFocusUnavailable();
/**
* This will be called by the library to give you a chance
* to configure the audio of the MediaRecorder, just prior
* to beginning to record a video. Please ONLY configure
* audio here.
*
* @param cameraId
* the camera that will be used for recording
* @param recorder
* the MediaRecorder to be configured
*/
void configureRecorderAudio(int cameraId, MediaRecorder recorder);
/**
* This will be called by the library to give you a chance
* to configure the output of the MediaRecorder, just
* prior to beginning to record a video. Please ONLY
* configure output here.
*
* @param cameraId
* the camera that will be used for recording
* @param recorder
* the MediaRecorder to be configured
*/
void configureRecorderOutput(int cameraId, MediaRecorder recorder);
/**
* This will be called by the library to give you a chance
* to configure the profile of the MediaRecorder, just
* prior to beginning to record a video. Please ONLY
* configure profile here.
*
* @param cameraId
* the camera that will be used for recording
* @param recorder
* the MediaRecorder to be configured
*/
void configureRecorderProfile(int cameraId, MediaRecorder recorder);
/**
* @return the ID of the camera that you want to use for
* previews and picture/video taking with the
* associated CameraView instance
*/
int getCameraId();
/**
* @return the DeviceProfile to use for custom-tailoring
* the behavior of CameraView, to overcome
* device-specific idiosyncrasies
*/
DeviceProfile getDeviceProfile();
/**
* Called to allow you to be able to indicate what size
* photo should be taken.
*
* @param parameters
* the current camera parameters
* @return the size of photo to take (note: must be a
* supported size!)
*/
Camera.Size getPictureSize(PictureTransaction xact, Camera.Parameters parameters);
/**
* Called to allow you to indicate what size preview
* should be used
*
* @param displayOrientation
* orientation of the display in degrees
* @param width
* width of the available preview space
* @param height
* height of the available preview space
* @param parameters
* the current camera parameters
* @return the size of the preview to use (note: must be a
* supported preview size!)
*/
Camera.Size getPreviewSize(int displayOrientation, int width,
int height, Camera.Parameters parameters);
/**
* Same as getPreviewSize(), but called when we anticipate
* taking videos, as some devices may work better with
* lower-resolution previews, to reduce CPU load
*
* @param displayOrientation
* orientation of the display in degrees
* @param width
* width of the available preview space
* @param height
* height of the available preview space
* @param parameters
* the current camera parameters
* @param deviceHint
* the size that the device itself thinks should
* be used for video, which sometimes is
* ridiculously low
* @return the size of the preview to use (note: must be a
* supported preview size!)
*/
Camera.Size getPreferredPreviewSizeForVideo(int displayOrientation,
int width,
int height,
Camera.Parameters parameters,
Camera.Size deviceHint);
/**
* @return the Camera.ShutterCallback to be used with the
* camera, for sound effects and such
*/
Camera.ShutterCallback getShutterCallback();
/**
* Called when something blows up in CameraView, to allow
* you to alert the user as you see fit
*
* @param e
* an Exception indicating what went wrong
*/
void handleException(Exception e);
/**
* @return true if you want the saved output to be
* mirrored when using the front-facing camera,
* false to leave it alone
*/
boolean mirrorFFC();
/**
* Called when a picture has been taken. This will be
* called on a background thread.
*
* @param bitmap
* Bitmap of the picture
*/
void saveImage(PictureTransaction xact, Bitmap bitmap);
/**
* Called when a picture has been taken. This will be
* called on a background thread.
*
* @param image
* byte array of the picture data (e.g., JPEG)
*/
void saveImage(PictureTransaction xact, byte[] image);
/**
* @return true if you want the camera to keep the preview
* disabled after taking a picture (e.g., you want
* to present the picture to the user for editing
* or processing), false if you want preview to be
* re-enabled (e.g., you want the user to be able
* to take another picture right away)
*/
boolean useSingleShotMode();
/**
* @return a RecordingHint value indicating what you
* intend to use the camera for
*/
RecordingHint getRecordingHint();
/**
* Called when we failed to open the camera for one reason
* or another, so you can let the user know
*
* @param reason
* a FailureReason indicating what went wrong
*/
void onCameraFail(FailureReason reason);
boolean useFullBleedPreview();
float maxPictureCleanupHeapUsage();
}
| {
"language": "C"
} |
/* nautilus-bookmark.c - implementation of individual bookmarks.
*
* Copyright (C) 1999, 2000 Eazel, Inc.
* Copyright (C) 2011, Red Hat, Inc.
*
* The Gnome Library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* The Gnome Library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with the Gnome Library; see the file COPYING.LIB. If not,
* see <http://www.gnu.org/licenses/>.
*
* Authors: John Sullivan <sullivan@eazel.com>
* Cosimo Cecchi <cosimoc@redhat.com>
*/
#include <config.h>
#include "nautilus-bookmark.h"
#include <eel/eel-vfs-extensions.h>
#include <gio/gio.h>
#include <glib/gi18n.h>
#include <gtk/gtk.h>
#include "nautilus-file.h"
#include "nautilus-file-utilities.h"
#include "nautilus-icon-names.h"
#define DEBUG_FLAG NAUTILUS_DEBUG_BOOKMARKS
#include "nautilus-debug.h"
enum
{
CONTENTS_CHANGED,
LAST_SIGNAL
};
enum
{
PROP_NAME = 1,
PROP_CUSTOM_NAME,
PROP_LOCATION,
PROP_ICON,
PROP_SYMBOLIC_ICON,
NUM_PROPERTIES
};
#define ELLIPSISED_MENU_ITEM_MIN_CHARS 32
static GParamSpec *properties[NUM_PROPERTIES] = { NULL };
static guint signals[LAST_SIGNAL];
struct _NautilusBookmark
{
GObject parent_instance;
char *name;
gboolean has_custom_name;
GFile *location;
GIcon *icon;
GIcon *symbolic_icon;
NautilusFile *file;
char *scroll_file;
gboolean exists;
guint exists_id;
GCancellable *cancellable;
};
static void nautilus_bookmark_disconnect_file (NautilusBookmark *file);
G_DEFINE_TYPE (NautilusBookmark, nautilus_bookmark, G_TYPE_OBJECT);
static void
nautilus_bookmark_set_name_internal (NautilusBookmark *bookmark,
const char *new_name)
{
if (g_strcmp0 (bookmark->name, new_name) != 0)
{
g_free (bookmark->name);
bookmark->name = g_strdup (new_name);
g_object_notify_by_pspec (G_OBJECT (bookmark), properties[PROP_NAME]);
}
}
static void
bookmark_set_name_from_ready_file (NautilusBookmark *self,
NautilusFile *file)
{
g_autofree gchar *display_name = NULL;
if (self->has_custom_name)
{
return;
}
display_name = nautilus_file_get_display_name (self->file);
if (nautilus_file_is_other_locations (self->file))
{
nautilus_bookmark_set_name_internal (self, _("Other Locations"));
}
else if (nautilus_file_is_home (self->file))
{
nautilus_bookmark_set_name_internal (self, _("Home"));
}
else if (g_strcmp0 (self->name, display_name) != 0)
{
nautilus_bookmark_set_name_internal (self, display_name);
DEBUG ("%s: name changed to %s", nautilus_bookmark_get_name (self), display_name);
}
}
static void
bookmark_file_changed_callback (NautilusFile *file,
NautilusBookmark *bookmark)
{
g_autoptr (GFile) location = NULL;
g_assert (file == bookmark->file);
DEBUG ("%s: file changed", nautilus_bookmark_get_name (bookmark));
location = nautilus_file_get_location (file);
if (!g_file_equal (bookmark->location, location) &&
!nautilus_file_is_in_trash (file))
{
DEBUG ("%s: file got moved", nautilus_bookmark_get_name (bookmark));
g_object_unref (bookmark->location);
bookmark->location = g_object_ref (location);
g_object_notify_by_pspec (G_OBJECT (bookmark), properties[PROP_LOCATION]);
g_signal_emit (bookmark, signals[CONTENTS_CHANGED], 0);
}
if (nautilus_file_is_gone (file) ||
nautilus_file_is_in_trash (file))
{
/* The file we were monitoring has been trashed, deleted,
* or moved in a way that we didn't notice. We should make
* a spanking new NautilusFile object for this
* location so if a new file appears in this place
* we will notice. However, we can't immediately do so
* because creating a new NautilusFile directly as a result
* of noticing that a file went away may trigger i/o on that file
* again, noticing it is gone, leading to a loop.
* So, the new NautilusFile is created when the bookmark
* is used again. However, this is not really a problem, as
* we don't want to change the icon or anything about the
* bookmark just because it's not there anymore.
*/
DEBUG ("%s: trashed", nautilus_bookmark_get_name (bookmark));
nautilus_bookmark_disconnect_file (bookmark);
}
else
{
bookmark_set_name_from_ready_file (bookmark, file);
}
}
static void
apply_warning_emblem (GIcon **base,
gboolean symbolic)
{
GIcon *emblemed_icon;
g_autoptr (GIcon) warning = NULL;
g_autoptr (GEmblem) emblem = NULL;
if (symbolic)
{
warning = g_themed_icon_new ("dialog-warning-symbolic");
}
else
{
warning = g_themed_icon_new ("dialog-warning");
}
emblem = g_emblem_new (warning);
emblemed_icon = g_emblemed_icon_new (*base, emblem);
g_object_unref (*base);
*base = emblemed_icon;
}
gboolean
nautilus_bookmark_get_is_builtin (NautilusBookmark *bookmark)
{
GUserDirectory xdg_type;
/* if this is not an XDG dir, it's never builtin */
if (!nautilus_bookmark_get_xdg_type (bookmark, &xdg_type))
{
return FALSE;
}
/* exclude XDG locations which are not in our builtin list */
return (xdg_type != G_USER_DIRECTORY_DESKTOP) &&
(xdg_type != G_USER_DIRECTORY_TEMPLATES) &&
(xdg_type != G_USER_DIRECTORY_PUBLIC_SHARE);
}
gboolean
nautilus_bookmark_get_xdg_type (NautilusBookmark *bookmark,
GUserDirectory *directory)
{
gboolean match;
GFile *location;
const gchar *path;
GUserDirectory dir;
match = FALSE;
for (dir = 0; dir < G_USER_N_DIRECTORIES; dir++)
{
path = g_get_user_special_dir (dir);
if (!path)
{
continue;
}
location = g_file_new_for_path (path);
match = g_file_equal (location, bookmark->location);
g_object_unref (location);
if (match)
{
break;
}
}
if (match && directory != NULL)
{
*directory = dir;
}
return match;
}
static GIcon *
get_native_icon (NautilusBookmark *bookmark,
gboolean symbolic)
{
GUserDirectory xdg_type;
GIcon *icon = NULL;
if (bookmark->file == NULL)
{
goto out;
}
if (!nautilus_bookmark_get_xdg_type (bookmark, &xdg_type))
{
goto out;
}
if (xdg_type < G_USER_N_DIRECTORIES)
{
if (symbolic)
{
icon = nautilus_special_directory_get_symbolic_icon (xdg_type);
}
else
{
icon = nautilus_special_directory_get_icon (xdg_type);
}
}
out:
if (icon == NULL)
{
if (symbolic)
{
icon = g_themed_icon_new (NAUTILUS_ICON_FOLDER);
}
else
{
icon = g_themed_icon_new (NAUTILUS_ICON_FULLCOLOR_FOLDER);
}
}
return icon;
}
static void
nautilus_bookmark_set_icon_to_default (NautilusBookmark *bookmark)
{
g_autoptr (GIcon) icon = NULL;
g_autoptr (GIcon) symbolic_icon = NULL;
if (g_file_is_native (bookmark->location))
{
symbolic_icon = get_native_icon (bookmark, TRUE);
icon = get_native_icon (bookmark, FALSE);
}
else
{
symbolic_icon = g_themed_icon_new (NAUTILUS_ICON_FOLDER_REMOTE);
icon = g_themed_icon_new (NAUTILUS_ICON_FULLCOLOR_FOLDER_REMOTE);
}
if (!bookmark->exists)
{
DEBUG ("%s: file does not exist, add emblem", nautilus_bookmark_get_name (bookmark));
apply_warning_emblem (&icon, FALSE);
apply_warning_emblem (&symbolic_icon, TRUE);
}
DEBUG ("%s: setting icon to default", nautilus_bookmark_get_name (bookmark));
g_object_set (bookmark,
"icon", icon,
"symbolic-icon", symbolic_icon,
NULL);
}
static void
nautilus_bookmark_disconnect_file (NautilusBookmark *bookmark)
{
if (bookmark->file != NULL)
{
DEBUG ("%s: disconnecting file",
nautilus_bookmark_get_name (bookmark));
g_signal_handlers_disconnect_by_func (bookmark->file,
G_CALLBACK (bookmark_file_changed_callback),
bookmark);
g_clear_object (&bookmark->file);
}
if (bookmark->cancellable != NULL)
{
g_cancellable_cancel (bookmark->cancellable);
g_clear_object (&bookmark->cancellable);
}
if (bookmark->exists_id != 0)
{
g_source_remove (bookmark->exists_id);
bookmark->exists_id = 0;
}
}
static void
nautilus_bookmark_connect_file (NautilusBookmark *bookmark)
{
if (bookmark->file != NULL)
{
DEBUG ("%s: file already connected, returning",
nautilus_bookmark_get_name (bookmark));
return;
}
if (bookmark->exists)
{
DEBUG ("%s: creating file", nautilus_bookmark_get_name (bookmark));
bookmark->file = nautilus_file_get (bookmark->location);
g_assert (!nautilus_file_is_gone (bookmark->file));
g_signal_connect_object (bookmark->file, "changed",
G_CALLBACK (bookmark_file_changed_callback), bookmark, 0);
}
if (bookmark->icon == NULL ||
bookmark->symbolic_icon == NULL)
{
nautilus_bookmark_set_icon_to_default (bookmark);
}
if (bookmark->file != NULL &&
nautilus_file_check_if_ready (bookmark->file, NAUTILUS_FILE_ATTRIBUTE_INFO))
{
bookmark_set_name_from_ready_file (bookmark, bookmark->file);
}
if (bookmark->name == NULL)
{
bookmark->name = nautilus_compute_title_for_location (bookmark->location);
}
}
static void
nautilus_bookmark_set_exists (NautilusBookmark *bookmark,
gboolean exists)
{
if (bookmark->exists == exists)
{
return;
}
bookmark->exists = exists;
DEBUG ("%s: setting bookmark to exist: %d\n",
nautilus_bookmark_get_name (bookmark), exists);
/* refresh icon */
nautilus_bookmark_set_icon_to_default (bookmark);
}
static gboolean
exists_non_native_idle_cb (gpointer user_data)
{
NautilusBookmark *bookmark = user_data;
bookmark->exists_id = 0;
nautilus_bookmark_set_exists (bookmark, FALSE);
return FALSE;
}
static void
exists_query_info_ready_cb (GObject *source,
GAsyncResult *res,
gpointer user_data)
{
g_autoptr (GFileInfo) info = NULL;
NautilusBookmark *bookmark;
g_autoptr (GError) error = NULL;
gboolean exists = FALSE;
info = g_file_query_info_finish (G_FILE (source), res, &error);
if (!info && g_error_matches (error, G_IO_ERROR, G_IO_ERROR_CANCELLED))
{
return;
}
bookmark = user_data;
if (info)
{
exists = TRUE;
g_clear_object (&bookmark->cancellable);
}
nautilus_bookmark_set_exists (bookmark, exists);
}
static void
nautilus_bookmark_update_exists (NautilusBookmark *bookmark)
{
/* Non-native locations are not probed; just mark them as non-existent from an idle callback. */
if (!g_file_is_native (bookmark->location) &&
bookmark->exists_id == 0)
{
bookmark->exists_id =
g_idle_add (exists_non_native_idle_cb, bookmark);
return;
}
if (bookmark->cancellable != NULL)
{
return;
}
bookmark->cancellable = g_cancellable_new ();
g_file_query_info_async (bookmark->location,
G_FILE_ATTRIBUTE_STANDARD_TYPE,
0, G_PRIORITY_DEFAULT,
bookmark->cancellable,
exists_query_info_ready_cb, bookmark);
}
/* GObject methods */
static void
nautilus_bookmark_set_property (GObject *object,
guint property_id,
const GValue *value,
GParamSpec *pspec)
{
NautilusBookmark *self = NAUTILUS_BOOKMARK (object);
GIcon *new_icon;
switch (property_id)
{
case PROP_ICON:
{
new_icon = g_value_get_object (value);
if (new_icon != NULL && !g_icon_equal (self->icon, new_icon))
{
g_clear_object (&self->icon);
self->icon = g_object_ref (new_icon);
}
}
break;
case PROP_SYMBOLIC_ICON:
{
new_icon = g_value_get_object (value);
if (new_icon != NULL && !g_icon_equal (self->symbolic_icon, new_icon))
{
g_clear_object (&self->symbolic_icon);
self->symbolic_icon = g_object_ref (new_icon);
}
}
break;
case PROP_LOCATION:
{
self->location = g_value_dup_object (value);
}
break;
case PROP_CUSTOM_NAME:
{
self->has_custom_name = g_value_get_boolean (value);
}
break;
case PROP_NAME:
{
nautilus_bookmark_set_name_internal (self, g_value_get_string (value));
}
break;
default:
{
G_OBJECT_WARN_INVALID_PROPERTY_ID (object, property_id, pspec);
}
break;
}
}
static void
nautilus_bookmark_get_property (GObject *object,
guint property_id,
GValue *value,
GParamSpec *pspec)
{
NautilusBookmark *self = NAUTILUS_BOOKMARK (object);
switch (property_id)
{
case PROP_NAME:
{
g_value_set_string (value, self->name);
}
break;
case PROP_ICON:
{
g_value_set_object (value, self->icon);
}
break;
case PROP_SYMBOLIC_ICON:
{
g_value_set_object (value, self->symbolic_icon);
}
break;
case PROP_LOCATION:
{
g_value_set_object (value, self->location);
}
break;
case PROP_CUSTOM_NAME:
{
g_value_set_boolean (value, self->has_custom_name);
}
break;
default:
{
G_OBJECT_WARN_INVALID_PROPERTY_ID (object, property_id, pspec);
}
break;
}
}
static void
nautilus_bookmark_finalize (GObject *object)
{
NautilusBookmark *bookmark;
g_assert (NAUTILUS_IS_BOOKMARK (object));
bookmark = NAUTILUS_BOOKMARK (object);
nautilus_bookmark_disconnect_file (bookmark);
g_object_unref (bookmark->location);
g_clear_object (&bookmark->icon);
g_clear_object (&bookmark->symbolic_icon);
g_free (bookmark->name);
g_free (bookmark->scroll_file);
G_OBJECT_CLASS (nautilus_bookmark_parent_class)->finalize (object);
}
static void
nautilus_bookmark_constructed (GObject *obj)
{
NautilusBookmark *self = NAUTILUS_BOOKMARK (obj);
nautilus_bookmark_connect_file (self);
nautilus_bookmark_update_exists (self);
}
static void
nautilus_bookmark_class_init (NautilusBookmarkClass *class)
{
GObjectClass *oclass = G_OBJECT_CLASS (class);
oclass->finalize = nautilus_bookmark_finalize;
oclass->get_property = nautilus_bookmark_get_property;
oclass->set_property = nautilus_bookmark_set_property;
oclass->constructed = nautilus_bookmark_constructed;
signals[CONTENTS_CHANGED] =
g_signal_new ("contents-changed",
G_TYPE_FROM_CLASS (class),
G_SIGNAL_RUN_LAST,
0,
NULL, NULL,
g_cclosure_marshal_VOID__VOID,
G_TYPE_NONE, 0);
properties[PROP_NAME] =
g_param_spec_string ("name",
"Bookmark's name",
"The name of this bookmark",
NULL,
G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS | G_PARAM_CONSTRUCT);
properties[PROP_CUSTOM_NAME] =
g_param_spec_boolean ("custom-name",
"Whether the bookmark has a custom name",
"Whether the bookmark has a custom name",
FALSE,
G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS | G_PARAM_CONSTRUCT);
properties[PROP_LOCATION] =
g_param_spec_object ("location",
"Bookmark's location",
"The location of this bookmark",
G_TYPE_FILE,
G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS | G_PARAM_CONSTRUCT_ONLY);
properties[PROP_ICON] =
g_param_spec_object ("icon",
"Bookmark's icon",
"The icon of this bookmark",
G_TYPE_ICON,
G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS);
properties[PROP_SYMBOLIC_ICON] =
g_param_spec_object ("symbolic-icon",
"Bookmark's symbolic icon",
"The symbolic icon of this bookmark",
G_TYPE_ICON,
G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS);
g_object_class_install_properties (oclass, NUM_PROPERTIES, properties);
}
static void
nautilus_bookmark_init (NautilusBookmark *bookmark)
{
bookmark->exists = TRUE;
}
const gchar *
nautilus_bookmark_get_name (NautilusBookmark *bookmark)
{
g_return_val_if_fail (NAUTILUS_IS_BOOKMARK (bookmark), NULL);
return bookmark->name;
}
gboolean
nautilus_bookmark_get_has_custom_name (NautilusBookmark *bookmark)
{
g_return_val_if_fail (NAUTILUS_IS_BOOKMARK (bookmark), FALSE);
return (bookmark->has_custom_name);
}
/**
* nautilus_bookmark_compare_with:
*
* Check whether two bookmarks are considered identical.
* @a: first NautilusBookmark*.
* @b: second NautilusBookmark*.
*
* Return value: 0 if @a and @b have same name and uri, 1 otherwise
* (GCompareFunc style)
**/
int
nautilus_bookmark_compare_with (gconstpointer a,
gconstpointer b)
{
NautilusBookmark *bookmark_a;
NautilusBookmark *bookmark_b;
g_return_val_if_fail (NAUTILUS_IS_BOOKMARK ((gpointer) a), 1);
g_return_val_if_fail (NAUTILUS_IS_BOOKMARK ((gpointer) b), 1);
bookmark_a = NAUTILUS_BOOKMARK ((gpointer) a);
bookmark_b = NAUTILUS_BOOKMARK ((gpointer) b);
if (!g_file_equal (bookmark_a->location,
bookmark_b->location))
{
return 1;
}
if (g_strcmp0 (bookmark_a->name,
bookmark_b->name) != 0)
{
return 1;
}
return 0;
}
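/* Illustrative note (not in the original source): the 0/1, GCompareFunc-style
 * return value is meant for equality tests, e.g.
 * g_list_find_custom (bookmarks, bookmark, nautilus_bookmark_compare_with),
 * rather than for sorting, so no ordering between distinct bookmarks is defined.
 */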
GIcon *
nautilus_bookmark_get_symbolic_icon (NautilusBookmark *bookmark)
{
g_return_val_if_fail (NAUTILUS_IS_BOOKMARK (bookmark), NULL);
/* Try to connect a file in case the file exists now but didn't earlier. */
nautilus_bookmark_connect_file (bookmark);
if (bookmark->symbolic_icon)
{
return g_object_ref (bookmark->symbolic_icon);
}
return NULL;
}
GIcon *
nautilus_bookmark_get_icon (NautilusBookmark *bookmark)
{
g_return_val_if_fail (NAUTILUS_IS_BOOKMARK (bookmark), NULL);
/* Try to connect a file in case the file exists now but didn't earlier. */
nautilus_bookmark_connect_file (bookmark);
if (bookmark->icon)
{
return g_object_ref (bookmark->icon);
}
return NULL;
}
GFile *
nautilus_bookmark_get_location (NautilusBookmark *bookmark)
{
g_return_val_if_fail (NAUTILUS_IS_BOOKMARK (bookmark), NULL);
/* Try to connect a file in case the file exists now but didn't earlier.
* This allows a bookmark to update its image properly in the case
* where a new file appears with the same URI as a previously-deleted
* file. Calling connect_file here means that attempts to activate the
* bookmark will update its image if possible.
*/
nautilus_bookmark_connect_file (bookmark);
return g_object_ref (bookmark->location);
}
char *
nautilus_bookmark_get_uri (NautilusBookmark *bookmark)
{
g_autoptr (GFile) file = NULL;
file = nautilus_bookmark_get_location (bookmark);
return g_file_get_uri (file);
}
NautilusBookmark *
nautilus_bookmark_new (GFile *location,
const gchar *custom_name)
{
NautilusBookmark *new_bookmark;
new_bookmark = NAUTILUS_BOOKMARK (g_object_new (NAUTILUS_TYPE_BOOKMARK,
"location", location,
"name", custom_name,
"custom-name", custom_name != NULL,
NULL));
return new_bookmark;
}
/**
* nautilus_bookmark_menu_item_new:
*
* Return a menu item representing a bookmark.
* @bookmark: The bookmark the menu item represents.
* Return value: A newly-created bookmark, not yet shown.
**/
GtkWidget *
nautilus_bookmark_menu_item_new (NautilusBookmark *bookmark)
{
GtkWidget *menu_item;
GtkLabel *label;
const char *name;
name = nautilus_bookmark_get_name (bookmark);
menu_item = gtk_menu_item_new_with_label (name);
label = GTK_LABEL (gtk_bin_get_child (GTK_BIN (menu_item)));
gtk_label_set_use_underline (label, FALSE);
gtk_label_set_ellipsize (label, PANGO_ELLIPSIZE_END);
gtk_label_set_max_width_chars (label, ELLIPSISED_MENU_ITEM_MIN_CHARS);
return menu_item;
}
void
nautilus_bookmark_set_scroll_pos (NautilusBookmark *bookmark,
const char *uri)
{
g_free (bookmark->scroll_file);
bookmark->scroll_file = g_strdup (uri);
}
char *
nautilus_bookmark_get_scroll_pos (NautilusBookmark *bookmark)
{
return g_strdup (bookmark->scroll_file);
}
| {
"language": "C"
} |
#include "Python.h"
#ifdef X87_DOUBLE_ROUNDING
/* On x86 platforms using an x87 FPU, this function is called from the
Py_FORCE_DOUBLE macro (defined in pymath.h) to force a floating-point
number out of an 80-bit x87 FPU register and into a 64-bit memory location,
thus rounding from extended precision to double precision. */
double _Py_force_double(double x)
{
volatile double y;
y = x;
return y;
}
#endif
#ifdef HAVE_GCC_ASM_FOR_X87
/* inline assembly for getting and setting the 387 FPU control word on
gcc/x86 */
#ifdef _Py_MEMORY_SANITIZER
__attribute__((no_sanitize_memory))
#endif
unsigned short _Py_get_387controlword(void) {
unsigned short cw;
__asm__ __volatile__ ("fnstcw %0" : "=m" (cw));
return cw;
}
void _Py_set_387controlword(unsigned short cw) {
__asm__ __volatile__ ("fldcw %0" : : "m" (cw));
}
#endif
#ifndef HAVE_HYPOT
double hypot(double x, double y)
{
double yx;
x = fabs(x);
y = fabs(y);
if (x < y) {
double temp = x;
x = y;
y = temp;
}
if (x == 0.)
return 0.;
else {
yx = y/x;
return x*sqrt(1.+yx*yx);
}
}
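/* Worked example (illustrative, not part of the original file): for
 * x = 3e300, y = 4e300 the naive sqrt(x*x + y*y) would overflow to +inf,
 * whereas here yx = y/x = 4/3 and the result is
 * 3e300 * sqrt(1 + 16/9) = 3e300 * 5/3 = 5e300, well within double range. */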
#endif /* HAVE_HYPOT */
#ifndef HAVE_COPYSIGN
double
copysign(double x, double y)
{
/* use atan2 to distinguish -0. from 0. */
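/* (Illustrative note, not in the original source: on IEEE-754/Annex F
 * implementations atan2(+0., -1.) == +pi and atan2(-0., -1.) == -pi, so
 * the sign of a zero y is recovered even though -0. == 0. compares equal.) */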
if (y > 0. || (y == 0. && atan2(y, -1.) > 0.)) {
return fabs(x);
} else {
return -fabs(x);
}
}
#endif /* HAVE_COPYSIGN */
#ifndef HAVE_ROUND
double
round(double x)
{
double absx, y;
absx = fabs(x);
y = floor(absx);
if (absx - y >= 0.5)
y += 1.0;
return copysign(y, x);
}
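/* Worked example (illustrative, not in the original source): round(2.5)
 * gives absx = 2.5, y = floor(2.5) = 2.0, and since 2.5 - 2.0 >= 0.5 the
 * result is copysign(3.0, 2.5) = 3.0; likewise round(-2.5) = -3.0, i.e.
 * halfway cases round away from zero rather than to even. */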
#endif /* HAVE_ROUND */
| {
"language": "C"
} |
#include "ge.h"
/*
r = p + q
*/
void ge_add(ge_p1p1 *r,const ge_p3 *p,const ge_cached *q)
{
fe t0;
#include "ge_add.h"
}
| {
"language": "C"
} |
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* (C) Copyright 2017 Rockchip Electronics Co., Ltd
*/
#ifndef __CONFIG_RK3128_COMMON_H
#define __CONFIG_RK3128_COMMON_H
#include "rockchip-common.h"
#define CONFIG_SYS_MAXARGS 16
#define CONFIG_BAUDRATE 115200
#define CONFIG_SYS_MALLOC_LEN (32 << 20)
#define CONFIG_SYS_CBSIZE 1024
#define CONFIG_SKIP_LOWLEVEL_INIT
#define CONFIG_SYS_TIMER_RATE (24 * 1000 * 1000)
#define CONFIG_SYS_TIMER_BASE 0x200440a0 /* TIMER5 */
#define CONFIG_SYS_TIMER_COUNTER (CONFIG_SYS_TIMER_BASE + 8)
#define CONFIG_SYS_INIT_SP_ADDR 0x60100000
#define CONFIG_SYS_LOAD_ADDR 0x60800800
#define CONFIG_SYS_BOOTM_LEN (64 << 20) /* 64M */
/* MMC/SD IP block */
#define CONFIG_BOUNCE_BUFFER
/* RAW SD card / eMMC locations. */
#define CONFIG_SYS_SPI_U_BOOT_OFFS (128 << 10)
#define CONFIG_SYS_MMCSD_FS_BOOT_PARTITION 1
#define CONFIG_SYS_SDRAM_BASE 0x60000000
#define CONFIG_NR_DRAM_BANKS 2
#define SDRAM_MAX_SIZE 0x80000000
#define CONFIG_SPI_FLASH
#define CONFIG_SF_DEFAULT_SPEED 20000000
#define CONFIG_USB_OHCI_NEW
#define CONFIG_SYS_USB_OHCI_MAX_ROOT_PORTS 1
#ifndef CONFIG_SPL_BUILD
/* usb mass storage */
#define ENV_MEM_LAYOUT_SETTINGS \
"scriptaddr=0x60500000\0" \
"pxefile_addr_r=0x60600000\0" \
"fdt_addr_r=0x61f00000\0" \
"kernel_addr_r=0x62000000\0" \
"ramdisk_addr_r=0x64000000\0"
#include <config_distro_bootcmd.h>
#define CONFIG_EXTRA_ENV_SETTINGS \
ENV_MEM_LAYOUT_SETTINGS \
"partitions=" PARTS_DEFAULT \
BOOTENV
#endif
#endif
| {
"language": "C"
} |
/*
* Copyright (c) 2016, Devan Lai
*
* Permission to use, copy, modify, and/or distribute this software
* for any purpose with or without fee is hereby granted, provided
* that the above copyright notice and this permission notice
* appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
* LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
* NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef WEBUSB_DEFS_H_INCLUDED
#define WEBUSB_DEFS_H_INCLUDED
#include <stdint.h>
#define WEBUSB_REQ_GET_URL 0x02
#define WEBUSB_DT_URL 3
#define WEBUSB_URL_SCHEME_HTTP 0
#define WEBUSB_URL_SCHEME_HTTPS 1
struct webusb_platform_descriptor {
uint8_t bLength;
uint8_t bDescriptorType;
uint8_t bDevCapabilityType;
uint8_t bReserved;
uint8_t platformCapabilityUUID[16];
uint16_t bcdVersion;
uint8_t bVendorCode;
uint8_t iLandingPage;
} __attribute__((packed));
#define WEBUSB_PLATFORM_DESCRIPTOR_SIZE sizeof(struct webusb_platform_descriptor)
#define WEBUSB_UUID {0x38, 0xB6, 0x08, 0x34, 0xA9, 0x09, 0xA0, 0x47,0x8B, 0xFD, 0xA0, 0x76, 0x88, 0x15, 0xB6, 0x65}
struct webusb_url_descriptor {
uint8_t bLength;
uint8_t bDescriptorType;
uint8_t bScheme;
char URL[];
} __attribute__((packed));
#define WEBUSB_DT_URL_DESCRIPTOR_SIZE 3
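/* Illustrative layout (not part of the original header; the values are
 * assumptions based on the struct above): a URL descriptor for a
 * hypothetical HTTPS landing page "example.com/dfu" would carry
 * bLength = WEBUSB_DT_URL_DESCRIPTOR_SIZE + strlen("example.com/dfu") = 18,
 * bDescriptorType = WEBUSB_DT_URL, bScheme = WEBUSB_URL_SCHEME_HTTPS, and
 * the URL bytes with no scheme prefix and no trailing NUL. */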
#endif
| {
"language": "C"
} |
/*
* Arch specific code for mt7621 based boards, based on code for Ralink boards
*
* Copyright (C) 2018 Tobias Schramm <tobleminer@gmail.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#include <stddef.h>
#include <stdint.h>
#include "config.h"
#define READREG(r) *(volatile uint32_t *)(r)
#define WRITEREG(r,v) *(volatile uint32_t *)(r) = v
#define KSEG1ADDR(_x) (((_x) & 0x1fffffff) | 0xa0000000)
#define UART_BASE 0xBE000C00
#define UART_TBR_OFFSET 0x00
#define UART_LSR_OFFSET 0x14
#define UART_LSR_TEMT (1 << 6)
#define UART_READ(r) READREG(UART_BASE + (r))
#define UART_WRITE(r,v) WRITEREG(UART_BASE + (r), (v))
void board_putc(int ch)
{
while (((UART_READ(UART_LSR_OFFSET)) & UART_LSR_TEMT) == 0);
UART_WRITE(UART_TBR_OFFSET, ch);
while (((UART_READ(UART_LSR_OFFSET)) & UART_LSR_TEMT) == 0);
}
void board_init(void)
{
}
| {
"language": "C"
} |
/*
* Copyright (c) 2012, Texas Instruments Incorporated - http://www.ti.com/
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* \addtogroup cc2538
* @{
*
* \defgroup cc2538-rtimer cc2538 rtimer
*
* Implementation of the rtimer module for the cc2538
*
* The rtimer runs on the Sleep Timer. This is a design choice, as many parts
* of Contiki like rtimers with a value of RTIMER_ARCH_SECOND being a power of
* two. The ST runs on the 32kHz clock, which can provide us with an excellent
* value of 32768 for RTIMER_ARCH_SECOND.
*
* Additionally, since the ST keeps running in PM2, we can do things like drop
* to PM2 and schedule a wake-up time through the rtimer API.
*
* \note If the 32kHz clock is running on the 32kHz RC OSC, the rtimer is
* not 100% accurate (the RC OSC does not run at exactly 32.768 kHz). For
* applications requiring higher accuracy, the 32kHz clock should be changed to
* use the XOSC as its source. To see which low-frequency OSC the 32kHz clock
* is running on, see cpu/cc2538/clock.c.
*
* \sa cpu/cc2538/clock.c
* @{
*/
/**
* \file
* Header file for the cc2538 rtimer driver
*/
#ifndef RTIMER_ARCH_H_
#define RTIMER_ARCH_H_
#include "contiki.h"
#include "dev/gptimer.h"
/* Do the math in 32bits to save precision.
* Round to nearest integer rather than truncate. */
#define US_TO_RTIMERTICKS(US) ((US) >= 0 ? \
(((int32_t)(US) * (RTIMER_ARCH_SECOND) + 500000) / 1000000L) : \
((int32_t)(US) * (RTIMER_ARCH_SECOND) - 500000) / 1000000L)
#define RTIMERTICKS_TO_US(T) ((T) >= 0 ? \
(((int32_t)(T) * 1000000L + ((RTIMER_ARCH_SECOND) / 2)) / (RTIMER_ARCH_SECOND)) : \
((int32_t)(T) * 1000000L - ((RTIMER_ARCH_SECOND) / 2)) / (RTIMER_ARCH_SECOND))
/* A 64-bit version because the 32-bit one cannot handle T >= 4295 ticks.
Intended only for positive values of T. */
#define RTIMERTICKS_TO_US_64(T) ((uint32_t)(((uint64_t)(T) * 1000000 + ((RTIMER_ARCH_SECOND) / 2)) / (RTIMER_ARCH_SECOND)))
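/*
 * Worked examples (illustrative, not part of the original header), taking
 * RTIMER_ARCH_SECOND == 32768 as described above:
 *
 *   US_TO_RTIMERTICKS(1000)     == 33       (1 ms is ~32.77 ticks, rounded)
 *   US_TO_RTIMERTICKS(30)       == 1
 *   RTIMERTICKS_TO_US(33)       == 1007     (33 ticks is ~1.007 ms)
 *   RTIMERTICKS_TO_US_64(32768) == 1000000  (one second; use the 64-bit
 *                                            variant here, as noted above)
 */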
/** \sa RTIMER_NOW() */
rtimer_clock_t rtimer_arch_now(void);
/**
* \brief Get the time of the next scheduled rtimer trigger
* \return The time next rtimer ISR is scheduled for
*/
rtimer_clock_t rtimer_arch_next_trigger(void);
#endif /* RTIMER_ARCH_H_ */
/**
* @}
* @}
*/
| {
"language": "C"
} |
/*
Copyright (C) 1997-2001 Id Software, Inc.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
/*
** GLW_IMP.C
**
** This file contains ALL Win32 specific stuff having to do with the
** OpenGL refresh. When a port is being made the following functions
** must be implemented by the port:
**
** GLimp_EndFrame
** GLimp_Init
** GLimp_Shutdown
** GLimp_SwitchFullscreen
**
*/
#include <assert.h>
#include <windows.h>
#include "../ref_gl/gl_local.h"
#include "glw_win.h"
#include "winquake.h"
static qboolean GLimp_SwitchFullscreen( int width, int height );
qboolean GLimp_InitGL (void);
glwstate_t glw_state;
extern cvar_t *vid_fullscreen;
extern cvar_t *vid_ref;
static qboolean VerifyDriver( void )
{
char buffer[1024];
strcpy( buffer, qglGetString( GL_RENDERER ) );
strlwr( buffer );
if ( strcmp( buffer, "gdi generic" ) == 0 )
if ( !glw_state.mcd_accelerated )
return false;
return true;
}
/*
** VID_CreateWindow
*/
#define WINDOW_CLASS_NAME "Quake 2"
qboolean VID_CreateWindow( int width, int height, qboolean fullscreen )
{
WNDCLASS wc;
RECT r;
cvar_t *vid_xpos, *vid_ypos;
int stylebits;
int x, y, w, h;
int exstyle;
/* Register the frame class */
wc.style = 0;
wc.lpfnWndProc = (WNDPROC)glw_state.wndproc;
wc.cbClsExtra = 0;
wc.cbWndExtra = 0;
wc.hInstance = glw_state.hInstance;
wc.hIcon = 0;
wc.hCursor = LoadCursor (NULL,IDC_ARROW);
wc.hbrBackground = (void *)COLOR_GRAYTEXT;
wc.lpszMenuName = 0;
wc.lpszClassName = WINDOW_CLASS_NAME;
if (!RegisterClass (&wc) )
ri.Sys_Error (ERR_FATAL, "Couldn't register window class");
if (fullscreen)
{
exstyle = WS_EX_TOPMOST;
stylebits = WS_POPUP|WS_VISIBLE;
}
else
{
exstyle = 0;
stylebits = WINDOW_STYLE;
}
r.left = 0;
r.top = 0;
r.right = width;
r.bottom = height;
AdjustWindowRect (&r, stylebits, FALSE);
w = r.right - r.left;
h = r.bottom - r.top;
if (fullscreen)
{
x = 0;
y = 0;
}
else
{
vid_xpos = ri.Cvar_Get ("vid_xpos", "0", 0);
vid_ypos = ri.Cvar_Get ("vid_ypos", "0", 0);
x = vid_xpos->value;
y = vid_ypos->value;
}
glw_state.hWnd = CreateWindowEx (
exstyle,
WINDOW_CLASS_NAME,
"Quake 2",
stylebits,
x, y, w, h,
NULL,
NULL,
glw_state.hInstance,
NULL);
if (!glw_state.hWnd)
ri.Sys_Error (ERR_FATAL, "Couldn't create window");
ShowWindow( glw_state.hWnd, SW_SHOW );
UpdateWindow( glw_state.hWnd );
// init all the gl stuff for the window
if (!GLimp_InitGL ())
{
ri.Con_Printf( PRINT_ALL, "VID_CreateWindow() - GLimp_InitGL failed\n");
return false;
}
SetForegroundWindow( glw_state.hWnd );
SetFocus( glw_state.hWnd );
// let the sound and input subsystems know about the new window
ri.Vid_NewWindow (width, height);
return true;
}
/*
** GLimp_SetMode
*/
rserr_t GLimp_SetMode( int *pwidth, int *pheight, int mode, qboolean fullscreen )
{
int width, height;
const char *win_fs[] = { "W", "FS" };
ri.Con_Printf( PRINT_ALL, "Initializing OpenGL display\n");
ri.Con_Printf (PRINT_ALL, "...setting mode %d:", mode );
if ( !ri.Vid_GetModeInfo( &width, &height, mode ) )
{
ri.Con_Printf( PRINT_ALL, " invalid mode\n" );
return rserr_invalid_mode;
}
ri.Con_Printf( PRINT_ALL, " %d %d %s\n", width, height, win_fs[fullscreen] );
// destroy the existing window
if (glw_state.hWnd)
{
GLimp_Shutdown ();
}
// do a CDS if needed
if ( fullscreen )
{
DEVMODE dm;
ri.Con_Printf( PRINT_ALL, "...attempting fullscreen\n" );
memset( &dm, 0, sizeof( dm ) );
dm.dmSize = sizeof( dm );
dm.dmPelsWidth = width;
dm.dmPelsHeight = height;
dm.dmFields = DM_PELSWIDTH | DM_PELSHEIGHT;
if ( gl_bitdepth->value != 0 )
{
dm.dmBitsPerPel = gl_bitdepth->value;
dm.dmFields |= DM_BITSPERPEL;
ri.Con_Printf( PRINT_ALL, "...using gl_bitdepth of %d\n", ( int ) gl_bitdepth->value );
}
else
{
HDC hdc = GetDC( NULL );
int bitspixel = GetDeviceCaps( hdc, BITSPIXEL );
ri.Con_Printf( PRINT_ALL, "...using desktop display depth of %d\n", bitspixel );
ReleaseDC( 0, hdc );
}
ri.Con_Printf( PRINT_ALL, "...calling CDS: " );
if ( ChangeDisplaySettings( &dm, CDS_FULLSCREEN ) == DISP_CHANGE_SUCCESSFUL )
{
*pwidth = width;
*pheight = height;
gl_state.fullscreen = true;
ri.Con_Printf( PRINT_ALL, "ok\n" );
if ( !VID_CreateWindow (width, height, true) )
return rserr_invalid_mode;
return rserr_ok;
}
else
{
*pwidth = width;
*pheight = height;
ri.Con_Printf( PRINT_ALL, "failed\n" );
ri.Con_Printf( PRINT_ALL, "...calling CDS assuming dual monitors:" );
dm.dmPelsWidth = width * 2;
dm.dmPelsHeight = height;
dm.dmFields = DM_PELSWIDTH | DM_PELSHEIGHT;
if ( gl_bitdepth->value != 0 )
{
dm.dmBitsPerPel = gl_bitdepth->value;
dm.dmFields |= DM_BITSPERPEL;
}
/*
** our first CDS failed, so maybe we're running on some weird dual monitor
** system
*/
if ( ChangeDisplaySettings( &dm, CDS_FULLSCREEN ) != DISP_CHANGE_SUCCESSFUL )
{
ri.Con_Printf( PRINT_ALL, " failed\n" );
ri.Con_Printf( PRINT_ALL, "...setting windowed mode\n" );
ChangeDisplaySettings( 0, 0 );
*pwidth = width;
*pheight = height;
gl_state.fullscreen = false;
if ( !VID_CreateWindow (width, height, false) )
return rserr_invalid_mode;
return rserr_invalid_fullscreen;
}
else
{
ri.Con_Printf( PRINT_ALL, " ok\n" );
if ( !VID_CreateWindow (width, height, true) )
return rserr_invalid_mode;
gl_state.fullscreen = true;
return rserr_ok;
}
}
}
else
{
ri.Con_Printf( PRINT_ALL, "...setting windowed mode\n" );
ChangeDisplaySettings( 0, 0 );
*pwidth = width;
*pheight = height;
gl_state.fullscreen = false;
if ( !VID_CreateWindow (width, height, false) )
return rserr_invalid_mode;
}
return rserr_ok;
}
/*
** GLimp_Shutdown
**
** This routine does all OS specific shutdown procedures for the OpenGL
** subsystem. Under OpenGL this means NULLing out the current DC and
** HGLRC, deleting the rendering context, and releasing the DC acquired
** for the window. The state structure is also nulled out.
**
*/
void GLimp_Shutdown( void )
{
if ( qwglMakeCurrent && !qwglMakeCurrent( NULL, NULL ) )
ri.Con_Printf( PRINT_ALL, "ref_gl::R_Shutdown() - wglMakeCurrent failed\n");
if ( glw_state.hGLRC )
{
if ( qwglDeleteContext && !qwglDeleteContext( glw_state.hGLRC ) )
ri.Con_Printf( PRINT_ALL, "ref_gl::R_Shutdown() - wglDeleteContext failed\n");
glw_state.hGLRC = NULL;
}
if (glw_state.hDC)
{
if ( !ReleaseDC( glw_state.hWnd, glw_state.hDC ) )
ri.Con_Printf( PRINT_ALL, "ref_gl::R_Shutdown() - ReleaseDC failed\n" );
glw_state.hDC = NULL;
}
if (glw_state.hWnd)
{
DestroyWindow ( glw_state.hWnd );
glw_state.hWnd = NULL;
}
if ( glw_state.log_fp )
{
fclose( glw_state.log_fp );
glw_state.log_fp = 0;
}
UnregisterClass (WINDOW_CLASS_NAME, glw_state.hInstance);
if ( gl_state.fullscreen )
{
ChangeDisplaySettings( 0, 0 );
gl_state.fullscreen = false;
}
}
/*
** GLimp_Init
**
** This routine is responsible for initializing the OS specific portions
** of OpenGL. Under Win32 this means dealing with the pixelformats and
** doing the wgl interface stuff.
*/
qboolean GLimp_Init( void *hinstance, void *wndproc )
{
#define OSR2_BUILD_NUMBER 1111
OSVERSIONINFO vinfo;
vinfo.dwOSVersionInfoSize = sizeof(vinfo);
glw_state.allowdisplaydepthchange = false;
if ( GetVersionEx( &vinfo) )
{
if ( vinfo.dwMajorVersion > 4 )
{
glw_state.allowdisplaydepthchange = true;
}
else if ( vinfo.dwMajorVersion == 4 )
{
if ( vinfo.dwPlatformId == VER_PLATFORM_WIN32_NT )
{
glw_state.allowdisplaydepthchange = true;
}
else if ( vinfo.dwPlatformId == VER_PLATFORM_WIN32_WINDOWS )
{
if ( LOWORD( vinfo.dwBuildNumber ) >= OSR2_BUILD_NUMBER )
{
glw_state.allowdisplaydepthchange = true;
}
}
}
}
else
{
ri.Con_Printf( PRINT_ALL, "GLimp_Init() - GetVersionEx failed\n" );
return false;
}
glw_state.hInstance = ( HINSTANCE ) hinstance;
glw_state.wndproc = wndproc;
return true;
}
qboolean GLimp_InitGL (void)
{
PIXELFORMATDESCRIPTOR pfd =
{
sizeof(PIXELFORMATDESCRIPTOR), // size of this pfd
1, // version number
PFD_DRAW_TO_WINDOW | // support window
PFD_SUPPORT_OPENGL | // support OpenGL
PFD_DOUBLEBUFFER, // double buffered
PFD_TYPE_RGBA, // RGBA type
24, // 24-bit color depth
0, 0, 0, 0, 0, 0, // color bits ignored
0, // no alpha buffer
0, // shift bit ignored
0, // no accumulation buffer
0, 0, 0, 0, // accum bits ignored
32, // 32-bit z-buffer
0, // no stencil buffer
0, // no auxiliary buffer
PFD_MAIN_PLANE, // main layer
0, // reserved
0, 0, 0 // layer masks ignored
};
int pixelformat;
cvar_t *stereo;
stereo = ri.Cvar_Get( "cl_stereo", "0", 0 );
/*
** set PFD_STEREO if necessary
*/
if ( stereo->value != 0 )
{
ri.Con_Printf( PRINT_ALL, "...attempting to use stereo\n" );
pfd.dwFlags |= PFD_STEREO;
gl_state.stereo_enabled = true;
}
else
{
gl_state.stereo_enabled = false;
}
/*
** figure out if we're running on a minidriver or not
*/
if ( strstr( gl_driver->string, "opengl32" ) != 0 )
glw_state.minidriver = false;
else
glw_state.minidriver = true;
/*
** Get a DC for the specified window
*/
if ( glw_state.hDC != NULL )
ri.Con_Printf( PRINT_ALL, "GLimp_Init() - non-NULL DC exists\n" );
if ( ( glw_state.hDC = GetDC( glw_state.hWnd ) ) == NULL )
{
ri.Con_Printf( PRINT_ALL, "GLimp_Init() - GetDC failed\n" );
return false;
}
if ( glw_state.minidriver )
{
if ( (pixelformat = qwglChoosePixelFormat( glw_state.hDC, &pfd)) == 0 )
{
ri.Con_Printf (PRINT_ALL, "GLimp_Init() - qwglChoosePixelFormat failed\n");
return false;
}
if ( qwglSetPixelFormat( glw_state.hDC, pixelformat, &pfd) == FALSE )
{
ri.Con_Printf (PRINT_ALL, "GLimp_Init() - qwglSetPixelFormat failed\n");
return false;
}
qwglDescribePixelFormat( glw_state.hDC, pixelformat, sizeof( pfd ), &pfd );
}
else
{
if ( ( pixelformat = ChoosePixelFormat( glw_state.hDC, &pfd)) == 0 )
{
ri.Con_Printf (PRINT_ALL, "GLimp_Init() - ChoosePixelFormat failed\n");
return false;
}
if ( SetPixelFormat( glw_state.hDC, pixelformat, &pfd) == FALSE )
{
ri.Con_Printf (PRINT_ALL, "GLimp_Init() - SetPixelFormat failed\n");
return false;
}
DescribePixelFormat( glw_state.hDC, pixelformat, sizeof( pfd ), &pfd );
if ( !( pfd.dwFlags & PFD_GENERIC_ACCELERATED ) )
{
extern cvar_t *gl_allow_software;
if ( gl_allow_software->value )
glw_state.mcd_accelerated = true;
else
glw_state.mcd_accelerated = false;
}
else
{
glw_state.mcd_accelerated = true;
}
}
/*
** report if stereo is desired but unavailable
*/
if ( !( pfd.dwFlags & PFD_STEREO ) && ( stereo->value != 0 ) )
{
ri.Con_Printf( PRINT_ALL, "...failed to select stereo pixel format\n" );
ri.Cvar_SetValue( "cl_stereo", 0 );
gl_state.stereo_enabled = false;
}
/*
** startup the OpenGL subsystem by creating a context and making
** it current
*/
if ( ( glw_state.hGLRC = qwglCreateContext( glw_state.hDC ) ) == 0 )
{
ri.Con_Printf (PRINT_ALL, "GLimp_Init() - qwglCreateContext failed\n");
goto fail;
}
if ( !qwglMakeCurrent( glw_state.hDC, glw_state.hGLRC ) )
{
ri.Con_Printf (PRINT_ALL, "GLimp_Init() - qwglMakeCurrent failed\n");
goto fail;
}
if ( !VerifyDriver() )
{
ri.Con_Printf( PRINT_ALL, "GLimp_Init() - no hardware acceleration detected\n" );
goto fail;
}
/*
** print out PFD specifics
*/
ri.Con_Printf( PRINT_ALL, "GL PFD: color(%d-bits) Z(%d-bit)\n", ( int ) pfd.cColorBits, ( int ) pfd.cDepthBits );
return true;
fail:
if ( glw_state.hGLRC )
{
qwglDeleteContext( glw_state.hGLRC );
glw_state.hGLRC = NULL;
}
if ( glw_state.hDC )
{
ReleaseDC( glw_state.hWnd, glw_state.hDC );
glw_state.hDC = NULL;
}
return false;
}
/*
** GLimp_BeginFrame
*/
void GLimp_BeginFrame( float camera_separation )
{
if ( gl_bitdepth->modified )
{
if ( gl_bitdepth->value != 0 && !glw_state.allowdisplaydepthchange )
{
ri.Cvar_SetValue( "gl_bitdepth", 0 );
ri.Con_Printf( PRINT_ALL, "gl_bitdepth requires Win95 OSR2.x or WinNT 4.x\n" );
}
gl_bitdepth->modified = false;
}
if ( camera_separation < 0 && gl_state.stereo_enabled )
{
qglDrawBuffer( GL_BACK_LEFT );
}
else if ( camera_separation > 0 && gl_state.stereo_enabled )
{
qglDrawBuffer( GL_BACK_RIGHT );
}
else
{
qglDrawBuffer( GL_BACK );
}
}
/*
** GLimp_EndFrame
**
** Responsible for doing a swapbuffers and possibly for other stuff
** as yet to be determined. Probably better not to make this a GLimp
** function and instead do a call to GLimp_SwapBuffers.
*/
void GLimp_EndFrame (void)
{
int err;
err = qglGetError();
assert( err == GL_NO_ERROR );
if ( stricmp( gl_drawbuffer->string, "GL_BACK" ) == 0 )
{
if ( !qwglSwapBuffers( glw_state.hDC ) )
ri.Sys_Error( ERR_FATAL, "GLimp_EndFrame() - SwapBuffers() failed!\n" );
}
}
/*
** GLimp_AppActivate
*/
void GLimp_AppActivate( qboolean active )
{
if ( active )
{
SetForegroundWindow( glw_state.hWnd );
ShowWindow( glw_state.hWnd, SW_RESTORE );
}
else
{
if ( vid_fullscreen->value )
ShowWindow( glw_state.hWnd, SW_MINIMIZE );
}
}
| {
"language": "C"
} |
/* **********************************************************
* Copyright (c) 2014 Google, Inc. All rights reserved.
* Copyright (c) 2003-2008 VMware, Inc. All rights reserved.
* **********************************************************/
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of VMware, Inc. nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include "tools.h"
int
main(int argc, char *argv[])
{
print("Inside main\n");
exit(0);
return 0;
}
| {
"language": "C"
} |
/* This file is part of the bladeRF project:
* http://www.github.com/nuand/bladeRF
*
* Copyright (c) 2015 Nuand LLC
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef PKT_8x32_H_
#define PKT_8x32_H_
#include <stdint.h>
#include "pkt_handler.h"
#include "nios_pkt_8x32.h"
void pkt_8x32(struct pkt_buf *b);
#define PKT_8x32 { \
.magic = NIOS_PKT_8x32_MAGIC, \
.init = NULL, \
.exec = pkt_8x32, \
.do_work = NULL, \
}
#endif
| {
"language": "C"
} |
//======== (C) Copyright 2002 Charles G. Cleveland All rights reserved. =========
//
// The copyright to the contents herein is the property of Charles G. Cleveland.
// The contents may be used and/or copied only with the written permission of
// Charles G. Cleveland, or in accordance with the terms and conditions stipulated in
// the agreement/contract under which the contents have been supplied.
//
// Purpose:
//
// $Workfile: AvHClientUtil.h $
// $Date: 2002/07/23 16:59:41 $
//
//-------------------------------------------------------------------------------
// $Log: AvHClientUtil.h,v $
// Revision 1.3 2002/07/23 16:59:41 Flayra
// - AvHCUWorldToScreen now returns true if the position is in front of player
//
// Revision 1.2 2002/05/23 02:34:00 Flayra
// - Post-crash checkin. Restored @Backup from around 4/16. Contains changes for last four weeks of development.
//
//===============================================================================
#ifndef AVH_CLIENTUTIL_H
#define AVH_CLIENTUTIL_H
#include "cl_dll/wrect.h"
#include "cl_dll/cl_dll.h"
#include "cl_dll/ammo.h"
#include "cl_dll/chudmisc.h"
#include "util/nowarnings.h"
#include "common/cl_entity.h"
#include "mod/AvHConstants.h"
#include "mod/AvHMessage.h"
#include "mod/AvHSpecials.h"
int AvHCUGetIconHeightForPlayer(AvHUser3 theUser3);
void AvHCUGetViewAngles(cl_entity_t* inEntity, float* outViewAngles);
void AvHCUGetViewOrigin(cl_entity_t* inEntity, float* outViewOrigin);
bool AvHCUWorldToScreen(float* inWorldCoords, float* outScreenCoords);
bool AvHCUGetIsEntityInPVSAndVisible(int inEntityIndex);
void AvHCUTrimExtraneousLocationText(string& ioTranslatedString);
#endif
| {
"language": "C"
} |
/* arch/arm/mach-msm/smd_qmi.c
*
* QMI Control Driver -- Manages network data connections.
*
* Copyright (C) 2007 Google, Inc.
* Author: Brian Swetland <swetland@google.com>
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/miscdevice.h>
#include <linux/workqueue.h>
#include <linux/wakelock.h>
#include <asm/uaccess.h>
#include <mach/msm_smd.h>
#define QMI_CTL 0x00
#define QMI_WDS 0x01
#define QMI_DMS 0x02
#define QMI_NAS 0x03
#define QMI_RESULT_SUCCESS 0x0000
#define QMI_RESULT_FAILURE 0x0001
struct qmi_msg {
unsigned char service;
unsigned char client_id;
unsigned short txn_id;
unsigned short type;
unsigned short size;
unsigned char *tlv;
};
#define qmi_ctl_client_id 0
#define STATE_OFFLINE 0
#define STATE_QUERYING 1
#define STATE_ONLINE 2
struct qmi_ctxt {
struct miscdevice misc;
struct mutex lock;
unsigned char ctl_txn_id;
unsigned char wds_client_id;
unsigned short wds_txn_id;
unsigned wds_busy;
unsigned wds_handle;
unsigned state_dirty;
unsigned state;
unsigned char addr[4];
unsigned char mask[4];
unsigned char gateway[4];
unsigned char dns1[4];
unsigned char dns2[4];
smd_channel_t *ch;
const char *ch_name;
struct wake_lock wake_lock;
struct work_struct open_work;
struct work_struct read_work;
};
static struct qmi_ctxt *qmi_minor_to_ctxt(unsigned n);
static void qmi_read_work(struct work_struct *ws);
static void qmi_open_work(struct work_struct *work);
void qmi_ctxt_init(struct qmi_ctxt *ctxt, unsigned n)
{
mutex_init(&ctxt->lock);
INIT_WORK(&ctxt->read_work, qmi_read_work);
INIT_WORK(&ctxt->open_work, qmi_open_work);
wake_lock_init(&ctxt->wake_lock, WAKE_LOCK_SUSPEND, ctxt->misc.name);
ctxt->ctl_txn_id = 1;
ctxt->wds_txn_id = 1;
ctxt->wds_busy = 1;
ctxt->state = STATE_OFFLINE;
}
static struct workqueue_struct *qmi_wq;
static int verbose = 0;
/* anyone waiting for a state change waits here */
static DECLARE_WAIT_QUEUE_HEAD(qmi_wait_queue);
static void qmi_dump_msg(struct qmi_msg *msg, const char *prefix)
{
unsigned sz, n;
unsigned char *x;
if (!verbose)
return;
printk(KERN_INFO
"qmi: %s: svc=%02x cid=%02x tid=%04x type=%04x size=%04x\n",
prefix, msg->service, msg->client_id,
msg->txn_id, msg->type, msg->size);
x = msg->tlv;
sz = msg->size;
while (sz >= 3) {
sz -= 3;
n = x[1] | (x[2] << 8);
if (n > sz)
break;
printk(KERN_INFO "qmi: %s: tlv: %02x %04x { ",
prefix, x[0], n);
x += 3;
sz -= n;
while (n-- > 0)
printk("%02x ", *x++);
printk("}\n");
}
}
int qmi_add_tlv(struct qmi_msg *msg,
unsigned type, unsigned size, const void *data)
{
unsigned char *x = msg->tlv + msg->size;
x[0] = type;
x[1] = size;
x[2] = size >> 8;
memcpy(x + 3, data, size);
msg->size += (size + 3);
return 0;
}
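/* Illustrative example (not in the original source): starting from an empty
 * message,
 *     unsigned char n = QMI_WDS;
 *     qmi_add_tlv(&msg, 0x01, 0x01, &n);
 * appends the four bytes { 0x01, 0x01, 0x00, 0x01 } -- tag, 16-bit
 * little-endian length, value -- and grows msg->size from 0 to 4. This is
 * exactly the TLV that qmi_request_wds_cid() builds further down. */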
/* Extract a tagged item from a qmi message buffer,
** taking care not to overrun the buffer.
*/
static int qmi_get_tlv(struct qmi_msg *msg,
unsigned type, unsigned size, void *data)
{
unsigned char *x = msg->tlv;
unsigned len = msg->size;
unsigned n;
while (len >= 3) {
len -= 3;
/* size of this item */
n = x[1] | (x[2] << 8);
if (n > len)
break;
if (x[0] == type) {
if (n != size)
return -1;
memcpy(data, x + 3, size);
return 0;
}
x += (n + 3);
len -= n;
}
return -1;
}
static unsigned qmi_get_status(struct qmi_msg *msg, unsigned *error)
{
unsigned short status[2];
if (qmi_get_tlv(msg, 0x02, sizeof(status), status)) {
*error = 0;
return QMI_RESULT_FAILURE;
} else {
*error = status[1];
return status[0];
}
}
/* 0x01 <qmux-header> <payload> */
#define QMUX_HEADER 13
/* should be >= HEADER + FOOTER */
#define QMUX_OVERHEAD 16
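/* On-the-wire layout as assembled by qmi_send() below (a reading of the
 * code, added here for reference only):
 *
 *   [0]    0x01            interface selector
 *   [1..2] length (LE)     covers everything after the selector byte
 *   [3]    flags           0x00 = from client, 0x80 = from service
 *   [4]    service         QMI_CTL / QMI_WDS / ...
 *   [5]    client id
 *   [6]    message flags   0x00 on send
 *   [7..]  transaction id  1 byte for QMI_CTL, 2 bytes (LE) otherwise
 *   then message type (2 bytes LE), payload size (2 bytes LE), TLV payload
 */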
static int qmi_send(struct qmi_ctxt *ctxt, struct qmi_msg *msg)
{
unsigned char *data;
unsigned hlen;
unsigned len;
int r;
qmi_dump_msg(msg, "send");
if (msg->service == QMI_CTL) {
hlen = QMUX_HEADER - 1;
} else {
hlen = QMUX_HEADER;
}
/* QMUX length is total header + total payload - IFC selector */
len = hlen + msg->size - 1;
if (len > 0xffff)
return -1;
data = msg->tlv - hlen;
/* prepend encap and qmux header */
*data++ = 0x01; /* ifc selector */
/* qmux header */
*data++ = len;
*data++ = len >> 8;
*data++ = 0x00; /* flags: client */
*data++ = msg->service;
*data++ = msg->client_id;
/* qmi header */
*data++ = 0x00; /* flags: send */
*data++ = msg->txn_id;
if (msg->service != QMI_CTL)
*data++ = msg->txn_id >> 8;
*data++ = msg->type;
*data++ = msg->type >> 8;
*data++ = msg->size;
*data++ = msg->size >> 8;
/* len + 1 takes the interface selector into account */
r = smd_write(ctxt->ch, msg->tlv - hlen, len + 1);
if (r != len) {
return -1;
} else {
return 0;
}
}
static void qmi_process_ctl_msg(struct qmi_ctxt *ctxt, struct qmi_msg *msg)
{
unsigned err;
if (msg->type == 0x0022) {
unsigned char n[2];
if (qmi_get_status(msg, &err))
return;
if (qmi_get_tlv(msg, 0x01, sizeof(n), n))
return;
if (n[0] == QMI_WDS) {
printk(KERN_INFO
"qmi: ctl: wds use client_id 0x%02x\n", n[1]);
ctxt->wds_client_id = n[1];
ctxt->wds_busy = 0;
}
}
}
static int qmi_network_get_profile(struct qmi_ctxt *ctxt);
static void swapaddr(unsigned char *src, unsigned char *dst)
{
dst[0] = src[3];
dst[1] = src[2];
dst[2] = src[1];
dst[3] = src[0];
}
static unsigned char zero[4];
static void qmi_read_runtime_profile(struct qmi_ctxt *ctxt, struct qmi_msg *msg)
{
unsigned char tmp[4];
unsigned r;
r = qmi_get_tlv(msg, 0x1e, 4, tmp);
swapaddr(r ? zero : tmp, ctxt->addr);
r = qmi_get_tlv(msg, 0x21, 4, tmp);
swapaddr(r ? zero : tmp, ctxt->mask);
r = qmi_get_tlv(msg, 0x20, 4, tmp);
swapaddr(r ? zero : tmp, ctxt->gateway);
r = qmi_get_tlv(msg, 0x15, 4, tmp);
swapaddr(r ? zero : tmp, ctxt->dns1);
r = qmi_get_tlv(msg, 0x16, 4, tmp);
swapaddr(r ? zero : tmp, ctxt->dns2);
}
static void qmi_process_unicast_wds_msg(struct qmi_ctxt *ctxt,
struct qmi_msg *msg)
{
unsigned err;
switch (msg->type) {
case 0x0021:
if (qmi_get_status(msg, &err)) {
printk(KERN_ERR
"qmi: wds: network stop failed (%04x)\n", err);
} else {
printk(KERN_INFO
"qmi: wds: network stopped\n");
ctxt->state = STATE_OFFLINE;
ctxt->state_dirty = 1;
}
break;
case 0x0020:
if (qmi_get_status(msg, &err)) {
printk(KERN_ERR
"qmi: wds: network start failed (%04x)\n", err);
} else if (qmi_get_tlv(msg, 0x01, sizeof(ctxt->wds_handle), &ctxt->wds_handle)) {
printk(KERN_INFO
"qmi: wds no handle?\n");
} else {
printk(KERN_INFO
"qmi: wds: got handle 0x%08x\n",
ctxt->wds_handle);
}
break;
case 0x002D:
printk("qmi: got network profile\n");
if (ctxt->state == STATE_QUERYING) {
qmi_read_runtime_profile(ctxt, msg);
ctxt->state = STATE_ONLINE;
ctxt->state_dirty = 1;
}
break;
default:
printk(KERN_ERR "qmi: unknown msg type 0x%04x\n", msg->type);
}
ctxt->wds_busy = 0;
}
static void qmi_process_broadcast_wds_msg(struct qmi_ctxt *ctxt,
struct qmi_msg *msg)
{
if (msg->type == 0x0022) {
unsigned char n[2];
if (qmi_get_tlv(msg, 0x01, sizeof(n), n))
return;
switch (n[0]) {
case 1:
printk(KERN_INFO "qmi: wds: DISCONNECTED\n");
ctxt->state = STATE_OFFLINE;
ctxt->state_dirty = 1;
break;
case 2:
printk(KERN_INFO "qmi: wds: CONNECTED\n");
ctxt->state = STATE_QUERYING;
ctxt->state_dirty = 1;
qmi_network_get_profile(ctxt);
break;
case 3:
printk(KERN_INFO "qmi: wds: SUSPENDED\n");
ctxt->state = STATE_OFFLINE;
ctxt->state_dirty = 1;
}
} else {
printk(KERN_ERR "qmi: unknown bcast msg type 0x%04x\n", msg->type);
}
}
static void qmi_process_wds_msg(struct qmi_ctxt *ctxt,
struct qmi_msg *msg)
{
printk("wds: %04x @ %02x\n", msg->type, msg->client_id);
if (msg->client_id == ctxt->wds_client_id) {
qmi_process_unicast_wds_msg(ctxt, msg);
} else if (msg->client_id == 0xff) {
qmi_process_broadcast_wds_msg(ctxt, msg);
} else {
printk(KERN_ERR
"qmi_process_wds_msg client id 0x%02x unknown\n",
msg->client_id);
}
}
static void qmi_process_qmux(struct qmi_ctxt *ctxt,
unsigned char *buf, unsigned sz)
{
struct qmi_msg msg;
/* require a full header */
if (sz < 5)
return;
/* require a size that matches the buffer size */
if (sz != (buf[0] | (buf[1] << 8)))
return;
/* only messages from a service (bit7=1) are allowed */
if (buf[2] != 0x80)
return;
msg.service = buf[3];
msg.client_id = buf[4];
/* annoyingly, CTL messages have a shorter TID */
if (buf[3] == 0) {
if (sz < 7)
return;
msg.txn_id = buf[6];
buf += 7;
sz -= 7;
} else {
if (sz < 8)
return;
msg.txn_id = buf[6] | (buf[7] << 8);
buf += 8;
sz -= 8;
}
/* no type and size!? */
if (sz < 4)
return;
sz -= 4;
msg.type = buf[0] | (buf[1] << 8);
msg.size = buf[2] | (buf[3] << 8);
msg.tlv = buf + 4;
if (sz != msg.size)
return;
qmi_dump_msg(&msg, "recv");
mutex_lock(&ctxt->lock);
switch (msg.service) {
case QMI_CTL:
qmi_process_ctl_msg(ctxt, &msg);
break;
case QMI_WDS:
qmi_process_wds_msg(ctxt, &msg);
break;
default:
printk(KERN_ERR "qmi: msg from unknown svc 0x%02x\n",
msg.service);
break;
}
mutex_unlock(&ctxt->lock);
wake_up(&qmi_wait_queue);
}
#define QMI_MAX_PACKET (256 + QMUX_OVERHEAD)
static void qmi_read_work(struct work_struct *ws)
{
struct qmi_ctxt *ctxt = container_of(ws, struct qmi_ctxt, read_work);
struct smd_channel *ch = ctxt->ch;
unsigned char buf[QMI_MAX_PACKET];
int sz;
for (;;) {
sz = smd_cur_packet_size(ch);
if (sz == 0)
break;
if (sz < smd_read_avail(ch))
break;
if (sz > QMI_MAX_PACKET) {
smd_read(ch, 0, sz);
continue;
}
if (smd_read(ch, buf, sz) != sz) {
printk(KERN_ERR "qmi: not enough data?!\n");
continue;
}
/* interface selector must be 1 */
if (buf[0] != 0x01)
continue;
qmi_process_qmux(ctxt, buf + 1, sz - 1);
}
}
static int qmi_request_wds_cid(struct qmi_ctxt *ctxt);
static void qmi_open_work(struct work_struct *ws)
{
struct qmi_ctxt *ctxt = container_of(ws, struct qmi_ctxt, open_work);
mutex_lock(&ctxt->lock);
qmi_request_wds_cid(ctxt);
mutex_unlock(&ctxt->lock);
}
static void qmi_notify(void *priv, unsigned event)
{
struct qmi_ctxt *ctxt = priv;
switch (event) {
case SMD_EVENT_DATA: {
int sz;
sz = smd_cur_packet_size(ctxt->ch);
if ((sz > 0) && (sz <= smd_read_avail(ctxt->ch))) {
wake_lock_timeout(&ctxt->wake_lock, HZ / 2);
queue_work(qmi_wq, &ctxt->read_work);
}
break;
}
case SMD_EVENT_OPEN:
printk(KERN_INFO "qmi: smd opened\n");
queue_work(qmi_wq, &ctxt->open_work);
break;
case SMD_EVENT_CLOSE:
printk(KERN_INFO "qmi: smd closed\n");
break;
}
}
static int qmi_request_wds_cid(struct qmi_ctxt *ctxt)
{
unsigned char data[64 + QMUX_OVERHEAD];
struct qmi_msg msg;
unsigned char n;
msg.service = QMI_CTL;
msg.client_id = qmi_ctl_client_id;
msg.txn_id = ctxt->ctl_txn_id;
msg.type = 0x0022;
msg.size = 0;
msg.tlv = data + QMUX_HEADER;
ctxt->ctl_txn_id += 2;
n = QMI_WDS;
qmi_add_tlv(&msg, 0x01, 0x01, &n);
return qmi_send(ctxt, &msg);
}
static int qmi_network_get_profile(struct qmi_ctxt *ctxt)
{
unsigned char data[96 + QMUX_OVERHEAD];
struct qmi_msg msg;
msg.service = QMI_WDS;
msg.client_id = ctxt->wds_client_id;
msg.txn_id = ctxt->wds_txn_id;
msg.type = 0x002D;
msg.size = 0;
msg.tlv = data + QMUX_HEADER;
ctxt->wds_txn_id += 2;
return qmi_send(ctxt, &msg);
}
static int qmi_network_up(struct qmi_ctxt *ctxt, char *apn)
{
unsigned char data[96 + QMUX_OVERHEAD];
struct qmi_msg msg;
char *auth_type;
char *user;
char *pass;
for (user = apn; *user; user++) {
if (*user == ' ') {
*user++ = 0;
break;
}
}
for (pass = user; *pass; pass++) {
if (*pass == ' ') {
*pass++ = 0;
break;
}
}
for (auth_type = pass; *auth_type; auth_type++) {
if (*auth_type == ' ') {
*auth_type++ = 0;
break;
}
}
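/* Illustrative example with hypothetical values (not from the original
 * source): a write of "up:internet.apn myuser mypass 3" reaches this
 * function with apn = "internet.apn myuser mypass 3"; the loops above
 * split it in place so that apn = "internet.apn", user = "myuser",
 * pass = "mypass" and auth_type = "3", each NUL-terminated. */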
msg.service = QMI_WDS;
msg.client_id = ctxt->wds_client_id;
msg.txn_id = ctxt->wds_txn_id;
msg.type = 0x0020;
msg.size = 0;
msg.tlv = data + QMUX_HEADER;
ctxt->wds_txn_id += 2;
qmi_add_tlv(&msg, 0x14, strlen(apn), apn);
if (*auth_type)
qmi_add_tlv(&msg, 0x16, strlen(auth_type), auth_type);
if (*user) {
if (!*auth_type) {
unsigned char x;
x = 3;
qmi_add_tlv(&msg, 0x16, 1, &x);
}
qmi_add_tlv(&msg, 0x17, strlen(user), user);
if (*pass)
qmi_add_tlv(&msg, 0x18, strlen(pass), pass);
}
return qmi_send(ctxt, &msg);
}
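/*
 * The string handed to qmi_network_up() is "apn [user [pass [auth]]]",
 * space-separated, and the loops above split it in place.  As an
 * illustration only, "internet bob secret" would request a call on APN
 * "internet" with username "bob" and password "secret"; when a username is
 * present but no auth field is, the code falls back to auth value 3,
 * presumably "allow PAP or CHAP".  An explicit auth field is passed through
 * as raw bytes, not parsed.
 */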
static int qmi_network_down(struct qmi_ctxt *ctxt)
{
unsigned char data[16 + QMUX_OVERHEAD];
struct qmi_msg msg;
msg.service = QMI_WDS;
msg.client_id = ctxt->wds_client_id;
msg.txn_id = ctxt->wds_txn_id;
msg.type = 0x0021;
msg.size = 0;
msg.tlv = data + QMUX_HEADER;
ctxt->wds_txn_id += 2;
qmi_add_tlv(&msg, 0x01, sizeof(ctxt->wds_handle), &ctxt->wds_handle);
return qmi_send(ctxt, &msg);
}
static int qmi_print_state(struct qmi_ctxt *ctxt, char *buf, int max)
{
int i;
char *statename;
if (ctxt->state == STATE_ONLINE) {
statename = "up";
} else if (ctxt->state == STATE_OFFLINE) {
statename = "down";
} else {
statename = "busy";
}
i = scnprintf(buf, max, "STATE=%s\n", statename);
i += scnprintf(buf + i, max - i, "CID=%d\n",ctxt->wds_client_id);
if (ctxt->state != STATE_ONLINE){
return i;
}
i += scnprintf(buf + i, max - i, "ADDR=%d.%d.%d.%d\n",
ctxt->addr[0], ctxt->addr[1], ctxt->addr[2], ctxt->addr[3]);
i += scnprintf(buf + i, max - i, "MASK=%d.%d.%d.%d\n",
ctxt->mask[0], ctxt->mask[1], ctxt->mask[2], ctxt->mask[3]);
i += scnprintf(buf + i, max - i, "GATEWAY=%d.%d.%d.%d\n",
ctxt->gateway[0], ctxt->gateway[1], ctxt->gateway[2],
ctxt->gateway[3]);
i += scnprintf(buf + i, max - i, "DNS1=%d.%d.%d.%d\n",
ctxt->dns1[0], ctxt->dns1[1], ctxt->dns1[2], ctxt->dns1[3]);
i += scnprintf(buf + i, max - i, "DNS2=%d.%d.%d.%d\n",
ctxt->dns2[0], ctxt->dns2[1], ctxt->dns2[2], ctxt->dns2[3]);
return i;
}
static ssize_t qmi_read(struct file *fp, char __user *buf,
size_t count, loff_t *pos)
{
struct qmi_ctxt *ctxt = fp->private_data;
char msg[256];
int len;
int r;
mutex_lock(&ctxt->lock);
for (;;) {
if (ctxt->state_dirty) {
ctxt->state_dirty = 0;
len = qmi_print_state(ctxt, msg, 256);
break;
}
mutex_unlock(&ctxt->lock);
r = wait_event_interruptible(qmi_wait_queue, ctxt->state_dirty);
if (r < 0)
return r;
mutex_lock(&ctxt->lock);
}
mutex_unlock(&ctxt->lock);
if (len > count)
len = count;
if (copy_to_user(buf, msg, len))
return -EFAULT;
return len;
}
static ssize_t qmi_write(struct file *fp, const char __user *buf,
size_t count, loff_t *pos)
{
struct qmi_ctxt *ctxt = fp->private_data;
unsigned char cmd[64];
int len;
int r;
if (count < 1)
return 0;
len = count > 63 ? 63 : count;
if (copy_from_user(cmd, buf, len))
return -EFAULT;
cmd[len] = 0;
	/* lazily strip a trailing newline (e.g. from echo) */
if (cmd[len-1] == '\n') {
cmd[len-1] = 0;
len--;
}
if (!strncmp(cmd, "verbose", 7)) {
verbose = 1;
} else if (!strncmp(cmd, "terse", 5)) {
verbose = 0;
} else if (!strncmp(cmd, "poll", 4)) {
ctxt->state_dirty = 1;
wake_up(&qmi_wait_queue);
} else if (!strncmp(cmd, "down", 4)) {
retry_down:
mutex_lock(&ctxt->lock);
if (ctxt->wds_busy) {
mutex_unlock(&ctxt->lock);
r = wait_event_interruptible(qmi_wait_queue, !ctxt->wds_busy);
if (r < 0)
return r;
goto retry_down;
}
ctxt->wds_busy = 1;
qmi_network_down(ctxt);
mutex_unlock(&ctxt->lock);
} else if (!strncmp(cmd, "up:", 3)) {
retry_up:
mutex_lock(&ctxt->lock);
if (ctxt->wds_busy) {
mutex_unlock(&ctxt->lock);
r = wait_event_interruptible(qmi_wait_queue, !ctxt->wds_busy);
if (r < 0)
return r;
goto retry_up;
}
ctxt->wds_busy = 1;
qmi_network_up(ctxt, cmd+3);
mutex_unlock(&ctxt->lock);
} else {
return -EINVAL;
}
return count;
}
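/*
 * Taken together, qmi_read()/qmi_write() expose a small text protocol on the
 * misc devices registered below.  A plausible user-space session (the APN
 * and credentials are illustrative):
 *
 *   echo "up:internet bob secret" > /dev/qmi0    start the data call
 *   echo "poll" > /dev/qmi0                      force a state report
 *   cat /dev/qmi0                                blocks until the state is
 *                                                dirty, then prints the
 *                                                STATE=/CID=/ADDR=... block
 *   echo "down" > /dev/qmi0                      tear the call down
 */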
static int qmi_open(struct inode *ip, struct file *fp)
{
struct qmi_ctxt *ctxt = qmi_minor_to_ctxt(MINOR(ip->i_rdev));
int r = 0;
if (!ctxt) {
printk(KERN_ERR "unknown qmi misc %d\n", MINOR(ip->i_rdev));
return -ENODEV;
}
fp->private_data = ctxt;
mutex_lock(&ctxt->lock);
	if (!ctxt->ch)
r = smd_open(ctxt->ch_name, &ctxt->ch, ctxt, qmi_notify);
if (r == 0)
wake_up(&qmi_wait_queue);
mutex_unlock(&ctxt->lock);
return r;
}
static int qmi_release(struct inode *ip, struct file *fp)
{
return 0;
}
static struct file_operations qmi_fops = {
.owner = THIS_MODULE,
.read = qmi_read,
.write = qmi_write,
.open = qmi_open,
.release = qmi_release,
};
static struct qmi_ctxt qmi_device0 = {
.ch_name = "SMD_DATA5_CNTL",
.misc = {
.minor = MISC_DYNAMIC_MINOR,
.name = "qmi0",
.fops = &qmi_fops,
}
};
static struct qmi_ctxt qmi_device1 = {
.ch_name = "SMD_DATA6_CNTL",
.misc = {
.minor = MISC_DYNAMIC_MINOR,
.name = "qmi1",
.fops = &qmi_fops,
}
};
static struct qmi_ctxt qmi_device2 = {
.ch_name = "SMD_DATA7_CNTL",
.misc = {
.minor = MISC_DYNAMIC_MINOR,
.name = "qmi2",
.fops = &qmi_fops,
}
};
static struct qmi_ctxt *qmi_minor_to_ctxt(unsigned n)
{
if (n == qmi_device0.misc.minor)
return &qmi_device0;
if (n == qmi_device1.misc.minor)
return &qmi_device1;
if (n == qmi_device2.misc.minor)
return &qmi_device2;
	return NULL;
}
static int __init qmi_init(void)
{
int ret;
qmi_wq = create_singlethread_workqueue("qmi");
	if (!qmi_wq)
return -ENOMEM;
qmi_ctxt_init(&qmi_device0, 0);
qmi_ctxt_init(&qmi_device1, 1);
qmi_ctxt_init(&qmi_device2, 2);
ret = misc_register(&qmi_device0.misc);
if (ret == 0)
ret = misc_register(&qmi_device1.misc);
if (ret == 0)
ret = misc_register(&qmi_device2.misc);
return ret;
}
module_init(qmi_init);
| {
"language": "C"
} |
/*
* AMR narrowband decoder
* Copyright (c) 2006-2007 Robert Swain
* Copyright (c) 2009 Colin McQuillan
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* AMR narrowband decoder
*
* This decoder uses floats for simplicity and so is not bit-exact. One
* difference is that differences in phase can accumulate. The test sequences
* in 3GPP TS 26.074 can still be useful.
*
* - Comparing this file's output to the output of the ref decoder gives a
* PSNR of 30 to 80. Plotting the output samples shows a difference in
* phase in some areas.
*
* - Comparing both decoders against their input, this decoder gives a similar
* PSNR. If the test sequence homing frames are removed (this decoder does
* not detect them), the PSNR is at least as good as the reference on 140
* out of 169 tests.
*/
#include <string.h>
#include <math.h>
#include "libavutil/channel_layout.h"
#include "libavutil/float_dsp.h"
#include "avcodec.h"
#include "libavutil/common.h"
#include "libavutil/avassert.h"
#include "celp_math.h"
#include "celp_filters.h"
#include "acelp_filters.h"
#include "acelp_vectors.h"
#include "acelp_pitch_delay.h"
#include "lsp.h"
#include "amr.h"
#include "internal.h"
#include "amrnbdata.h"
#define AMR_BLOCK_SIZE 160 ///< samples per frame
#define AMR_SAMPLE_BOUND 32768.0 ///< threshold for synthesis overflow
/**
* Scale from constructed speech to [-1,1]
*
* AMR is designed to produce 16-bit PCM samples (3GPP TS 26.090 4.2) but
* upscales by two (section 6.2.2).
*
* Fundamentally, this scale is determined by energy_mean through
* the fixed vector contribution to the excitation vector.
*/
#define AMR_SAMPLE_SCALE (2.0 / 32768.0)
/** Prediction factor for 12.2kbit/s mode */
#define PRED_FAC_MODE_12k2 0.65
#define LSF_R_FAC (8000.0 / 32768.0) ///< LSF residual tables to Hertz
#define MIN_LSF_SPACING (50.0488 / 8000.0) ///< Ensures stability of LPC filter
#define PITCH_LAG_MIN_MODE_12k2 18 ///< Lower bound on decoded lag search in 12.2kbit/s mode
/** Initial energy in dB. Also used for bad frames (unimplemented). */
#define MIN_ENERGY -14.0
/** Maximum sharpening factor
*
* The specification says 0.8, which should be 13107, but the reference C code
* uses 13017 instead. (Amusingly the same applies to SHARP_MAX in g729dec.c.)
*/
#define SHARP_MAX 0.79449462890625
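/*
 * Worked out, assuming the Q14 scaling implied by the figures above:
 * round(0.8 * 16384) = 13107, while the reference code's 13017 gives
 * 13017 / 16384 = 0.79449462890625, the constant used here.
 */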
/** Number of impulse response coefficients used for tilt factor */
#define AMR_TILT_RESPONSE 22
/** Tilt factor = 1st reflection coefficient * gamma_t */
#define AMR_TILT_GAMMA_T 0.8
/** Adaptive gain control factor used in post-filter */
#define AMR_AGC_ALPHA 0.9
typedef struct AMRContext {
AMRNBFrame frame; ///< decoded AMR parameters (lsf coefficients, codebook indexes, etc)
uint8_t bad_frame_indicator; ///< bad frame ? 1 : 0
enum Mode cur_frame_mode;
int16_t prev_lsf_r[LP_FILTER_ORDER]; ///< residual LSF vector from previous subframe
double lsp[4][LP_FILTER_ORDER]; ///< lsp vectors from current frame
double prev_lsp_sub4[LP_FILTER_ORDER]; ///< lsp vector for the 4th subframe of the previous frame
float lsf_q[4][LP_FILTER_ORDER]; ///< Interpolated LSF vector for fixed gain smoothing
float lsf_avg[LP_FILTER_ORDER]; ///< vector of averaged lsf vector
float lpc[4][LP_FILTER_ORDER]; ///< lpc coefficient vectors for 4 subframes
uint8_t pitch_lag_int; ///< integer part of pitch lag from current subframe
float excitation_buf[PITCH_DELAY_MAX + LP_FILTER_ORDER + 1 + AMR_SUBFRAME_SIZE]; ///< current excitation and all necessary excitation history
float *excitation; ///< pointer to the current excitation vector in excitation_buf
float pitch_vector[AMR_SUBFRAME_SIZE]; ///< adaptive code book (pitch) vector
float fixed_vector[AMR_SUBFRAME_SIZE]; ///< algebraic codebook (fixed) vector (must be kept zero between frames)
float prediction_error[4]; ///< quantified prediction errors {20log10(^gamma_gc)} for previous four subframes
float pitch_gain[5]; ///< quantified pitch gains for the current and previous four subframes
float fixed_gain[5]; ///< quantified fixed gains for the current and previous four subframes
float beta; ///< previous pitch_gain, bounded by [0.0,SHARP_MAX]
uint8_t diff_count; ///< the number of subframes for which diff has been above 0.65
uint8_t hang_count; ///< the number of subframes since a hangover period started
float prev_sparse_fixed_gain; ///< previous fixed gain; used by anti-sparseness processing to determine "onset"
uint8_t prev_ir_filter_nr; ///< previous impulse response filter "impNr": 0 - strong, 1 - medium, 2 - none
uint8_t ir_filter_onset; ///< flag for impulse response filter strength
float postfilter_mem[10]; ///< previous intermediate values in the formant filter
float tilt_mem; ///< previous input to tilt compensation filter
float postfilter_agc; ///< previous factor used for adaptive gain control
float high_pass_mem[2]; ///< previous intermediate values in the high-pass filter
float samples_in[LP_FILTER_ORDER + AMR_SUBFRAME_SIZE]; ///< floating point samples
ACELPFContext acelpf_ctx; ///< context for filters for ACELP-based codecs
ACELPVContext acelpv_ctx; ///< context for vector operations for ACELP-based codecs
CELPFContext celpf_ctx; ///< context for filters for CELP-based codecs
CELPMContext celpm_ctx; ///< context for fixed point math operations
} AMRContext;
/** Double version of ff_weighted_vector_sumf() */
static void weighted_vector_sumd(double *out, const double *in_a,
const double *in_b, double weight_coeff_a,
double weight_coeff_b, int length)
{
int i;
for (i = 0; i < length; i++)
out[i] = weight_coeff_a * in_a[i]
+ weight_coeff_b * in_b[i];
}
static av_cold int amrnb_decode_init(AVCodecContext *avctx)
{
AMRContext *p = avctx->priv_data;
int i;
if (avctx->channels > 1) {
avpriv_report_missing_feature(avctx, "multi-channel AMR");
return AVERROR_PATCHWELCOME;
}
avctx->channels = 1;
avctx->channel_layout = AV_CH_LAYOUT_MONO;
if (!avctx->sample_rate)
avctx->sample_rate = 8000;
avctx->sample_fmt = AV_SAMPLE_FMT_FLT;
// p->excitation always points to the same position in p->excitation_buf
p->excitation = &p->excitation_buf[PITCH_DELAY_MAX + LP_FILTER_ORDER + 1];
for (i = 0; i < LP_FILTER_ORDER; i++) {
p->prev_lsp_sub4[i] = lsp_sub4_init[i] * 1000 / (float)(1 << 15);
p->lsf_avg[i] = p->lsf_q[3][i] = lsp_avg_init[i] / (float)(1 << 15);
}
for (i = 0; i < 4; i++)
p->prediction_error[i] = MIN_ENERGY;
ff_acelp_filter_init(&p->acelpf_ctx);
ff_acelp_vectors_init(&p->acelpv_ctx);
ff_celp_filter_init(&p->celpf_ctx);
ff_celp_math_init(&p->celpm_ctx);
return 0;
}
/**
* Unpack an RFC4867 speech frame into the AMR frame mode and parameters.
*
* The order of speech bits is specified by 3GPP TS 26.101.
*
* @param p the context
* @param buf pointer to the input buffer
* @param buf_size size of the input buffer
*
* @return the frame mode
*/
static enum Mode unpack_bitstream(AMRContext *p, const uint8_t *buf,
int buf_size)
{
enum Mode mode;
// Decode the first octet.
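    // In the octet-aligned storage format (RFC 4867) the first octet is laid
    // out as [P | FT3 FT2 FT1 FT0 | Q | P P], P being padding bits, so the
    // frame type lives in bits 6..3 and the frame-quality bit in bit 2.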
mode = buf[0] >> 3 & 0x0F; // frame type
p->bad_frame_indicator = (buf[0] & 0x4) != 0x4; // quality bit
if (mode >= N_MODES || buf_size < frame_sizes_nb[mode] + 1) {
return NO_DATA;
}
if (mode < MODE_DTX)
ff_amr_bit_reorder((uint16_t *) &p->frame, sizeof(AMRNBFrame), buf + 1,
amr_unpacking_bitmaps_per_mode[mode]);
return mode;
}
/// @name AMR pitch LPC coefficient decoding functions
/// @{
/**
* Interpolate the LSF vector (used for fixed gain smoothing).
* The interpolation is done over all four subframes even in MODE_12k2.
*
* @param[in] ctx The Context
* @param[in,out] lsf_q LSFs in [0,1] for each subframe
* @param[in] lsf_new New LSFs in [0,1] for subframe 4
*/
static void interpolate_lsf(ACELPVContext *ctx, float lsf_q[4][LP_FILTER_ORDER], float *lsf_new)
{
int i;
for (i = 0; i < 4; i++)
ctx->weighted_vector_sumf(lsf_q[i], lsf_q[3], lsf_new,
0.25 * (3 - i), 0.25 * (i + 1),
LP_FILTER_ORDER);
}
/**
* Decode a set of 5 split-matrix quantized lsf indexes into an lsp vector.
*
* @param p the context
* @param lsp output LSP vector
* @param lsf_no_r LSF vector without the residual vector added
* @param lsf_quantizer pointers to LSF dictionary tables
* @param quantizer_offset offset in tables
* @param sign for the 3 dictionary table
* @param update store data for computing the next frame's LSFs
*/
static void lsf2lsp_for_mode12k2(AMRContext *p, double lsp[LP_FILTER_ORDER],
const float lsf_no_r[LP_FILTER_ORDER],
const int16_t *lsf_quantizer[5],
const int quantizer_offset,
const int sign, const int update)
{
int16_t lsf_r[LP_FILTER_ORDER]; // residual LSF vector
float lsf_q[LP_FILTER_ORDER]; // quantified LSF vector
int i;
for (i = 0; i < LP_FILTER_ORDER >> 1; i++)
memcpy(&lsf_r[i << 1], &lsf_quantizer[i][quantizer_offset],
2 * sizeof(*lsf_r));
if (sign) {
lsf_r[4] *= -1;
lsf_r[5] *= -1;
}
if (update)
memcpy(p->prev_lsf_r, lsf_r, LP_FILTER_ORDER * sizeof(*lsf_r));
for (i = 0; i < LP_FILTER_ORDER; i++)
lsf_q[i] = lsf_r[i] * (LSF_R_FAC / 8000.0) + lsf_no_r[i] * (1.0 / 8000.0);
ff_set_min_dist_lsf(lsf_q, MIN_LSF_SPACING, LP_FILTER_ORDER);
if (update)
interpolate_lsf(&p->acelpv_ctx, p->lsf_q, lsf_q);
ff_acelp_lsf2lspd(lsp, lsf_q, LP_FILTER_ORDER);
}
/**
* Decode a set of 5 split-matrix quantized lsf indexes into 2 lsp vectors.
*
* @param p pointer to the AMRContext
*/
static void lsf2lsp_5(AMRContext *p)
{
const uint16_t *lsf_param = p->frame.lsf;
float lsf_no_r[LP_FILTER_ORDER]; // LSFs without the residual vector
const int16_t *lsf_quantizer[5];
int i;
lsf_quantizer[0] = lsf_5_1[lsf_param[0]];
lsf_quantizer[1] = lsf_5_2[lsf_param[1]];
lsf_quantizer[2] = lsf_5_3[lsf_param[2] >> 1];
lsf_quantizer[3] = lsf_5_4[lsf_param[3]];
lsf_quantizer[4] = lsf_5_5[lsf_param[4]];
for (i = 0; i < LP_FILTER_ORDER; i++)
lsf_no_r[i] = p->prev_lsf_r[i] * LSF_R_FAC * PRED_FAC_MODE_12k2 + lsf_5_mean[i];
lsf2lsp_for_mode12k2(p, p->lsp[1], lsf_no_r, lsf_quantizer, 0, lsf_param[2] & 1, 0);
lsf2lsp_for_mode12k2(p, p->lsp[3], lsf_no_r, lsf_quantizer, 2, lsf_param[2] & 1, 1);
// interpolate LSP vectors at subframes 1 and 3
weighted_vector_sumd(p->lsp[0], p->prev_lsp_sub4, p->lsp[1], 0.5, 0.5, LP_FILTER_ORDER);
weighted_vector_sumd(p->lsp[2], p->lsp[1] , p->lsp[3], 0.5, 0.5, LP_FILTER_ORDER);
}
/**
* Decode a set of 3 split-matrix quantized lsf indexes into an lsp vector.
*
* @param p pointer to the AMRContext
*/
static void lsf2lsp_3(AMRContext *p)
{
const uint16_t *lsf_param = p->frame.lsf;
int16_t lsf_r[LP_FILTER_ORDER]; // residual LSF vector
float lsf_q[LP_FILTER_ORDER]; // quantified LSF vector
const int16_t *lsf_quantizer;
int i, j;
lsf_quantizer = (p->cur_frame_mode == MODE_7k95 ? lsf_3_1_MODE_7k95 : lsf_3_1)[lsf_param[0]];
memcpy(lsf_r, lsf_quantizer, 3 * sizeof(*lsf_r));
lsf_quantizer = lsf_3_2[lsf_param[1] << (p->cur_frame_mode <= MODE_5k15)];
memcpy(lsf_r + 3, lsf_quantizer, 3 * sizeof(*lsf_r));
lsf_quantizer = (p->cur_frame_mode <= MODE_5k15 ? lsf_3_3_MODE_5k15 : lsf_3_3)[lsf_param[2]];
memcpy(lsf_r + 6, lsf_quantizer, 4 * sizeof(*lsf_r));
// calculate mean-removed LSF vector and add mean
for (i = 0; i < LP_FILTER_ORDER; i++)
lsf_q[i] = (lsf_r[i] + p->prev_lsf_r[i] * pred_fac[i]) * (LSF_R_FAC / 8000.0) + lsf_3_mean[i] * (1.0 / 8000.0);
ff_set_min_dist_lsf(lsf_q, MIN_LSF_SPACING, LP_FILTER_ORDER);
// store data for computing the next frame's LSFs
interpolate_lsf(&p->acelpv_ctx, p->lsf_q, lsf_q);
memcpy(p->prev_lsf_r, lsf_r, LP_FILTER_ORDER * sizeof(*lsf_r));
ff_acelp_lsf2lspd(p->lsp[3], lsf_q, LP_FILTER_ORDER);
// interpolate LSP vectors at subframes 1, 2 and 3
for (i = 1; i <= 3; i++)
for(j = 0; j < LP_FILTER_ORDER; j++)
p->lsp[i-1][j] = p->prev_lsp_sub4[j] +
(p->lsp[3][j] - p->prev_lsp_sub4[j]) * 0.25 * i;
}
/// @}
/// @name AMR pitch vector decoding functions
/// @{
/**
* Like ff_decode_pitch_lag(), but with 1/6 resolution
*/
static void decode_pitch_lag_1_6(int *lag_int, int *lag_frac, int pitch_index,
const int prev_lag_int, const int subframe)
{
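    // 10923/65536 is very nearly 1/6 (65536/6 ~= 10922.7), so the
    // "* 10923 >> 16" expressions below perform an integer division by 6
    // of the 1/6-resolution index without a division instruction.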
if (subframe == 0 || subframe == 2) {
if (pitch_index < 463) {
*lag_int = (pitch_index + 107) * 10923 >> 16;
*lag_frac = pitch_index - *lag_int * 6 + 105;
} else {
*lag_int = pitch_index - 368;
*lag_frac = 0;
}
} else {
*lag_int = ((pitch_index + 5) * 10923 >> 16) - 1;
*lag_frac = pitch_index - *lag_int * 6 - 3;
*lag_int += av_clip(prev_lag_int - 5, PITCH_LAG_MIN_MODE_12k2,
PITCH_DELAY_MAX - 9);
}
}
static void decode_pitch_vector(AMRContext *p,
const AMRNBSubframe *amr_subframe,
const int subframe)
{
int pitch_lag_int, pitch_lag_frac;
enum Mode mode = p->cur_frame_mode;
if (p->cur_frame_mode == MODE_12k2) {
decode_pitch_lag_1_6(&pitch_lag_int, &pitch_lag_frac,
amr_subframe->p_lag, p->pitch_lag_int,
subframe);
} else {
ff_decode_pitch_lag(&pitch_lag_int, &pitch_lag_frac,
amr_subframe->p_lag,
p->pitch_lag_int, subframe,
mode != MODE_4k75 && mode != MODE_5k15,
mode <= MODE_6k7 ? 4 : (mode == MODE_7k95 ? 5 : 6));
pitch_lag_frac *= 2;
}
p->pitch_lag_int = pitch_lag_int; // store previous lag in a uint8_t
pitch_lag_int += pitch_lag_frac > 0;
/* Calculate the pitch vector by interpolating the past excitation at the
pitch lag using a b60 hamming windowed sinc function. */
p->acelpf_ctx.acelp_interpolatef(p->excitation,
p->excitation + 1 - pitch_lag_int,
ff_b60_sinc, 6,
pitch_lag_frac + 6 - 6*(pitch_lag_frac > 0),
10, AMR_SUBFRAME_SIZE);
memcpy(p->pitch_vector, p->excitation, AMR_SUBFRAME_SIZE * sizeof(float));
}
/// @}
/// @name AMR algebraic code book (fixed) vector decoding functions
/// @{
/**
* Decode a 10-bit algebraic codebook index from a 10.2 kbit/s frame.
*/
static void decode_10bit_pulse(int code, int pulse_position[8],
int i1, int i2, int i3)
{
// coded using 7+3 bits with the 3 LSBs being, individually, the LSB of 1 of
// the 3 pulses and the upper 7 bits being coded in base 5
const uint8_t *positions = base_five_table[code >> 3];
pulse_position[i1] = (positions[2] << 1) + ( code & 1);
pulse_position[i2] = (positions[1] << 1) + ((code >> 1) & 1);
pulse_position[i3] = (positions[0] << 1) + ((code >> 2) & 1);
}
/**
* Decode the algebraic codebook index to pulse positions and signs and
* construct the algebraic codebook vector for MODE_10k2.
*
* @param fixed_index positions of the eight pulses
* @param fixed_sparse pointer to the algebraic codebook vector
*/
static void decode_8_pulses_31bits(const int16_t *fixed_index,
AMRFixed *fixed_sparse)
{
int pulse_position[8];
int i, temp;
decode_10bit_pulse(fixed_index[4], pulse_position, 0, 4, 1);
decode_10bit_pulse(fixed_index[5], pulse_position, 2, 6, 5);
// coded using 5+2 bits with the 2 LSBs being, individually, the LSB of 1 of
// the 2 pulses and the upper 5 bits being coded in base 5
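    // ((x * 25) + 12) >> 5 maps the 5-bit field x in 0..31 onto 0..24
    // (roughly x * 25/32 with a rounding offset), which is then split into
    // two base-5 digits for pulses 3 and 7.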
temp = ((fixed_index[6] >> 2) * 25 + 12) >> 5;
pulse_position[3] = temp % 5;
pulse_position[7] = temp / 5;
if (pulse_position[7] & 1)
pulse_position[3] = 4 - pulse_position[3];
pulse_position[3] = (pulse_position[3] << 1) + ( fixed_index[6] & 1);
pulse_position[7] = (pulse_position[7] << 1) + ((fixed_index[6] >> 1) & 1);
fixed_sparse->n = 8;
for (i = 0; i < 4; i++) {
const int pos1 = (pulse_position[i] << 2) + i;
const int pos2 = (pulse_position[i + 4] << 2) + i;
const float sign = fixed_index[i] ? -1.0 : 1.0;
fixed_sparse->x[i ] = pos1;
fixed_sparse->x[i + 4] = pos2;
fixed_sparse->y[i ] = sign;
fixed_sparse->y[i + 4] = pos2 < pos1 ? -sign : sign;
}
}
/**
* Decode the algebraic codebook index to pulse positions and signs,
* then construct the algebraic codebook vector.
*
 *                              nb of pulses | bits encoding pulses
 * For MODE_4k75 or MODE_5k15,             2 | 1-3, 4-6, 7
 *     MODE_5k9,                           2 | 1, 2-4, 5-6, 7-9
 *     MODE_6k7,                           3 | 1-3, 4, 5-7, 8, 9-11
 *     MODE_7k4 or MODE_7k95,              4 | 1-3, 4-6, 7-9, 10, 11-13
*
* @param fixed_sparse pointer to the algebraic codebook vector
* @param pulses algebraic codebook indexes
* @param mode mode of the current frame
* @param subframe current subframe number
*/
static void decode_fixed_sparse(AMRFixed *fixed_sparse, const uint16_t *pulses,
const enum Mode mode, const int subframe)
{
av_assert1(MODE_4k75 <= (signed)mode && mode <= MODE_12k2);
if (mode == MODE_12k2) {
ff_decode_10_pulses_35bits(pulses, fixed_sparse, gray_decode, 5, 3);
} else if (mode == MODE_10k2) {
decode_8_pulses_31bits(pulses, fixed_sparse);
} else {
int *pulse_position = fixed_sparse->x;
int i, pulse_subset;
const int fixed_index = pulses[0];
if (mode <= MODE_5k15) {
pulse_subset = ((fixed_index >> 3) & 8) + (subframe << 1);
pulse_position[0] = ( fixed_index & 7) * 5 + track_position[pulse_subset];
pulse_position[1] = ((fixed_index >> 3) & 7) * 5 + track_position[pulse_subset + 1];
fixed_sparse->n = 2;
} else if (mode == MODE_5k9) {
pulse_subset = ((fixed_index & 1) << 1) + 1;
pulse_position[0] = ((fixed_index >> 1) & 7) * 5 + pulse_subset;
pulse_subset = (fixed_index >> 4) & 3;
pulse_position[1] = ((fixed_index >> 6) & 7) * 5 + pulse_subset + (pulse_subset == 3 ? 1 : 0);
fixed_sparse->n = pulse_position[0] == pulse_position[1] ? 1 : 2;
} else if (mode == MODE_6k7) {
pulse_position[0] = (fixed_index & 7) * 5;
pulse_subset = (fixed_index >> 2) & 2;
pulse_position[1] = ((fixed_index >> 4) & 7) * 5 + pulse_subset + 1;
pulse_subset = (fixed_index >> 6) & 2;
pulse_position[2] = ((fixed_index >> 8) & 7) * 5 + pulse_subset + 2;
fixed_sparse->n = 3;
} else { // mode <= MODE_7k95
pulse_position[0] = gray_decode[ fixed_index & 7];
pulse_position[1] = gray_decode[(fixed_index >> 3) & 7] + 1;
pulse_position[2] = gray_decode[(fixed_index >> 6) & 7] + 2;
pulse_subset = (fixed_index >> 9) & 1;
pulse_position[3] = gray_decode[(fixed_index >> 10) & 7] + pulse_subset + 3;
fixed_sparse->n = 4;
}
for (i = 0; i < fixed_sparse->n; i++)
fixed_sparse->y[i] = (pulses[1] >> i) & 1 ? 1.0 : -1.0;
}
}
/**
* Apply pitch lag to obtain the sharpened fixed vector (section 6.1.2)
*
* @param p the context
* @param subframe unpacked amr subframe
* @param mode mode of the current frame
* @param fixed_sparse sparse representation of the fixed vector
*/
static void pitch_sharpening(AMRContext *p, int subframe, enum Mode mode,
AMRFixed *fixed_sparse)
{
// The spec suggests the current pitch gain is always used, but in other
// modes the pitch and codebook gains are jointly quantized (sec 5.8.2)
// so the codebook gain cannot depend on the quantized pitch gain.
if (mode == MODE_12k2)
p->beta = FFMIN(p->pitch_gain[4], 1.0);
fixed_sparse->pitch_lag = p->pitch_lag_int;
fixed_sparse->pitch_fac = p->beta;
// Save pitch sharpening factor for the next subframe
// MODE_4k75 only updates on the 2nd and 4th subframes - this follows from
// the fact that the gains for two subframes are jointly quantized.
if (mode != MODE_4k75 || subframe & 1)
p->beta = av_clipf(p->pitch_gain[4], 0.0, SHARP_MAX);
}
/// @}
/// @name AMR gain decoding functions
/// @{
/**
* fixed gain smoothing
* Note that where the spec specifies the "spectrum in the q domain"
* in section 6.1.4, in fact frequencies should be used.
*
* @param p the context
* @param lsf LSFs for the current subframe, in the range [0,1]
* @param lsf_avg averaged LSFs
* @param mode mode of the current frame
*
* @return fixed gain smoothed
*/
static float fixed_gain_smooth(AMRContext *p , const float *lsf,
const float *lsf_avg, const enum Mode mode)
{
float diff = 0.0;
int i;
for (i = 0; i < LP_FILTER_ORDER; i++)
diff += fabs(lsf_avg[i] - lsf[i]) / lsf_avg[i];
// If diff is large for ten subframes, disable smoothing for a 40-subframe
// hangover period.
p->diff_count++;
if (diff <= 0.65)
p->diff_count = 0;
if (p->diff_count > 10) {
p->hang_count = 0;
p->diff_count--; // don't let diff_count overflow
}
if (p->hang_count < 40) {
p->hang_count++;
} else if (mode < MODE_7k4 || mode == MODE_10k2) {
const float smoothing_factor = av_clipf(4.0 * diff - 1.6, 0.0, 1.0);
const float fixed_gain_mean = (p->fixed_gain[0] + p->fixed_gain[1] +
p->fixed_gain[2] + p->fixed_gain[3] +
p->fixed_gain[4]) * 0.2;
return smoothing_factor * p->fixed_gain[4] +
(1.0 - smoothing_factor) * fixed_gain_mean;
}
return p->fixed_gain[4];
}
/**
* Decode pitch gain and fixed gain factor (part of section 6.1.3).
*
* @param p the context
* @param amr_subframe unpacked amr subframe
* @param mode mode of the current frame
* @param subframe current subframe number
* @param fixed_gain_factor decoded gain correction factor
*/
static void decode_gains(AMRContext *p, const AMRNBSubframe *amr_subframe,
const enum Mode mode, const int subframe,
float *fixed_gain_factor)
{
if (mode == MODE_12k2 || mode == MODE_7k95) {
p->pitch_gain[4] = qua_gain_pit [amr_subframe->p_gain ]
* (1.0 / 16384.0);
*fixed_gain_factor = qua_gain_code[amr_subframe->fixed_gain]
* (1.0 / 2048.0);
} else {
const uint16_t *gains;
if (mode >= MODE_6k7) {
gains = gains_high[amr_subframe->p_gain];
} else if (mode >= MODE_5k15) {
gains = gains_low [amr_subframe->p_gain];
} else {
// gain index is only coded in subframes 0,2 for MODE_4k75
gains = gains_MODE_4k75[(p->frame.subframe[subframe & 2].p_gain << 1) + (subframe & 1)];
}
p->pitch_gain[4] = gains[0] * (1.0 / 16384.0);
*fixed_gain_factor = gains[1] * (1.0 / 4096.0);
}
}
/// @}
/// @name AMR preprocessing functions
/// @{
/**
* Circularly convolve a sparse fixed vector with a phase dispersion impulse
* response filter (D.6.2 of G.729 and 6.1.5 of AMR).
*
* @param out vector with filter applied
* @param in source vector
* @param filter phase filter coefficients
*
* out[n] = sum(i,0,len-1){ in[i] * filter[(len + n - i)%len] }
*/
static void apply_ir_filter(float *out, const AMRFixed *in,
const float *filter)
{
float filter1[AMR_SUBFRAME_SIZE], ///< filters at pitch lag*1 and *2
filter2[AMR_SUBFRAME_SIZE];
int lag = in->pitch_lag;
float fac = in->pitch_fac;
int i;
if (lag < AMR_SUBFRAME_SIZE) {
ff_celp_circ_addf(filter1, filter, filter, lag, fac,
AMR_SUBFRAME_SIZE);
if (lag < AMR_SUBFRAME_SIZE >> 1)
ff_celp_circ_addf(filter2, filter, filter1, lag, fac,
AMR_SUBFRAME_SIZE);
}
memset(out, 0, sizeof(float) * AMR_SUBFRAME_SIZE);
for (i = 0; i < in->n; i++) {
int x = in->x[i];
float y = in->y[i];
const float *filterp;
if (x >= AMR_SUBFRAME_SIZE - lag) {
filterp = filter;
} else if (x >= AMR_SUBFRAME_SIZE - (lag << 1)) {
filterp = filter1;
} else
filterp = filter2;
ff_celp_circ_addf(out, out, filterp, x, y, AMR_SUBFRAME_SIZE);
}
}
/**
* Reduce fixed vector sparseness by smoothing with one of three IR filters.
 * Also known as "adaptive phase dispersion".
*
* This implements 3GPP TS 26.090 section 6.1(5).
*
* @param p the context
* @param fixed_sparse algebraic codebook vector
* @param fixed_vector unfiltered fixed vector
* @param fixed_gain smoothed gain
* @param out space for modified vector if necessary
*/
static const float *anti_sparseness(AMRContext *p, AMRFixed *fixed_sparse,
const float *fixed_vector,
float fixed_gain, float *out)
{
int ir_filter_nr;
if (p->pitch_gain[4] < 0.6) {
ir_filter_nr = 0; // strong filtering
} else if (p->pitch_gain[4] < 0.9) {
ir_filter_nr = 1; // medium filtering
} else
ir_filter_nr = 2; // no filtering
// detect 'onset'
if (fixed_gain > 2.0 * p->prev_sparse_fixed_gain) {
p->ir_filter_onset = 2;
} else if (p->ir_filter_onset)
p->ir_filter_onset--;
if (!p->ir_filter_onset) {
int i, count = 0;
for (i = 0; i < 5; i++)
if (p->pitch_gain[i] < 0.6)
count++;
if (count > 2)
ir_filter_nr = 0;
if (ir_filter_nr > p->prev_ir_filter_nr + 1)
ir_filter_nr--;
} else if (ir_filter_nr < 2)
ir_filter_nr++;
// Disable filtering for very low level of fixed_gain.
// Note this step is not specified in the technical description but is in
// the reference source in the function Ph_disp.
if (fixed_gain < 5.0)
ir_filter_nr = 2;
if (p->cur_frame_mode != MODE_7k4 && p->cur_frame_mode < MODE_10k2
&& ir_filter_nr < 2) {
apply_ir_filter(out, fixed_sparse,
(p->cur_frame_mode == MODE_7k95 ?
ir_filters_lookup_MODE_7k95 :
ir_filters_lookup)[ir_filter_nr]);
fixed_vector = out;
}
// update ir filter strength history
p->prev_ir_filter_nr = ir_filter_nr;
p->prev_sparse_fixed_gain = fixed_gain;
return fixed_vector;
}
/// @}
/// @name AMR synthesis functions
/// @{
/**
* Conduct 10th order linear predictive coding synthesis.
*
* @param p pointer to the AMRContext
* @param lpc pointer to the LPC coefficients
* @param fixed_gain fixed codebook gain for synthesis
* @param fixed_vector algebraic codebook vector
* @param samples pointer to the output speech samples
* @param overflow 16-bit overflow flag
*/
static int synthesis(AMRContext *p, float *lpc,
float fixed_gain, const float *fixed_vector,
float *samples, uint8_t overflow)
{
int i;
float excitation[AMR_SUBFRAME_SIZE];
// if an overflow has been detected, the pitch vector is scaled down by a
// factor of 4
if (overflow)
for (i = 0; i < AMR_SUBFRAME_SIZE; i++)
p->pitch_vector[i] *= 0.25;
p->acelpv_ctx.weighted_vector_sumf(excitation, p->pitch_vector, fixed_vector,
p->pitch_gain[4], fixed_gain, AMR_SUBFRAME_SIZE);
// emphasize pitch vector contribution
if (p->pitch_gain[4] > 0.5 && !overflow) {
float energy = p->celpm_ctx.dot_productf(excitation, excitation,
AMR_SUBFRAME_SIZE);
float pitch_factor =
p->pitch_gain[4] *
(p->cur_frame_mode == MODE_12k2 ?
0.25 * FFMIN(p->pitch_gain[4], 1.0) :
0.5 * FFMIN(p->pitch_gain[4], SHARP_MAX));
for (i = 0; i < AMR_SUBFRAME_SIZE; i++)
excitation[i] += pitch_factor * p->pitch_vector[i];
ff_scale_vector_to_given_sum_of_squares(excitation, excitation, energy,
AMR_SUBFRAME_SIZE);
}
p->celpf_ctx.celp_lp_synthesis_filterf(samples, lpc, excitation,
AMR_SUBFRAME_SIZE,
LP_FILTER_ORDER);
// detect overflow
for (i = 0; i < AMR_SUBFRAME_SIZE; i++)
if (fabsf(samples[i]) > AMR_SAMPLE_BOUND) {
return 1;
}
return 0;
}
/// @}
/// @name AMR update functions
/// @{
/**
* Update buffers and history at the end of decoding a subframe.
*
* @param p pointer to the AMRContext
*/
static void update_state(AMRContext *p)
{
memcpy(p->prev_lsp_sub4, p->lsp[3], LP_FILTER_ORDER * sizeof(p->lsp[3][0]));
memmove(&p->excitation_buf[0], &p->excitation_buf[AMR_SUBFRAME_SIZE],
(PITCH_DELAY_MAX + LP_FILTER_ORDER + 1) * sizeof(float));
memmove(&p->pitch_gain[0], &p->pitch_gain[1], 4 * sizeof(float));
memmove(&p->fixed_gain[0], &p->fixed_gain[1], 4 * sizeof(float));
memmove(&p->samples_in[0], &p->samples_in[AMR_SUBFRAME_SIZE],
LP_FILTER_ORDER * sizeof(float));
}
/// @}
/// @name AMR Postprocessing functions
/// @{
/**
* Get the tilt factor of a formant filter from its transfer function
*
* @param p The Context
* @param lpc_n LP_FILTER_ORDER coefficients of the numerator
* @param lpc_d LP_FILTER_ORDER coefficients of the denominator
*/
static float tilt_factor(AMRContext *p, float *lpc_n, float *lpc_d)
{
float rh0, rh1; // autocorrelation at lag 0 and 1
// LP_FILTER_ORDER prior zeros are needed for ff_celp_lp_synthesis_filterf
float impulse_buffer[LP_FILTER_ORDER + AMR_TILT_RESPONSE] = { 0 };
float *hf = impulse_buffer + LP_FILTER_ORDER; // start of impulse response
hf[0] = 1.0;
memcpy(hf + 1, lpc_n, sizeof(float) * LP_FILTER_ORDER);
p->celpf_ctx.celp_lp_synthesis_filterf(hf, lpc_d, hf,
AMR_TILT_RESPONSE,
LP_FILTER_ORDER);
rh0 = p->celpm_ctx.dot_productf(hf, hf, AMR_TILT_RESPONSE);
rh1 = p->celpm_ctx.dot_productf(hf, hf + 1, AMR_TILT_RESPONSE - 1);
// The spec only specifies this check for 12.2 and 10.2 kbit/s
// modes. But in the ref source the tilt is always non-negative.
return rh1 >= 0.0 ? rh1 / rh0 * AMR_TILT_GAMMA_T : 0.0;
}
/**
* Perform adaptive post-filtering to enhance the quality of the speech.
* See section 6.2.1.
*
* @param p pointer to the AMRContext
* @param lpc interpolated LP coefficients for this subframe
* @param buf_out output of the filter
*/
static void postfilter(AMRContext *p, float *lpc, float *buf_out)
{
int i;
float *samples = p->samples_in + LP_FILTER_ORDER; // Start of input
float speech_gain = p->celpm_ctx.dot_productf(samples, samples,
AMR_SUBFRAME_SIZE);
float pole_out[AMR_SUBFRAME_SIZE + LP_FILTER_ORDER]; // Output of pole filter
const float *gamma_n, *gamma_d; // Formant filter factor table
float lpc_n[LP_FILTER_ORDER], lpc_d[LP_FILTER_ORDER]; // Transfer function coefficients
if (p->cur_frame_mode == MODE_12k2 || p->cur_frame_mode == MODE_10k2) {
gamma_n = ff_pow_0_7;
gamma_d = ff_pow_0_75;
} else {
gamma_n = ff_pow_0_55;
gamma_d = ff_pow_0_7;
}
for (i = 0; i < LP_FILTER_ORDER; i++) {
lpc_n[i] = lpc[i] * gamma_n[i];
lpc_d[i] = lpc[i] * gamma_d[i];
}
memcpy(pole_out, p->postfilter_mem, sizeof(float) * LP_FILTER_ORDER);
p->celpf_ctx.celp_lp_synthesis_filterf(pole_out + LP_FILTER_ORDER, lpc_d, samples,
AMR_SUBFRAME_SIZE, LP_FILTER_ORDER);
memcpy(p->postfilter_mem, pole_out + AMR_SUBFRAME_SIZE,
sizeof(float) * LP_FILTER_ORDER);
p->celpf_ctx.celp_lp_zero_synthesis_filterf(buf_out, lpc_n,
pole_out + LP_FILTER_ORDER,
AMR_SUBFRAME_SIZE, LP_FILTER_ORDER);
ff_tilt_compensation(&p->tilt_mem, tilt_factor(p, lpc_n, lpc_d), buf_out,
AMR_SUBFRAME_SIZE);
ff_adaptive_gain_control(buf_out, buf_out, speech_gain, AMR_SUBFRAME_SIZE,
AMR_AGC_ALPHA, &p->postfilter_agc);
}
/// @}
static int amrnb_decode_frame(AVCodecContext *avctx, void *data,
int *got_frame_ptr, AVPacket *avpkt)
{
AMRContext *p = avctx->priv_data; // pointer to private data
AVFrame *frame = data;
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
float *buf_out; // pointer to the output data buffer
int i, subframe, ret;
float fixed_gain_factor;
AMRFixed fixed_sparse = {0}; // fixed vector up to anti-sparseness processing
float spare_vector[AMR_SUBFRAME_SIZE]; // extra stack space to hold result from anti-sparseness processing
float synth_fixed_gain; // the fixed gain that synthesis should use
const float *synth_fixed_vector; // pointer to the fixed vector that synthesis should use
/* get output buffer */
frame->nb_samples = AMR_BLOCK_SIZE;
if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
return ret;
buf_out = (float *)frame->data[0];
p->cur_frame_mode = unpack_bitstream(p, buf, buf_size);
if (p->cur_frame_mode == NO_DATA) {
av_log(avctx, AV_LOG_ERROR, "Corrupt bitstream\n");
return AVERROR_INVALIDDATA;
}
if (p->cur_frame_mode == MODE_DTX) {
avpriv_report_missing_feature(avctx, "dtx mode");
av_log(avctx, AV_LOG_INFO, "Note: libopencore_amrnb supports dtx\n");
return AVERROR_PATCHWELCOME;
}
if (p->cur_frame_mode == MODE_12k2) {
lsf2lsp_5(p);
} else
lsf2lsp_3(p);
for (i = 0; i < 4; i++)
ff_acelp_lspd2lpc(p->lsp[i], p->lpc[i], 5);
for (subframe = 0; subframe < 4; subframe++) {
const AMRNBSubframe *amr_subframe = &p->frame.subframe[subframe];
decode_pitch_vector(p, amr_subframe, subframe);
decode_fixed_sparse(&fixed_sparse, amr_subframe->pulses,
p->cur_frame_mode, subframe);
// The fixed gain (section 6.1.3) depends on the fixed vector
// (section 6.1.2), but the fixed vector calculation uses
        // pitch sharpening based on the pitch gain (section 6.1.3).
// So the correct order is: pitch gain, pitch sharpening, fixed gain.
decode_gains(p, amr_subframe, p->cur_frame_mode, subframe,
&fixed_gain_factor);
pitch_sharpening(p, subframe, p->cur_frame_mode, &fixed_sparse);
if (fixed_sparse.pitch_lag == 0) {
av_log(avctx, AV_LOG_ERROR, "The file is corrupted, pitch_lag = 0 is not allowed\n");
return AVERROR_INVALIDDATA;
}
ff_set_fixed_vector(p->fixed_vector, &fixed_sparse, 1.0,
AMR_SUBFRAME_SIZE);
p->fixed_gain[4] =
ff_amr_set_fixed_gain(fixed_gain_factor,
p->celpm_ctx.dot_productf(p->fixed_vector,
p->fixed_vector,
AMR_SUBFRAME_SIZE) /
AMR_SUBFRAME_SIZE,
p->prediction_error,
energy_mean[p->cur_frame_mode], energy_pred_fac);
// The excitation feedback is calculated without any processing such
// as fixed gain smoothing. This isn't mentioned in the specification.
for (i = 0; i < AMR_SUBFRAME_SIZE; i++)
p->excitation[i] *= p->pitch_gain[4];
ff_set_fixed_vector(p->excitation, &fixed_sparse, p->fixed_gain[4],
AMR_SUBFRAME_SIZE);
// In the ref decoder, excitation is stored with no fractional bits.
// This step prevents buzz in silent periods. The ref encoder can
// emit long sequences with pitch factor greater than one. This
// creates unwanted feedback if the excitation vector is nonzero.
// (e.g. test sequence T19_795.COD in 3GPP TS 26.074)
for (i = 0; i < AMR_SUBFRAME_SIZE; i++)
p->excitation[i] = truncf(p->excitation[i]);
// Smooth fixed gain.
// The specification is ambiguous, but in the reference source, the
// smoothed value is NOT fed back into later fixed gain smoothing.
synth_fixed_gain = fixed_gain_smooth(p, p->lsf_q[subframe],
p->lsf_avg, p->cur_frame_mode);
synth_fixed_vector = anti_sparseness(p, &fixed_sparse, p->fixed_vector,
synth_fixed_gain, spare_vector);
if (synthesis(p, p->lpc[subframe], synth_fixed_gain,
synth_fixed_vector, &p->samples_in[LP_FILTER_ORDER], 0))
// overflow detected -> rerun synthesis scaling pitch vector down
// by a factor of 4, skipping pitch vector contribution emphasis
// and adaptive gain control
synthesis(p, p->lpc[subframe], synth_fixed_gain,
synth_fixed_vector, &p->samples_in[LP_FILTER_ORDER], 1);
postfilter(p, p->lpc[subframe], buf_out + subframe * AMR_SUBFRAME_SIZE);
// update buffers and history
ff_clear_fixed_vector(p->fixed_vector, &fixed_sparse, AMR_SUBFRAME_SIZE);
update_state(p);
}
p->acelpf_ctx.acelp_apply_order_2_transfer_function(buf_out,
buf_out, highpass_zeros,
highpass_poles,
highpass_gain * AMR_SAMPLE_SCALE,
p->high_pass_mem, AMR_BLOCK_SIZE);
/* Update averaged lsf vector (used for fixed gain smoothing).
*
* Note that lsf_avg should not incorporate the current frame's LSFs
* for fixed_gain_smooth.
* The specification has an incorrect formula: the reference decoder uses
* qbar(n-1) rather than qbar(n) in section 6.1(4) equation 71. */
p->acelpv_ctx.weighted_vector_sumf(p->lsf_avg, p->lsf_avg, p->lsf_q[3],
0.84, 0.16, LP_FILTER_ORDER);
*got_frame_ptr = 1;
/* return the amount of bytes consumed if everything was OK */
    return frame_sizes_nb[p->cur_frame_mode] + 1; // speech bits rounded up to bytes, plus the 1-byte TOC
}
AVCodec ff_amrnb_decoder = {
.name = "amrnb",
.long_name = NULL_IF_CONFIG_SMALL("AMR-NB (Adaptive Multi-Rate NarrowBand)"),
.type = AVMEDIA_TYPE_AUDIO,
.id = AV_CODEC_ID_AMR_NB,
.priv_data_size = sizeof(AMRContext),
.init = amrnb_decode_init,
.decode = amrnb_decode_frame,
.capabilities = AV_CODEC_CAP_DR1,
.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_FLT,
AV_SAMPLE_FMT_NONE },
};
| {
"language": "C"
} |
/*
* Copyright © 2008 Red Hat, Inc
*
* Permission to use, copy, modify, distribute, and sell this software
* and its documentation for any purpose is hereby granted without
* fee, provided that the above copyright notice appear in all copies
* and that both that copyright notice and this permission notice
* appear in supporting documentation, and that the name of the
* copyright holders not be used in advertising or publicity
* pertaining to distribution of the software without specific,
* written prior permission. The copyright holders make no
* representations about the suitability of this software for any
* purpose. It is provided "as is" without express or implied
* warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS
* SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
* SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
* AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
* OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
* SOFTWARE.
*/
#ifndef _GLX_dri_common_h
#define _GLX_dri_common_h
typedef struct __GLXDRIconfig __GLXDRIconfig;
struct __GLXDRIconfig {
__GLXconfig config;
const __DRIconfig *driConfig;
};
__GLXconfig *
glxConvertConfigs(const __DRIcoreExtension *core,
const __DRIconfig **configs, unsigned int drawableType);
extern const __DRIsystemTimeExtension systemTimeExtension;
void *
glxProbeDriver(const char *name,
void **coreExt, const char *coreName, int coreVersion,
void **renderExt, const char *renderName, int renderVersion);
#endif
| {
"language": "C"
} |
/******************************************************************************
* Copyright (C) 2009 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/****************************************************************************/
/**
*
* @file xdmaps_selftest.c
* @addtogroup dmaps_v2_6
* @{
*
* This file contains the self-test functions for the XDmaPs driver.
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ------ -------- -----------------------------------------------
* 1.00 hbm 03/29/2010 First Release
* </pre>
*
******************************************************************************/
/***************************** Include Files *********************************/
#include "xstatus.h"
#include "xdmaps.h"
/************************** Constant Definitions *****************************/
/**************************** Type Definitions *******************************/
/***************** Macros (Inline Functions) Definitions *********************/
/************************** Variable Definitions *****************************/
/************************** Function Prototypes ******************************/
/****************************************************************************/
/**
*
 * This function runs a self-test on the driver and hardware device. The test
 * is non-destructive and only reads status registers: it checks that the DMA
 * manager is not busy (via the debug status register) and that the channel
 * status register of every channel reads zero, i.e. all channels are stopped.
 *
 * No data is transferred and no registers are modified.
*
* @param InstPtr is a pointer to the XDmaPs instance
*
* @return
*
* - XST_SUCCESS if the test was successful
* - XST_FAILURE if the test failed
*
* @note
*
* This function can hang if the hardware is not functioning properly.
*
******************************************************************************/
int XDmaPs_SelfTest(XDmaPs *InstPtr)
{
u32 BaseAddr = InstPtr->Config.BaseAddress;
int i;
if (XDmaPs_ReadReg(BaseAddr, XDMAPS_DBGSTATUS_OFFSET)
& XDMAPS_DBGSTATUS_BUSY)
return XST_FAILURE;
for (i = 0; i < XDMAPS_CHANNELS_PER_DEV; i++) {
if (XDmaPs_ReadReg(BaseAddr,
XDmaPs_CSn_OFFSET(i)))
return XST_FAILURE;
}
return XST_SUCCESS;
}
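/*
 * Usage sketch, following the usual Xilinx standalone-driver pattern of
 * lookup, initialize, then self-test.  The lookup/initialize call signatures
 * and the device-id argument are assumptions based on that pattern; adjust
 * them to the generated BSP (e.g. pass XPAR_XDMAPS_0_DEVICE_ID from
 * xparameters.h).
 */
static int RunDmaPsSelfTestExample(u16 DeviceId)
{
	XDmaPs DmaInstance;
	XDmaPs_Config *Config;
	int Status;

	Config = XDmaPs_LookupConfig(DeviceId);
	if (Config == NULL)
		return XST_FAILURE;

	Status = XDmaPs_CfgInitialize(&DmaInstance, Config,
				      Config->BaseAddress);
	if (Status != XST_SUCCESS)
		return Status;

	/* Fails if the DMA manager or any of its channels is busy. */
	return XDmaPs_SelfTest(&DmaInstance);
}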
/** @} */
| {
"language": "C"
} |