// File: D://workCode//uploadProject\awtk\3rd\agg\include\agg_array.h
//----------------------------------------------------------------------------
// Anti-Grain Geometry - Version 2.4
// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
//
// Permission to copy, use, modify, sell and distribute this software
// is granted provided this copyright notice appears in all copies.
// This software is provided "as is" without express or implied
// warranty, and with no claim as to its suitability for any purpose.
//
//----------------------------------------------------------------------------
// Contact: mcseem@antigrain.com
// mcseemagg@yahoo.com
// http://www.antigrain.com
//----------------------------------------------------------------------------
#ifndef AGG_ARRAY_INCLUDED
#define AGG_ARRAY_INCLUDED
#include <stddef.h>
#include <string.h>
#include "agg_basics.h"
namespace agg
{
//-------------------------------------------------------pod_array_adaptor
template<class T> class pod_array_adaptor
{
public:
typedef T value_type;
pod_array_adaptor(T* array, unsigned size) :
m_array(array), m_size(size) {}
unsigned size() const { return m_size; }
const T& operator [] (unsigned i) const { return m_array[i]; }
T& operator [] (unsigned i) { return m_array[i]; }
const T& at(unsigned i) const { return m_array[i]; }
T& at(unsigned i) { return m_array[i]; }
T value_at(unsigned i) const { return m_array[i]; }
private:
T* m_array;
unsigned m_size;
};
//---------------------------------------------------------pod_auto_array
template<class T, unsigned Size> class pod_auto_array
{
public:
typedef T value_type;
typedef pod_auto_array<T, Size> self_type;
pod_auto_array() {}
explicit pod_auto_array(const T* c)
{
memcpy(m_array, c, sizeof(T) * Size);
}
const self_type& operator = (const T* c)
{
memcpy(m_array, c, sizeof(T) * Size);
return *this;
}
static unsigned size() { return Size; }
const T& operator [] (unsigned i) const { return m_array[i]; }
T& operator [] (unsigned i) { return m_array[i]; }
const T& at(unsigned i) const { return m_array[i]; }
T& at(unsigned i) { return m_array[i]; }
T value_at(unsigned i) const { return m_array[i]; }
private:
T m_array[Size];
};
//--------------------------------------------------------pod_auto_vector
template<class T, unsigned Size> class pod_auto_vector
{
public:
typedef T value_type;
typedef pod_auto_vector<T, Size> self_type;
pod_auto_vector() : m_size(0) {}
void remove_all() { m_size = 0; }
void clear() { m_size = 0; }
void add(const T& v) { m_array[m_size++] = v; }
void push_back(const T& v) { m_array[m_size++] = v; }
void inc_size(unsigned size) { m_size += size; }
unsigned size() const { return m_size; }
const T& operator [] (unsigned i) const { return m_array[i]; }
T& operator [] (unsigned i) { return m_array[i]; }
const T& at(unsigned i) const { return m_array[i]; }
T& at(unsigned i) { return m_array[i]; }
T value_at(unsigned i) const { return m_array[i]; }
private:
T m_array[Size];
unsigned m_size;
};
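//------------------------------------------------------------------------
// Usage sketch added to this copy for illustration; it is not part of the
// original AGG header and the name pod_auto_vector_example is invented.
// A pod_auto_vector keeps its elements on the stack and only counts how
// many of the Size slots are in use; add()/push_back() do no bounds check.
inline unsigned pod_auto_vector_example()
{
    pod_auto_vector<int, 8> v;   // room for 8 ints, size() starts at 0
    v.add(1);
    v.add(2);
    v.push_back(3);              // same as add()
    return v.size();             // 3
}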
//---------------------------------------------------------------pod_array
template<class T> class pod_array
{
public:
typedef T value_type;
typedef pod_array<T> self_type;
~pod_array() { pod_allocator<T>::deallocate(m_array, m_size); }
pod_array() : m_array(0), m_size(0) {}
pod_array(unsigned size) :
m_array(pod_allocator<T>::allocate(size)),
m_size(size)
{}
pod_array(const self_type& v) :
m_array(pod_allocator<T>::allocate(v.m_size)),
m_size(v.m_size)
{
memcpy(m_array, v.m_array, sizeof(T) * m_size);
}
void resize(unsigned size)
{
if(size != m_size)
{
pod_allocator<T>::deallocate(m_array, m_size);
m_array = pod_allocator<T>::allocate(m_size = size);
}
}
const self_type& operator = (const self_type& v)
{
resize(v.size());
memcpy(m_array, v.m_array, sizeof(T) * m_size);
return *this;
}
unsigned size() const { return m_size; }
const T& operator [] (unsigned i) const { return m_array[i]; }
T& operator [] (unsigned i) { return m_array[i]; }
const T& at(unsigned i) const { return m_array[i]; }
T& at(unsigned i) { return m_array[i]; }
T value_at(unsigned i) const { return m_array[i]; }
const T* data() const { return m_array; }
T* data() { return m_array; }
private:
T* m_array;
unsigned m_size;
};
//--------------------------------------------------------------pod_vector
// A simple class template to store Plain Old Data, a vector
// of a fixed size. The data is contiguous in memory.
//------------------------------------------------------------------------
template<class T> class pod_vector
{
public:
typedef T value_type;
~pod_vector() { pod_allocator<T>::deallocate(m_array, m_capacity); }
pod_vector() : m_size(0), m_capacity(0), m_array(0) {}
pod_vector(unsigned cap, unsigned extra_tail=0);
// Copying
pod_vector(const pod_vector<T>&);
const pod_vector<T>& operator = (const pod_vector<T>&);
// Set new capacity. All data is lost, size is set to zero.
void capacity(unsigned cap, unsigned extra_tail=0);
unsigned capacity() const { return m_capacity; }
// Allocate n elements. All data is lost,
// but elements can be accessed in range 0...size-1.
void allocate(unsigned size, unsigned extra_tail=0);
// Resize keeping the content.
void resize(unsigned new_size);
void zero()
{
memset(m_array, 0, sizeof(T) * m_size);
}
void add(const T& v) { m_array[m_size++] = v; }
void push_back(const T& v) { m_array[m_size++] = v; }
void insert_at(unsigned pos, const T& val);
void inc_size(unsigned size) { m_size += size; }
unsigned size() const { return m_size; }
unsigned byte_size() const { return m_size * sizeof(T); }
void serialize(int8u* ptr) const;
void deserialize(const int8u* data, unsigned byte_size);
const T& operator [] (unsigned i) const { return m_array[i]; }
T& operator [] (unsigned i) { return m_array[i]; }
const T& at(unsigned i) const { return m_array[i]; }
T& at(unsigned i) { return m_array[i]; }
T value_at(unsigned i) const { return m_array[i]; }
const T* data() const { return m_array; }
T* data() { return m_array; }
void remove_all() { m_size = 0; }
void clear() { m_size = 0; }
void cut_at(unsigned num) { if(num < m_size) m_size = num; }
private:
unsigned m_size;
unsigned m_capacity;
T* m_array;
};
//------------------------------------------------------------------------
template<class T>
void pod_vector<T>::capacity(unsigned cap, unsigned extra_tail)
{
m_size = 0;
if(cap > m_capacity)
{
pod_allocator<T>::deallocate(m_array, m_capacity);
m_capacity = cap + extra_tail;
m_array = m_capacity ? pod_allocator<T>::allocate(m_capacity) : 0;
}
}
//------------------------------------------------------------------------
template<class T>
void pod_vector<T>::allocate(unsigned size, unsigned extra_tail)
{
capacity(size, extra_tail);
m_size = size;
}
//------------------------------------------------------------------------
template<class T>
void pod_vector<T>::resize(unsigned new_size)
{
if(new_size > m_size)
{
if(new_size > m_capacity)
{
T* data = pod_allocator<T>::allocate(new_size);
memcpy(data, m_array, m_size * sizeof(T));
pod_allocator<T>::deallocate(m_array, m_capacity);
m_array = data;
}
}
else
{
m_size = new_size;
}
}
//------------------------------------------------------------------------
template<class T> pod_vector<T>::pod_vector(unsigned cap, unsigned extra_tail) :
m_size(0),
m_capacity(cap + extra_tail),
m_array(pod_allocator<T>::allocate(m_capacity)) {}
//------------------------------------------------------------------------
template<class T> pod_vector<T>::pod_vector(const pod_vector<T>& v) :
m_size(v.m_size),
m_capacity(v.m_capacity),
m_array(v.m_capacity ? pod_allocator<T>::allocate(v.m_capacity) : 0)
{
memcpy(m_array, v.m_array, sizeof(T) * v.m_size);
}
//------------------------------------------------------------------------
template<class T> const pod_vector<T>&
pod_vector<T>::operator = (const pod_vector<T>&v)
{
allocate(v.m_size);
if(v.m_size) memcpy(m_array, v.m_array, sizeof(T) * v.m_size);
return *this;
}
//------------------------------------------------------------------------
template<class T> void pod_vector<T>::serialize(int8u* ptr) const
{
if(m_size) memcpy(ptr, m_array, m_size * sizeof(T));
}
//------------------------------------------------------------------------
template<class T>
void pod_vector<T>::deserialize(const int8u* data, unsigned byte_size)
{
byte_size /= sizeof(T);
allocate(byte_size);
if(byte_size) memcpy(m_array, data, byte_size * sizeof(T));
}
//------------------------------------------------------------------------
template<class T>
void pod_vector<T>::insert_at(unsigned pos, const T& val)
{
if(pos >= m_size)
{
m_array[m_size] = val;
}
else
{
memmove(m_array + pos + 1, m_array + pos, (m_size - pos) * sizeof(T));
m_array[pos] = val;
}
++m_size;
}
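//------------------------------------------------------------------------
// Usage sketch added to this copy for illustration only (not original AGG
// code; the name pod_vector_example is invented). pod_vector never grows
// on add()/push_back(); the caller reserves room first with capacity() or
// allocate(), or grows it explicitly with resize().
inline unsigned pod_vector_example()
{
    pod_vector<int> v;
    v.capacity(4);               // room for 4 elements, size() == 0
    v.add(10);
    v.add(20);
    v.insert_at(1, 15);          // v is now {10, 15, 20}
    return v.size();             // 3
}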
//---------------------------------------------------------------pod_bvector
// A simple class template to store Plain Old Data, similar to std::deque
// It doesn't reallocate memory but instead, uses blocks of data of size
// of (1 << S), that is, power of two. The data is NOT contiguous in memory,
// so the only valid access method is operator [] or curr(), prev(), next()
//
// Reallocations occur only when the pool of pointers to blocks needs
// to be extended (which happens very rarely). You can control the
// increment used to grow the pointer buffer; see the second constructor.
// By default, the increment value equals (1 << S), i.e., the block size.
//------------------------------------------------------------------------
template<class T, unsigned S=6> class pod_bvector
{
public:
enum block_scale_e
{
block_shift = S,
block_size = 1 << block_shift,
block_mask = block_size - 1
};
typedef T value_type;
~pod_bvector();
pod_bvector();
pod_bvector(unsigned block_ptr_inc);
// Copying
pod_bvector(const pod_bvector<T, S>& v);
const pod_bvector<T, S>& operator = (const pod_bvector<T, S>& v);
void remove_all() { m_size = 0; }
void clear() { m_size = 0; }
void free_all() { free_tail(0); }
void free_tail(unsigned size);
void add(const T& val);
void push_back(const T& val) { add(val); }
void modify_last(const T& val);
void remove_last();
int allocate_continuous_block(unsigned num_elements);
void add_array(const T* ptr, unsigned num_elem)
{
while(num_elem--)
{
add(*ptr++);
}
}
template<class DataAccessor> void add_data(DataAccessor& data)
{
while(data.size())
{
add(*data);
++data;
}
}
void cut_at(unsigned size)
{
if(size < m_size) m_size = size;
}
unsigned size() const { return m_size; }
const T& operator [] (unsigned i) const
{
return m_blocks[i >> block_shift][i & block_mask];
}
T& operator [] (unsigned i)
{
return m_blocks[i >> block_shift][i & block_mask];
}
const T& at(unsigned i) const
{
return m_blocks[i >> block_shift][i & block_mask];
}
T& at(unsigned i)
{
return m_blocks[i >> block_shift][i & block_mask];
}
T value_at(unsigned i) const
{
return m_blocks[i >> block_shift][i & block_mask];
}
const T& curr(unsigned idx) const
{
return (*this)[idx];
}
T& curr(unsigned idx)
{
return (*this)[idx];
}
const T& prev(unsigned idx) const
{
return (*this)[(idx + m_size - 1) % m_size];
}
T& prev(unsigned idx)
{
return (*this)[(idx + m_size - 1) % m_size];
}
const T& next(unsigned idx) const
{
return (*this)[(idx + 1) % m_size];
}
T& next(unsigned idx)
{
return (*this)[(idx + 1) % m_size];
}
const T& last() const
{
return (*this)[m_size - 1];
}
T& last()
{
return (*this)[m_size - 1];
}
unsigned byte_size() const;
void serialize(int8u* ptr) const;
void deserialize(const int8u* data, unsigned byte_size);
void deserialize(unsigned start, const T& empty_val,
const int8u* data, unsigned byte_size);
template<class ByteAccessor>
void deserialize(ByteAccessor data)
{
remove_all();
unsigned elem_size = data.size() / sizeof(T);
for(unsigned i = 0; i < elem_size; ++i)
{
int8u* ptr = (int8u*)data_ptr();
for(unsigned j = 0; j < sizeof(T); ++j)
{
*ptr++ = *data;
++data;
}
++m_size;
}
}
template<class ByteAccessor>
void deserialize(unsigned start, const T& empty_val, ByteAccessor data)
{
while(m_size < start)
{
add(empty_val);
}
unsigned elem_size = data.size() / sizeof(T);
for(unsigned i = 0; i < elem_size; ++i)
{
int8u* ptr;
if(start + i < m_size)
{
ptr = (int8u*)(&((*this)[start + i]));
}
else
{
ptr = (int8u*)data_ptr();
++m_size;
}
for(unsigned j = 0; j < sizeof(T); ++j)
{
*ptr++ = *data;
++data;
}
}
}
const T* block(unsigned nb) const { return m_blocks[nb]; }
private:
void allocate_block(unsigned nb);
T* data_ptr();
unsigned m_size;
unsigned m_num_blocks;
unsigned m_max_blocks;
T** m_blocks;
unsigned m_block_ptr_inc;
};
//------------------------------------------------------------------------
template<class T, unsigned S> pod_bvector<T, S>::~pod_bvector()
{
if(m_num_blocks)
{
T** blk = m_blocks + m_num_blocks - 1;
while(m_num_blocks--)
{
pod_allocator<T>::deallocate(*blk, block_size);
--blk;
}
}
pod_allocator<T*>::deallocate(m_blocks, m_max_blocks);
}
//------------------------------------------------------------------------
template<class T, unsigned S>
void pod_bvector<T, S>::free_tail(unsigned size)
{
if(size < m_size)
{
unsigned nb = (size + block_mask) >> block_shift;
while(m_num_blocks > nb)
{
pod_allocator<T>::deallocate(m_blocks[--m_num_blocks], block_size);
}
if(m_num_blocks == 0)
{
pod_allocator<T*>::deallocate(m_blocks, m_max_blocks);
m_blocks = 0;
m_max_blocks = 0;
}
m_size = size;
}
}
//------------------------------------------------------------------------
template<class T, unsigned S> pod_bvector<T, S>::pod_bvector() :
m_size(0),
m_num_blocks(0),
m_max_blocks(0),
m_blocks(0),
m_block_ptr_inc(block_size)
{
}
//------------------------------------------------------------------------
template<class T, unsigned S>
pod_bvector<T, S>::pod_bvector(unsigned block_ptr_inc) :
m_size(0),
m_num_blocks(0),
m_max_blocks(0),
m_blocks(0),
m_block_ptr_inc(block_ptr_inc)
{
}
//------------------------------------------------------------------------
template<class T, unsigned S>
pod_bvector<T, S>::pod_bvector(const pod_bvector<T, S>& v) :
m_size(v.m_size),
m_num_blocks(v.m_num_blocks),
m_max_blocks(v.m_max_blocks),
m_blocks(v.m_max_blocks ?
pod_allocator<T*>::allocate(v.m_max_blocks) :
0),
m_block_ptr_inc(v.m_block_ptr_inc)
{
unsigned i;
for(i = 0; i < v.m_num_blocks; ++i)
{
m_blocks[i] = pod_allocator<T>::allocate(block_size);
memcpy(m_blocks[i], v.m_blocks[i], block_size * sizeof(T));
}
}
//------------------------------------------------------------------------
template<class T, unsigned S>
const pod_bvector<T, S>&
pod_bvector<T, S>::operator = (const pod_bvector<T, S>& v)
{
unsigned i;
for(i = m_num_blocks; i < v.m_num_blocks; ++i)
{
allocate_block(i);
}
for(i = 0; i < v.m_num_blocks; ++i)
{
memcpy(m_blocks[i], v.m_blocks[i], block_size * sizeof(T));
}
m_size = v.m_size;
return *this;
}
//------------------------------------------------------------------------
template<class T, unsigned S>
void pod_bvector<T, S>::allocate_block(unsigned nb)
{
if(nb >= m_max_blocks)
{
T** new_blocks = pod_allocator<T*>::allocate(m_max_blocks + m_block_ptr_inc);
if(m_blocks)
{
memcpy(new_blocks,
m_blocks,
m_num_blocks * sizeof(T*));
pod_allocator<T*>::deallocate(m_blocks, m_max_blocks);
}
m_blocks = new_blocks;
m_max_blocks += m_block_ptr_inc;
}
m_blocks[nb] = pod_allocator<T>::allocate(block_size);
m_num_blocks++;
}
//------------------------------------------------------------------------
template<class T, unsigned S>
inline T* pod_bvector<T, S>::data_ptr()
{
unsigned nb = m_size >> block_shift;
if(nb >= m_num_blocks)
{
allocate_block(nb);
}
return m_blocks[nb] + (m_size & block_mask);
}
//------------------------------------------------------------------------
template<class T, unsigned S>
inline void pod_bvector<T, S>::add(const T& val)
{
*data_ptr() = val;
++m_size;
}
//------------------------------------------------------------------------
template<class T, unsigned S>
inline void pod_bvector<T, S>::remove_last()
{
if(m_size) --m_size;
}
//------------------------------------------------------------------------
template<class T, unsigned S>
void pod_bvector<T, S>::modify_last(const T& val)
{
remove_last();
add(val);
}
//------------------------------------------------------------------------
template<class T, unsigned S>
int pod_bvector<T, S>::allocate_continuous_block(unsigned num_elements)
{
if(num_elements < block_size)
{
data_ptr(); // Allocate initial block if necessary
unsigned rest = block_size - (m_size & block_mask);
unsigned index;
if(num_elements <= rest)
{
// The rest of the block is good, we can use it
//-----------------
index = m_size;
m_size += num_elements;
return index;
}
// New block
//---------------
m_size += rest;
data_ptr();
index = m_size;
m_size += num_elements;
return index;
}
return -1; // Impossible to allocate
}
//------------------------------------------------------------------------
template<class T, unsigned S>
unsigned pod_bvector<T, S>::byte_size() const
{
return m_size * sizeof(T);
}
//------------------------------------------------------------------------
template<class T, unsigned S>
void pod_bvector<T, S>::serialize(int8u* ptr) const
{
unsigned i;
for(i = 0; i < m_size; i++)
{
memcpy(ptr, &(*this)[i], sizeof(T));
ptr += sizeof(T);
}
}
//------------------------------------------------------------------------
template<class T, unsigned S>
void pod_bvector<T, S>::deserialize(const int8u* data, unsigned byte_size)
{
remove_all();
byte_size /= sizeof(T);
for(unsigned i = 0; i < byte_size; ++i)
{
T* ptr = data_ptr();
memcpy(ptr, data, sizeof(T));
++m_size;
data += sizeof(T);
}
}
// Replace or add a number of elements starting at the "start" position.
//------------------------------------------------------------------------
template<class T, unsigned S>
void pod_bvector<T, S>::deserialize(unsigned start, const T& empty_val,
const int8u* data, unsigned byte_size)
{
while(m_size < start)
{
add(empty_val);
}
byte_size /= sizeof(T);
for(unsigned i = 0; i < byte_size; ++i)
{
if(start + i < m_size)
{
memcpy(&((*this)[start + i]), data, sizeof(T));
}
else
{
T* ptr = data_ptr();
memcpy(ptr, data, sizeof(T));
++m_size;
}
data += sizeof(T);
}
}
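//------------------------------------------------------------------------
// Usage sketch added to this copy for illustration only (not original AGG
// code). With the default S = 6 each block holds 64 elements, so element i
// lives in block (i >> 6) at offset (i & 63); the data is not contiguous
// across blocks, which is why access goes through operator [].
inline unsigned pod_bvector_example()
{
    pod_bvector<int> v;                  // block_size == 64
    for(int i = 0; i < 65; i++) v.add(i);
    return v.size();                     // 65, spread over two blocks
}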
//---------------------------------------------------------block_allocator
// Allocator for arbitrary POD data. It is most useful in caching systems
// that need many small, efficient memory allocations.
// Memory is allocated in blocks of fixed size ("block_size" in
// the constructor). If the required size exceeds the block size the
// allocator creates a new block of the required size. However, it is most
// efficient when the average required size is much less than the block size.
//------------------------------------------------------------------------
class block_allocator
{
struct block_type
{
int8u* data;
unsigned size;
};
public:
void remove_all()
{
if(m_num_blocks)
{
block_type* blk = m_blocks + m_num_blocks - 1;
while(m_num_blocks--)
{
pod_allocator<int8u>::deallocate(blk->data, blk->size);
--blk;
}
pod_allocator<block_type>::deallocate(m_blocks, m_max_blocks);
}
m_num_blocks = 0;
m_max_blocks = 0;
m_blocks = 0;
m_buf_ptr = 0;
m_rest = 0;
}
~block_allocator()
{
remove_all();
}
block_allocator(unsigned block_size, unsigned block_ptr_inc=256-8) :
m_block_size(block_size),
m_block_ptr_inc(block_ptr_inc),
m_num_blocks(0),
m_max_blocks(0),
m_blocks(0),
m_buf_ptr(0),
m_rest(0)
{
}
int8u* allocate(unsigned size, unsigned alignment=1)
{
if(size == 0) return 0;
if(size <= m_rest)
{
int8u* ptr = m_buf_ptr;
if(alignment > 1)
{
unsigned align =
(alignment - unsigned((size_t)ptr) % alignment) % alignment;
size += align;
ptr += align;
if(size <= m_rest)
{
m_rest -= size;
m_buf_ptr += size;
return ptr;
}
allocate_block(size);
return allocate(size - align, alignment);
}
m_rest -= size;
m_buf_ptr += size;
return ptr;
}
allocate_block(size + alignment - 1);
return allocate(size, alignment);
}
private:
void allocate_block(unsigned size)
{
if(size < m_block_size) size = m_block_size;
if(m_num_blocks >= m_max_blocks)
{
block_type* new_blocks =
pod_allocator<block_type>::allocate(m_max_blocks + m_block_ptr_inc);
if(m_blocks)
{
memcpy(new_blocks,
m_blocks,
m_num_blocks * sizeof(block_type));
pod_allocator<block_type>::deallocate(m_blocks, m_max_blocks);
}
m_blocks = new_blocks;
m_max_blocks += m_block_ptr_inc;
}
m_blocks[m_num_blocks].size = size;
m_blocks[m_num_blocks].data =
m_buf_ptr =
pod_allocator<int8u>::allocate(size);
m_num_blocks++;
m_rest = size;
}
unsigned m_block_size;
unsigned m_block_ptr_inc;
unsigned m_num_blocks;
unsigned m_max_blocks;
block_type* m_blocks;
int8u* m_buf_ptr;
unsigned m_rest;
};
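//------------------------------------------------------------------------
// Usage sketch added to this copy for illustration only (not original AGG
// code). The allocator hands out pieces of its current block and only asks
// pod_allocator for memory when a block fills up; individual allocations
// cannot be freed, only remove_all() (or the destructor) releases
// everything at once.
inline unsigned block_allocator_example()
{
    block_allocator alloc(4096);         // 4 KB blocks
    int8u* p1 = alloc.allocate(100);     // carved from the first block
    int8u* p2 = alloc.allocate(64, 16);  // 16-byte aligned request
    (void)p1;
    return ((size_t)p2 % 16) == 0 ? 1u : 0u;   // 1: p2 is aligned
}   // alloc's destructor releases all blocks here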
//------------------------------------------------------------------------
enum quick_sort_threshold_e
{
quick_sort_threshold = 9
};
//-----------------------------------------------------------swap_elements
template<class T> inline void swap_elements(T& a, T& b)
{
T temp = a;
a = b;
b = temp;
}
//--------------------------------------------------------------quick_sort
template<class Array, class Less>
void quick_sort(Array& arr, Less less)
{
if(arr.size() < 2) return;
typename Array::value_type* e1;
typename Array::value_type* e2;
int stack[80];
int* top = stack;
int limit = arr.size();
int base = 0;
for(;;)
{
int len = limit - base;
int i;
int j;
int pivot;
if(len > quick_sort_threshold)
{
// we use base + len/2 as the pivot
pivot = base + len / 2;
swap_elements(arr[base], arr[pivot]);
i = base + 1;
j = limit - 1;
// now ensure that *i <= *base <= *j
e1 = &(arr[j]);
e2 = &(arr[i]);
if(less(*e1, *e2)) swap_elements(*e1, *e2);
e1 = &(arr[base]);
e2 = &(arr[i]);
if(less(*e1, *e2)) swap_elements(*e1, *e2);
e1 = &(arr[j]);
e2 = &(arr[base]);
if(less(*e1, *e2)) swap_elements(*e1, *e2);
for(;;)
{
do i++; while( less(arr[i], arr[base]) );
do j--; while( less(arr[base], arr[j]) );
if( i > j )
{
break;
}
swap_elements(arr[i], arr[j]);
}
swap_elements(arr[base], arr[j]);
// now, push the largest sub-array
if(j - base > limit - i)
{
top[0] = base;
top[1] = j;
base = i;
}
else
{
top[0] = i;
top[1] = limit;
limit = j;
}
top += 2;
}
else
{
// the sub-array is small, perform insertion sort
j = base;
i = j + 1;
for(; i < limit; j = i, i++)
{
for(; less(*(e1 = &(arr[j + 1])), *(e2 = &(arr[j]))); j--)
{
swap_elements(*e1, *e2);
if(j == base)
{
break;
}
}
}
if(top > stack)
{
top -= 2;
base = top[0];
limit = top[1];
}
else
{
break;
}
}
}
}
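//------------------------------------------------------------------------
// Usage sketch added to this copy for illustration only (not original AGG
// code; the names below are invented). quick_sort works on any array-like
// container that provides size() and operator [], and the comparison is
// any callable "less" predicate, here a small functor ordering point_i
// (from agg_basics.h) by x.
struct example_point_x_less
{
    bool operator () (const point_i& a, const point_i& b) const
    {
        return a.x < b.x;
    }
};
inline void quick_sort_example(pod_vector<point_i>& pts)
{
    quick_sort(pts, example_point_x_less());
}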
//------------------------------------------------------remove_duplicates
// Remove duplicates from a sorted array. It doesn't cut the
// tail of the array, it just returns the number of remaining elements.
//-----------------------------------------------------------------------
template<class Array, class Equal>
unsigned remove_duplicates(Array& arr, Equal equal)
{
if(arr.size() < 2) return arr.size();
unsigned i, j;
for(i = 1, j = 1; i < arr.size(); i++)
{
typename Array::value_type& e = arr[i];
if(!equal(e, arr[i - 1]))
{
arr[j++] = e;
}
}
return j;
}
//--------------------------------------------------------invert_container
template<class Array> void invert_container(Array& arr)
{
int i = 0;
int j = arr.size() - 1;
while(i < j)
{
swap_elements(arr[i++], arr[j--]);
}
}
//------------------------------------------------------binary_search_pos
template<class Array, class Value, class Less>
unsigned binary_search_pos(const Array& arr, const Value& val, Less less)
{
if(arr.size() == 0) return 0;
unsigned beg = 0;
unsigned end = arr.size() - 1;
if(less(val, arr[0])) return 0;
if(less(arr[end], val)) return end + 1;
while(end - beg > 1)
{
unsigned mid = (end + beg) >> 1;
if(less(val, arr[mid])) end = mid;
else beg = mid;
}
//if(beg <= 0 && less(val, arr[0])) return 0;
//if(end >= arr.size() - 1 && less(arr[end], val)) ++end;
return end;
}
//----------------------------------------------------------range_adaptor
template<class Array> class range_adaptor
{
public:
typedef typename Array::value_type value_type;
range_adaptor(Array& array, unsigned start, unsigned size) :
m_array(array), m_start(start), m_size(size)
{}
unsigned size() const { return m_size; }
const value_type& operator [] (unsigned i) const { return m_array[m_start + i]; }
value_type& operator [] (unsigned i) { return m_array[m_start + i]; }
const value_type& at(unsigned i) const { return m_array[m_start + i]; }
value_type& at(unsigned i) { return m_array[m_start + i]; }
value_type value_at(unsigned i) const { return m_array[m_start + i]; }
private:
Array& m_array;
unsigned m_start;
unsigned m_size;
};
//---------------------------------------------------------------int_less
inline bool int_less(int a, int b) { return a < b; }
//------------------------------------------------------------int_greater
inline bool int_greater(int a, int b) { return a > b; }
//----------------------------------------------------------unsigned_less
inline bool unsigned_less(unsigned a, unsigned b) { return a < b; }
//-------------------------------------------------------unsigned_greater
inline bool unsigned_greater(unsigned a, unsigned b) { return a > b; }
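//------------------------------------------------------------------------
// Usage sketch added to this copy for illustration only (not original AGG
// code; example_int_equal and search_helpers_example are invented names).
// The typical pipeline for the helpers above is: sort, drop duplicates,
// then locate insertion positions with binary_search_pos.
inline bool example_int_equal(int a, int b) { return a == b; }
inline unsigned search_helpers_example()
{
    pod_vector<int> v;
    v.allocate(5);
    v[0] = 5; v[1] = 1; v[2] = 3; v[3] = 3; v[4] = 2;
    quick_sort(v, int_less);                            // {1, 2, 3, 3, 5}
    v.cut_at(remove_duplicates(v, example_int_equal));  // {1, 2, 3, 5}
    return binary_search_pos(v, 4, int_less);           // 3: 4 goes before 5
}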
}
#endif
// File: D://workCode//uploadProject\awtk\3rd\agg\include\agg_basics.h
//----------------------------------------------------------------------------
// Anti-Grain Geometry - Version 2.4
// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
//
// Permission to copy, use, modify, sell and distribute this software
// is granted provided this copyright notice appears in all copies.
// This software is provided "as is" without express or implied
// warranty, and with no claim as to its suitability for any purpose.
//
//----------------------------------------------------------------------------
// Contact: mcseem@antigrain.com
// mcseemagg@yahoo.com
// http://www.antigrain.com
//----------------------------------------------------------------------------
#ifndef AGG_BASICS_INCLUDED
#define AGG_BASICS_INCLUDED
#include <math.h>
#include "agg_config.h"
//---------------------------------------------------------AGG_CUSTOM_ALLOCATOR
#ifdef AGG_CUSTOM_ALLOCATOR
#include "agg_allocator.h"
#else
namespace agg
{
// The policy of all AGG containers, and of the memory allocation strategy
// in general, is that no allocated data requires explicit construction.
// This means that the allocator can be really simple; you can even
// replace new/delete with malloc/free. The constructors and destructors
// won't be called in that case, but everything will keep working.
// The second argument of deallocate() is the size of the allocated
// block. You can use this information if you wish.
//------------------------------------------------------------pod_allocator
template<class T> struct pod_allocator
{
static T* allocate(unsigned num) { return new T [num]; }
static void deallocate(T* ptr, unsigned) { delete [] ptr; }
};
// Single object allocator. It can also be replaced with your custom
// allocator. The difference is that it only allocates a single
// object, and the constructor and destructor must be called.
// In AGG there is never a need to allocate an array of objects and
// call their constructors (only single ones). So, if you replace
// new/delete with malloc/free, make sure that placement new is
// called and take care of calling the destructor too.
//------------------------------------------------------------obj_allocator
template<class T> struct obj_allocator
{
static T* allocate() { return new T; }
static void deallocate(T* ptr) { delete ptr; }
};
}
#endif
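//------------------------------------------------------------------------
// Illustrative sketch added to this copy (not part of the original AGG
// sources): what a malloc/free based replacement for pod_allocator could
// look like when AGG_CUSTOM_ALLOCATOR is defined. The name
// malloc_pod_allocator is invented for this example.
#include <stdlib.h>
namespace agg
{
    template<class T> struct malloc_pod_allocator
    {
        static T*   allocate(unsigned num)       { return (T*)malloc(num * sizeof(T)); }
        static void deallocate(T* ptr, unsigned) { free(ptr); }
    };
}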
//-------------------------------------------------------- Default basic types
//
// If your compiler's basic types have different sizes, you can redefine
// them on the compiler command line or in agg_config.h, which is
// empty by default.
//
#ifndef AGG_INT8
#define AGG_INT8 signed char
#endif
#ifndef AGG_INT8U
#define AGG_INT8U unsigned char
#endif
#ifndef AGG_INT16
#define AGG_INT16 short
#endif
#ifndef AGG_INT16U
#define AGG_INT16U unsigned short
#endif
#ifndef AGG_INT32
#define AGG_INT32 int
#endif
#ifndef AGG_INT32U
#define AGG_INT32U unsigned
#endif
#ifndef AGG_INT64
#if defined(_MSC_VER) || defined(__BORLANDC__)
#define AGG_INT64 signed __int64
#else
#define AGG_INT64 signed long long
#endif
#endif
#ifndef AGG_INT64U
#if defined(_MSC_VER) || defined(__BORLANDC__)
#define AGG_INT64U unsigned __int64
#else
#define AGG_INT64U unsigned long long
#endif
#endif
//------------------------------------------------ Some fixes for MS Visual C++
#if defined(_MSC_VER)
#pragma warning(disable:4786) // Identifier was truncated...
#endif
#if defined(_MSC_VER)
#define AGG_INLINE __forceinline
#else
#define AGG_INLINE inline
#endif
namespace agg
{
//-------------------------------------------------------------------------
typedef AGG_INT8 int8; //----int8
typedef AGG_INT8U int8u; //----int8u
typedef AGG_INT16 int16; //----int16
typedef AGG_INT16U int16u; //----int16u
typedef AGG_INT32 int32; //----int32
typedef AGG_INT32U int32u; //----int32u
typedef AGG_INT64 int64; //----int64
typedef AGG_INT64U int64u; //----int64u
#if defined(AGG_FISTP)
#pragma warning(push)
#pragma warning(disable : 4035) //Disable warning "no return value"
AGG_INLINE int iround(double v) //-------iround
{
int t;
__asm fld qword ptr [v]
__asm fistp dword ptr [t]
__asm mov eax, dword ptr [t]
}
AGG_INLINE unsigned uround(double v) //-------uround
{
unsigned t;
__asm fld qword ptr [v]
__asm fistp dword ptr [t]
__asm mov eax, dword ptr [t]
}
#pragma warning(pop)
AGG_INLINE int ifloor(double v)
{
return int(floor(v));
}
AGG_INLINE unsigned ufloor(double v) //-------ufloor
{
return unsigned(floor(v));
}
AGG_INLINE int iceil(double v)
{
return int(ceil(v));
}
AGG_INLINE unsigned uceil(double v) //--------uceil
{
return unsigned(ceil(v));
}
#elif defined(AGG_QIFIST)
AGG_INLINE int iround(double v)
{
return int(v);
}
AGG_INLINE unsigned uround(double v)
{
return unsigned(v);
}
AGG_INLINE int ifloor(double v)
{
return int(floor(v));
}
AGG_INLINE unsigned ufloor(double v)
{
return unsigned(floor(v));
}
AGG_INLINE int iceil(double v)
{
return int(ceil(v));
}
AGG_INLINE unsigned uceil(double v)
{
return unsigned(ceil(v));
}
#else
AGG_INLINE int iround(double v)
{
return int((v < 0.0) ? v - 0.5 : v + 0.5);
}
AGG_INLINE unsigned uround(double v)
{
return unsigned(v + 0.5);
}
AGG_INLINE int ifloor(double v)
{
int i = int(v);
return i - (i > v);
}
AGG_INLINE unsigned ufloor(double v)
{
return unsigned(v);
}
AGG_INLINE int iceil(double v)
{
return int(ceil(v));
}
AGG_INLINE unsigned uceil(double v)
{
return unsigned(ceil(v));
}
#endif
//---------------------------------------------------------------saturation
template<int Limit> struct saturation
{
AGG_INLINE static int iround(double v)
{
if(v < double(-Limit)) return -Limit;
if(v > double( Limit)) return Limit;
return agg::iround(v);
}
};
//------------------------------------------------------------------mul_one
template<unsigned Shift> struct mul_one
{
AGG_INLINE static unsigned mul(unsigned a, unsigned b)
{
unsigned q = a * b + (1 << (Shift-1));
return (q + (q >> Shift)) >> Shift;
}
};
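//-------------------------------------------------------------------------
// Worked example added to this copy for illustration (not original AGG
// code). mul_one<Shift> is a rounded fixed-point multiply that is exact at
// the extremes; for Shift = 8 and a = b = 255:
//   q = 255*255 + 128 = 65153
//   (q + (q >> 8)) >> 8 = (65153 + 254) >> 8 = 65407 >> 8 = 255
inline unsigned mul_one8_example()
{
    return mul_one<8>::mul(255, 255);   // yields 255
}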
//-------------------------------------------------------------------------
typedef unsigned char cover_type; //----cover_type
enum cover_scale_e
{
cover_shift = 8, //----cover_shift
cover_size = 1 << cover_shift, //----cover_size
cover_mask = cover_size - 1, //----cover_mask
cover_none = 0, //----cover_none
cover_full = cover_mask //----cover_full
};
//----------------------------------------------------poly_subpixel_scale_e
// These constants determine the subpixel accuracy, or, more precisely,
// the number of bits in the fractional part of the coordinates.
// The available coordinate capacity in bits is
// sizeof(int) * 8 - poly_subpixel_shift, i.e., for 32-bit integers and
// an 8-bit fractional part the capacity is 24 bits.
enum poly_subpixel_scale_e
{
poly_subpixel_shift = 8, //----poly_subpixel_shift
poly_subpixel_scale = 1<<poly_subpixel_shift, //----poly_subpixel_scale
poly_subpixel_mask = poly_subpixel_scale-1 //----poly_subpixel_mask
};
//----------------------------------------------------------filling_rule_e
enum filling_rule_e
{
fill_non_zero,
fill_even_odd
};
//-----------------------------------------------------------------------pi
const double pi = 3.14159265358979323846;
//------------------------------------------------------------------deg2rad
inline double deg2rad(double deg)
{
return deg * pi / 180.0;
}
//------------------------------------------------------------------rad2deg
inline double rad2deg(double rad)
{
return rad * 180.0 / pi;
}
//----------------------------------------------------------------rect_base
template<class T> struct rect_base
{
typedef T value_type;
typedef rect_base<T> self_type;
T x1, y1, x2, y2;
rect_base() {}
rect_base(T x1_, T y1_, T x2_, T y2_) :
x1(x1_), y1(y1_), x2(x2_), y2(y2_) {}
void init(T x1_, T y1_, T x2_, T y2_)
{
x1 = x1_; y1 = y1_; x2 = x2_; y2 = y2_;
}
const self_type& normalize()
{
T t;
if(x1 > x2) { t = x1; x1 = x2; x2 = t; }
if(y1 > y2) { t = y1; y1 = y2; y2 = t; }
return *this;
}
bool clip(const self_type& r)
{
if(x2 > r.x2) x2 = r.x2;
if(y2 > r.y2) y2 = r.y2;
if(x1 < r.x1) x1 = r.x1;
if(y1 < r.y1) y1 = r.y1;
return x1 <= x2 && y1 <= y2;
}
bool is_valid() const
{
return x1 <= x2 && y1 <= y2;
}
bool hit_test(T x, T y) const
{
return (x >= x1 && x <= x2 && y >= y1 && y <= y2);
}
bool overlaps(const self_type& r) const
{
return !(r.x1 > x2 || r.x2 < x1
|| r.y1 > y2 || r.y2 < y1);
}
};
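//------------------------------------------------------------------------
// Usage sketch added to this copy for illustration (not original AGG code;
// rect_base_example is an invented name). clip() intersects the rectangle
// with another one in place and reports whether the result is non-empty.
inline bool rect_base_example()
{
    rect_base<int> r(10, 10, 100, 100);
    rect_base<int> clip_box(0, 0, 50, 50);
    return r.clip(clip_box) && r.is_valid();   // r is now (10, 10, 50, 50)
}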
//-----------------------------------------------------intersect_rectangles
template<class Rect>
inline Rect intersect_rectangles(const Rect& r1, const Rect& r2)
{
Rect r = r1;
// First process x2,y2 because the other order
// results in Internal Compiler Error under
// Microsoft Visual C++ .NET 2003 69462-335-0000007-18038 in
// case of "Maximize Speed" optimization option.
//-----------------
if(r.x2 > r2.x2) r.x2 = r2.x2;
if(r.y2 > r2.y2) r.y2 = r2.y2;
if(r.x1 < r2.x1) r.x1 = r2.x1;
if(r.y1 < r2.y1) r.y1 = r2.y1;
return r;
}
//---------------------------------------------------------unite_rectangles
template<class Rect>
inline Rect unite_rectangles(const Rect& r1, const Rect& r2)
{
Rect r = r1;
if(r.x2 < r2.x2) r.x2 = r2.x2;
if(r.y2 < r2.y2) r.y2 = r2.y2;
if(r.x1 > r2.x1) r.x1 = r2.x1;
if(r.y1 > r2.y1) r.y1 = r2.y1;
return r;
}
typedef rect_base<int> rect_i; //----rect_i
typedef rect_base<float> rect_f; //----rect_f
typedef rect_base<double> rect_d; //----rect_d
//---------------------------------------------------------path_commands_e
enum path_commands_e
{
path_cmd_stop = 0, //----path_cmd_stop
path_cmd_move_to = 1, //----path_cmd_move_to
path_cmd_line_to = 2, //----path_cmd_line_to
path_cmd_curve3 = 3, //----path_cmd_curve3
path_cmd_curve4 = 4, //----path_cmd_curve4
path_cmd_curveN = 5, //----path_cmd_curveN
path_cmd_catrom = 6, //----path_cmd_catrom
path_cmd_ubspline = 7, //----path_cmd_ubspline
path_cmd_end_poly = 0x0F, //----path_cmd_end_poly
path_cmd_mask = 0x0F //----path_cmd_mask
};
//------------------------------------------------------------path_flags_e
enum path_flags_e
{
path_flags_none = 0, //----path_flags_none
path_flags_ccw = 0x10, //----path_flags_ccw
path_flags_cw = 0x20, //----path_flags_cw
path_flags_close = 0x40, //----path_flags_close
path_flags_mask = 0xF0 //----path_flags_mask
};
//---------------------------------------------------------------is_vertex
inline bool is_vertex(unsigned c)
{
return c >= path_cmd_move_to && c < path_cmd_end_poly;
}
//--------------------------------------------------------------is_drawing
inline bool is_drawing(unsigned c)
{
return c >= path_cmd_line_to && c < path_cmd_end_poly;
}
//-----------------------------------------------------------------is_stop
inline bool is_stop(unsigned c)
{
return c == path_cmd_stop;
}
//--------------------------------------------------------------is_move_to
inline bool is_move_to(unsigned c)
{
return c == path_cmd_move_to;
}
//--------------------------------------------------------------is_line_to
inline bool is_line_to(unsigned c)
{
return c == path_cmd_line_to;
}
//----------------------------------------------------------------is_curve
inline bool is_curve(unsigned c)
{
return c == path_cmd_curve3 || c == path_cmd_curve4;
}
//---------------------------------------------------------------is_curve3
inline bool is_curve3(unsigned c)
{
return c == path_cmd_curve3;
}
//---------------------------------------------------------------is_curve4
inline bool is_curve4(unsigned c)
{
return c == path_cmd_curve4;
}
//-------------------------------------------------------------is_end_poly
inline bool is_end_poly(unsigned c)
{
return (c & path_cmd_mask) == path_cmd_end_poly;
}
//----------------------------------------------------------------is_close
inline bool is_close(unsigned c)
{
return (c & ~(path_flags_cw | path_flags_ccw)) ==
(path_cmd_end_poly | path_flags_close);
}
//------------------------------------------------------------is_next_poly
inline bool is_next_poly(unsigned c)
{
return is_stop(c) || is_move_to(c) || is_end_poly(c);
}
//-------------------------------------------------------------------is_cw
inline bool is_cw(unsigned c)
{
return (c & path_flags_cw) != 0;
}
//------------------------------------------------------------------is_ccw
inline bool is_ccw(unsigned c)
{
return (c & path_flags_ccw) != 0;
}
//-------------------------------------------------------------is_oriented
inline bool is_oriented(unsigned c)
{
return (c & (path_flags_cw | path_flags_ccw)) != 0;
}
//---------------------------------------------------------------is_closed
inline bool is_closed(unsigned c)
{
return (c & path_flags_close) != 0;
}
//----------------------------------------------------------get_close_flag
inline unsigned get_close_flag(unsigned c)
{
return c & path_flags_close;
}
//-------------------------------------------------------clear_orientation
inline unsigned clear_orientation(unsigned c)
{
return c & ~(path_flags_cw | path_flags_ccw);
}
//---------------------------------------------------------get_orientation
inline unsigned get_orientation(unsigned c)
{
return c & (path_flags_cw | path_flags_ccw);
}
//---------------------------------------------------------set_orientation
inline unsigned set_orientation(unsigned c, unsigned o)
{
return clear_orientation(c) | o;
}
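//------------------------------------------------------------------------
// Usage sketch added to this copy for illustration (not original AGG
// code). A closed, clockwise end-of-polygon command is path_cmd_end_poly
// with the close and cw flags OR-ed in; the helpers above decompose it.
inline bool path_flags_example()
{
    unsigned c = path_cmd_end_poly | path_flags_close | path_flags_cw;
    return is_end_poly(c) && is_close(c) && is_cw(c) &&
           get_orientation(c) == path_flags_cw;
}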
//--------------------------------------------------------------point_base
template<class T> struct point_base
{
typedef T value_type;
T x,y;
point_base() {}
point_base(T x_, T y_) : x(x_), y(y_) {}
};
typedef point_base<int> point_i; //-----point_i
typedef point_base<float> point_f; //-----point_f
typedef point_base<double> point_d; //-----point_d
//-------------------------------------------------------------vertex_base
template<class T> struct vertex_base
{
typedef T value_type;
T x,y;
unsigned cmd;
vertex_base() {}
vertex_base(T x_, T y_, unsigned cmd_) : x(x_), y(y_), cmd(cmd_) {}
};
typedef vertex_base<int> vertex_i; //-----vertex_i
typedef vertex_base<float> vertex_f; //-----vertex_f
typedef vertex_base<double> vertex_d; //-----vertex_d
//----------------------------------------------------------------row_info
template<class T> struct row_info
{
int x1, x2;
T* ptr;
row_info() {}
row_info(int x1_, int x2_, T* ptr_) : x1(x1_), x2(x2_), ptr(ptr_) {}
};
//----------------------------------------------------------const_row_info
template<class T> struct const_row_info
{
int x1, x2;
const T* ptr;
const_row_info() {}
const_row_info(int x1_, int x2_, const T* ptr_) :
x1(x1_), x2(x2_), ptr(ptr_) {}
};
//------------------------------------------------------------is_equal_eps
template<class T> inline bool is_equal_eps(T v1, T v2, T epsilon)
{
return fabs(v1 - v2) <= double(epsilon);
}
}
#endif
// File: D://workCode//uploadProject\awtk\3rd\agg\include\agg_clip_liang_barsky.h
//----------------------------------------------------------------------------
// Anti-Grain Geometry - Version 2.4
// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
//
// Permission to copy, use, modify, sell and distribute this software
// is granted provided this copyright notice appears in all copies.
// This software is provided "as is" without express or implied
// warranty, and with no claim as to its suitability for any purpose.
//
//----------------------------------------------------------------------------
// Contact: mcseem@antigrain.com
// mcseemagg@yahoo.com
// http://www.antigrain.com
//----------------------------------------------------------------------------
//
// Liang-Barsky clipping
//
//----------------------------------------------------------------------------
#ifndef AGG_CLIP_LIANG_BARSKY_INCLUDED
#define AGG_CLIP_LIANG_BARSKY_INCLUDED
#include "agg_basics.h"
namespace agg
{
//------------------------------------------------------------------------
enum clipping_flags_e
{
clipping_flags_x1_clipped = 4,
clipping_flags_x2_clipped = 1,
clipping_flags_y1_clipped = 8,
clipping_flags_y2_clipped = 2,
clipping_flags_x_clipped = clipping_flags_x1_clipped | clipping_flags_x2_clipped,
clipping_flags_y_clipped = clipping_flags_y1_clipped | clipping_flags_y2_clipped
};
//----------------------------------------------------------clipping_flags
// Determine the clipping code of the vertex according to the
// Cyrus-Beck line clipping algorithm
//
// | |
// 0110 | 0010 | 0011
// | |
// -------+--------+-------- clip_box.y2
// | |
// 0100 | 0000 | 0001
// | |
// -------+--------+-------- clip_box.y1
// | |
// 1100 | 1000 | 1001
// | |
// clip_box.x1 clip_box.x2
//
//
template<class T>
inline unsigned clipping_flags(T x, T y, const rect_base<T>& clip_box)
{
return (x > clip_box.x2) |
((y > clip_box.y2) << 1) |
((x < clip_box.x1) << 2) |
((y < clip_box.y1) << 3);
}
//--------------------------------------------------------clipping_flags_x
template<class T>
inline unsigned clipping_flags_x(T x, const rect_base<T>& clip_box)
{
return (x > clip_box.x2) | ((x < clip_box.x1) << 2);
}
//--------------------------------------------------------clipping_flags_y
template<class T>
inline unsigned clipping_flags_y(T y, const rect_base<T>& clip_box)
{
return ((y > clip_box.y2) << 1) | ((y < clip_box.y1) << 3);
}
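//------------------------------------------------------------------------
// Usage sketch added to this copy for illustration (not original AGG
// code). Following the diagram above, a point left of clip_box.x1 and
// above clip_box.y2 gets the region code 0110, i.e.
// clipping_flags_x1_clipped | clipping_flags_y2_clipped.
inline unsigned clipping_flags_example()
{
    rect_base<int> clip_box(0, 0, 100, 100);
    return clipping_flags(-5, 120, clip_box);   // 6 == binary 0110
}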
//-------------------------------------------------------clip_liang_barsky
template<class T>
inline unsigned clip_liang_barsky(T x1, T y1, T x2, T y2,
const rect_base<T>& clip_box,
T* x, T* y)
{
const double nearzero = 1e-30;
double deltax = x2 - x1;
double deltay = y2 - y1;
double xin;
double xout;
double yin;
double yout;
double tinx;
double tiny;
double toutx;
double touty;
double tin1;
double tin2;
double tout1;
unsigned np = 0;
if(deltax == 0.0)
{
// bump off of the vertical
deltax = (x1 > clip_box.x1) ? -nearzero : nearzero;
}
if(deltay == 0.0)
{
// bump off of the horizontal
deltay = (y1 > clip_box.y1) ? -nearzero : nearzero;
}
if(deltax > 0.0)
{
// points to right
xin = clip_box.x1;
xout = clip_box.x2;
}
else
{
xin = clip_box.x2;
xout = clip_box.x1;
}
if(deltay > 0.0)
{
// points up
yin = clip_box.y1;
yout = clip_box.y2;
}
else
{
yin = clip_box.y2;
yout = clip_box.y1;
}
tinx = (xin - x1) / deltax;
tiny = (yin - y1) / deltay;
if (tinx < tiny)
{
// hits x first
tin1 = tinx;
tin2 = tiny;
}
else
{
// hits y first
tin1 = tiny;
tin2 = tinx;
}
if(tin1 <= 1.0)
{
if(0.0 < tin1)
{
*x++ = (T)xin;
*y++ = (T)yin;
++np;
}
if(tin2 <= 1.0)
{
toutx = (xout - x1) / deltax;
touty = (yout - y1) / deltay;
tout1 = (toutx < touty) ? toutx : touty;
if(tin2 > 0.0 || tout1 > 0.0)
{
if(tin2 <= tout1)
{
if(tin2 > 0.0)
{
if(tinx > tiny)
{
*x++ = (T)xin;
*y++ = (T)(y1 + tinx * deltay);
}
else
{
*x++ = (T)(x1 + tiny * deltax);
*y++ = (T)yin;
}
++np;
}
if(tout1 < 1.0)
{
if(toutx < touty)
{
*x++ = (T)xout;
*y++ = (T)(y1 + toutx * deltay);
}
else
{
*x++ = (T)(x1 + touty * deltax);
*y++ = (T)yout;
}
}
else
{
*x++ = x2;
*y++ = y2;
}
++np;
}
else
{
if(tinx > tiny)
{
*x++ = (T)xin;
*y++ = (T)yout;
}
else
{
*x++ = (T)xout;
*y++ = (T)yin;
}
++np;
}
}
}
}
return np;
}
//----------------------------------------------------------------------------
template<class T>
bool clip_move_point(T x1, T y1, T x2, T y2,
const rect_base<T>& clip_box,
T* x, T* y, unsigned flags)
{
T bound;
if(flags & clipping_flags_x_clipped)
{
if(x1 == x2)
{
return false;
}
bound = (flags & clipping_flags_x1_clipped) ? clip_box.x1 : clip_box.x2;
*y = (T)(double(bound - x1) * (y2 - y1) / (x2 - x1) + y1);
*x = bound;
}
flags = clipping_flags_y(*y, clip_box);
if(flags & clipping_flags_y_clipped)
{
if(y1 == y2)
{
return false;
}
bound = (flags & clipping_flags_y1_clipped) ? clip_box.y1 : clip_box.y2;
*x = (T)(double(bound - y1) * (x2 - x1) / (y2 - y1) + x1);
*y = bound;
}
return true;
}
//-------------------------------------------------------clip_line_segment
// Returns: ret >= 4 - Fully clipped
// (ret & 1) != 0 - First point has been moved
// (ret & 2) != 0 - Second point has been moved
//
template<class T>
unsigned clip_line_segment(T* x1, T* y1, T* x2, T* y2,
const rect_base<T>& clip_box)
{
unsigned f1 = clipping_flags(*x1, *y1, clip_box);
unsigned f2 = clipping_flags(*x2, *y2, clip_box);
unsigned ret = 0;
if((f2 | f1) == 0)
{
// Fully visible
return 0;
}
if((f1 & clipping_flags_x_clipped) != 0 &&
(f1 & clipping_flags_x_clipped) == (f2 & clipping_flags_x_clipped))
{
// Fully clipped
return 4;
}
if((f1 & clipping_flags_y_clipped) != 0 &&
(f1 & clipping_flags_y_clipped) == (f2 & clipping_flags_y_clipped))
{
// Fully clipped
return 4;
}
T tx1 = *x1;
T ty1 = *y1;
T tx2 = *x2;
T ty2 = *y2;
if(f1)
{
if(!clip_move_point(tx1, ty1, tx2, ty2, clip_box, x1, y1, f1))
{
return 4;
}
if(*x1 == *x2 && *y1 == *y2)
{
return 4;
}
ret |= 1;
}
if(f2)
{
if(!clip_move_point(tx1, ty1, tx2, ty2, clip_box, x2, y2, f2))
{
return 4;
}
if(*x1 == *x2 && *y1 == *y2)
{
return 4;
}
ret |= 2;
}
return ret;
}
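//------------------------------------------------------------------------
// Usage sketch added to this copy for illustration (not original AGG
// code). The return-value convention documented above tells the caller
// which endpoints were moved; here only the first point is outside the
// clip box, so it is moved to (0, 50) and bit 0 is set.
inline bool clip_line_segment_example()
{
    rect_base<int> clip_box(0, 0, 100, 100);
    int x1 = -50, y1 = 50, x2 = 50, y2 = 50;
    unsigned ret = clip_line_segment(&x1, &y1, &x2, &y2, clip_box);
    return ret == 1 && x1 == 0 && y1 == 50;
}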
}
#endif
// File: D://workCode//uploadProject\awtk\3rd\agg\include\agg_color_gray.h
//----------------------------------------------------------------------------
// Anti-Grain Geometry - Version 2.4
// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
//
// Permission to copy, use, modify, sell and distribute this software
// is granted provided this copyright notice appears in all copies.
// This software is provided "as is" without express or implied
// warranty, and with no claim as to its suitability for any purpose.
//
//----------------------------------------------------------------------------
// Contact: mcseem@antigrain.com
// mcseemagg@yahoo.com
// http://www.antigrain.com
//----------------------------------------------------------------------------
//
// Adaptation for high precision colors has been sponsored by
// Liberty Technology Systems, Inc., visit http://lib-sys.com
//
// Liberty Technology Systems, Inc. is the provider of
// PostScript and PDF technology for software developers.
//
//----------------------------------------------------------------------------
//
// color types gray8, gray16
//
//----------------------------------------------------------------------------
#ifndef AGG_COLOR_GRAY_INCLUDED
#define AGG_COLOR_GRAY_INCLUDED
#include "agg_basics.h"
#include "agg_color_rgba.h"
namespace agg
{
//===================================================================gray8
template<class Colorspace>
struct gray8T
{
typedef int8u value_type;
typedef int32u calc_type;
typedef int32 long_type;
enum base_scale_e
{
base_shift = 8,
base_scale = 1 << base_shift,
base_mask = base_scale - 1,
base_MSB = 1 << (base_shift - 1)
};
typedef gray8T self_type;
value_type v;
value_type a;
static value_type luminance(const rgba& c)
{
// Calculate grayscale value as per ITU-R BT.709.
return value_type(uround((0.2126 * c.r + 0.7152 * c.g + 0.0722 * c.b) * base_mask));
}
static value_type luminance(const rgba8& c)
{
// Calculate grayscale value as per ITU-R BT.709.
return value_type((55u * c.r + 184u * c.g + 18u * c.b) >> 8);
}
static void convert(gray8T<linear>& dst, const gray8T<sRGB>& src)
{
dst.v = sRGB_conv<value_type>::rgb_from_sRGB(src.v);
dst.a = src.a;
}
static void convert(gray8T<sRGB>& dst, const gray8T<linear>& src)
{
dst.v = sRGB_conv<value_type>::rgb_to_sRGB(src.v);
dst.a = src.a;
}
static void convert(gray8T<linear>& dst, const rgba8& src)
{
dst.v = luminance(src);
dst.a = src.a;
}
static void convert(gray8T<linear>& dst, const srgba8& src)
{
// The RGB weights are only valid for linear values.
convert(dst, rgba8(src));
}
static void convert(gray8T<sRGB>& dst, const rgba8& src)
{
dst.v = sRGB_conv<value_type>::rgb_to_sRGB(luminance(src));
dst.a = src.a;
}
static void convert(gray8T<sRGB>& dst, const srgba8& src)
{
// The RGB weights are only valid for linear values.
convert(dst, rgba8(src));
}
//--------------------------------------------------------------------
gray8T() {}
//--------------------------------------------------------------------
explicit gray8T(unsigned v_, unsigned a_ = base_mask) :
v(int8u(v_)), a(int8u(a_)) {}
//--------------------------------------------------------------------
gray8T(const self_type& c, unsigned a_) :
v(c.v), a(value_type(a_)) {}
//--------------------------------------------------------------------
gray8T(const rgba& c) :
v(luminance(c)),
a(value_type(uround(c.a * base_mask))) {}
//--------------------------------------------------------------------
template<class T>
gray8T(const gray8T<T>& c)
{
convert(*this, c);
}
//--------------------------------------------------------------------
template<class T>
gray8T(const rgba8T<T>& c)
{
convert(*this, c);
}
//--------------------------------------------------------------------
template<class T>
T convert_from_sRGB() const
{
typename T::value_type y = sRGB_conv<typename T::value_type>::rgb_from_sRGB(v);
return T(y, y, y, sRGB_conv<typename T::value_type>::alpha_from_sRGB(a));
}
template<class T>
T convert_to_sRGB() const
{
typename T::value_type y = sRGB_conv<typename T::value_type>::rgb_to_sRGB(v);
return T(y, y, y, sRGB_conv<typename T::value_type>::alpha_to_sRGB(a));
}
//--------------------------------------------------------------------
rgba8 make_rgba8(const linear&) const
{
return rgba8(v, v, v, a);
}
rgba8 make_rgba8(const sRGB&) const
{
return convert_from_sRGB<srgba8>();
}
operator rgba8() const
{
return make_rgba8(Colorspace());
}
//--------------------------------------------------------------------
srgba8 make_srgba8(const linear&) const
{
return convert_to_sRGB<rgba8>();
}
srgba8 make_srgba8(const sRGB&) const
{
return srgba8(v, v, v, a);
}
operator srgba8() const
{
return make_rgba8(Colorspace());
}
//--------------------------------------------------------------------
rgba16 make_rgba16(const linear&) const
{
rgba16::value_type rgb = (v << 8) | v;
return rgba16(rgb, rgb, rgb, (a << 8) | a);
}
rgba16 make_rgba16(const sRGB&) const
{
return convert_from_sRGB<rgba16>();
}
operator rgba16() const
{
return make_rgba16(Colorspace());
}
//--------------------------------------------------------------------
rgba32 make_rgba32(const linear&) const
{
rgba32::value_type v32 = v / 255.0f;
return rgba32(v32, v32, v32, a / 255.0f);
}
rgba32 make_rgba32(const sRGB&) const
{
return convert_from_sRGB<rgba32>();
}
operator rgba32() const
{
return make_rgba32(Colorspace());
}
//--------------------------------------------------------------------
static AGG_INLINE double to_double(value_type a)
{
return double(a) / base_mask;
}
//--------------------------------------------------------------------
static AGG_INLINE value_type from_double(double a)
{
return value_type(uround(a * base_mask));
}
//--------------------------------------------------------------------
static AGG_INLINE value_type empty_value()
{
return 0;
}
//--------------------------------------------------------------------
static AGG_INLINE value_type full_value()
{
return base_mask;
}
//--------------------------------------------------------------------
AGG_INLINE bool is_transparent() const
{
return a == 0;
}
//--------------------------------------------------------------------
AGG_INLINE bool is_opaque() const
{
return a == base_mask;
}
//--------------------------------------------------------------------
// Fixed-point multiply, exact over int8u.
static AGG_INLINE value_type multiply(value_type a, value_type b)
{
calc_type t = a * b + base_MSB;
return value_type(((t >> base_shift) + t) >> base_shift);
}
//--------------------------------------------------------------------
static AGG_INLINE value_type demultiply(value_type a, value_type b)
{
if (a * b == 0)
{
return 0;
}
else if (a >= b)
{
return base_mask;
}
else return value_type((a * base_mask + (b >> 1)) / b);
}
//--------------------------------------------------------------------
template<typename T>
static AGG_INLINE T downscale(T a)
{
return a >> base_shift;
}
//--------------------------------------------------------------------
template<typename T>
static AGG_INLINE T downshift(T a, unsigned n)
{
return a >> n;
}
//--------------------------------------------------------------------
// Fixed-point multiply, exact over int8u.
// Specifically for multiplying a color component by a cover.
static AGG_INLINE value_type mult_cover(value_type a, value_type b)
{
return multiply(a, b);
}
//--------------------------------------------------------------------
static AGG_INLINE cover_type scale_cover(cover_type a, value_type b)
{
return multiply(b, a);
}
//--------------------------------------------------------------------
// Interpolate p to q by a, assuming q is premultiplied by a.
static AGG_INLINE value_type prelerp(value_type p, value_type q, value_type a)
{
return p + q - multiply(p, a);
}
//--------------------------------------------------------------------
// Interpolate p to q by a.
static AGG_INLINE value_type lerp(value_type p, value_type q, value_type a)
{
int t = (q - p) * a + base_MSB - (p > q);
return value_type(p + (((t >> base_shift) + t) >> base_shift));
}
//--------------------------------------------------------------------
self_type& clear()
{
v = a = 0;
return *this;
}
//--------------------------------------------------------------------
self_type& transparent()
{
a = 0;
return *this;
}
//--------------------------------------------------------------------
self_type& opacity(double a_)
{
if (a_ < 0) a = 0;
else if (a_ > 1) a = 1;
else a = (value_type)uround(a_ * double(base_mask));
return *this;
}
//--------------------------------------------------------------------
double opacity() const
{
return double(a) / double(base_mask);
}
//--------------------------------------------------------------------
self_type& premultiply()
{
if (a < base_mask)
{
if (a == 0) v = 0;
else v = multiply(v, a);
}
return *this;
}
//--------------------------------------------------------------------
self_type& demultiply()
{
if (a < base_mask)
{
if (a == 0)
{
v = 0;
}
else
{
calc_type v_ = (calc_type(v) * base_mask) / a;
v = value_type((v_ > base_mask) ? (value_type)base_mask : v_);
}
}
return *this;
}
//--------------------------------------------------------------------
self_type gradient(self_type c, double k) const
{
self_type ret;
calc_type ik = uround(k * base_scale);
ret.v = lerp(v, c.v, ik);
ret.a = lerp(a, c.a, ik);
return ret;
}
//--------------------------------------------------------------------
AGG_INLINE void add(const self_type& c, unsigned cover)
{
calc_type cv, ca;
if (cover == cover_mask)
{
if (c.a == base_mask)
{
*this = c;
return;
}
else
{
cv = v + c.v;
ca = a + c.a;
}
}
else
{
cv = v + mult_cover(c.v, cover);
ca = a + mult_cover(c.a, cover);
}
v = (value_type)((cv > calc_type(base_mask)) ? calc_type(base_mask) : cv);
a = (value_type)((ca > calc_type(base_mask)) ? calc_type(base_mask) : ca);
}
//--------------------------------------------------------------------
static self_type no_color() { return self_type(0,0); }
};
typedef gray8T<linear> gray8;
typedef gray8T<sRGB> sgray8;
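//------------------------------------------------------------------------
// Usage sketch added to this copy for illustration (not original AGG
// code). Converting an 8-bit linear RGBA color to gray8 applies the
// integer BT.709 luminance weights above; converting back replicates the
// gray value into all three channels.
inline gray8 gray8_example()
{
    rgba8 c(10, 20, 30, 255);
    gray8 g(c);            // g.v == (55*10 + 184*20 + 18*30) >> 8 == 18
    rgba8 back = g;        // back == rgba8(18, 18, 18, 255)
    (void)back;
    return g;
}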
//==================================================================gray16
struct gray16
{
typedef int16u value_type;
typedef int32u calc_type;
typedef int64 long_type;
enum base_scale_e
{
base_shift = 16,
base_scale = 1 << base_shift,
base_mask = base_scale - 1,
base_MSB = 1 << (base_shift - 1)
};
typedef gray16 self_type;
value_type v;
value_type a;
static value_type luminance(const rgba& c)
{
// Calculate grayscale value as per ITU-R BT.709.
return value_type(uround((0.2126 * c.r + 0.7152 * c.g + 0.0722 * c.b) * base_mask));
}
static value_type luminance(const rgba16& c)
{
// Calculate grayscale value as per ITU-R BT.709.
return value_type((13933u * c.r + 46872u * c.g + 4732u * c.b) >> 16);
}
static value_type luminance(const rgba8& c)
{
return luminance(rgba16(c));
}
static value_type luminance(const srgba8& c)
{
return luminance(rgba16(c));
}
static value_type luminance(const rgba32& c)
{
return luminance(rgba(c));
}
//--------------------------------------------------------------------
gray16() {}
//--------------------------------------------------------------------
explicit gray16(unsigned v_, unsigned a_ = base_mask) :
v(int16u(v_)), a(int16u(a_)) {}
//--------------------------------------------------------------------
gray16(const self_type& c, unsigned a_) :
v(c.v), a(value_type(a_)) {}
//--------------------------------------------------------------------
gray16(const rgba& c) :
v(luminance(c)),
a((value_type)uround(c.a * double(base_mask))) {}
//--------------------------------------------------------------------
gray16(const rgba8& c) :
v(luminance(c)),
a((value_type(c.a) << 8) | c.a) {}
//--------------------------------------------------------------------
gray16(const srgba8& c) :
v(luminance(c)),
a((value_type(c.a) << 8) | c.a) {}
//--------------------------------------------------------------------
gray16(const rgba16& c) :
v(luminance(c)),
a(c.a) {}
//--------------------------------------------------------------------
gray16(const gray8& c) :
v((value_type(c.v) << 8) | c.v),
a((value_type(c.a) << 8) | c.a) {}
//--------------------------------------------------------------------
gray16(const sgray8& c) :
v(sRGB_conv<value_type>::rgb_from_sRGB(c.v)),
a(sRGB_conv<value_type>::alpha_from_sRGB(c.a)) {}
//--------------------------------------------------------------------
operator rgba8() const
{
return rgba8(v >> 8, v >> 8, v >> 8, a >> 8);
}
//--------------------------------------------------------------------
operator srgba8() const
{
value_type y = sRGB_conv<value_type>::rgb_to_sRGB(v);
return srgba8(y, y, y, sRGB_conv<value_type>::alpha_to_sRGB(a));
}
//--------------------------------------------------------------------
operator rgba16() const
{
return rgba16(v, v, v, a);
}
//--------------------------------------------------------------------
operator rgba32() const
{
rgba32::value_type v32 = v / 65535.0f;
return rgba32(v32, v32, v32, a / 65535.0f);
}
//--------------------------------------------------------------------
operator gray8() const
{
return gray8(v >> 8, a >> 8);
}
//--------------------------------------------------------------------
operator sgray8() const
{
return sgray8(
sRGB_conv<value_type>::rgb_to_sRGB(v),
sRGB_conv<value_type>::alpha_to_sRGB(a));
}
//--------------------------------------------------------------------
static AGG_INLINE double to_double(value_type a)
{
return double(a) / base_mask;
}
//--------------------------------------------------------------------
static AGG_INLINE value_type from_double(double a)
{
return value_type(uround(a * base_mask));
}
//--------------------------------------------------------------------
static AGG_INLINE value_type empty_value()
{
return 0;
}
//--------------------------------------------------------------------
static AGG_INLINE value_type full_value()
{
return base_mask;
}
//--------------------------------------------------------------------
AGG_INLINE bool is_transparent() const
{
return a == 0;
}
//--------------------------------------------------------------------
AGG_INLINE bool is_opaque() const
{
return a == base_mask;
}
//--------------------------------------------------------------------
// Fixed-point multiply, exact over int16u.
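// The expression ((t >> base_shift) + t) >> base_shift implements a rounded
// division by base_mask (65535), so multiply(x, base_mask) == x.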
static AGG_INLINE value_type multiply(value_type a, value_type b)
{
calc_type t = a * b + base_MSB;
return value_type(((t >> base_shift) + t) >> base_shift);
}
//--------------------------------------------------------------------
static AGG_INLINE value_type demultiply(value_type a, value_type b)
{
if (a * b == 0)
{
return 0;
}
else if (a >= b)
{
return base_mask;
}
else return value_type((a * base_mask + (b >> 1)) / b);
}
//--------------------------------------------------------------------
template<typename T>
static AGG_INLINE T downscale(T a)
{
return a >> base_shift;
}
//--------------------------------------------------------------------
template<typename T>
static AGG_INLINE T downshift(T a, unsigned n)
{
return a >> n;
}
//--------------------------------------------------------------------
// Fixed-point multiply, almost exact over int16u.
// Specifically for multiplying a color component by a cover.
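// The 8-bit cover is widened to 16 bits by bit replication ((b << 8) | b),
// so a full cover of 255 maps exactly to base_mask.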
static AGG_INLINE value_type mult_cover(value_type a, cover_type b)
{
return multiply(a, b << 8 | b);
}
//--------------------------------------------------------------------
static AGG_INLINE cover_type scale_cover(cover_type a, value_type b)
{
return mult_cover(b, a) >> 8;
}
//--------------------------------------------------------------------
// Interpolate p to q by a, assuming q is premultiplied by a.
static AGG_INLINE value_type prelerp(value_type p, value_type q, value_type a)
{
return p + q - multiply(p, a);
}
//--------------------------------------------------------------------
// Interpolate p to q by a.
static AGG_INLINE value_type lerp(value_type p, value_type q, value_type a)
{
int t = (q - p) * a + base_MSB - (p > q);
return value_type(p + (((t >> base_shift) + t) >> base_shift));
}
//--------------------------------------------------------------------
self_type& clear()
{
v = a = 0;
return *this;
}
//--------------------------------------------------------------------
self_type& transparent()
{
a = 0;
return *this;
}
//--------------------------------------------------------------------
self_type& opacity(double a_)
{
if (a_ < 0) a = 0;
else if (a_ > 1) a = base_mask;
else a = (value_type)uround(a_ * double(base_mask));
return *this;
}
//--------------------------------------------------------------------
double opacity() const
{
return double(a) / double(base_mask);
}
//--------------------------------------------------------------------
self_type& premultiply()
{
if (a < base_mask)
{
if(a == 0) v = 0;
else v = multiply(v, a);
}
return *this;
}
//--------------------------------------------------------------------
self_type& demultiply()
{
if (a < base_mask)
{
if (a == 0)
{
v = 0;
}
else
{
calc_type v_ = (calc_type(v) * base_mask) / a;
v = value_type((v_ > base_mask) ? base_mask : v_);
}
}
return *this;
}
//--------------------------------------------------------------------
self_type gradient(self_type c, double k) const
{
self_type ret;
calc_type ik = uround(k * base_scale);
ret.v = lerp(v, c.v, ik);
ret.a = lerp(a, c.a, ik);
return ret;
}
//--------------------------------------------------------------------
AGG_INLINE void add(const self_type& c, unsigned cover)
{
calc_type cv, ca;
if (cover == cover_mask)
{
if (c.a == base_mask)
{
*this = c;
return;
}
else
{
cv = v + c.v;
ca = a + c.a;
}
}
else
{
cv = v + mult_cover(c.v, cover);
ca = a + mult_cover(c.a, cover);
}
v = (value_type)((cv > calc_type(base_mask)) ? calc_type(base_mask) : cv);
a = (value_type)((ca > calc_type(base_mask)) ? calc_type(base_mask) : ca);
}
//--------------------------------------------------------------------
static self_type no_color() { return self_type(0,0); }
};
//===================================================================gray32
struct gray32
{
typedef float value_type;
typedef double calc_type;
typedef double long_type;
typedef gray32 self_type;
value_type v;
value_type a;
// Calculate grayscale value as per ITU-R BT.709.
static value_type luminance(double r, double g, double b)
{
return value_type(0.2126 * r + 0.7152 * g + 0.0722 * b);
}
static value_type luminance(const rgba& c)
{
return luminance(c.r, c.g, c.b);
}
static value_type luminance(const rgba32& c)
{
return luminance(c.r, c.g, c.b);
}
static value_type luminance(const rgba8& c)
{
return luminance(c.r / 255.0, c.g / 255.0, c.b / 255.0);
}
static value_type luminance(const rgba16& c)
{
return luminance(c.r / 65535.0, c.g / 65535.0, c.b / 65535.0);
}
//--------------------------------------------------------------------
gray32() {}
//--------------------------------------------------------------------
explicit gray32(value_type v_, value_type a_ = 1) :
v(v_), a(a_) {}
//--------------------------------------------------------------------
gray32(const self_type& c, value_type a_) :
v(c.v), a(a_) {}
//--------------------------------------------------------------------
gray32(const rgba& c) :
v(luminance(c)),
a(value_type(c.a)) {}
//--------------------------------------------------------------------
gray32(const rgba8& c) :
v(luminance(c)),
a(value_type(c.a / 255.0)) {}
//--------------------------------------------------------------------
gray32(const srgba8& c) :
v(luminance(rgba32(c))),
a(value_type(c.a / 255.0)) {}
//--------------------------------------------------------------------
gray32(const rgba16& c) :
v(luminance(c)),
a(value_type(c.a / 65535.0)) {}
//--------------------------------------------------------------------
gray32(const rgba32& c) :
v(luminance(c)),
a(value_type(c.a)) {}
//--------------------------------------------------------------------
gray32(const gray8& c) :
v(value_type(c.v / 255.0)),
a(value_type(c.a / 255.0)) {}
//--------------------------------------------------------------------
gray32(const sgray8& c) :
v(sRGB_conv<value_type>::rgb_from_sRGB(c.v)),
a(sRGB_conv<value_type>::alpha_from_sRGB(c.a)) {}
//--------------------------------------------------------------------
gray32(const gray16& c) :
v(value_type(c.v / 65535.0)),
a(value_type(c.a / 65535.0)) {}
//--------------------------------------------------------------------
operator rgba() const
{
return rgba(v, v, v, a);
}
//--------------------------------------------------------------------
operator gray8() const
{
return gray8(uround(v * 255.0), uround(a * 255.0));
}
//--------------------------------------------------------------------
operator sgray8() const
{
// Return (non-premultiplied) sRGB values.
return sgray8(
sRGB_conv<value_type>::rgb_to_sRGB(v),
sRGB_conv<value_type>::alpha_to_sRGB(a));
}
//--------------------------------------------------------------------
operator gray16() const
{
return gray16(uround(v * 65535.0), uround(a * 65535.0));
}
//--------------------------------------------------------------------
operator rgba8() const
{
rgba8::value_type y = uround(v * 255.0);
return rgba8(y, y, y, uround(a * 255.0));
}
//--------------------------------------------------------------------
operator srgba8() const
{
srgba8::value_type y = sRGB_conv<value_type>::rgb_to_sRGB(v);
return srgba8(y, y, y, sRGB_conv<value_type>::alpha_to_sRGB(a));
}
//--------------------------------------------------------------------
operator rgba16() const
{
rgba16::value_type y = uround(v * 65535.0);
return rgba16(y, y, y, uround(a * 65535.0));
}
//--------------------------------------------------------------------
operator rgba32() const
{
return rgba32(v, v, v, a);
}
//--------------------------------------------------------------------
static AGG_INLINE double to_double(value_type a)
{
return a;
}
//--------------------------------------------------------------------
static AGG_INLINE value_type from_double(double a)
{
return value_type(a);
}
//--------------------------------------------------------------------
static AGG_INLINE value_type empty_value()
{
return 0;
}
//--------------------------------------------------------------------
static AGG_INLINE value_type full_value()
{
return 1;
}
//--------------------------------------------------------------------
AGG_INLINE bool is_transparent() const
{
return a <= 0;
}
//--------------------------------------------------------------------
AGG_INLINE bool is_opaque() const
{
return a >= 1;
}
//--------------------------------------------------------------------
static AGG_INLINE value_type invert(value_type x)
{
return 1 - x;
}
//--------------------------------------------------------------------
static AGG_INLINE value_type multiply(value_type a, value_type b)
{
return value_type(a * b);
}
//--------------------------------------------------------------------
static AGG_INLINE value_type demultiply(value_type a, value_type b)
{
return (b == 0) ? 0 : value_type(a / b);
}
//--------------------------------------------------------------------
template<typename T>
static AGG_INLINE T downscale(T a)
{
return a;
}
//--------------------------------------------------------------------
template<typename T>
static AGG_INLINE T downshift(T a, unsigned n)
{
return n > 0 ? a / (1 << n) : a;
}
//--------------------------------------------------------------------
static AGG_INLINE value_type mult_cover(value_type a, cover_type b)
{
return value_type(a * b / cover_mask);
}
//--------------------------------------------------------------------
static AGG_INLINE cover_type scale_cover(cover_type a, value_type b)
{
return cover_type(uround(a * b));
}
//--------------------------------------------------------------------
// Interpolate p to q by a, assuming q is premultiplied by a.
static AGG_INLINE value_type prelerp(value_type p, value_type q, value_type a)
{
return (1 - a) * p + q; // more accurate than "p + q - p * a"
}
//--------------------------------------------------------------------
// Interpolate p to q by a.
static AGG_INLINE value_type lerp(value_type p, value_type q, value_type a)
{
// The form "p + a * (q - p)" avoids a multiplication, but may produce an
// inaccurate result. For example, "p + (q - p)" may not be exactly equal
// to q. Therefore, stick to the basic expression, which at least produces
// the correct result at either extreme.
return (1 - a) * p + a * q;
}
//--------------------------------------------------------------------
self_type& clear()
{
v = a = 0;
return *this;
}
//--------------------------------------------------------------------
self_type& transparent()
{
a = 0;
return *this;
}
//--------------------------------------------------------------------
self_type& opacity(double a_)
{
if (a_ < 0) a = 0;
else if (a_ > 1) a = 1;
else a = value_type(a_);
return *this;
}
//--------------------------------------------------------------------
double opacity() const
{
return a;
}
//--------------------------------------------------------------------
self_type& premultiply()
{
if (a < 0) v = 0;
else if(a < 1) v *= a;
return *this;
}
//--------------------------------------------------------------------
self_type& demultiply()
{
if (a < 0) v = 0;
else if (a < 1) v /= a;
return *this;
}
//--------------------------------------------------------------------
self_type gradient(self_type c, double k) const
{
return self_type(
value_type(v + (c.v - v) * k),
value_type(a + (c.a - a) * k));
}
//--------------------------------------------------------------------
static self_type no_color() { return self_type(0,0); }
};
}
#endif
D://workCode//uploadProject\awtk\3rd\agg
D://workCode//uploadProject\awtk\3rd\agg\include\agg_color_rgba.h
//----------------------------------------------------------------------------
// Anti-Grain Geometry - Version 2.4
// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
//
// Permission to copy, use, modify, sell and distribute this software
// is granted provided this copyright notice appears in all copies.
// This software is provided "as is" without express or implied
// warranty, and with no claim as to its suitability for any purpose.
//
//----------------------------------------------------------------------------
//
// Adaptation for high precision colors has been sponsored by
// Liberty Technology Systems, Inc., visit http://lib-sys.com
//
// Liberty Technology Systems, Inc. is the provider of
// PostScript and PDF technology for software developers.
//
//----------------------------------------------------------------------------
// Contact: mcseem@antigrain.com
// mcseemagg@yahoo.com
// http://www.antigrain.com
//----------------------------------------------------------------------------
#ifndef AGG_COLOR_RGBA_INCLUDED
#define AGG_COLOR_RGBA_INCLUDED
#include <math.h>
#include "agg_basics.h"
#include "agg_gamma_lut.h"
namespace agg
{
// Supported component orders for RGB and RGBA pixel formats
//=======================================================================
struct order_rgb { enum rgb_e { R=0, G=1, B=2, N=3 }; };
struct order_bgr { enum bgr_e { B=0, G=1, R=2, N=3 }; };
struct order_rgba { enum rgba_e { R=0, G=1, B=2, A=3, N=4 }; };
struct order_argb { enum argb_e { A=0, R=1, G=2, B=3, N=4 }; };
struct order_abgr { enum abgr_e { A=0, B=1, G=2, R=3, N=4 }; };
struct order_bgra { enum bgra_e { B=0, G=1, R=2, A=3, N=4 }; };
// Colorspace tag types.
struct linear {};
struct sRGB {};
//====================================================================rgba
struct rgba
{
typedef double value_type;
double r;
double g;
double b;
double a;
//--------------------------------------------------------------------
rgba() {}
//--------------------------------------------------------------------
rgba(double r_, double g_, double b_, double a_=1.0) :
r(r_), g(g_), b(b_), a(a_) {}
//--------------------------------------------------------------------
rgba(const rgba& c, double a_) : r(c.r), g(c.g), b(c.b), a(a_) {}
//--------------------------------------------------------------------
rgba& clear()
{
r = g = b = a = 0;
return *this;
}
//--------------------------------------------------------------------
rgba& transparent()
{
a = 0;
return *this;
}
//--------------------------------------------------------------------
rgba& opacity(double a_)
{
if (a_ < 0) a = 0;
else if (a_ > 1) a = 1;
else a = a_;
return *this;
}
//--------------------------------------------------------------------
double opacity() const
{
return a;
}
//--------------------------------------------------------------------
rgba& premultiply()
{
r *= a;
g *= a;
b *= a;
return *this;
}
//--------------------------------------------------------------------
rgba& premultiply(double a_)
{
if (a <= 0 || a_ <= 0)
{
r = g = b = a = 0;
}
else
{
a_ /= a;
r *= a_;
g *= a_;
b *= a_;
a = a_;
}
return *this;
}
//--------------------------------------------------------------------
rgba& demultiply()
{
if (a == 0)
{
r = g = b = 0;
}
else
{
double a_ = 1.0 / a;
r *= a_;
g *= a_;
b *= a_;
}
return *this;
}
//--------------------------------------------------------------------
rgba gradient(rgba c, double k) const
{
rgba ret;
ret.r = r + (c.r - r) * k;
ret.g = g + (c.g - g) * k;
ret.b = b + (c.b - b) * k;
ret.a = a + (c.a - a) * k;
return ret;
}
rgba& operator+=(const rgba& c)
{
r += c.r;
g += c.g;
b += c.b;
a += c.a;
return *this;
}
rgba& operator*=(double k)
{
r *= k;
g *= k;
b *= k;
a *= k;
return *this;
}
//--------------------------------------------------------------------
static rgba no_color() { return rgba(0,0,0,0); }
//--------------------------------------------------------------------
static rgba from_wavelength(double wl, double gamma = 1.0);
//--------------------------------------------------------------------
explicit rgba(double wavelen, double gamma=1.0)
{
*this = from_wavelength(wavelen, gamma);
}
};
inline rgba operator+(const rgba& a, const rgba& b)
{
return rgba(a) += b;
}
inline rgba operator*(const rgba& a, double b)
{
return rgba(a) *= b;
}
//------------------------------------------------------------------------
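// Piecewise-linear approximation that maps a wavelength in the visible range
// (roughly 380..780 nm) to an RGB color, attenuating intensity near both ends
// of the spectrum and applying the given gamma.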
inline rgba rgba::from_wavelength(double wl, double gamma)
{
rgba t(0.0, 0.0, 0.0);
if (wl >= 380.0 && wl <= 440.0)
{
t.r = -1.0 * (wl - 440.0) / (440.0 - 380.0);
t.b = 1.0;
}
else if (wl >= 440.0 && wl <= 490.0)
{
t.g = (wl - 440.0) / (490.0 - 440.0);
t.b = 1.0;
}
else if (wl >= 490.0 && wl <= 510.0)
{
t.g = 1.0;
t.b = -1.0 * (wl - 510.0) / (510.0 - 490.0);
}
else if (wl >= 510.0 && wl <= 580.0)
{
t.r = (wl - 510.0) / (580.0 - 510.0);
t.g = 1.0;
}
else if (wl >= 580.0 && wl <= 645.0)
{
t.r = 1.0;
t.g = -1.0 * (wl - 645.0) / (645.0 - 580.0);
}
else if (wl >= 645.0 && wl <= 780.0)
{
t.r = 1.0;
}
double s = 1.0;
if (wl > 700.0) s = 0.3 + 0.7 * (780.0 - wl) / (780.0 - 700.0);
else if (wl < 420.0) s = 0.3 + 0.7 * (wl - 380.0) / (420.0 - 380.0);
t.r = pow(t.r * s, gamma);
t.g = pow(t.g * s, gamma);
t.b = pow(t.b * s, gamma);
return t;
}
inline rgba rgba_pre(double r, double g, double b, double a)
{
return rgba(r, g, b, a).premultiply();
}
//===================================================================rgba8
template<class Colorspace>
struct rgba8T
{
typedef int8u value_type;
typedef int32u calc_type;
typedef int32 long_type;
enum base_scale_e
{
base_shift = 8,
base_scale = 1 << base_shift,
base_mask = base_scale - 1,
base_MSB = 1 << (base_shift - 1)
};
typedef rgba8T self_type;
value_type r;
value_type g;
value_type b;
value_type a;
static void convert(rgba8T<linear>& dst, const rgba8T<sRGB>& src)
{
dst.r = sRGB_conv<value_type>::rgb_from_sRGB(src.r);
dst.g = sRGB_conv<value_type>::rgb_from_sRGB(src.g);
dst.b = sRGB_conv<value_type>::rgb_from_sRGB(src.b);
dst.a = src.a;
}
static void convert(rgba8T<sRGB>& dst, const rgba8T<linear>& src)
{
dst.r = sRGB_conv<value_type>::rgb_to_sRGB(src.r);
dst.g = sRGB_conv<value_type>::rgb_to_sRGB(src.g);
dst.b = sRGB_conv<value_type>::rgb_to_sRGB(src.b);
dst.a = src.a;
}
static void convert(rgba8T<linear>& dst, const rgba& src)
{
dst.r = value_type(uround(src.r * base_mask));
dst.g = value_type(uround(src.g * base_mask));
dst.b = value_type(uround(src.b * base_mask));
dst.a = value_type(uround(src.a * base_mask));
}
static void convert(rgba8T<sRGB>& dst, const rgba& src)
{
// Use the "float" table.
dst.r = sRGB_conv<float>::rgb_to_sRGB(float(src.r));
dst.g = sRGB_conv<float>::rgb_to_sRGB(float(src.g));
dst.b = sRGB_conv<float>::rgb_to_sRGB(float(src.b));
dst.a = sRGB_conv<float>::alpha_to_sRGB(float(src.a));
}
static void convert(rgba& dst, const rgba8T<linear>& src)
{
dst.r = src.r / 255.0;
dst.g = src.g / 255.0;
dst.b = src.b / 255.0;
dst.a = src.a / 255.0;
}
static void convert(rgba& dst, const rgba8T<sRGB>& src)
{
// Use the "float" table.
dst.r = sRGB_conv<float>::rgb_from_sRGB(src.r);
dst.g = sRGB_conv<float>::rgb_from_sRGB(src.g);
dst.b = sRGB_conv<float>::rgb_from_sRGB(src.b);
dst.a = sRGB_conv<float>::alpha_from_sRGB(src.a);
}
//--------------------------------------------------------------------
rgba8T() {}
//--------------------------------------------------------------------
rgba8T(unsigned r_, unsigned g_, unsigned b_, unsigned a_ = base_mask) :
r(value_type(r_)),
g(value_type(g_)),
b(value_type(b_)),
a(value_type(a_)) {}
//--------------------------------------------------------------------
rgba8T(const rgba& c)
{
convert(*this, c);
}
//--------------------------------------------------------------------
rgba8T(const self_type& c, unsigned a_) :
r(c.r), g(c.g), b(c.b), a(value_type(a_)) {}
//--------------------------------------------------------------------
template<class T>
rgba8T(const rgba8T<T>& c)
{
convert(*this, c);
}
//--------------------------------------------------------------------
operator rgba() const
{
rgba c;
convert(c, *this);
return c;
}
//--------------------------------------------------------------------
static AGG_INLINE double to_double(value_type a)
{
return double(a) / base_mask;
}
//--------------------------------------------------------------------
static AGG_INLINE value_type from_double(double a)
{
return value_type(uround(a * base_mask));
}
//--------------------------------------------------------------------
static AGG_INLINE value_type empty_value()
{
return 0;
}
//--------------------------------------------------------------------
static AGG_INLINE value_type full_value()
{
return base_mask;
}
//--------------------------------------------------------------------
AGG_INLINE bool is_transparent() const
{
return a == 0;
}
//--------------------------------------------------------------------
AGG_INLINE bool is_opaque() const
{
return a == base_mask;
}
//--------------------------------------------------------------------
static AGG_INLINE value_type invert(value_type x)
{
return base_mask - x;
}
//--------------------------------------------------------------------
// Fixed-point multiply, exact over int8u.
static AGG_INLINE value_type multiply(value_type a, value_type b)
{
calc_type t = a * b + base_MSB;
return value_type(((t >> base_shift) + t) >> base_shift);
}
//--------------------------------------------------------------------
static AGG_INLINE value_type demultiply(value_type a, value_type b)
{
if (a * b == 0)
{
return 0;
}
else if (a >= b)
{
return base_mask;
}
else return value_type((a * base_mask + (b >> 1)) / b);
}
//--------------------------------------------------------------------
template<typename T>
static AGG_INLINE T downscale(T a)
{
return a >> base_shift;
}
//--------------------------------------------------------------------
template<typename T>
static AGG_INLINE T downshift(T a, unsigned n)
{
return a >> n;
}
//--------------------------------------------------------------------
// Fixed-point multiply, exact over int8u.
// Specifically for multiplying a color component by a cover.
static AGG_INLINE value_type mult_cover(value_type a, cover_type b)
{
return multiply(a, b);
}
//--------------------------------------------------------------------
static AGG_INLINE cover_type scale_cover(cover_type a, value_type b)
{
return multiply(b, a);
}
//--------------------------------------------------------------------
// Interpolate p to q by a, assuming q is premultiplied by a.
static AGG_INLINE value_type prelerp(value_type p, value_type q, value_type a)
{
return p + q - multiply(p, a);
}
//--------------------------------------------------------------------
// Interpolate p to q by a.
static AGG_INLINE value_type lerp(value_type p, value_type q, value_type a)
{
int t = (q - p) * a + base_MSB - (p > q);
return value_type(p + (((t >> base_shift) + t) >> base_shift));
}
//--------------------------------------------------------------------
self_type& clear()
{
r = g = b = a = 0;
return *this;
}
//--------------------------------------------------------------------
self_type& transparent()
{
a = 0;
return *this;
}
//--------------------------------------------------------------------
self_type& opacity(double a_)
{
if (a_ < 0) a = 0;
else if (a_ > 1) a = base_mask;
else a = (value_type)uround(a_ * double(base_mask));
return *this;
}
//--------------------------------------------------------------------
double opacity() const
{
return double(a) / double(base_mask);
}
//--------------------------------------------------------------------
AGG_INLINE self_type& premultiply()
{
if (a != base_mask)
{
if (a == 0)
{
r = g = b = 0;
}
else
{
r = multiply(r, a);
g = multiply(g, a);
b = multiply(b, a);
}
}
return *this;
}
//--------------------------------------------------------------------
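// Rescale the premultiplied components from the current alpha to a new alpha:
// each component becomes c * a_ / a (clamped to a_) and the alpha is set to a_.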
AGG_INLINE self_type& premultiply(unsigned a_)
{
if (a != base_mask || a_ < base_mask)
{
if (a == 0 || a_ == 0)
{
r = g = b = a = 0;
}
else
{
calc_type r_ = (calc_type(r) * a_) / a;
calc_type g_ = (calc_type(g) * a_) / a;
calc_type b_ = (calc_type(b) * a_) / a;
r = value_type((r_ > a_) ? a_ : r_);
g = value_type((g_ > a_) ? a_ : g_);
b = value_type((b_ > a_) ? a_ : b_);
a = value_type(a_);
}
}
return *this;
}
//--------------------------------------------------------------------
AGG_INLINE self_type& demultiply()
{
if (a < base_mask)
{
if (a == 0)
{
r = g = b = 0;
}
else
{
calc_type r_ = (calc_type(r) * base_mask) / a;
calc_type g_ = (calc_type(g) * base_mask) / a;
calc_type b_ = (calc_type(b) * base_mask) / a;
r = value_type((r_ > calc_type(base_mask)) ? calc_type(base_mask) : r_);
g = value_type((g_ > calc_type(base_mask)) ? calc_type(base_mask) : g_);
b = value_type((b_ > calc_type(base_mask)) ? calc_type(base_mask) : b_);
}
}
return *this;
}
//--------------------------------------------------------------------
AGG_INLINE self_type gradient(const self_type& c, double k) const
{
self_type ret;
calc_type ik = uround(k * base_mask);
ret.r = lerp(r, c.r, ik);
ret.g = lerp(g, c.g, ik);
ret.b = lerp(b, c.b, ik);
ret.a = lerp(a, c.a, ik);
return ret;
}
//--------------------------------------------------------------------
AGG_INLINE void add(const self_type& c, unsigned cover)
{
calc_type cr, cg, cb, ca;
if (cover == cover_mask)
{
if (c.a == base_mask)
{
*this = c;
return;
}
else
{
cr = r + c.r;
cg = g + c.g;
cb = b + c.b;
ca = a + c.a;
}
}
else
{
cr = r + mult_cover(c.r, cover);
cg = g + mult_cover(c.g, cover);
cb = b + mult_cover(c.b, cover);
ca = a + mult_cover(c.a, cover);
}
r = (value_type)((cr > calc_type(base_mask)) ? calc_type(base_mask) : cr);
g = (value_type)((cg > calc_type(base_mask)) ? calc_type(base_mask) : cg);
b = (value_type)((cb > calc_type(base_mask)) ? calc_type(base_mask) : cb);
a = (value_type)((ca > calc_type(base_mask)) ? calc_type(base_mask) : ca);
}
//--------------------------------------------------------------------
template<class GammaLUT>
AGG_INLINE void apply_gamma_dir(const GammaLUT& gamma)
{
r = gamma.dir(r);
g = gamma.dir(g);
b = gamma.dir(b);
}
//--------------------------------------------------------------------
template<class GammaLUT>
AGG_INLINE void apply_gamma_inv(const GammaLUT& gamma)
{
r = gamma.inv(r);
g = gamma.inv(g);
b = gamma.inv(b);
}
//--------------------------------------------------------------------
static self_type no_color() { return self_type(0,0,0,0); }
//--------------------------------------------------------------------
static self_type from_wavelength(double wl, double gamma = 1.0)
{
return self_type(rgba::from_wavelength(wl, gamma));
}
};
typedef rgba8T<linear> rgba8;
typedef rgba8T<sRGB> srgba8;
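// A minimal usage sketch (not part of the original header): rgba8 holds
// linear 8-bit components while srgba8 holds sRGB-encoded ones; assigning
// one to the other converts through the sRGB_conv lookup tables.
//
//     agg::rgba8  red(255, 0, 0);                // opaque red, linear RGB
//     agg::rgba8  blue(0, 0, 255, 128);          // half-transparent blue
//     agg::rgba8  mix = red.gradient(blue, 0.5); // 50% blend of the two
//     agg::srgba8 encoded = mix;                 // re-encode to sRGB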
//-------------------------------------------------------------rgb8_packed
inline rgba8 rgb8_packed(unsigned v)
{
return rgba8((v >> 16) & 0xFF, (v >> 8) & 0xFF, v & 0xFF);
}
//-------------------------------------------------------------bgr8_packed
inline rgba8 bgr8_packed(unsigned v)
{
return rgba8(v & 0xFF, (v >> 8) & 0xFF, (v >> 16) & 0xFF);
}
//------------------------------------------------------------argb8_packed
inline rgba8 argb8_packed(unsigned v)
{
return rgba8((v >> 16) & 0xFF, (v >> 8) & 0xFF, v & 0xFF, v >> 24);
}
//---------------------------------------------------------rgba8_gamma_dir
template<class GammaLUT>
rgba8 rgba8_gamma_dir(rgba8 c, const GammaLUT& gamma)
{
return rgba8(gamma.dir(c.r), gamma.dir(c.g), gamma.dir(c.b), c.a);
}
//---------------------------------------------------------rgba8_gamma_inv
template<class GammaLUT>
rgba8 rgba8_gamma_inv(rgba8 c, const GammaLUT& gamma)
{
return rgba8(gamma.inv(c.r), gamma.inv(c.g), gamma.inv(c.b), c.a);
}
//==================================================================rgba16
struct rgba16
{
typedef int16u value_type;
typedef int32u calc_type;
typedef int64 long_type;
enum base_scale_e
{
base_shift = 16,
base_scale = 1 << base_shift,
base_mask = base_scale - 1,
base_MSB = 1 << (base_shift - 1)
};
typedef rgba16 self_type;
value_type r;
value_type g;
value_type b;
value_type a;
//--------------------------------------------------------------------
rgba16() {}
//--------------------------------------------------------------------
rgba16(unsigned r_, unsigned g_, unsigned b_, unsigned a_=base_mask) :
r(value_type(r_)),
g(value_type(g_)),
b(value_type(b_)),
a(value_type(a_)) {}
//--------------------------------------------------------------------
rgba16(const self_type& c, unsigned a_) :
r(c.r), g(c.g), b(c.b), a(value_type(a_)) {}
//--------------------------------------------------------------------
rgba16(const rgba& c) :
r((value_type)uround(c.r * double(base_mask))),
g((value_type)uround(c.g * double(base_mask))),
b((value_type)uround(c.b * double(base_mask))),
a((value_type)uround(c.a * double(base_mask))) {}
//--------------------------------------------------------------------
rgba16(const rgba8& c) :
r(value_type((value_type(c.r) << 8) | c.r)),
g(value_type((value_type(c.g) << 8) | c.g)),
b(value_type((value_type(c.b) << 8) | c.b)),
a(value_type((value_type(c.a) << 8) | c.a)) {}
//--------------------------------------------------------------------
rgba16(const srgba8& c) :
r(sRGB_conv<value_type>::rgb_from_sRGB(c.r)),
g(sRGB_conv<value_type>::rgb_from_sRGB(c.g)),
b(sRGB_conv<value_type>::rgb_from_sRGB(c.b)),
a(sRGB_conv<value_type>::alpha_from_sRGB(c.a)) {}
//--------------------------------------------------------------------
operator rgba() const
{
return rgba(
r / 65535.0,
g / 65535.0,
b / 65535.0,
a / 65535.0);
}
//--------------------------------------------------------------------
operator rgba8() const
{
return rgba8(r >> 8, g >> 8, b >> 8, a >> 8);
}
//--------------------------------------------------------------------
operator srgba8() const
{
// Return (non-premultiplied) sRGB values.
return srgba8(
sRGB_conv<value_type>::rgb_to_sRGB(r),
sRGB_conv<value_type>::rgb_to_sRGB(g),
sRGB_conv<value_type>::rgb_to_sRGB(b),
sRGB_conv<value_type>::alpha_to_sRGB(a));
}
//--------------------------------------------------------------------
static AGG_INLINE double to_double(value_type a)
{
return double(a) / base_mask;
}
//--------------------------------------------------------------------
static AGG_INLINE value_type from_double(double a)
{
return value_type(uround(a * base_mask));
}
//--------------------------------------------------------------------
static AGG_INLINE value_type empty_value()
{
return 0;
}
//--------------------------------------------------------------------
static AGG_INLINE value_type full_value()
{
return base_mask;
}
//--------------------------------------------------------------------
AGG_INLINE bool is_transparent() const
{
return a == 0;
}
//--------------------------------------------------------------------
AGG_INLINE bool is_opaque() const
{
return a == base_mask;
}
//--------------------------------------------------------------------
static AGG_INLINE value_type invert(value_type x)
{
return base_mask - x;
}
//--------------------------------------------------------------------
// Fixed-point multiply, exact over int16u.
static AGG_INLINE value_type multiply(value_type a, value_type b)
{
calc_type t = a * b + base_MSB;
return value_type(((t >> base_shift) + t) >> base_shift);
}
//--------------------------------------------------------------------
static AGG_INLINE value_type demultiply(value_type a, value_type b)
{
if (a * b == 0)
{
return 0;
}
else if (a >= b)
{
return base_mask;
}
else return value_type((a * base_mask + (b >> 1)) / b);
}
//--------------------------------------------------------------------
template<typename T>
static AGG_INLINE T downscale(T a)
{
return a >> base_shift;
}
//--------------------------------------------------------------------
template<typename T>
static AGG_INLINE T downshift(T a, unsigned n)
{
return a >> n;
}
//--------------------------------------------------------------------
// Fixed-point multiply, almost exact over int16u.
// Specifically for multiplying a color component by a cover.
static AGG_INLINE value_type mult_cover(value_type a, cover_type b)
{
return multiply(a, (b << 8) | b);
}
//--------------------------------------------------------------------
static AGG_INLINE cover_type scale_cover(cover_type a, value_type b)
{
return multiply((a << 8) | a, b) >> 8;
}
//--------------------------------------------------------------------
// Interpolate p to q by a, assuming q is premultiplied by a.
static AGG_INLINE value_type prelerp(value_type p, value_type q, value_type a)
{
return p + q - multiply(p, a);
}
//--------------------------------------------------------------------
// Interpolate p to q by a.
static AGG_INLINE value_type lerp(value_type p, value_type q, value_type a)
{
int t = (q - p) * a + base_MSB - (p > q);
return value_type(p + (((t >> base_shift) + t) >> base_shift));
}
//--------------------------------------------------------------------
self_type& clear()
{
r = g = b = a = 0;
return *this;
}
//--------------------------------------------------------------------
self_type& transparent()
{
a = 0;
return *this;
}
//--------------------------------------------------------------------
AGG_INLINE self_type& opacity(double a_)
{
if (a_ < 0) a = 0;
else if (a_ > 1) a = base_mask;
else a = value_type(uround(a_ * double(base_mask)));
return *this;
}
//--------------------------------------------------------------------
double opacity() const
{
return double(a) / double(base_mask);
}
//--------------------------------------------------------------------
AGG_INLINE self_type& premultiply()
{
if (a != base_mask)
{
if (a == 0)
{
r = g = b = 0;
}
else
{
r = multiply(r, a);
g = multiply(g, a);
b = multiply(b, a);
}
}
return *this;
}
//--------------------------------------------------------------------
AGG_INLINE self_type& premultiply(unsigned a_)
{
if (a < base_mask || a_ < base_mask)
{
if (a == 0 || a_ == 0)
{
r = g = b = a = 0;
}
else
{
calc_type r_ = (calc_type(r) * a_) / a;
calc_type g_ = (calc_type(g) * a_) / a;
calc_type b_ = (calc_type(b) * a_) / a;
r = value_type((r_ > a_) ? a_ : r_);
g = value_type((g_ > a_) ? a_ : g_);
b = value_type((b_ > a_) ? a_ : b_);
a = value_type(a_);
}
}
return *this;
}
//--------------------------------------------------------------------
AGG_INLINE self_type& demultiply()
{
if (a < base_mask)
{
if (a == 0)
{
r = g = b = 0;
}
else
{
calc_type r_ = (calc_type(r) * base_mask) / a;
calc_type g_ = (calc_type(g) * base_mask) / a;
calc_type b_ = (calc_type(b) * base_mask) / a;
r = value_type((r_ > calc_type(base_mask)) ? calc_type(base_mask) : r_);
g = value_type((g_ > calc_type(base_mask)) ? calc_type(base_mask) : g_);
b = value_type((b_ > calc_type(base_mask)) ? calc_type(base_mask) : b_);
}
}
return *this;
}
//--------------------------------------------------------------------
AGG_INLINE self_type gradient(const self_type& c, double k) const
{
self_type ret;
calc_type ik = uround(k * base_mask);
ret.r = lerp(r, c.r, ik);
ret.g = lerp(g, c.g, ik);
ret.b = lerp(b, c.b, ik);
ret.a = lerp(a, c.a, ik);
return ret;
}
//--------------------------------------------------------------------
AGG_INLINE void add(const self_type& c, unsigned cover)
{
calc_type cr, cg, cb, ca;
if (cover == cover_mask)
{
if (c.a == base_mask)
{
*this = c;
return;
}
else
{
cr = r + c.r;
cg = g + c.g;
cb = b + c.b;
ca = a + c.a;
}
}
else
{
cr = r + mult_cover(c.r, cover);
cg = g + mult_cover(c.g, cover);
cb = b + mult_cover(c.b, cover);
ca = a + mult_cover(c.a, cover);
}
r = (value_type)((cr > calc_type(base_mask)) ? calc_type(base_mask) : cr);
g = (value_type)((cg > calc_type(base_mask)) ? calc_type(base_mask) : cg);
b = (value_type)((cb > calc_type(base_mask)) ? calc_type(base_mask) : cb);
a = (value_type)((ca > calc_type(base_mask)) ? calc_type(base_mask) : ca);
}
//--------------------------------------------------------------------
template<class GammaLUT>
AGG_INLINE void apply_gamma_dir(const GammaLUT& gamma)
{
r = gamma.dir(r);
g = gamma.dir(g);
b = gamma.dir(b);
}
//--------------------------------------------------------------------
template<class GammaLUT>
AGG_INLINE void apply_gamma_inv(const GammaLUT& gamma)
{
r = gamma.inv(r);
g = gamma.inv(g);
b = gamma.inv(b);
}
//--------------------------------------------------------------------
static self_type no_color() { return self_type(0,0,0,0); }
//--------------------------------------------------------------------
static self_type from_wavelength(double wl, double gamma = 1.0)
{
return self_type(rgba::from_wavelength(wl, gamma));
}
};
//------------------------------------------------------rgba16_gamma_dir
template<class GammaLUT>
rgba16 rgba16_gamma_dir(rgba16 c, const GammaLUT& gamma)
{
return rgba16(gamma.dir(c.r), gamma.dir(c.g), gamma.dir(c.b), c.a);
}
//------------------------------------------------------rgba16_gamma_inv
template<class GammaLUT>
rgba16 rgba16_gamma_inv(rgba16 c, const GammaLUT& gamma)
{
return rgba16(gamma.inv(c.r), gamma.inv(c.g), gamma.inv(c.b), c.a);
}
//====================================================================rgba32
struct rgba32
{
typedef float value_type;
typedef double calc_type;
typedef double long_type;
typedef rgba32 self_type;
value_type r;
value_type g;
value_type b;
value_type a;
//--------------------------------------------------------------------
rgba32() {}
//--------------------------------------------------------------------
rgba32(value_type r_, value_type g_, value_type b_, value_type a_= 1) :
r(r_), g(g_), b(b_), a(a_) {}
//--------------------------------------------------------------------
rgba32(const self_type& c, float a_) :
r(c.r), g(c.g), b(c.b), a(a_) {}
//--------------------------------------------------------------------
rgba32(const rgba& c) :
r(value_type(c.r)), g(value_type(c.g)), b(value_type(c.b)), a(value_type(c.a)) {}
//--------------------------------------------------------------------
rgba32(const rgba8& c) :
r(value_type(c.r / 255.0)),
g(value_type(c.g / 255.0)),
b(value_type(c.b / 255.0)),
a(value_type(c.a / 255.0)) {}
//--------------------------------------------------------------------
rgba32(const srgba8& c) :
r(sRGB_conv<value_type>::rgb_from_sRGB(c.r)),
g(sRGB_conv<value_type>::rgb_from_sRGB(c.g)),
b(sRGB_conv<value_type>::rgb_from_sRGB(c.b)),
a(sRGB_conv<value_type>::alpha_from_sRGB(c.a)) {}
//--------------------------------------------------------------------
rgba32(const rgba16& c) :
r(value_type(c.r / 65535.0)),
g(value_type(c.g / 65535.0)),
b(value_type(c.b / 65535.0)),
a(value_type(c.a / 65535.0)) {}
//--------------------------------------------------------------------
operator rgba() const
{
return rgba(r, g, b, a);
}
//--------------------------------------------------------------------
operator rgba8() const
{
return rgba8(
uround(r * 255.0),
uround(g * 255.0),
uround(b * 255.0),
uround(a * 255.0));
}
//--------------------------------------------------------------------
operator srgba8() const
{
return srgba8(
sRGB_conv<value_type>::rgb_to_sRGB(r),
sRGB_conv<value_type>::rgb_to_sRGB(g),
sRGB_conv<value_type>::rgb_to_sRGB(b),
sRGB_conv<value_type>::alpha_to_sRGB(a));
}
//--------------------------------------------------------------------
operator rgba16() const
{
return rgba16(
uround(r * 65535.0),
uround(g * 65535.0),
uround(b * 65535.0),
uround(a * 65535.0));
}
//--------------------------------------------------------------------
static AGG_INLINE double to_double(value_type a)
{
return a;
}
//--------------------------------------------------------------------
static AGG_INLINE value_type from_double(double a)
{
return value_type(a);
}
//--------------------------------------------------------------------
static AGG_INLINE value_type empty_value()
{
return 0;
}
//--------------------------------------------------------------------
static AGG_INLINE value_type full_value()
{
return 1;
}
//--------------------------------------------------------------------
AGG_INLINE bool is_transparent() const
{
return a <= 0;
}
//--------------------------------------------------------------------
AGG_INLINE bool is_opaque() const
{
return a >= 1;
}
//--------------------------------------------------------------------
static AGG_INLINE value_type invert(value_type x)
{
return 1 - x;
}
//--------------------------------------------------------------------
static AGG_INLINE value_type multiply(value_type a, value_type b)
{
return value_type(a * b);
}
//--------------------------------------------------------------------
static AGG_INLINE value_type demultiply(value_type a, value_type b)
{
return (b == 0) ? 0 : value_type(a / b);
}
//--------------------------------------------------------------------
template<typename T>
static AGG_INLINE T downscale(T a)
{
return a;
}
//--------------------------------------------------------------------
template<typename T>
static AGG_INLINE T downshift(T a, unsigned n)
{
return n > 0 ? a / (1 << n) : a;
}
//--------------------------------------------------------------------
static AGG_INLINE value_type mult_cover(value_type a, cover_type b)
{
return value_type(a * b / cover_mask);
}
//--------------------------------------------------------------------
static AGG_INLINE cover_type scale_cover(cover_type a, value_type b)
{
return cover_type(uround(a * b));
}
//--------------------------------------------------------------------
// Interpolate p to q by a, assuming q is premultiplied by a.
static AGG_INLINE value_type prelerp(value_type p, value_type q, value_type a)
{
return (1 - a) * p + q; // more accurate than "p + q - p * a"
}
//--------------------------------------------------------------------
// Interpolate p to q by a.
static AGG_INLINE value_type lerp(value_type p, value_type q, value_type a)
{
// The form "p + a * (q - p)" avoids a multiplication, but may produce an
// inaccurate result. For example, "p + (q - p)" may not be exactly equal
// to q. Therefore, stick to the basic expression, which at least produces
// the correct result at either extreme.
return (1 - a) * p + a * q;
}
//--------------------------------------------------------------------
self_type& clear()
{
r = g = b = a = 0;
return *this;
}
//--------------------------------------------------------------------
self_type& transparent()
{
a = 0;
return *this;
}
//--------------------------------------------------------------------
AGG_INLINE self_type& opacity(double a_)
{
if (a_ < 0) a = 0;
else if (a_ > 1) a = 1;
else a = value_type(a_);
return *this;
}
//--------------------------------------------------------------------
double opacity() const
{
return a;
}
//--------------------------------------------------------------------
AGG_INLINE self_type& premultiply()
{
if (a < 1)
{
if (a <= 0)
{
r = g = b = 0;
}
else
{
r *= a;
g *= a;
b *= a;
}
}
return *this;
}
//--------------------------------------------------------------------
AGG_INLINE self_type& demultiply()
{
if (a < 1)
{
if (a <= 0)
{
r = g = b = 0;
}
else
{
r /= a;
g /= a;
b /= a;
}
}
return *this;
}
//--------------------------------------------------------------------
AGG_INLINE self_type gradient(const self_type& c, double k) const
{
self_type ret;
ret.r = value_type(r + (c.r - r) * k);
ret.g = value_type(g + (c.g - g) * k);
ret.b = value_type(b + (c.b - b) * k);
ret.a = value_type(a + (c.a - a) * k);
return ret;
}
//--------------------------------------------------------------------
AGG_INLINE void add(const self_type& c, unsigned cover)
{
if (cover == cover_mask)
{
if (c.is_opaque())
{
*this = c;
return;
}
else
{
r += c.r;
g += c.g;
b += c.b;
a += c.a;
}
}
else
{
r += mult_cover(c.r, cover);
g += mult_cover(c.g, cover);
b += mult_cover(c.b, cover);
a += mult_cover(c.a, cover);
}
if (a > 1) a = 1;
if (r > a) r = a;
if (g > a) g = a;
if (b > a) b = a;
}
//--------------------------------------------------------------------
template<class GammaLUT>
AGG_INLINE void apply_gamma_dir(const GammaLUT& gamma)
{
r = gamma.dir(r);
g = gamma.dir(g);
b = gamma.dir(b);
}
//--------------------------------------------------------------------
template<class GammaLUT>
AGG_INLINE void apply_gamma_inv(const GammaLUT& gamma)
{
r = gamma.inv(r);
g = gamma.inv(g);
b = gamma.inv(b);
}
//--------------------------------------------------------------------
static self_type no_color() { return self_type(0,0,0,0); }
//--------------------------------------------------------------------
static self_type from_wavelength(double wl, double gamma = 1)
{
return self_type(rgba::from_wavelength(wl, gamma));
}
};
}
#endif
D://workCode//uploadProject\awtk\3rd\agg
D://workCode//uploadProject\awtk\3rd\agg\include\agg_config.h
#ifndef AGG_CONFIG_INCLUDED
#define AGG_CONFIG_INCLUDED
// This file can be used to redefine certain data types.
//---------------------------------------
// 1. Default basic types such as:
//
// AGG_INT8
// AGG_INT8U
// AGG_INT16
// AGG_INT16U
// AGG_INT32
// AGG_INT32U
// AGG_INT64
// AGG_INT64U
//
// Just replace this file with new defines if necessary.
// For example, if your compiler doesn't have a 64 bit integer type
// you can still use AGG if you define the following:
//
// #define AGG_INT64 int
// #define AGG_INT64U unsigned
//
// This will cause overflow in 16-bit-per-component image/pattern resampling,
// but it won't cause any crashes and the rest of the library will remain
// fully functional.
//---------------------------------------
// 2. Default rendering_buffer type. Can be:
//
// Provides faster access for massive pixel operations,
// such as blur, image filtering:
// #define AGG_RENDERING_BUFFER row_ptr_cache<int8u>
//
// Provides cheaper creation and destruction (no mem allocs):
// #define AGG_RENDERING_BUFFER row_accessor<int8u>
//
// You can still use both of them simultaneously in your applications.
// This #define only sets the default rendering_buffer type used in
// shorthand typedefs such as pixfmt_rgba32.
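//
// A hypothetical override block (keep it commented out unless your
// toolchain actually needs it):
//
// #define AGG_INT64 long long
// #define AGG_INT64U unsigned long long
// #define AGG_RENDERING_BUFFER row_accessor<int8u>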
#endif
D://workCode//uploadProject\awtk\3rd\agg
D://workCode//uploadProject\awtk\3rd\agg\include\agg_conv_adaptor_vcgen.h
//----------------------------------------------------------------------------
// Anti-Grain Geometry - Version 2.4
// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
//
// Permission to copy, use, modify, sell and distribute this software
// is granted provided this copyright notice appears in all copies.
// This software is provided "as is" without express or implied
// warranty, and with no claim as to its suitability for any purpose.
//
//----------------------------------------------------------------------------
// Contact: mcseem@antigrain.com
// mcseemagg@yahoo.com
// http://www.antigrain.com
//----------------------------------------------------------------------------
#ifndef AGG_CONV_ADAPTOR_VCGEN_INCLUDED
#define AGG_CONV_ADAPTOR_VCGEN_INCLUDED
#include "agg_basics.h"
namespace agg
{
//------------------------------------------------------------null_markers
struct null_markers
{
void remove_all() {}
void add_vertex(double, double, unsigned) {}
void prepare_src() {}
void rewind(unsigned) {}
unsigned vertex(double*, double*) { return path_cmd_stop; }
};
//------------------------------------------------------conv_adaptor_vcgen
template<class VertexSource,
class Generator,
class Markers=null_markers> class conv_adaptor_vcgen
{
enum status
{
initial,
accumulate,
generate
};
public:
explicit conv_adaptor_vcgen(VertexSource& source) :
m_source(&source),
m_status(initial)
{}
void attach(VertexSource& source) { m_source = &source; }
Generator& generator() { return m_generator; }
const Generator& generator() const { return m_generator; }
Markers& markers() { return m_markers; }
const Markers& markers() const { return m_markers; }
void rewind(unsigned path_id)
{
m_source->rewind(path_id);
m_status = initial;
}
unsigned vertex(double* x, double* y);
private:
// Prohibit copying
conv_adaptor_vcgen(const conv_adaptor_vcgen<VertexSource, Generator, Markers>&);
const conv_adaptor_vcgen<VertexSource, Generator, Markers>&
operator = (const conv_adaptor_vcgen<VertexSource, Generator, Markers>&);
VertexSource* m_source;
Generator m_generator;
Markers m_markers;
status m_status;
unsigned m_last_cmd;
double m_start_x;
double m_start_y;
};
//------------------------------------------------------------------------
template<class VertexSource, class Generator, class Markers>
unsigned conv_adaptor_vcgen<VertexSource, Generator, Markers>::vertex(double* x, double* y)
{
unsigned cmd = path_cmd_stop;
bool done = false;
while(!done)
{
switch(m_status)
{
case initial:
m_markers.remove_all();
m_last_cmd = m_source->vertex(&m_start_x, &m_start_y);
m_status = accumulate;
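// No break: fall through to the "accumulate" state.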
case accumulate:
if(is_stop(m_last_cmd)) return path_cmd_stop;
m_generator.remove_all();
m_generator.add_vertex(m_start_x, m_start_y, path_cmd_move_to);
m_markers.add_vertex(m_start_x, m_start_y, path_cmd_move_to);
for(;;)
{
cmd = m_source->vertex(x, y);
if(is_vertex(cmd))
{
m_last_cmd = cmd;
if(is_move_to(cmd))
{
m_start_x = *x;
m_start_y = *y;
break;
}
m_generator.add_vertex(*x, *y, cmd);
m_markers.add_vertex(*x, *y, path_cmd_line_to);
}
else
{
if(is_stop(cmd))
{
m_last_cmd = path_cmd_stop;
break;
}
if(is_end_poly(cmd))
{
m_generator.add_vertex(*x, *y, cmd);
break;
}
}
}
m_generator.rewind(0);
m_status = generate;
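// No break: fall through to the "generate" state.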
case generate:
cmd = m_generator.vertex(x, y);
if(is_stop(cmd))
{
m_status = accumulate;
break;
}
done = true;
break;
}
}
return cmd;
}
}
#endif
D://workCode//uploadProject\awtk\3rd\agg
D://workCode//uploadProject\awtk\3rd\agg\include\agg_conv_stroke.h
//----------------------------------------------------------------------------
// Anti-Grain Geometry - Version 2.4
// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
//
// Permission to copy, use, modify, sell and distribute this software
// is granted provided this copyright notice appears in all copies.
// This software is provided "as is" without express or implied
// warranty, and with no claim as to its suitability for any purpose.
//
//----------------------------------------------------------------------------
// Contact: mcseem@antigrain.com
// mcseemagg@yahoo.com
// http://www.antigrain.com
//----------------------------------------------------------------------------
//
// conv_stroke
//
//----------------------------------------------------------------------------
#ifndef AGG_CONV_STROKE_INCLUDED
#define AGG_CONV_STROKE_INCLUDED
#include "agg_basics.h"
#include "agg_vcgen_stroke.h"
#include "agg_conv_adaptor_vcgen.h"
namespace agg
{
//-------------------------------------------------------------conv_stroke
template<class VertexSource, class Markers=null_markers>
struct conv_stroke :
public conv_adaptor_vcgen<VertexSource, vcgen_stroke, Markers>
{
typedef Markers marker_type;
typedef conv_adaptor_vcgen<VertexSource, vcgen_stroke, Markers> base_type;
conv_stroke(VertexSource& vs) :
conv_adaptor_vcgen<VertexSource, vcgen_stroke, Markers>(vs)
{
}
void line_cap(line_cap_e lc) { base_type::generator().line_cap(lc); }
void line_join(line_join_e lj) { base_type::generator().line_join(lj); }
void inner_join(inner_join_e ij) { base_type::generator().inner_join(ij); }
line_cap_e line_cap() const { return base_type::generator().line_cap(); }
line_join_e line_join() const { return base_type::generator().line_join(); }
inner_join_e inner_join() const { return base_type::generator().inner_join(); }
void width(double w) { base_type::generator().width(w); }
void miter_limit(double ml) { base_type::generator().miter_limit(ml); }
void miter_limit_theta(double t) { base_type::generator().miter_limit_theta(t); }
void inner_miter_limit(double ml) { base_type::generator().inner_miter_limit(ml); }
void approximation_scale(double as) { base_type::generator().approximation_scale(as); }
double width() const { return base_type::generator().width(); }
double miter_limit() const { return base_type::generator().miter_limit(); }
double inner_miter_limit() const { return base_type::generator().inner_miter_limit(); }
double approximation_scale() const { return base_type::generator().approximation_scale(); }
void shorten(double s) { base_type::generator().shorten(s); }
double shorten() const { return base_type::generator().shorten(); }
private:
conv_stroke(const conv_stroke<VertexSource, Markers>&);
const conv_stroke<VertexSource, Markers>&
operator = (const conv_stroke<VertexSource, Markers>&);
};
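// A usage sketch (not part of the original header), assuming a path_storage
// source and a scanline rasterizer "ras" as used elsewhere in AGG:
//
//     agg::path_storage path;
//     path.move_to(10, 10);
//     path.line_to(100, 50);
//     agg::conv_stroke<agg::path_storage> stroke(path);
//     stroke.width(2.0);
//     stroke.line_cap(agg::round_cap);
//     ras.add_path(stroke); // rasterize the stroked outline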
}
#endif
D://workCode//uploadProject\awtk\3rd\agg
D://workCode//uploadProject\awtk\3rd\agg\include\agg_dda_line.h
//----------------------------------------------------------------------------
// Anti-Grain Geometry - Version 2.4
// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
//
// Permission to copy, use, modify, sell and distribute this software
// is granted provided this copyright notice appears in all copies.
// This software is provided "as is" without express or implied
// warranty, and with no claim as to its suitability for any purpose.
//
//----------------------------------------------------------------------------
// Contact: mcseem@antigrain.com
// mcseemagg@yahoo.com
// http://www.antigrain.com
//----------------------------------------------------------------------------
//
// classes dda_line_interpolator, dda2_line_interpolator
//
//----------------------------------------------------------------------------
#ifndef AGG_DDA_LINE_INCLUDED
#define AGG_DDA_LINE_INCLUDED
#include <stdlib.h>
#include "agg_basics.h"
namespace agg
{
//===================================================dda_line_interpolator
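// Simple fixed-point DDA: y moves from y1 toward y2 in "count" equal steps,
// keeping FractionShift fractional bits in the accumulated delta.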
template<int FractionShift, int YShift=0> class dda_line_interpolator
{
public:
//--------------------------------------------------------------------
dda_line_interpolator() {}
//--------------------------------------------------------------------
dda_line_interpolator(int y1, int y2, unsigned count) :
m_y(y1),
m_inc(((y2 - y1) << FractionShift) / int(count)),
m_dy(0)
{
}
//--------------------------------------------------------------------
void operator ++ ()
{
m_dy += m_inc;
}
//--------------------------------------------------------------------
void operator -- ()
{
m_dy -= m_inc;
}
//--------------------------------------------------------------------
void operator += (unsigned n)
{
m_dy += m_inc * n;
}
//--------------------------------------------------------------------
void operator -= (unsigned n)
{
m_dy -= m_inc * n;
}
//--------------------------------------------------------------------
int y() const { return m_y + (m_dy >> (FractionShift-YShift)); }
int dy() const { return m_dy; }
private:
int m_y;
int m_inc;
int m_dy;
};
//=================================================dda2_line_interpolator
class dda2_line_interpolator
{
public:
typedef int save_data_type;
enum save_size_e { save_size = 2 };
//--------------------------------------------------------------------
dda2_line_interpolator() {}
//-------------------------------------------- Forward-adjusted line
dda2_line_interpolator(int y1, int y2, int count) :
m_cnt(count <= 0 ? 1 : count),
m_lft((y2 - y1) / m_cnt),
m_rem((y2 - y1) % m_cnt),
m_mod(m_rem),
m_y(y1)
{
if(m_mod <= 0)
{
m_mod += count;
m_rem += count;
m_lft--;
}
m_mod -= count;
}
//-------------------------------------------- Backward-adjusted line
dda2_line_interpolator(int y1, int y2, int count, int) :
m_cnt(count <= 0 ? 1 : count),
m_lft((y2 - y1) / m_cnt),
m_rem((y2 - y1) % m_cnt),
m_mod(m_rem),
m_y(y1)
{
if(m_mod <= 0)
{
m_mod += count;
m_rem += count;
m_lft--;
}
}
//-------------------------------------------- Backward-adjusted line
dda2_line_interpolator(int y, int count) :
m_cnt(count <= 0 ? 1 : count),
m_lft(y / m_cnt),
m_rem(y % m_cnt),
m_mod(m_rem),
m_y(0)
{
if(m_mod <= 0)
{
m_mod += count;
m_rem += count;
m_lft--;
}
}
//--------------------------------------------------------------------
void save(save_data_type* data) const
{
data[0] = m_mod;
data[1] = m_y;
}
//--------------------------------------------------------------------
void load(const save_data_type* data)
{
m_mod = data[0];
m_y = data[1];
}
//--------------------------------------------------------------------
void operator++()
{
m_mod += m_rem;
m_y += m_lft;
if(m_mod > 0)
{
m_mod -= m_cnt;
m_y++;
}
}
//--------------------------------------------------------------------
void operator--()
{
if(m_mod <= m_rem)
{
m_mod += m_cnt;
m_y--;
}
m_mod -= m_rem;
m_y -= m_lft;
}
//--------------------------------------------------------------------
void adjust_forward()
{
m_mod -= m_cnt;
}
//--------------------------------------------------------------------
void adjust_backward()
{
m_mod += m_cnt;
}
//--------------------------------------------------------------------
int mod() const { return m_mod; }
int rem() const { return m_rem; }
int lft() const { return m_lft; }
//--------------------------------------------------------------------
int y() const { return m_y; }
private:
int m_cnt;
int m_lft;
int m_rem;
int m_mod;
int m_y;
};
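    //----------------------------------------------------------------------
    // Usage sketch (illustrative only, not part of AGG). The forward-adjusted
    // constructor spreads the difference y2 - y1 evenly over 'count' steps
    // using only additions: lft is added every step and the remainder rem is
    // distributed through mod. For (10, 25, 5) the values read before each
    // increment are 10, 13, 16, 19, 22, and a fifth increment lands on 25.
    inline void dda2_line_interpolator_sketch()
    {
        dda2_line_interpolator li(10, 25, 5);
        for(int i = 0; i < 5; i++)
        {
            int y = li.y();   // current interpolated value
            (void)y;          // a scanline renderer would consume it here
            ++li;
        }
    }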
//---------------------------------------------line_bresenham_interpolator
class line_bresenham_interpolator
{
public:
enum subpixel_scale_e
{
subpixel_shift = 8,
subpixel_scale = 1 << subpixel_shift,
subpixel_mask = subpixel_scale - 1
};
//--------------------------------------------------------------------
static int line_lr(int v) { return v >> subpixel_shift; }
//--------------------------------------------------------------------
line_bresenham_interpolator(int x1, int y1, int x2, int y2) :
m_x1_lr(line_lr(x1)),
m_y1_lr(line_lr(y1)),
m_x2_lr(line_lr(x2)),
m_y2_lr(line_lr(y2)),
m_ver(abs(m_x2_lr - m_x1_lr) < abs(m_y2_lr - m_y1_lr)),
m_len(m_ver ? abs(m_y2_lr - m_y1_lr) :
abs(m_x2_lr - m_x1_lr)),
m_inc(m_ver ? ((y2 > y1) ? 1 : -1) : ((x2 > x1) ? 1 : -1)),
m_interpolator(m_ver ? x1 : y1,
m_ver ? x2 : y2,
m_len)
{
}
//--------------------------------------------------------------------
bool is_ver() const { return m_ver; }
unsigned len() const { return m_len; }
int inc() const { return m_inc; }
//--------------------------------------------------------------------
void hstep()
{
++m_interpolator;
m_x1_lr += m_inc;
}
//--------------------------------------------------------------------
void vstep()
{
++m_interpolator;
m_y1_lr += m_inc;
}
//--------------------------------------------------------------------
int x1() const { return m_x1_lr; }
int y1() const { return m_y1_lr; }
int x2() const { return line_lr(m_interpolator.y()); }
int y2() const { return line_lr(m_interpolator.y()); }
int x2_hr() const { return m_interpolator.y(); }
int y2_hr() const { return m_interpolator.y(); }
private:
int m_x1_lr;
int m_y1_lr;
int m_x2_lr;
int m_y2_lr;
bool m_ver;
unsigned m_len;
int m_inc;
dda2_line_interpolator m_interpolator;
};
}
#endif
| 0 |
D://workCode//uploadProject\awtk\3rd\agg | D://workCode//uploadProject\awtk\3rd\agg\include\agg_gamma_functions.h | //----------------------------------------------------------------------------
// Anti-Grain Geometry - Version 2.4
// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
//
// Permission to copy, use, modify, sell and distribute this software
// is granted provided this copyright notice appears in all copies.
// This software is provided "as is" without express or implied
// warranty, and with no claim as to its suitability for any purpose.
//
//----------------------------------------------------------------------------
// Contact: mcseem@antigrain.com
// mcseemagg@yahoo.com
// http://www.antigrain.com
//----------------------------------------------------------------------------
#ifndef AGG_GAMMA_FUNCTIONS_INCLUDED
#define AGG_GAMMA_FUNCTIONS_INCLUDED
#include <math.h>
#include "agg_basics.h"
namespace agg
{
//===============================================================gamma_none
struct gamma_none
{
double operator()(double x) const { return x; }
};
//==============================================================gamma_power
class gamma_power
{
public:
gamma_power() : m_gamma(1.0) {}
gamma_power(double g) : m_gamma(g) {}
void gamma(double g) { m_gamma = g; }
double gamma() const { return m_gamma; }
double operator() (double x) const
{
return pow(x, m_gamma);
}
private:
double m_gamma;
};
//==========================================================gamma_threshold
class gamma_threshold
{
public:
gamma_threshold() : m_threshold(0.5) {}
gamma_threshold(double t) : m_threshold(t) {}
void threshold(double t) { m_threshold = t; }
double threshold() const { return m_threshold; }
double operator() (double x) const
{
return (x < m_threshold) ? 0.0 : 1.0;
}
private:
double m_threshold;
};
//============================================================gamma_linear
class gamma_linear
{
public:
gamma_linear() : m_start(0.0), m_end(1.0) {}
gamma_linear(double s, double e) : m_start(s), m_end(e) {}
void set(double s, double e) { m_start = s; m_end = e; }
void start(double s) { m_start = s; }
void end(double e) { m_end = e; }
double start() const { return m_start; }
double end() const { return m_end; }
double operator() (double x) const
{
if(x < m_start) return 0.0;
if(x > m_end) return 1.0;
return (x - m_start) / (m_end - m_start);
}
private:
double m_start;
double m_end;
};
//==========================================================gamma_multiply
class gamma_multiply
{
public:
gamma_multiply() : m_mul(1.0) {}
gamma_multiply(double v) : m_mul(v) {}
void value(double v) { m_mul = v; }
double value() const { return m_mul; }
double operator() (double x) const
{
double y = x * m_mul;
if(y > 1.0) y = 1.0;
return y;
}
private:
double m_mul;
};
inline double sRGB_to_linear(double x)
{
return (x <= 0.04045) ? (x / 12.92) : pow((x + 0.055) / (1.055), 2.4);
}
inline double linear_to_sRGB(double x)
{
return (x <= 0.0031308) ? (x * 12.92) : (1.055 * pow(x, 1 / 2.4) - 0.055);
}
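    //------------------------------------------------------------------------
    // Worked sketch (illustrative only, not part of AGG). The two helpers
    // above implement the standard piecewise sRGB transfer curve; converting
    // to linear light and back reproduces the input up to rounding error.
    inline double sRGB_round_trip_sketch(double s)
    {
        double lin = sRGB_to_linear(s);   // for s = 0.5 this is about 0.214
        return linear_to_sRGB(lin);       // back to about 0.5
    }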
}
#endif
| 0 |
D://workCode//uploadProject\awtk\3rd\agg | D://workCode//uploadProject\awtk\3rd\agg\include\agg_gamma_lut.h | //----------------------------------------------------------------------------
// Anti-Grain Geometry - Version 2.4
// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
//
// Permission to copy, use, modify, sell and distribute this software
// is granted provided this copyright notice appears in all copies.
// This software is provided "as is" without express or implied
// warranty, and with no claim as to its suitability for any purpose.
//
//----------------------------------------------------------------------------
// Contact: mcseem@antigrain.com
// mcseemagg@yahoo.com
// http://www.antigrain.com
//----------------------------------------------------------------------------
#ifndef AGG_GAMMA_LUT_INCLUDED
#define AGG_GAMMA_LUT_INCLUDED
#include <math.h>
#include "agg_basics.h"
#include "agg_gamma_functions.h"
namespace agg
{
template<class LoResT=int8u,
class HiResT=int8u,
unsigned GammaShift=8,
unsigned HiResShift=8> class gamma_lut
{
public:
typedef gamma_lut<LoResT, HiResT, GammaShift, HiResShift> self_type;
enum gamma_scale_e
{
gamma_shift = GammaShift,
gamma_size = 1 << gamma_shift,
gamma_mask = gamma_size - 1
};
enum hi_res_scale_e
{
hi_res_shift = HiResShift,
hi_res_size = 1 << hi_res_shift,
hi_res_mask = hi_res_size - 1
};
~gamma_lut()
{
pod_allocator<LoResT>::deallocate(m_inv_gamma, hi_res_size);
pod_allocator<HiResT>::deallocate(m_dir_gamma, gamma_size);
}
gamma_lut() :
m_gamma(1.0),
m_dir_gamma(pod_allocator<HiResT>::allocate(gamma_size)),
m_inv_gamma(pod_allocator<LoResT>::allocate(hi_res_size))
{
unsigned i;
for(i = 0; i < gamma_size; i++)
{
m_dir_gamma[i] = HiResT(i << (hi_res_shift - gamma_shift));
}
for(i = 0; i < hi_res_size; i++)
{
m_inv_gamma[i] = LoResT(i >> (hi_res_shift - gamma_shift));
}
}
gamma_lut(double g) :
m_gamma(1.0),
m_dir_gamma(pod_allocator<HiResT>::allocate(gamma_size)),
m_inv_gamma(pod_allocator<LoResT>::allocate(hi_res_size))
{
gamma(g);
}
void gamma(double g)
{
m_gamma = g;
unsigned i;
for(i = 0; i < gamma_size; i++)
{
m_dir_gamma[i] = (HiResT)
uround(pow(i / double(gamma_mask), m_gamma) * double(hi_res_mask));
}
double inv_g = 1.0 / g;
for(i = 0; i < hi_res_size; i++)
{
m_inv_gamma[i] = (LoResT)
uround(pow(i / double(hi_res_mask), inv_g) * double(gamma_mask));
}
}
double gamma() const
{
return m_gamma;
}
HiResT dir(LoResT v) const
{
return m_dir_gamma[unsigned(v)];
}
LoResT inv(HiResT v) const
{
return m_inv_gamma[unsigned(v)];
}
private:
gamma_lut(const self_type&);
const self_type& operator = (const self_type&);
double m_gamma;
HiResT* m_dir_gamma;
LoResT* m_inv_gamma;
};
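    //------------------------------------------------------------------------
    // Usage sketch (illustrative only, not part of AGG). With the default
    // 8-bit template parameters the LUT maps a component through pow(x, g)
    // via dir() and through pow(x, 1/g) via inv(), so inv(dir(v)) gives back
    // approximately v (exact only up to the 8-bit rounding).
    inline void gamma_lut_sketch()
    {
        gamma_lut<> lut(2.2);                 // direct table uses gamma 2.2
        int8u v = 100;
        int8u corrected = lut.dir(v);         // gamma-corrected value
        int8u restored  = lut.inv(corrected); // approximately 100 again
        (void)restored;
    }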
//
// sRGB support classes
//
// sRGB_lut - implements sRGB conversion for the various types.
// Base template is undefined, specializations are provided below.
template<class LinearType>
class sRGB_lut;
template<>
class sRGB_lut<float>
{
public:
sRGB_lut()
{
// Generate lookup tables.
for (int i = 0; i <= 255; ++i)
{
m_dir_table[i] = float(sRGB_to_linear(i / 255.0));
}
for (int i = 0; i <= 65535; ++i)
{
m_inv_table[i] = uround(255.0 * linear_to_sRGB(i / 65535.0));
}
}
float dir(int8u v) const
{
return m_dir_table[v];
}
int8u inv(float v) const
{
return m_inv_table[int16u(0.5 + v * 65535)];
}
private:
float m_dir_table[256];
int8u m_inv_table[65536];
};
template<>
class sRGB_lut<int16u>
{
public:
sRGB_lut()
{
// Generate lookup tables.
for (int i = 0; i <= 255; ++i)
{
m_dir_table[i] = uround(65535.0 * sRGB_to_linear(i / 255.0));
}
for (int i = 0; i <= 65535; ++i)
{
m_inv_table[i] = uround(255.0 * linear_to_sRGB(i / 65535.0));
}
}
int16u dir(int8u v) const
{
return m_dir_table[v];
}
int8u inv(int16u v) const
{
return m_inv_table[v];
}
private:
int16u m_dir_table[256];
int8u m_inv_table[65536];
};
template<>
class sRGB_lut<int8u>
{
public:
sRGB_lut()
{
// Generate lookup tables.
for (int i = 0; i <= 255; ++i)
{
m_dir_table[i] = uround(255.0 * sRGB_to_linear(i / 255.0));
m_inv_table[i] = uround(255.0 * linear_to_sRGB(i / 255.0));
}
}
int8u dir(int8u v) const
{
return m_dir_table[v];
}
int8u inv(int8u v) const
{
return m_inv_table[v];
}
private:
int8u m_dir_table[256];
int8u m_inv_table[256];
};
// Common base class for sRGB_conv objects. Defines an internal
// sRGB_lut object so that users don't have to.
template<class T>
class sRGB_conv_base
{
public:
static T rgb_from_sRGB(int8u x)
{
return lut.dir(x);
}
static int8u rgb_to_sRGB(T x)
{
return lut.inv(x);
}
private:
static sRGB_lut<T> lut;
};
    // Definition of sRGB_conv_base::lut. Because this is a template,

// we don't need to place the definition in a cpp file. Hurrah.
template<class T>
sRGB_lut<T> sRGB_conv_base<T>::lut;
// Wrapper for sRGB-linear conversion.
// Base template is undefined, specializations are provided below.
template<class T>
class sRGB_conv;
template<>
class sRGB_conv<float> : public sRGB_conv_base<float>
{
public:
static float alpha_from_sRGB(int8u x)
{
static const double y = 1 / 255.0;
return float(x * y);
}
static int8u alpha_to_sRGB(float x)
{
return int8u(0.5 + x * 255);
}
};
template<>
class sRGB_conv<int16u> : public sRGB_conv_base<int16u>
{
public:
static int16u alpha_from_sRGB(int8u x)
{
return (x << 8) | x;
}
static int8u alpha_to_sRGB(int16u x)
{
return x >> 8;
}
};
template<>
class sRGB_conv<int8u> : public sRGB_conv_base<int8u>
{
public:
static int8u alpha_from_sRGB(int8u x)
{
return x;
}
static int8u alpha_to_sRGB(int8u x)
{
return x;
}
};
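    //------------------------------------------------------------------------
    // Usage sketch (illustrative only, not part of AGG). sRGB_conv<T> bundles
    // the shared lookup table with per-type alpha handling: color channels go
    // through the table, alpha is rescaled linearly without a gamma curve.
    inline float sRGB_conv_sketch(int8u srgb_component)
    {
        float linear = sRGB_conv<float>::rgb_from_sRGB(srgb_component);
        int8u back   = sRGB_conv<float>::rgb_to_sRGB(linear);  // round trip
        (void)back;
        return linear;
    }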
}
#endif
| 0 |
D://workCode//uploadProject\awtk\3rd\agg | D://workCode//uploadProject\awtk\3rd\agg\include\agg_image_accessors.h | //----------------------------------------------------------------------------
// Anti-Grain Geometry - Version 2.4
// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
//
// Permission to copy, use, modify, sell and distribute this software
// is granted provided this copyright notice appears in all copies.
// This software is provided "as is" without express or implied
// warranty, and with no claim as to its suitability for any purpose.
//
//----------------------------------------------------------------------------
// Contact: mcseem@antigrain.com
// mcseemagg@yahoo.com
// http://www.antigrain.com
//----------------------------------------------------------------------------
#ifndef AGG_IMAGE_ACCESSORS_INCLUDED
#define AGG_IMAGE_ACCESSORS_INCLUDED
#include "agg_basics.h"
namespace agg
{
//-----------------------------------------------------image_accessor_clip
template<class PixFmt> class image_accessor_clip
{
public:
typedef PixFmt pixfmt_type;
typedef typename pixfmt_type::color_type color_type;
typedef typename pixfmt_type::order_type order_type;
typedef typename pixfmt_type::value_type value_type;
enum pix_width_e { pix_width = pixfmt_type::pix_width };
image_accessor_clip() {}
explicit image_accessor_clip(pixfmt_type& pixf,
const color_type& bk) :
m_pixf(&pixf)
{
pixfmt_type::make_pix(m_bk_buf, bk);
}
void attach(pixfmt_type& pixf)
{
m_pixf = &pixf;
}
void background_color(const color_type& bk)
{
pixfmt_type::make_pix(m_bk_buf, bk);
}
private:
AGG_INLINE const int8u* pixel() const
{
if(m_y >= 0 && m_y < (int)m_pixf->height() &&
m_x >= 0 && m_x < (int)m_pixf->width())
{
return m_pixf->pix_ptr(m_x, m_y);
}
return m_bk_buf;
}
public:
AGG_INLINE const int8u* span(int x, int y, unsigned len)
{
m_x = m_x0 = x;
m_y = y;
if(y >= 0 && y < (int)m_pixf->height() &&
x >= 0 && x+(int)len <= (int)m_pixf->width())
{
return m_pix_ptr = m_pixf->pix_ptr(x, y);
}
m_pix_ptr = 0;
return pixel();
}
AGG_INLINE const int8u* next_x()
{
if(m_pix_ptr) return m_pix_ptr += pix_width;
++m_x;
return pixel();
}
AGG_INLINE const int8u* next_y()
{
++m_y;
m_x = m_x0;
if(m_pix_ptr &&
m_y >= 0 && m_y < (int)m_pixf->height())
{
return m_pix_ptr = m_pixf->pix_ptr(m_x, m_y);
}
m_pix_ptr = 0;
return pixel();
}
private:
const pixfmt_type* m_pixf;
int8u m_bk_buf[pix_width];
int m_x, m_x0, m_y;
const int8u* m_pix_ptr;
};
//--------------------------------------------------image_accessor_no_clip
template<class PixFmt> class image_accessor_no_clip
{
public:
typedef PixFmt pixfmt_type;
typedef typename pixfmt_type::color_type color_type;
typedef typename pixfmt_type::order_type order_type;
typedef typename pixfmt_type::value_type value_type;
enum pix_width_e { pix_width = pixfmt_type::pix_width };
image_accessor_no_clip() {}
explicit image_accessor_no_clip(pixfmt_type& pixf) :
m_pixf(&pixf)
{}
void attach(pixfmt_type& pixf)
{
m_pixf = &pixf;
}
AGG_INLINE const int8u* span(int x, int y, unsigned)
{
m_x = x;
m_y = y;
return m_pix_ptr = m_pixf->pix_ptr(x, y);
}
AGG_INLINE const int8u* next_x()
{
return m_pix_ptr += pix_width;
}
AGG_INLINE const int8u* next_y()
{
++m_y;
return m_pix_ptr = m_pixf->pix_ptr(m_x, m_y);
}
private:
const pixfmt_type* m_pixf;
int m_x, m_y;
const int8u* m_pix_ptr;
};
//----------------------------------------------------image_accessor_clone
template<class PixFmt> class image_accessor_clone
{
public:
typedef PixFmt pixfmt_type;
typedef typename pixfmt_type::color_type color_type;
typedef typename pixfmt_type::order_type order_type;
typedef typename pixfmt_type::value_type value_type;
enum pix_width_e { pix_width = pixfmt_type::pix_width };
image_accessor_clone() {}
explicit image_accessor_clone(pixfmt_type& pixf) :
m_pixf(&pixf)
{}
void attach(pixfmt_type& pixf)
{
m_pixf = &pixf;
}
private:
AGG_INLINE const int8u* pixel() const
{
int x = m_x;
int y = m_y;
if(x < 0) x = 0;
if(y < 0) y = 0;
if(x >= (int)m_pixf->width()) x = m_pixf->width() - 1;
if(y >= (int)m_pixf->height()) y = m_pixf->height() - 1;
return m_pixf->pix_ptr(x, y);
}
public:
AGG_INLINE const int8u* span(int x, int y, unsigned len)
{
m_x = m_x0 = x;
m_y = y;
if(y >= 0 && y < (int)m_pixf->height() &&
               x >= 0 && x+(int)len <= (int)m_pixf->width())
{
return m_pix_ptr = m_pixf->pix_ptr(x, y);
}
m_pix_ptr = 0;
return pixel();
}
AGG_INLINE const int8u* next_x()
{
if(m_pix_ptr) return m_pix_ptr += pix_width;
++m_x;
return pixel();
}
AGG_INLINE const int8u* next_y()
{
++m_y;
m_x = m_x0;
if(m_pix_ptr &&
m_y >= 0 && m_y < (int)m_pixf->height())
{
return m_pix_ptr = m_pixf->pix_ptr(m_x, m_y);
}
m_pix_ptr = 0;
return pixel();
}
private:
const pixfmt_type* m_pixf;
int m_x, m_x0, m_y;
const int8u* m_pix_ptr;
};
//-----------------------------------------------------image_accessor_wrap
template<class PixFmt, class WrapX, class WrapY> class image_accessor_wrap
{
public:
typedef PixFmt pixfmt_type;
typedef typename pixfmt_type::color_type color_type;
typedef typename pixfmt_type::order_type order_type;
typedef typename pixfmt_type::value_type value_type;
enum pix_width_e { pix_width = pixfmt_type::pix_width };
image_accessor_wrap() {}
explicit image_accessor_wrap(pixfmt_type& pixf) :
m_pixf(&pixf),
m_wrap_x(pixf.width()),
m_wrap_y(pixf.height())
{}
void attach(pixfmt_type& pixf)
{
m_pixf = &pixf;
}
AGG_INLINE const int8u* span(int x, int y, unsigned)
{
m_x = x;
m_row_ptr = m_pixf->pix_ptr(0, m_wrap_y(y));
return m_row_ptr + m_wrap_x(x) * pix_width;
}
AGG_INLINE const int8u* next_x()
{
int x = ++m_wrap_x;
return m_row_ptr + x * pix_width;
}
AGG_INLINE const int8u* next_y()
{
m_row_ptr = m_pixf->pix_ptr(0, ++m_wrap_y);
return m_row_ptr + m_wrap_x(m_x) * pix_width;
}
private:
const pixfmt_type* m_pixf;
const int8u* m_row_ptr;
int m_x;
WrapX m_wrap_x;
WrapY m_wrap_y;
};
//--------------------------------------------------------wrap_mode_repeat
class wrap_mode_repeat
{
public:
wrap_mode_repeat() {}
wrap_mode_repeat(unsigned size) :
m_size(size),
m_add(size * (0x3FFFFFFF / size)),
m_value(0)
{}
AGG_INLINE unsigned operator() (int v)
{
return m_value = (unsigned(v) + m_add) % m_size;
}
AGG_INLINE unsigned operator++ ()
{
++m_value;
if(m_value >= m_size) m_value = 0;
return m_value;
}
private:
unsigned m_size;
unsigned m_add;
unsigned m_value;
};
//---------------------------------------------------wrap_mode_repeat_pow2
class wrap_mode_repeat_pow2
{
public:
wrap_mode_repeat_pow2() {}
wrap_mode_repeat_pow2(unsigned size) : m_value(0)
{
m_mask = 1;
while(m_mask < size) m_mask = (m_mask << 1) | 1;
m_mask >>= 1;
}
AGG_INLINE unsigned operator() (int v)
{
return m_value = unsigned(v) & m_mask;
}
AGG_INLINE unsigned operator++ ()
{
++m_value;
if(m_value > m_mask) m_value = 0;
return m_value;
}
private:
unsigned m_mask;
unsigned m_value;
};
//----------------------------------------------wrap_mode_repeat_auto_pow2
class wrap_mode_repeat_auto_pow2
{
public:
wrap_mode_repeat_auto_pow2() {}
wrap_mode_repeat_auto_pow2(unsigned size) :
m_size(size),
m_add(size * (0x3FFFFFFF / size)),
m_mask((m_size & (m_size-1)) ? 0 : m_size-1),
m_value(0)
{}
AGG_INLINE unsigned operator() (int v)
{
if(m_mask) return m_value = unsigned(v) & m_mask;
return m_value = (unsigned(v) + m_add) % m_size;
}
AGG_INLINE unsigned operator++ ()
{
++m_value;
if(m_value >= m_size) m_value = 0;
return m_value;
}
private:
unsigned m_size;
unsigned m_add;
unsigned m_mask;
unsigned m_value;
};
//-------------------------------------------------------wrap_mode_reflect
class wrap_mode_reflect
{
public:
wrap_mode_reflect() {}
wrap_mode_reflect(unsigned size) :
m_size(size),
m_size2(size * 2),
m_add(m_size2 * (0x3FFFFFFF / m_size2)),
m_value(0)
{}
AGG_INLINE unsigned operator() (int v)
{
m_value = (unsigned(v) + m_add) % m_size2;
if(m_value >= m_size) return m_size2 - m_value - 1;
return m_value;
}
AGG_INLINE unsigned operator++ ()
{
++m_value;
if(m_value >= m_size2) m_value = 0;
if(m_value >= m_size) return m_size2 - m_value - 1;
return m_value;
}
private:
unsigned m_size;
unsigned m_size2;
unsigned m_add;
unsigned m_value;
};
//--------------------------------------------------wrap_mode_reflect_pow2
class wrap_mode_reflect_pow2
{
public:
wrap_mode_reflect_pow2() {}
wrap_mode_reflect_pow2(unsigned size) : m_value(0)
{
m_mask = 1;
m_size = 1;
while(m_mask < size)
{
m_mask = (m_mask << 1) | 1;
m_size <<= 1;
}
}
AGG_INLINE unsigned operator() (int v)
{
m_value = unsigned(v) & m_mask;
if(m_value >= m_size) return m_mask - m_value;
return m_value;
}
AGG_INLINE unsigned operator++ ()
{
++m_value;
m_value &= m_mask;
if(m_value >= m_size) return m_mask - m_value;
return m_value;
}
private:
unsigned m_size;
unsigned m_mask;
unsigned m_value;
};
//---------------------------------------------wrap_mode_reflect_auto_pow2
class wrap_mode_reflect_auto_pow2
{
public:
wrap_mode_reflect_auto_pow2() {}
wrap_mode_reflect_auto_pow2(unsigned size) :
m_size(size),
m_size2(size * 2),
m_add(m_size2 * (0x3FFFFFFF / m_size2)),
m_mask((m_size2 & (m_size2-1)) ? 0 : m_size2-1),
m_value(0)
{}
AGG_INLINE unsigned operator() (int v)
{
m_value = m_mask ? unsigned(v) & m_mask :
(unsigned(v) + m_add) % m_size2;
if(m_value >= m_size) return m_size2 - m_value - 1;
return m_value;
}
AGG_INLINE unsigned operator++ ()
{
++m_value;
if(m_value >= m_size2) m_value = 0;
if(m_value >= m_size) return m_size2 - m_value - 1;
return m_value;
}
private:
unsigned m_size;
unsigned m_size2;
unsigned m_add;
unsigned m_mask;
unsigned m_value;
};
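    //------------------------------------------------------------------------
    // Worked sketch (illustrative only, not part of AGG). The wrap-mode
    // functors map an arbitrary (possibly negative) coordinate into
    // [0, size): "repeat" tiles the image, "reflect" mirrors it on every
    // other period.
    inline void wrap_mode_sketch()
    {
        wrap_mode_repeat  repeat(10);
        wrap_mode_reflect reflect(10);
        unsigned a = repeat(13);    // 13 mod 10          -> 3
        unsigned b = repeat(-2);    // -2 mod 10          -> 8
        unsigned c = reflect(13);   // mirrored back      -> 6
        unsigned d = reflect(-2);   // mirrored around 0  -> 1
        (void)a; (void)b; (void)c; (void)d;
    }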
}
#endif
| 0 |
D://workCode//uploadProject\awtk\3rd\agg | D://workCode//uploadProject\awtk\3rd\agg\include\agg_image_filters.h | //----------------------------------------------------------------------------
// Anti-Grain Geometry - Version 2.4
// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
//
// Permission to copy, use, modify, sell and distribute this software
// is granted provided this copyright notice appears in all copies.
// This software is provided "as is" without express or implied
// warranty, and with no claim as to its suitability for any purpose.
//
//----------------------------------------------------------------------------
// Contact: mcseem@antigrain.com
// mcseemagg@yahoo.com
// http://www.antigrain.com
//----------------------------------------------------------------------------
//
// Image transformation filters,
// Filtering classes (image_filter_lut, image_filter),
// Basic filter shape classes
//----------------------------------------------------------------------------
#ifndef AGG_IMAGE_FILTERS_INCLUDED
#define AGG_IMAGE_FILTERS_INCLUDED
#include "agg_array.h"
#include "agg_math.h"
namespace agg
{
// See Implementation agg_image_filters.cpp
enum image_filter_scale_e
{
image_filter_shift = 14, //----image_filter_shift
image_filter_scale = 1 << image_filter_shift, //----image_filter_scale
image_filter_mask = image_filter_scale - 1 //----image_filter_mask
};
enum image_subpixel_scale_e
{
image_subpixel_shift = 8, //----image_subpixel_shift
image_subpixel_scale = 1 << image_subpixel_shift, //----image_subpixel_scale
image_subpixel_mask = image_subpixel_scale - 1 //----image_subpixel_mask
};
//-----------------------------------------------------image_filter_lut
class image_filter_lut
{
public:
template<class FilterF> void calculate(const FilterF& filter,
bool normalization=true)
{
double r = filter.radius();
realloc_lut(r);
unsigned i;
unsigned pivot = diameter() << (image_subpixel_shift - 1);
for(i = 0; i < pivot; i++)
{
double x = double(i) / double(image_subpixel_scale);
double y = filter.calc_weight(x);
m_weight_array[pivot + i] =
m_weight_array[pivot - i] = (int16)iround(y * image_filter_scale);
}
unsigned end = (diameter() << image_subpixel_shift) - 1;
m_weight_array[0] = m_weight_array[end];
if(normalization)
{
normalize();
}
}
image_filter_lut() : m_radius(0), m_diameter(0), m_start(0) {}
template<class FilterF> image_filter_lut(const FilterF& filter,
bool normalization=true)
{
calculate(filter, normalization);
}
double radius() const { return m_radius; }
unsigned diameter() const { return m_diameter; }
int start() const { return m_start; }
const int16* weight_array() const { return &m_weight_array[0]; }
void normalize();
private:
void realloc_lut(double radius);
image_filter_lut(const image_filter_lut&);
const image_filter_lut& operator = (const image_filter_lut&);
double m_radius;
unsigned m_diameter;
int m_start;
pod_array<int16> m_weight_array;
};
//--------------------------------------------------------image_filter
template<class FilterF> class image_filter : public image_filter_lut
{
public:
image_filter()
{
calculate(m_filter_function);
}
private:
FilterF m_filter_function;
};
//-----------------------------------------------image_filter_bilinear
struct image_filter_bilinear
{
static double radius() { return 1.0; }
static double calc_weight(double x)
{
return 1.0 - x;
}
};
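    //--------------------------------------------------------------------
    // Usage sketch (illustrative only, not part of AGG). image_filter<F>
    // builds the subpixel weight table for the filter shape F at
    // construction; span interpolators then read diameter() and
    // weight_array() to convolve source pixels. The weights are fixed point
    // with image_filter_shift fractional bits.
    inline void image_filter_lut_sketch()
    {
        image_filter<image_filter_bilinear> bilinear; // radius 1 -> diameter 2
        unsigned d = bilinear.diameter();             // 2
        const int16* w = bilinear.weight_array();     // 2 << 8 = 512 entries
        (void)d; (void)w;
    }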
//-----------------------------------------------image_filter_hanning
struct image_filter_hanning
{
static double radius() { return 1.0; }
static double calc_weight(double x)
{
return 0.5 + 0.5 * cos(pi * x);
}
};
//-----------------------------------------------image_filter_hamming
struct image_filter_hamming
{
static double radius() { return 1.0; }
static double calc_weight(double x)
{
return 0.54 + 0.46 * cos(pi * x);
}
};
//-----------------------------------------------image_filter_hermite
struct image_filter_hermite
{
static double radius() { return 1.0; }
static double calc_weight(double x)
{
return (2.0 * x - 3.0) * x * x + 1.0;
}
};
//------------------------------------------------image_filter_quadric
struct image_filter_quadric
{
static double radius() { return 1.5; }
static double calc_weight(double x)
{
double t;
if(x < 0.5) return 0.75 - x * x;
if(x < 1.5) {t = x - 1.5; return 0.5 * t * t;}
return 0.0;
}
};
//------------------------------------------------image_filter_bicubic
class image_filter_bicubic
{
static double pow3(double x)
{
return (x <= 0.0) ? 0.0 : x * x * x;
}
public:
static double radius() { return 2.0; }
static double calc_weight(double x)
{
return
(1.0/6.0) *
(pow3(x + 2) - 4 * pow3(x + 1) + 6 * pow3(x) - 4 * pow3(x - 1));
}
};
//-------------------------------------------------image_filter_kaiser
class image_filter_kaiser
{
double a;
double i0a;
double epsilon;
public:
image_filter_kaiser(double b = 6.33) :
a(b), epsilon(1e-12)
{
i0a = 1.0 / bessel_i0(b);
}
static double radius() { return 1.0; }
double calc_weight(double x) const
{
return bessel_i0(a * sqrt(1. - x * x)) * i0a;
}
private:
double bessel_i0(double x) const
{
int i;
double sum, y, t;
sum = 1.;
y = x * x / 4.;
t = y;
for(i = 2; t > epsilon; i++)
{
sum += t;
t *= (double)y / (i * i);
}
return sum;
}
};
//----------------------------------------------image_filter_catrom
struct image_filter_catrom
{
static double radius() { return 2.0; }
static double calc_weight(double x)
{
if(x < 1.0) return 0.5 * (2.0 + x * x * (-5.0 + x * 3.0));
if(x < 2.0) return 0.5 * (4.0 + x * (-8.0 + x * (5.0 - x)));
return 0.;
}
};
//---------------------------------------------image_filter_mitchell
class image_filter_mitchell
{
double p0, p2, p3;
double q0, q1, q2, q3;
public:
image_filter_mitchell(double b = 1.0/3.0, double c = 1.0/3.0) :
p0((6.0 - 2.0 * b) / 6.0),
p2((-18.0 + 12.0 * b + 6.0 * c) / 6.0),
p3((12.0 - 9.0 * b - 6.0 * c) / 6.0),
q0((8.0 * b + 24.0 * c) / 6.0),
q1((-12.0 * b - 48.0 * c) / 6.0),
q2((6.0 * b + 30.0 * c) / 6.0),
q3((-b - 6.0 * c) / 6.0)
{}
static double radius() { return 2.0; }
double calc_weight(double x) const
{
if(x < 1.0) return p0 + x * x * (p2 + x * p3);
if(x < 2.0) return q0 + x * (q1 + x * (q2 + x * q3));
return 0.0;
}
};
//----------------------------------------------image_filter_spline16
struct image_filter_spline16
{
static double radius() { return 2.0; }
static double calc_weight(double x)
{
if(x < 1.0)
{
return ((x - 9.0/5.0 ) * x - 1.0/5.0 ) * x + 1.0;
}
return ((-1.0/3.0 * (x-1) + 4.0/5.0) * (x-1) - 7.0/15.0 ) * (x-1);
}
};
//---------------------------------------------image_filter_spline36
struct image_filter_spline36
{
static double radius() { return 3.0; }
static double calc_weight(double x)
{
if(x < 1.0)
{
return ((13.0/11.0 * x - 453.0/209.0) * x - 3.0/209.0) * x + 1.0;
}
if(x < 2.0)
{
return ((-6.0/11.0 * (x-1) + 270.0/209.0) * (x-1) - 156.0/ 209.0) * (x-1);
}
return ((1.0/11.0 * (x-2) - 45.0/209.0) * (x-2) + 26.0/209.0) * (x-2);
}
};
//----------------------------------------------image_filter_gaussian
struct image_filter_gaussian
{
static double radius() { return 2.0; }
static double calc_weight(double x)
{
return exp(-2.0 * x * x) * sqrt(2.0 / pi);
}
};
//------------------------------------------------image_filter_bessel
struct image_filter_bessel
{
static double radius() { return 3.2383; }
static double calc_weight(double x)
{
return (x == 0.0) ? pi / 4.0 : besj(pi * x, 1) / (2.0 * x);
}
};
//-------------------------------------------------image_filter_sinc
class image_filter_sinc
{
public:
image_filter_sinc(double r) : m_radius(r < 2.0 ? 2.0 : r) {}
double radius() const { return m_radius; }
double calc_weight(double x) const
{
if(x == 0.0) return 1.0;
x *= pi;
return sin(x) / x;
}
private:
double m_radius;
};
//-----------------------------------------------image_filter_lanczos
class image_filter_lanczos
{
public:
image_filter_lanczos(double r) : m_radius(r < 2.0 ? 2.0 : r) {}
double radius() const { return m_radius; }
double calc_weight(double x) const
{
if(x == 0.0) return 1.0;
if(x > m_radius) return 0.0;
x *= pi;
double xr = x / m_radius;
return (sin(x) / x) * (sin(xr) / xr);
}
private:
double m_radius;
};
//----------------------------------------------image_filter_blackman
class image_filter_blackman
{
public:
image_filter_blackman(double r) : m_radius(r < 2.0 ? 2.0 : r) {}
double radius() const { return m_radius; }
double calc_weight(double x) const
{
if(x == 0.0) return 1.0;
if(x > m_radius) return 0.0;
x *= pi;
double xr = x / m_radius;
return (sin(x) / x) * (0.42 + 0.5*cos(xr) + 0.08*cos(2*xr));
}
private:
double m_radius;
};
//------------------------------------------------image_filter_sinc36
class image_filter_sinc36 : public image_filter_sinc
{ public: image_filter_sinc36() : image_filter_sinc(3.0){} };
//------------------------------------------------image_filter_sinc64
class image_filter_sinc64 : public image_filter_sinc
{ public: image_filter_sinc64() : image_filter_sinc(4.0){} };
//-----------------------------------------------image_filter_sinc100
class image_filter_sinc100 : public image_filter_sinc
{ public: image_filter_sinc100() : image_filter_sinc(5.0){} };
//-----------------------------------------------image_filter_sinc144
class image_filter_sinc144 : public image_filter_sinc
{ public: image_filter_sinc144() : image_filter_sinc(6.0){} };
//-----------------------------------------------image_filter_sinc196
class image_filter_sinc196 : public image_filter_sinc
{ public: image_filter_sinc196() : image_filter_sinc(7.0){} };
//-----------------------------------------------image_filter_sinc256
class image_filter_sinc256 : public image_filter_sinc
{ public: image_filter_sinc256() : image_filter_sinc(8.0){} };
//---------------------------------------------image_filter_lanczos36
class image_filter_lanczos36 : public image_filter_lanczos
{ public: image_filter_lanczos36() : image_filter_lanczos(3.0){} };
//---------------------------------------------image_filter_lanczos64
class image_filter_lanczos64 : public image_filter_lanczos
{ public: image_filter_lanczos64() : image_filter_lanczos(4.0){} };
//--------------------------------------------image_filter_lanczos100
class image_filter_lanczos100 : public image_filter_lanczos
{ public: image_filter_lanczos100() : image_filter_lanczos(5.0){} };
//--------------------------------------------image_filter_lanczos144
class image_filter_lanczos144 : public image_filter_lanczos
{ public: image_filter_lanczos144() : image_filter_lanczos(6.0){} };
//--------------------------------------------image_filter_lanczos196
class image_filter_lanczos196 : public image_filter_lanczos
{ public: image_filter_lanczos196() : image_filter_lanczos(7.0){} };
//--------------------------------------------image_filter_lanczos256
class image_filter_lanczos256 : public image_filter_lanczos
{ public: image_filter_lanczos256() : image_filter_lanczos(8.0){} };
//--------------------------------------------image_filter_blackman36
class image_filter_blackman36 : public image_filter_blackman
{ public: image_filter_blackman36() : image_filter_blackman(3.0){} };
//--------------------------------------------image_filter_blackman64
class image_filter_blackman64 : public image_filter_blackman
{ public: image_filter_blackman64() : image_filter_blackman(4.0){} };
//-------------------------------------------image_filter_blackman100
class image_filter_blackman100 : public image_filter_blackman
{ public: image_filter_blackman100() : image_filter_blackman(5.0){} };
//-------------------------------------------image_filter_blackman144
class image_filter_blackman144 : public image_filter_blackman
{ public: image_filter_blackman144() : image_filter_blackman(6.0){} };
//-------------------------------------------image_filter_blackman196
class image_filter_blackman196 : public image_filter_blackman
{ public: image_filter_blackman196() : image_filter_blackman(7.0){} };
//-------------------------------------------image_filter_blackman256
class image_filter_blackman256 : public image_filter_blackman
{ public: image_filter_blackman256() : image_filter_blackman(8.0){} };
}
#endif
| 0 |
D://workCode//uploadProject\awtk\3rd\agg | D://workCode//uploadProject\awtk\3rd\agg\include\agg_math.h | //----------------------------------------------------------------------------
// Anti-Grain Geometry - Version 2.4
// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
//
// Permission to copy, use, modify, sell and distribute this software
// is granted provided this copyright notice appears in all copies.
// This software is provided "as is" without express or implied
// warranty, and with no claim as to its suitability for any purpose.
//
//----------------------------------------------------------------------------
// Contact: mcseem@antigrain.com
// mcseemagg@yahoo.com
// http://www.antigrain.com
//----------------------------------------------------------------------------
// Bessel function (besj) was adapted for use in AGG library by Andy Wilk
// Contact: castor.vulgaris@gmail.com
//----------------------------------------------------------------------------
#ifndef AGG_MATH_INCLUDED
#define AGG_MATH_INCLUDED
#include <math.h>
#include "agg_basics.h"
namespace agg
{
//------------------------------------------------------vertex_dist_epsilon
// Coinciding points maximal distance (Epsilon)
const double vertex_dist_epsilon = 1e-14;
//-----------------------------------------------------intersection_epsilon
// See calc_intersection
const double intersection_epsilon = 1.0e-30;
//------------------------------------------------------------cross_product
AGG_INLINE double cross_product(double x1, double y1,
double x2, double y2,
double x, double y)
{
return (x - x2) * (y2 - y1) - (y - y2) * (x2 - x1);
}
//--------------------------------------------------------point_in_triangle
AGG_INLINE bool point_in_triangle(double x1, double y1,
double x2, double y2,
double x3, double y3,
double x, double y)
{
bool cp1 = cross_product(x1, y1, x2, y2, x, y) < 0.0;
bool cp2 = cross_product(x2, y2, x3, y3, x, y) < 0.0;
bool cp3 = cross_product(x3, y3, x1, y1, x, y) < 0.0;
return cp1 == cp2 && cp2 == cp3 && cp3 == cp1;
}
//-----------------------------------------------------------calc_distance
AGG_INLINE double calc_distance(double x1, double y1, double x2, double y2)
{
double dx = x2-x1;
double dy = y2-y1;
return sqrt(dx * dx + dy * dy);
}
//--------------------------------------------------------calc_sq_distance
AGG_INLINE double calc_sq_distance(double x1, double y1, double x2, double y2)
{
double dx = x2-x1;
double dy = y2-y1;
return dx * dx + dy * dy;
}
//------------------------------------------------calc_line_point_distance
AGG_INLINE double calc_line_point_distance(double x1, double y1,
double x2, double y2,
double x, double y)
{
double dx = x2-x1;
double dy = y2-y1;
double d = sqrt(dx * dx + dy * dy);
if(d < vertex_dist_epsilon)
{
return calc_distance(x1, y1, x, y);
}
return ((x - x2) * dy - (y - y2) * dx) / d;
}
    //----------------------------------------------------calc_segment_point_u
AGG_INLINE double calc_segment_point_u(double x1, double y1,
double x2, double y2,
double x, double y)
{
double dx = x2 - x1;
double dy = y2 - y1;
if(dx == 0 && dy == 0)
{
return 0;
}
double pdx = x - x1;
double pdy = y - y1;
return (pdx * dx + pdy * dy) / (dx * dx + dy * dy);
}
    //------------------------------------------calc_segment_point_sq_distance
AGG_INLINE double calc_segment_point_sq_distance(double x1, double y1,
double x2, double y2,
double x, double y,
double u)
{
if(u <= 0)
{
return calc_sq_distance(x, y, x1, y1);
}
else
if(u >= 1)
{
return calc_sq_distance(x, y, x2, y2);
}
return calc_sq_distance(x, y, x1 + u * (x2 - x1), y1 + u * (y2 - y1));
}
    //------------------------------------------calc_segment_point_sq_distance
AGG_INLINE double calc_segment_point_sq_distance(double x1, double y1,
double x2, double y2,
double x, double y)
{
return
calc_segment_point_sq_distance(
x1, y1, x2, y2, x, y,
calc_segment_point_u(x1, y1, x2, y2, x, y));
}
//-------------------------------------------------------calc_intersection
AGG_INLINE bool calc_intersection(double ax, double ay, double bx, double by,
double cx, double cy, double dx, double dy,
double* x, double* y)
{
double num = (ay-cy) * (dx-cx) - (ax-cx) * (dy-cy);
double den = (bx-ax) * (dy-cy) - (by-ay) * (dx-cx);
if(fabs(den) < intersection_epsilon) return false;
double r = num / den;
*x = ax + r * (bx-ax);
*y = ay + r * (by-ay);
return true;
}
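    //-------------------------------------------------------------------------
    // Worked sketch (illustrative only, not part of AGG). calc_intersection
    // treats A-B and C-D as infinite lines; it returns false only when they
    // are (nearly) parallel. With the x-axis and y-axis as input the result
    // is exactly the origin.
    inline bool calc_intersection_sketch()
    {
        double x = 0, y = 0;
        bool ok = calc_intersection(-1.0, 0.0, 1.0, 0.0,   // segment on the x-axis
                                     0.0, -1.0, 0.0, 1.0,  // segment on the y-axis
                                     &x, &y);
        return ok && x == 0.0 && y == 0.0;
    }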
//-----------------------------------------------------intersection_exists
AGG_INLINE bool intersection_exists(double x1, double y1, double x2, double y2,
double x3, double y3, double x4, double y4)
{
// It's less expensive but you can't control the
// boundary conditions: Less or LessEqual
double dx1 = x2 - x1;
double dy1 = y2 - y1;
double dx2 = x4 - x3;
double dy2 = y4 - y3;
return ((x3 - x2) * dy1 - (y3 - y2) * dx1 < 0.0) !=
((x4 - x2) * dy1 - (y4 - y2) * dx1 < 0.0) &&
((x1 - x4) * dy2 - (y1 - y4) * dx2 < 0.0) !=
((x2 - x4) * dy2 - (y2 - y4) * dx2 < 0.0);
        // It is more expensive but more flexible
// in terms of boundary conditions.
//--------------------
//double den = (x2-x1) * (y4-y3) - (y2-y1) * (x4-x3);
//if(fabs(den) < intersection_epsilon) return false;
//double nom1 = (x4-x3) * (y1-y3) - (y4-y3) * (x1-x3);
//double nom2 = (x2-x1) * (y1-y3) - (y2-y1) * (x1-x3);
//double ua = nom1 / den;
//double ub = nom2 / den;
//return ua >= 0.0 && ua <= 1.0 && ub >= 0.0 && ub <= 1.0;
}
//--------------------------------------------------------calc_orthogonal
AGG_INLINE void calc_orthogonal(double thickness,
double x1, double y1,
double x2, double y2,
double* x, double* y)
{
double dx = x2 - x1;
double dy = y2 - y1;
double d = sqrt(dx*dx + dy*dy);
*x = thickness * dy / d;
*y = -thickness * dx / d;
}
//--------------------------------------------------------dilate_triangle
AGG_INLINE void dilate_triangle(double x1, double y1,
double x2, double y2,
double x3, double y3,
double *x, double* y,
double d)
{
double dx1=0.0;
double dy1=0.0;
double dx2=0.0;
double dy2=0.0;
double dx3=0.0;
double dy3=0.0;
double loc = cross_product(x1, y1, x2, y2, x3, y3);
if(fabs(loc) > intersection_epsilon)
{
if(cross_product(x1, y1, x2, y2, x3, y3) > 0.0)
{
d = -d;
}
calc_orthogonal(d, x1, y1, x2, y2, &dx1, &dy1);
calc_orthogonal(d, x2, y2, x3, y3, &dx2, &dy2);
calc_orthogonal(d, x3, y3, x1, y1, &dx3, &dy3);
}
*x++ = x1 + dx1; *y++ = y1 + dy1;
*x++ = x2 + dx1; *y++ = y2 + dy1;
*x++ = x2 + dx2; *y++ = y2 + dy2;
*x++ = x3 + dx2; *y++ = y3 + dy2;
*x++ = x3 + dx3; *y++ = y3 + dy3;
*x++ = x1 + dx3; *y++ = y1 + dy3;
}
//------------------------------------------------------calc_triangle_area
AGG_INLINE double calc_triangle_area(double x1, double y1,
double x2, double y2,
double x3, double y3)
{
return (x1*y2 - x2*y1 + x2*y3 - x3*y2 + x3*y1 - x1*y3) * 0.5;
}
//-------------------------------------------------------calc_polygon_area
template<class Storage> double calc_polygon_area(const Storage& st)
{
unsigned i;
double sum = 0.0;
double x = st[0].x;
double y = st[0].y;
double xs = x;
double ys = y;
for(i = 1; i < st.size(); i++)
{
const typename Storage::value_type& v = st[i];
sum += x * v.y - y * v.x;
x = v.x;
y = v.y;
}
return (sum + x * ys - y * xs) * 0.5;
}
//------------------------------------------------------------------------
// Tables for fast sqrt
extern int16u g_sqrt_table[1024];
extern int8 g_elder_bit_table[256];
//---------------------------------------------------------------fast_sqrt
    //Fast integer Sqrt - really fast: no loops, divisions or multiplications
#if defined(_MSC_VER)
#pragma warning(push)
#pragma warning(disable : 4035) //Disable warning "no return value"
#endif
AGG_INLINE unsigned fast_sqrt(unsigned val)
{
#if defined(_M_IX86) && defined(_MSC_VER) && !defined(AGG_NO_ASM)
//For Ix86 family processors this assembler code is used.
        //The key command here is bsr - determining the number of the most
//significant bit of the value. For other processors
//(and maybe compilers) the pure C "#else" section is used.
__asm
{
mov ebx, val
mov edx, 11
bsr ecx, ebx
sub ecx, 9
jle less_than_9_bits
shr ecx, 1
adc ecx, 0
sub edx, ecx
shl ecx, 1
shr ebx, cl
less_than_9_bits:
xor eax, eax
mov ax, g_sqrt_table[ebx*2]
mov ecx, edx
shr eax, cl
}
#else
//This code is actually pure C and portable to most
        //architectures including 64-bit ones.
unsigned t = val;
int bit=0;
unsigned shift = 11;
//The following piece of code is just an emulation of the
//Ix86 assembler command "bsr" (see above). However on old
//Intels (like Intel MMX 233MHz) this code is about twice
        //faster (sic!) than just one "bsr". On PIII and PIV the
//bsr is optimized quite well.
bit = t >> 24;
if(bit)
{
bit = g_elder_bit_table[bit] + 24;
}
else
{
bit = (t >> 16) & 0xFF;
if(bit)
{
bit = g_elder_bit_table[bit] + 16;
}
else
{
bit = (t >> 8) & 0xFF;
if(bit)
{
bit = g_elder_bit_table[bit] + 8;
}
else
{
bit = g_elder_bit_table[t];
}
}
}
//This code calculates the sqrt.
bit -= 9;
if(bit > 0)
{
bit = (bit >> 1) + (bit & 1);
shift -= bit;
val >>= (bit << 1);
}
return g_sqrt_table[val] >> shift;
#endif
}
#if defined(_MSC_VER)
#pragma warning(pop)
#endif
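    //------------------------------------------------------------------------
    // Usage sketch (illustrative only, not part of AGG). fast_sqrt trades
    // precision for speed: the argument is reduced to an index into the
    // 1024-entry table (roughly its top 10 significant bits), so results are
    // close to, but not always identical with, the exact integer square root.
    inline void fast_sqrt_sketch()
    {
        unsigned a = fast_sqrt(25);       // ~5
        unsigned b = fast_sqrt(1000000);  // ~1000
        (void)a; (void)b;
    }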
//--------------------------------------------------------------------besj
    // Function besj calculates the Bessel function of the first kind of order n.
    // Arguments (note that the implementation below takes them as besj(x, n)):
    //    n - an integer (>=0), the order
    //    x - value at which the Bessel function is required
//--------------------
// C++ Mathematical Library
    // Converted from the equivalent FORTRAN library
    // Converted by Gareth Walker for use by the course 392 computational project
// All functions tested and yield the same results as the corresponding
// FORTRAN versions.
//
// If you have any problems using these functions please report them to
// M.Muldoon@UMIST.ac.uk
//
// Documentation available on the web
// http://www.ma.umist.ac.uk/mrm/Teaching/392/libs/392.html
// Version 1.0 8/98
// 29 October, 1999
//--------------------
// Adapted for use in AGG library by Andy Wilk (castor.vulgaris@gmail.com)
//------------------------------------------------------------------------
inline double besj(double x, int n)
{
if(n < 0)
{
return 0;
}
double d = 1E-6;
double b = 0;
if(fabs(x) <= d)
{
if(n != 0) return 0;
return 1;
}
double b1 = 0; // b1 is the value from the previous iteration
// Set up a starting order for recurrence
int m1 = (int)fabs(x) + 6;
if(fabs(x) > 5)
{
m1 = (int)(fabs(1.4 * x + 60 / x));
}
int m2 = (int)(n + 2 + fabs(x) / 4);
if (m1 > m2)
{
m2 = m1;
}
        // Apply recurrence down from the current max order
for(;;)
{
double c3 = 0;
double c2 = 1E-30;
double c4 = 0;
int m8 = 1;
if (m2 / 2 * 2 == m2)
{
m8 = -1;
}
int imax = m2 - 2;
for (int i = 1; i <= imax; i++)
{
double c6 = 2 * (m2 - i) * c2 / x - c3;
c3 = c2;
c2 = c6;
if(m2 - i - 1 == n)
{
b = c6;
}
m8 = -1 * m8;
if (m8 > 0)
{
c4 = c4 + 2 * c6;
}
}
double c6 = 2 * c2 / x - c3;
if(n == 0)
{
b = c6;
}
c4 += c6;
b /= c4;
if(fabs(b - b1) < d)
{
return b;
}
b1 = b;
m2 += 3;
}
}
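    //------------------------------------------------------------------------
    // Worked sketch (illustrative only, not part of AGG). Note the argument
    // order of the implementation: besj(x, n). Well-known reference values
    // make a quick sanity check: J0(1) ~ 0.7652 and J1(1) ~ 0.4401.
    inline bool besj_sketch()
    {
        double j0 = besj(1.0, 0);
        double j1 = besj(1.0, 1);
        return j0 > 0.76 && j0 < 0.77 && j1 > 0.43 && j1 < 0.45;
    }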
}
#endif
| 0 |
D://workCode//uploadProject\awtk\3rd\agg | D://workCode//uploadProject\awtk\3rd\agg\include\agg_math_stroke.h | //----------------------------------------------------------------------------
// Anti-Grain Geometry - Version 2.4
// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
//
// Permission to copy, use, modify, sell and distribute this software
// is granted provided this copyright notice appears in all copies.
// This software is provided "as is" without express or implied
// warranty, and with no claim as to its suitability for any purpose.
//
//----------------------------------------------------------------------------
// Contact: mcseem@antigrain.com
// mcseemagg@yahoo.com
// http://www.antigrain.com
//----------------------------------------------------------------------------
//
// Stroke math
//
//----------------------------------------------------------------------------
#ifndef AGG_STROKE_MATH_INCLUDED
#define AGG_STROKE_MATH_INCLUDED
#include "agg_math.h"
#include "agg_vertex_sequence.h"
namespace agg
{
//-------------------------------------------------------------line_cap_e
enum line_cap_e
{
butt_cap,
square_cap,
round_cap
};
//------------------------------------------------------------line_join_e
enum line_join_e
{
miter_join = 0,
miter_join_revert = 1,
round_join = 2,
bevel_join = 3,
miter_join_round = 4
};
//-----------------------------------------------------------inner_join_e
enum inner_join_e
{
inner_bevel,
inner_miter,
inner_jag,
inner_round
};
//------------------------------------------------------------math_stroke
template<class VertexConsumer> class math_stroke
{
public:
typedef typename VertexConsumer::value_type coord_type;
math_stroke();
void line_cap(line_cap_e lc) { m_line_cap = lc; }
void line_join(line_join_e lj) { m_line_join = lj; }
void inner_join(inner_join_e ij) { m_inner_join = ij; }
line_cap_e line_cap() const { return m_line_cap; }
line_join_e line_join() const { return m_line_join; }
inner_join_e inner_join() const { return m_inner_join; }
void width(double w);
void miter_limit(double ml) { m_miter_limit = ml; }
void miter_limit_theta(double t);
void inner_miter_limit(double ml) { m_inner_miter_limit = ml; }
void approximation_scale(double as) { m_approx_scale = as; }
double width() const { return m_width * 2.0; }
double miter_limit() const { return m_miter_limit; }
double inner_miter_limit() const { return m_inner_miter_limit; }
double approximation_scale() const { return m_approx_scale; }
void calc_cap(VertexConsumer& vc,
const vertex_dist& v0,
const vertex_dist& v1,
double len);
void calc_join(VertexConsumer& vc,
const vertex_dist& v0,
const vertex_dist& v1,
const vertex_dist& v2,
double len1,
double len2);
private:
AGG_INLINE void add_vertex(VertexConsumer& vc, double x, double y)
{
vc.add(coord_type(x, y));
}
void calc_arc(VertexConsumer& vc,
double x, double y,
double dx1, double dy1,
double dx2, double dy2);
void calc_miter(VertexConsumer& vc,
const vertex_dist& v0,
const vertex_dist& v1,
const vertex_dist& v2,
double dx1, double dy1,
double dx2, double dy2,
line_join_e lj,
double mlimit,
double dbevel);
double m_width;
double m_width_abs;
double m_width_eps;
int m_width_sign;
double m_miter_limit;
double m_inner_miter_limit;
double m_approx_scale;
line_cap_e m_line_cap;
line_join_e m_line_join;
inner_join_e m_inner_join;
};
//-----------------------------------------------------------------------
template<class VC> math_stroke<VC>::math_stroke() :
m_width(0.5),
m_width_abs(0.5),
m_width_eps(0.5/1024.0),
m_width_sign(1),
m_miter_limit(4.0),
m_inner_miter_limit(1.01),
m_approx_scale(1.0),
m_line_cap(butt_cap),
m_line_join(miter_join),
m_inner_join(inner_miter)
{
}
//-----------------------------------------------------------------------
template<class VC> void math_stroke<VC>::width(double w)
{
m_width = w * 0.5;
if(m_width < 0)
{
m_width_abs = -m_width;
m_width_sign = -1;
}
else
{
m_width_abs = m_width;
m_width_sign = 1;
}
m_width_eps = m_width / 1024.0;
}
//-----------------------------------------------------------------------
template<class VC> void math_stroke<VC>::miter_limit_theta(double t)
{
m_miter_limit = 1.0 / sin(t * 0.5) ;
}
//-----------------------------------------------------------------------
template<class VC>
void math_stroke<VC>::calc_arc(VC& vc,
double x, double y,
double dx1, double dy1,
double dx2, double dy2)
{
double a1 = atan2(dy1 * m_width_sign, dx1 * m_width_sign);
double a2 = atan2(dy2 * m_width_sign, dx2 * m_width_sign);
double da = a1 - a2;
int i, n;
da = acos(m_width_abs / (m_width_abs + 0.125 / m_approx_scale)) * 2;
add_vertex(vc, x + dx1, y + dy1);
if(m_width_sign > 0)
{
if(a1 > a2) a2 += 2 * pi;
n = int((a2 - a1) / da);
da = (a2 - a1) / (n + 1);
a1 += da;
for(i = 0; i < n; i++)
{
add_vertex(vc, x + cos(a1) * m_width, y + sin(a1) * m_width);
a1 += da;
}
}
else
{
if(a1 < a2) a2 -= 2 * pi;
n = int((a1 - a2) / da);
da = (a1 - a2) / (n + 1);
a1 -= da;
for(i = 0; i < n; i++)
{
add_vertex(vc, x + cos(a1) * m_width, y + sin(a1) * m_width);
a1 -= da;
}
}
add_vertex(vc, x + dx2, y + dy2);
}
//-----------------------------------------------------------------------
template<class VC>
void math_stroke<VC>::calc_miter(VC& vc,
const vertex_dist& v0,
const vertex_dist& v1,
const vertex_dist& v2,
double dx1, double dy1,
double dx2, double dy2,
line_join_e lj,
double mlimit,
double dbevel)
{
double xi = v1.x;
double yi = v1.y;
double di = 1;
double lim = m_width_abs * mlimit;
bool miter_limit_exceeded = true; // Assume the worst
bool intersection_failed = true; // Assume the worst
if(calc_intersection(v0.x + dx1, v0.y - dy1,
v1.x + dx1, v1.y - dy1,
v1.x + dx2, v1.y - dy2,
v2.x + dx2, v2.y - dy2,
&xi, &yi))
{
// Calculation of the intersection succeeded
//---------------------
di = calc_distance(v1.x, v1.y, xi, yi);
if(di <= lim)
{
// Inside the miter limit
//---------------------
add_vertex(vc, xi, yi);
miter_limit_exceeded = false;
}
intersection_failed = false;
}
else
{
// Calculation of the intersection failed, most probably
            // the three points lie on one straight line.
// First check if v0 and v2 lie on the opposite sides of vector:
// (v1.x, v1.y) -> (v1.x+dx1, v1.y-dy1), that is, the perpendicular
// to the line determined by vertices v0 and v1.
            // This condition determines whether the next line segment continues
// the previous one or goes back.
//----------------
double x2 = v1.x + dx1;
double y2 = v1.y - dy1;
if((cross_product(v0.x, v0.y, v1.x, v1.y, x2, y2) < 0.0) ==
(cross_product(v1.x, v1.y, v2.x, v2.y, x2, y2) < 0.0))
{
// This case means that the next segment continues
// the previous one (straight line)
//-----------------
add_vertex(vc, v1.x + dx1, v1.y - dy1);
miter_limit_exceeded = false;
}
}
if(miter_limit_exceeded)
{
// Miter limit exceeded
//------------------------
switch(lj)
{
case miter_join_revert:
                // For compatibility with SVG, PDF, etc.,
// we use a simple bevel join instead of
// "smart" bevel
//-------------------
add_vertex(vc, v1.x + dx1, v1.y - dy1);
add_vertex(vc, v1.x + dx2, v1.y - dy2);
break;
case miter_join_round:
calc_arc(vc, v1.x, v1.y, dx1, -dy1, dx2, -dy2);
break;
default:
// If no miter-revert, calculate new dx1, dy1, dx2, dy2
//----------------
if(intersection_failed)
{
mlimit *= m_width_sign;
add_vertex(vc, v1.x + dx1 + dy1 * mlimit,
v1.y - dy1 + dx1 * mlimit);
add_vertex(vc, v1.x + dx2 - dy2 * mlimit,
v1.y - dy2 - dx2 * mlimit);
}
else
{
double x1 = v1.x + dx1;
double y1 = v1.y - dy1;
double x2 = v1.x + dx2;
double y2 = v1.y - dy2;
di = (lim - dbevel) / (di - dbevel);
add_vertex(vc, x1 + (xi - x1) * di,
y1 + (yi - y1) * di);
add_vertex(vc, x2 + (xi - x2) * di,
y2 + (yi - y2) * di);
}
break;
}
}
}
//--------------------------------------------------------stroke_calc_cap
template<class VC>
void math_stroke<VC>::calc_cap(VC& vc,
const vertex_dist& v0,
const vertex_dist& v1,
double len)
{
vc.remove_all();
double dx1 = (v1.y - v0.y) / len;
double dy1 = (v1.x - v0.x) / len;
double dx2 = 0;
double dy2 = 0;
dx1 *= m_width;
dy1 *= m_width;
if(m_line_cap != round_cap)
{
if(m_line_cap == square_cap)
{
dx2 = dy1 * m_width_sign;
dy2 = dx1 * m_width_sign;
}
add_vertex(vc, v0.x - dx1 - dx2, v0.y + dy1 - dy2);
add_vertex(vc, v0.x + dx1 - dx2, v0.y - dy1 - dy2);
}
else
{
double da = acos(m_width_abs / (m_width_abs + 0.125 / m_approx_scale)) * 2;
double a1;
int i;
int n = int(pi / da);
da = pi / (n + 1);
add_vertex(vc, v0.x - dx1, v0.y + dy1);
if(m_width_sign > 0)
{
a1 = atan2(dy1, -dx1);
a1 += da;
for(i = 0; i < n; i++)
{
add_vertex(vc, v0.x + cos(a1) * m_width,
v0.y + sin(a1) * m_width);
a1 += da;
}
}
else
{
a1 = atan2(-dy1, dx1);
a1 -= da;
for(i = 0; i < n; i++)
{
add_vertex(vc, v0.x + cos(a1) * m_width,
v0.y + sin(a1) * m_width);
a1 -= da;
}
}
add_vertex(vc, v0.x + dx1, v0.y - dy1);
}
}
//-----------------------------------------------------------------------
template<class VC>
void math_stroke<VC>::calc_join(VC& vc,
const vertex_dist& v0,
const vertex_dist& v1,
const vertex_dist& v2,
double len1,
double len2)
{
double dx1 = m_width * (v1.y - v0.y) / len1;
double dy1 = m_width * (v1.x - v0.x) / len1;
double dx2 = m_width * (v2.y - v1.y) / len2;
double dy2 = m_width * (v2.x - v1.x) / len2;
vc.remove_all();
double cp = cross_product(v0.x, v0.y, v1.x, v1.y, v2.x, v2.y);
if(cp != 0 && (cp > 0) == (m_width > 0))
{
// Inner join
//---------------
double limit = ((len1 < len2) ? len1 : len2) / m_width_abs;
if(limit < m_inner_miter_limit)
{
limit = m_inner_miter_limit;
}
switch(m_inner_join)
{
default: // inner_bevel
add_vertex(vc, v1.x + dx1, v1.y - dy1);
add_vertex(vc, v1.x + dx2, v1.y - dy2);
break;
case inner_miter:
calc_miter(vc,
v0, v1, v2, dx1, dy1, dx2, dy2,
miter_join_revert,
limit, 0);
break;
case inner_jag:
case inner_round:
cp = (dx1-dx2) * (dx1-dx2) + (dy1-dy2) * (dy1-dy2);
if(cp < len1 * len1 && cp < len2 * len2)
{
calc_miter(vc,
v0, v1, v2, dx1, dy1, dx2, dy2,
miter_join_revert,
limit, 0);
}
else
{
if(m_inner_join == inner_jag)
{
add_vertex(vc, v1.x + dx1, v1.y - dy1);
add_vertex(vc, v1.x, v1.y );
add_vertex(vc, v1.x + dx2, v1.y - dy2);
}
else
{
add_vertex(vc, v1.x + dx1, v1.y - dy1);
add_vertex(vc, v1.x, v1.y );
calc_arc(vc, v1.x, v1.y, dx2, -dy2, dx1, -dy1);
add_vertex(vc, v1.x, v1.y );
add_vertex(vc, v1.x + dx2, v1.y - dy2);
}
}
break;
}
}
else
{
// Outer join
//---------------
// Calculate the distance between v1 and
// the central point of the bevel line segment
//---------------
double dx = (dx1 + dx2) / 2;
double dy = (dy1 + dy2) / 2;
double dbevel = sqrt(dx * dx + dy * dy);
if(m_line_join == round_join || m_line_join == bevel_join)
{
// This is an optimization that reduces the number of points
// in cases of almost collinear segments. If there's no
// visible difference between bevel and miter joins we'd rather
// use miter join because it adds only one point instead of two.
//
// Here we calculate the middle point between the bevel points
                // and then the distance between v1 and this middle point.
                // At outer joins this distance is always less than the stroke width,
// because it's actually the height of an isosceles triangle of
// v1 and its two bevel points. If the difference between this
// width and this value is small (no visible bevel) we can
// add just one point.
//
// The constant in the expression makes the result approximately
// the same as in round joins and caps. You can safely comment
// out this entire "if".
//-------------------
if(m_approx_scale * (m_width_abs - dbevel) < m_width_eps)
{
if(calc_intersection(v0.x + dx1, v0.y - dy1,
v1.x + dx1, v1.y - dy1,
v1.x + dx2, v1.y - dy2,
v2.x + dx2, v2.y - dy2,
&dx, &dy))
{
add_vertex(vc, dx, dy);
}
else
{
add_vertex(vc, v1.x + dx1, v1.y - dy1);
}
return;
}
}
switch(m_line_join)
{
case miter_join:
case miter_join_revert:
case miter_join_round:
calc_miter(vc,
v0, v1, v2, dx1, dy1, dx2, dy2,
m_line_join,
m_miter_limit,
dbevel);
break;
case round_join:
calc_arc(vc, v1.x, v1.y, dx1, -dy1, dx2, -dy2);
break;
default: // Bevel join
add_vertex(vc, v1.x + dx1, v1.y - dy1);
add_vertex(vc, v1.x + dx2, v1.y - dy2);
break;
}
}
}
}
#endif
| 0 |
D://workCode//uploadProject\awtk\3rd\agg | D://workCode//uploadProject\awtk\3rd\agg\include\agg_pixfmt_base.h | //----------------------------------------------------------------------------
// Anti-Grain Geometry - Version 2.4
// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
//
// Permission to copy, use, modify, sell and distribute this software
// is granted provided this copyright notice appears in all copies.
// This software is provided "as is" without express or implied
// warranty, and with no claim as to its suitability for any purpose.
//
//----------------------------------------------------------------------------
// Contact: mcseem@antigrain.com
// mcseemagg@yahoo.com
// http://www.antigrain.com
//----------------------------------------------------------------------------
#ifndef AGG_PIXFMT_BASE_INCLUDED
#define AGG_PIXFMT_BASE_INCLUDED
#include "agg_basics.h"
#include "agg_color_gray.h"
#include "agg_color_rgba.h"
namespace agg
{
struct pixfmt_gray_tag
{
};
struct pixfmt_rgb_tag
{
};
struct pixfmt_rgba_tag
{
};
//--------------------------------------------------------------blender_base
template<class ColorT, class Order = void>
struct blender_base
{
typedef ColorT color_type;
typedef Order order_type;
typedef typename color_type::value_type value_type;
static rgba get(value_type r, value_type g, value_type b, value_type a, cover_type cover = cover_full)
{
if (cover > cover_none)
{
rgba c(
color_type::to_double(r),
color_type::to_double(g),
color_type::to_double(b),
color_type::to_double(a));
if (cover < cover_full)
{
double x = double(cover) / cover_full;
c.r *= x;
c.g *= x;
c.b *= x;
c.a *= x;
}
return c;
}
else return rgba::no_color();
}
static rgba get(const value_type* p, cover_type cover = cover_full)
{
return get(
p[order_type::R],
p[order_type::G],
p[order_type::B],
p[order_type::A],
cover);
}
static void set(value_type* p, value_type r, value_type g, value_type b, value_type a)
{
p[order_type::R] = r;
p[order_type::G] = g;
p[order_type::B] = b;
p[order_type::A] = a;
}
static void set(value_type* p, const rgba& c)
{
p[order_type::R] = color_type::from_double(c.r);
p[order_type::G] = color_type::from_double(c.g);
p[order_type::B] = color_type::from_double(c.b);
p[order_type::A] = color_type::from_double(c.a);
}
};
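// Illustrative sketch (not part of the library interface): for an
// 8-bit RGBA pixel the helpers above convert between raw channel
// values and normalized doubles, scaling by coverage on the way in.
//
//     agg::int8u px[4] = { 255, 128, 0, 255 };   // stored as R,G,B,A
//     agg::rgba c = agg::blender_base<agg::rgba8, agg::order_rgba>::get(px, 128);
//     // c is roughly (0.5, 0.25, 0.0, 0.5): cover 128/255 scales
//     // every channel by about one half.
//     agg::blender_base<agg::rgba8, agg::order_rgba>::set(px, c);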
}
#endif
| 0 |
D://workCode//uploadProject\awtk\3rd\agg | D://workCode//uploadProject\awtk\3rd\agg\include\agg_pixfmt_rgb.h | //----------------------------------------------------------------------------
// Anti-Grain Geometry - Version 2.4
// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
//
// Permission to copy, use, modify, sell and distribute this software
// is granted provided this copyright notice appears in all copies.
// This software is provided "as is" without express or implied
// warranty, and with no claim as to its suitability for any purpose.
//
//----------------------------------------------------------------------------
// Contact: mcseem@antigrain.com
// mcseemagg@yahoo.com
// http://www.antigrain.com
//----------------------------------------------------------------------------
//
// Adaptation for high precision colors has been sponsored by
// Liberty Technology Systems, Inc., visit http://lib-sys.com
//
// Liberty Technology Systems, Inc. is the provider of
// PostScript and PDF technology for software developers.
//
//----------------------------------------------------------------------------
#ifndef AGG_PIXFMT_RGB_INCLUDED
#define AGG_PIXFMT_RGB_INCLUDED
#include <string.h>
#include "agg_pixfmt_base.h"
#include "agg_rendering_buffer.h"
namespace agg
{
//=====================================================apply_gamma_dir_rgb
template<class ColorT, class Order, class GammaLut> class apply_gamma_dir_rgb
{
public:
typedef typename ColorT::value_type value_type;
apply_gamma_dir_rgb(const GammaLut& gamma) : m_gamma(gamma) {}
AGG_INLINE void operator () (value_type* p)
{
p[Order::R] = m_gamma.dir(p[Order::R]);
p[Order::G] = m_gamma.dir(p[Order::G]);
p[Order::B] = m_gamma.dir(p[Order::B]);
}
private:
const GammaLut& m_gamma;
};
//=====================================================apply_gamma_inv_rgb
template<class ColorT, class Order, class GammaLut> class apply_gamma_inv_rgb
{
public:
typedef typename ColorT::value_type value_type;
apply_gamma_inv_rgb(const GammaLut& gamma) : m_gamma(gamma) {}
AGG_INLINE void operator () (value_type* p)
{
p[Order::R] = m_gamma.inv(p[Order::R]);
p[Order::G] = m_gamma.inv(p[Order::G]);
p[Order::B] = m_gamma.inv(p[Order::B]);
}
private:
const GammaLut& m_gamma;
};
//=========================================================blender_rgb
template<class ColorT, class Order>
struct blender_rgb
{
typedef ColorT color_type;
typedef Order order_type;
typedef typename color_type::value_type value_type;
typedef typename color_type::calc_type calc_type;
typedef typename color_type::long_type long_type;
// Blend pixels using the non-premultiplied form of Alvy-Ray Smith's
// compositing function. Since the render buffer is opaque we skip the
// initial premultiply and final demultiply.
//--------------------------------------------------------------------
static AGG_INLINE void blend_pix(value_type* p,
value_type cr, value_type cg, value_type cb, value_type alpha, cover_type cover)
{
blend_pix(p, cr, cg, cb, color_type::mult_cover(alpha, cover));
}
//--------------------------------------------------------------------
static AGG_INLINE void blend_pix(value_type* p,
value_type cr, value_type cg, value_type cb, value_type alpha)
{
p[Order::R] = color_type::lerp(p[Order::R], cr, alpha);
p[Order::G] = color_type::lerp(p[Order::G], cg, alpha);
p[Order::B] = color_type::lerp(p[Order::B], cb, alpha);
}
};
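// Worked example of the lerp above (illustrative values): with 8-bit
// channels, dst = 100, src = 200 and alpha = 128 (about 0.5),
// lerp(dst, src, alpha) = dst + (src - dst) * alpha, i.e. roughly 150,
// so the destination moves half way towards the source. No alpha plane
// is stored, which is why only R, G and B are touched.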
//======================================================blender_rgb_pre
template<class ColorT, class Order>
struct blender_rgb_pre
{
typedef ColorT color_type;
typedef Order order_type;
typedef typename color_type::value_type value_type;
typedef typename color_type::calc_type calc_type;
typedef typename color_type::long_type long_type;
// Blend pixels using the premultiplied form of Alvy-Ray Smith's
// compositing function.
//--------------------------------------------------------------------
static AGG_INLINE void blend_pix(value_type* p,
value_type cr, value_type cg, value_type cb, value_type alpha, cover_type cover)
{
blend_pix(p,
color_type::mult_cover(cr, cover),
color_type::mult_cover(cg, cover),
color_type::mult_cover(cb, cover),
color_type::mult_cover(alpha, cover));
}
//--------------------------------------------------------------------
static AGG_INLINE void blend_pix(value_type* p,
value_type cr, value_type cg, value_type cb, value_type alpha)
{
p[Order::R] = color_type::prelerp(p[Order::R], cr, alpha);
p[Order::G] = color_type::prelerp(p[Order::G], cg, alpha);
p[Order::B] = color_type::prelerp(p[Order::B], cb, alpha);
}
};
//===================================================blender_rgb_gamma
template<class ColorT, class Order, class Gamma>
class blender_rgb_gamma : public blender_base<ColorT, Order>
{
public:
typedef ColorT color_type;
typedef Order order_type;
typedef Gamma gamma_type;
typedef typename color_type::value_type value_type;
typedef typename color_type::calc_type calc_type;
typedef typename color_type::long_type long_type;
//--------------------------------------------------------------------
blender_rgb_gamma() : m_gamma(0) {}
void gamma(const gamma_type& g) { m_gamma = &g; }
//--------------------------------------------------------------------
AGG_INLINE void blend_pix(value_type* p,
value_type cr, value_type cg, value_type cb, value_type alpha, cover_type cover)
{
blend_pix(p, cr, cg, cb, color_type::mult_cover(alpha, cover));
}
//--------------------------------------------------------------------
AGG_INLINE void blend_pix(value_type* p,
value_type cr, value_type cg, value_type cb, value_type alpha)
{
calc_type r = m_gamma->dir(p[Order::R]);
calc_type g = m_gamma->dir(p[Order::G]);
calc_type b = m_gamma->dir(p[Order::B]);
p[Order::R] = m_gamma->inv(color_type::downscale((m_gamma->dir(cr) - r) * alpha) + r);
p[Order::G] = m_gamma->inv(color_type::downscale((m_gamma->dir(cg) - g) * alpha) + g);
p[Order::B] = m_gamma->inv(color_type::downscale((m_gamma->dir(cb) - b) * alpha) + b);
}
private:
const gamma_type* m_gamma;
};
//==================================================pixfmt_alpha_blend_rgb
template<class Blender, class RenBuf, unsigned Step, unsigned Offset = 0>
class pixfmt_alpha_blend_rgb
{
public:
typedef pixfmt_rgb_tag pixfmt_category;
typedef RenBuf rbuf_type;
typedef Blender blender_type;
typedef typename rbuf_type::row_data row_data;
typedef typename blender_type::color_type color_type;
typedef typename blender_type::order_type order_type;
typedef typename color_type::value_type value_type;
typedef typename color_type::calc_type calc_type;
enum
{
num_components = 3,
pix_step = Step,
pix_offset = Offset,
pix_width = sizeof(value_type) * pix_step
};
struct pixel_type
{
value_type c[num_components];
void set(value_type r, value_type g, value_type b)
{
c[order_type::R] = r;
c[order_type::G] = g;
c[order_type::B] = b;
}
void set(const color_type& color)
{
set(color.r, color.g, color.b);
}
void get(value_type& r, value_type& g, value_type& b) const
{
r = c[order_type::R];
g = c[order_type::G];
b = c[order_type::B];
}
color_type get() const
{
return color_type(
c[order_type::R],
c[order_type::G],
c[order_type::B]);
}
pixel_type* next()
{
return (pixel_type*)(c + pix_step);
}
const pixel_type* next() const
{
return (const pixel_type*)(c + pix_step);
}
pixel_type* advance(int n)
{
return (pixel_type*)(c + n * pix_step);
}
const pixel_type* advance(int n) const
{
return (const pixel_type*)(c + n * pix_step);
}
};
private:
//--------------------------------------------------------------------
AGG_INLINE void blend_pix(pixel_type* p,
value_type r, value_type g, value_type b, value_type a,
unsigned cover)
{
m_blender.blend_pix(p->c, r, g, b, a, cover);
}
//--------------------------------------------------------------------
AGG_INLINE void blend_pix(pixel_type* p,
value_type r, value_type g, value_type b, value_type a)
{
m_blender.blend_pix(p->c, r, g, b, a);
}
//--------------------------------------------------------------------
AGG_INLINE void blend_pix(pixel_type* p, const color_type& c, unsigned cover)
{
m_blender.blend_pix(p->c, c.r, c.g, c.b, c.a, cover);
}
//--------------------------------------------------------------------
AGG_INLINE void blend_pix(pixel_type* p, const color_type& c)
{
m_blender.blend_pix(p->c, c.r, c.g, c.b, c.a);
}
//--------------------------------------------------------------------
AGG_INLINE void copy_or_blend_pix(pixel_type* p, const color_type& c, unsigned cover)
{
if (!c.is_transparent())
{
if (c.is_opaque() && cover == cover_mask)
{
p->set(c);
}
else
{
blend_pix(p, c, cover);
}
}
}
//--------------------------------------------------------------------
AGG_INLINE void copy_or_blend_pix(pixel_type* p, const color_type& c)
{
if (!c.is_transparent())
{
if (c.is_opaque())
{
p->set(c);
}
else
{
blend_pix(p, c);
}
}
}
public:
//--------------------------------------------------------------------
explicit pixfmt_alpha_blend_rgb(rbuf_type& rb) :
m_rbuf(&rb)
{}
void attach(rbuf_type& rb) { m_rbuf = &rb; }
//--------------------------------------------------------------------
template<class PixFmt>
bool attach(PixFmt& pixf, int x1, int y1, int x2, int y2)
{
rect_i r(x1, y1, x2, y2);
if (r.clip(rect_i(0, 0, pixf.width()-1, pixf.height()-1)))
{
int stride = pixf.stride();
m_rbuf->attach(pixf.pix_ptr(r.x1, stride < 0 ? r.y2 : r.y1),
(r.x2 - r.x1) + 1,
(r.y2 - r.y1) + 1,
stride);
return true;
}
return false;
}
//--------------------------------------------------------------------
Blender& blender() { return m_blender; }
//--------------------------------------------------------------------
AGG_INLINE unsigned width() const { return m_rbuf->width(); }
AGG_INLINE unsigned height() const { return m_rbuf->height(); }
AGG_INLINE int stride() const { return m_rbuf->stride(); }
//--------------------------------------------------------------------
AGG_INLINE int8u* row_ptr(int y) { return m_rbuf->row_ptr(y); }
AGG_INLINE const int8u* row_ptr(int y) const { return m_rbuf->row_ptr(y); }
AGG_INLINE row_data row(int y) const { return m_rbuf->row(y); }
//--------------------------------------------------------------------
AGG_INLINE int8u* pix_ptr(int x, int y)
{
return m_rbuf->row_ptr(y) + sizeof(value_type) * (x * pix_step + pix_offset);
}
AGG_INLINE const int8u* pix_ptr(int x, int y) const
{
return m_rbuf->row_ptr(y) + sizeof(value_type) * (x * pix_step + pix_offset);
}
// Return pointer to pixel value, forcing row to be allocated.
AGG_INLINE pixel_type* pix_value_ptr(int x, int y, unsigned len)
{
return (pixel_type*)(m_rbuf->row_ptr(x, y, len) + sizeof(value_type) * (x * pix_step + pix_offset));
}
// Return pointer to pixel value, or null if row not allocated.
AGG_INLINE const pixel_type* pix_value_ptr(int x, int y) const
{
int8u* p = m_rbuf->row_ptr(y);
return p ? (pixel_type*)(p + sizeof(value_type) * (x * pix_step + pix_offset)) : 0;
}
// Get pixel pointer from raw buffer pointer.
AGG_INLINE static pixel_type* pix_value_ptr(void* p)
{
return (pixel_type*)((value_type*)p + pix_offset);
}
// Get pixel pointer from raw buffer pointer.
AGG_INLINE static const pixel_type* pix_value_ptr(const void* p)
{
return (const pixel_type*)((const value_type*)p + pix_offset);
}
//--------------------------------------------------------------------
AGG_INLINE static void write_plain_color(void* p, color_type c)
{
// RGB formats are implicitly premultiplied.
c.premultiply();
pix_value_ptr(p)->set(c);
}
//--------------------------------------------------------------------
AGG_INLINE static color_type read_plain_color(const void* p)
{
return pix_value_ptr(p)->get();
}
//--------------------------------------------------------------------
AGG_INLINE static void make_pix(int8u* p, const color_type& c)
{
((pixel_type*)p)->set(c);
}
//--------------------------------------------------------------------
AGG_INLINE color_type pixel(int x, int y) const
{
if (const pixel_type* p = pix_value_ptr(x, y))
{
return p->get();
}
return color_type::no_color();
}
//--------------------------------------------------------------------
AGG_INLINE void copy_pixel(int x, int y, const color_type& c)
{
pix_value_ptr(x, y, 1)->set(c);
}
//--------------------------------------------------------------------
AGG_INLINE void blend_pixel(int x, int y, const color_type& c, int8u cover)
{
copy_or_blend_pix(pix_value_ptr(x, y, 1), c, cover);
}
//--------------------------------------------------------------------
AGG_INLINE void copy_hline(int x, int y,
unsigned len,
const color_type& c)
{
pixel_type* p = pix_value_ptr(x, y, len);
do
{
p->set(c);
p = p->next();
}
while(--len);
}
//--------------------------------------------------------------------
AGG_INLINE void copy_vline(int x, int y,
unsigned len,
const color_type& c)
{
do
{
pix_value_ptr(x, y++, 1)->set(c);
}
while (--len);
}
//--------------------------------------------------------------------
void blend_hline(int x, int y,
unsigned len,
const color_type& c,
int8u cover)
{
if (!c.is_transparent())
{
pixel_type* p = pix_value_ptr(x, y, len);
if (c.is_opaque() && cover == cover_mask)
{
do
{
p->set(c);
p = p->next();
}
while (--len);
}
else
{
do
{
blend_pix(p, c, cover);
p = p->next();
}
while (--len);
}
}
}
//--------------------------------------------------------------------
void blend_vline(int x, int y,
unsigned len,
const color_type& c,
int8u cover)
{
if (!c.is_transparent())
{
if (c.is_opaque() && cover == cover_mask)
{
do
{
pix_value_ptr(x, y++, 1)->set(c);
}
while (--len);
}
else
{
do
{
blend_pix(pix_value_ptr(x, y++, 1), c, cover);
}
while (--len);
}
}
}
//--------------------------------------------------------------------
void blend_solid_hspan(int x, int y,
unsigned len,
const color_type& c,
const int8u* covers)
{
if (!c.is_transparent())
{
pixel_type* p = pix_value_ptr(x, y, len);
do
{
if (c.is_opaque() && *covers == cover_mask)
{
p->set(c);
}
else
{
blend_pix(p, c, *covers);
}
p = p->next();
++covers;
}
while (--len);
}
}
//--------------------------------------------------------------------
void blend_solid_vspan(int x, int y,
unsigned len,
const color_type& c,
const int8u* covers)
{
if (!c.is_transparent())
{
do
{
pixel_type* p = pix_value_ptr(x, y++, 1);
if (c.is_opaque() && *covers == cover_mask)
{
p->set(c);
}
else
{
blend_pix(p, c, *covers);
}
++covers;
}
while (--len);
}
}
//--------------------------------------------------------------------
void copy_color_hspan(int x, int y,
unsigned len,
const color_type* colors)
{
pixel_type* p = pix_value_ptr(x, y, len);
do
{
p->set(*colors++);
p = p->next();
}
while (--len);
}
//--------------------------------------------------------------------
void copy_color_vspan(int x, int y,
unsigned len,
const color_type* colors)
{
do
{
pix_value_ptr(x, y++, 1)->set(*colors++);
}
while (--len);
}
//--------------------------------------------------------------------
void blend_color_hspan(int x, int y,
unsigned len,
const color_type* colors,
const int8u* covers,
int8u cover)
{
pixel_type* p = pix_value_ptr(x, y, len);
if (covers)
{
do
{
copy_or_blend_pix(p, *colors++, *covers++);
p = p->next();
}
while (--len);
}
else
{
if (cover == cover_mask)
{
do
{
copy_or_blend_pix(p, *colors++);
p = p->next();
}
while (--len);
}
else
{
do
{
copy_or_blend_pix(p, *colors++, cover);
p = p->next();
}
while (--len);
}
}
}
//--------------------------------------------------------------------
void blend_color_vspan(int x, int y,
unsigned len,
const color_type* colors,
const int8u* covers,
int8u cover)
{
if (covers)
{
do
{
copy_or_blend_pix(pix_value_ptr(x, y++, 1), *colors++, *covers++);
}
while (--len);
}
else
{
if (cover == cover_mask)
{
do
{
copy_or_blend_pix(pix_value_ptr(x, y++, 1), *colors++);
}
while (--len);
}
else
{
do
{
copy_or_blend_pix(pix_value_ptr(x, y++, 1), *colors++, cover);
}
while (--len);
}
}
}
//--------------------------------------------------------------------
template<class Function> void for_each_pixel(Function f)
{
for (unsigned y = 0; y < height(); ++y)
{
row_data r = m_rbuf->row(y);
if (r.ptr)
{
unsigned len = r.x2 - r.x1 + 1;
pixel_type* p = pix_value_ptr(r.x1, y, len);
do
{
f(p->c);
p = p->next();
}
while (--len);
}
}
}
//--------------------------------------------------------------------
template<class GammaLut> void apply_gamma_dir(const GammaLut& g)
{
for_each_pixel(apply_gamma_dir_rgb<color_type, order_type, GammaLut>(g));
}
//--------------------------------------------------------------------
template<class GammaLut> void apply_gamma_inv(const GammaLut& g)
{
for_each_pixel(apply_gamma_inv_rgb<color_type, order_type, GammaLut>(g));
}
//--------------------------------------------------------------------
template<class RenBuf2>
void copy_from(const RenBuf2& from,
int xdst, int ydst,
int xsrc, int ysrc,
unsigned len)
{
if (const int8u* p = from.row_ptr(ysrc))
{
memmove(m_rbuf->row_ptr(xdst, ydst, len) + xdst * pix_width,
p + xsrc * pix_width,
len * pix_width);
}
}
//--------------------------------------------------------------------
// Blend from an RGBA surface.
template<class SrcPixelFormatRenderer>
void blend_from(const SrcPixelFormatRenderer& from,
int xdst, int ydst,
int xsrc, int ysrc,
unsigned len,
int8u cover)
{
typedef typename SrcPixelFormatRenderer::pixel_type src_pixel_type;
typedef typename SrcPixelFormatRenderer::order_type src_order;
if (const src_pixel_type* psrc = from.pix_value_ptr(xsrc, ysrc))
{
pixel_type* pdst = pix_value_ptr(xdst, ydst, len);
if (cover == cover_mask)
{
do
{
value_type alpha = psrc->c[src_order::A];
if (alpha > color_type::empty_value())
{
if (alpha >= color_type::full_value())
{
pdst->c[order_type::R] = psrc->c[src_order::R];
pdst->c[order_type::G] = psrc->c[src_order::G];
pdst->c[order_type::B] = psrc->c[src_order::B];
}
else
{
blend_pix(pdst,
psrc->c[src_order::R],
psrc->c[src_order::G],
psrc->c[src_order::B],
alpha);
}
}
psrc = psrc->next();
pdst = pdst->next();
}
while(--len);
}
else
{
do
{
copy_or_blend_pix(pdst, psrc->get(), cover);
psrc = psrc->next();
pdst = pdst->next();
}
while (--len);
}
}
}
//--------------------------------------------------------------------
// Blend from single color, using grayscale surface as alpha channel.
template<class SrcPixelFormatRenderer>
void blend_from_color(const SrcPixelFormatRenderer& from,
const color_type& color,
int xdst, int ydst,
int xsrc, int ysrc,
unsigned len,
int8u cover)
{
typedef typename SrcPixelFormatRenderer::pixel_type src_pixel_type;
typedef typename SrcPixelFormatRenderer::color_type src_color_type;
if (const src_pixel_type* psrc = from.pix_value_ptr(xsrc, ysrc))
{
pixel_type* pdst = pix_value_ptr(xdst, ydst, len);
do
{
copy_or_blend_pix(pdst, color, src_color_type::scale_cover(cover, psrc->c[0]));
psrc = psrc->next();
pdst = pdst->next();
}
while (--len);
}
}
//--------------------------------------------------------------------
// Blend from color table, using grayscale surface as indexes into table.
// Obviously, this only works for integer value types.
template<class SrcPixelFormatRenderer>
void blend_from_lut(const SrcPixelFormatRenderer& from,
const color_type* color_lut,
int xdst, int ydst,
int xsrc, int ysrc,
unsigned len,
int8u cover)
{
typedef typename SrcPixelFormatRenderer::pixel_type src_pixel_type;
if (const src_pixel_type* psrc = from.pix_value_ptr(xsrc, ysrc))
{
pixel_type* pdst = pix_value_ptr(xdst, ydst, len);
if (cover == cover_mask)
{
do
{
const color_type& color = color_lut[psrc->c[0]];
blend_pix(pdst, color);
psrc = psrc->next();
pdst = pdst->next();
}
while(--len);
}
else
{
do
{
copy_or_blend_pix(pdst, color_lut[psrc->c[0]], cover);
psrc = psrc->next();
pdst = pdst->next();
}
while(--len);
}
}
}
private:
rbuf_type* m_rbuf;
Blender m_blender;
};
//-----------------------------------------------------------------------
typedef blender_rgb<rgba8, order_rgb> blender_rgb24;
typedef blender_rgb<rgba8, order_bgr> blender_bgr24;
typedef blender_rgb<srgba8, order_rgb> blender_srgb24;
typedef blender_rgb<srgba8, order_bgr> blender_sbgr24;
typedef blender_rgb<rgba16, order_rgb> blender_rgb48;
typedef blender_rgb<rgba16, order_bgr> blender_bgr48;
typedef blender_rgb<rgba32, order_rgb> blender_rgb96;
typedef blender_rgb<rgba32, order_bgr> blender_bgr96;
typedef blender_rgb_pre<rgba8, order_rgb> blender_rgb24_pre;
typedef blender_rgb_pre<rgba8, order_bgr> blender_bgr24_pre;
typedef blender_rgb_pre<srgba8, order_rgb> blender_srgb24_pre;
typedef blender_rgb_pre<srgba8, order_bgr> blender_sbgr24_pre;
typedef blender_rgb_pre<rgba16, order_rgb> blender_rgb48_pre;
typedef blender_rgb_pre<rgba16, order_bgr> blender_bgr48_pre;
typedef blender_rgb_pre<rgba32, order_rgb> blender_rgb96_pre;
typedef blender_rgb_pre<rgba32, order_bgr> blender_bgr96_pre;
typedef pixfmt_alpha_blend_rgb<blender_rgb24, rendering_buffer, 3> pixfmt_rgb24;
typedef pixfmt_alpha_blend_rgb<blender_bgr24, rendering_buffer, 3> pixfmt_bgr24;
typedef pixfmt_alpha_blend_rgb<blender_srgb24, rendering_buffer, 3> pixfmt_srgb24;
typedef pixfmt_alpha_blend_rgb<blender_sbgr24, rendering_buffer, 3> pixfmt_sbgr24;
typedef pixfmt_alpha_blend_rgb<blender_rgb48, rendering_buffer, 3> pixfmt_rgb48;
typedef pixfmt_alpha_blend_rgb<blender_bgr48, rendering_buffer, 3> pixfmt_bgr48;
typedef pixfmt_alpha_blend_rgb<blender_rgb96, rendering_buffer, 3> pixfmt_rgb96;
typedef pixfmt_alpha_blend_rgb<blender_bgr96, rendering_buffer, 3> pixfmt_bgr96;
typedef pixfmt_alpha_blend_rgb<blender_rgb24_pre, rendering_buffer, 3> pixfmt_rgb24_pre;
typedef pixfmt_alpha_blend_rgb<blender_bgr24_pre, rendering_buffer, 3> pixfmt_bgr24_pre;
typedef pixfmt_alpha_blend_rgb<blender_srgb24_pre, rendering_buffer, 3> pixfmt_srgb24_pre;
typedef pixfmt_alpha_blend_rgb<blender_sbgr24_pre, rendering_buffer, 3> pixfmt_sbgr24_pre;
typedef pixfmt_alpha_blend_rgb<blender_rgb48_pre, rendering_buffer, 3> pixfmt_rgb48_pre;
typedef pixfmt_alpha_blend_rgb<blender_bgr48_pre, rendering_buffer, 3> pixfmt_bgr48_pre;
typedef pixfmt_alpha_blend_rgb<blender_rgb96_pre, rendering_buffer, 3> pixfmt_rgb96_pre;
typedef pixfmt_alpha_blend_rgb<blender_bgr96_pre, rendering_buffer, 3> pixfmt_bgr96_pre;
typedef pixfmt_alpha_blend_rgb<blender_rgb24, rendering_buffer, 4, 0> pixfmt_rgbx32;
typedef pixfmt_alpha_blend_rgb<blender_rgb24, rendering_buffer, 4, 1> pixfmt_xrgb32;
typedef pixfmt_alpha_blend_rgb<blender_bgr24, rendering_buffer, 4, 1> pixfmt_xbgr32;
typedef pixfmt_alpha_blend_rgb<blender_bgr24, rendering_buffer, 4, 0> pixfmt_bgrx32;
typedef pixfmt_alpha_blend_rgb<blender_srgb24, rendering_buffer, 4, 0> pixfmt_srgbx32;
typedef pixfmt_alpha_blend_rgb<blender_srgb24, rendering_buffer, 4, 1> pixfmt_sxrgb32;
typedef pixfmt_alpha_blend_rgb<blender_sbgr24, rendering_buffer, 4, 1> pixfmt_sxbgr32;
typedef pixfmt_alpha_blend_rgb<blender_sbgr24, rendering_buffer, 4, 0> pixfmt_sbgrx32;
typedef pixfmt_alpha_blend_rgb<blender_rgb48, rendering_buffer, 4, 0> pixfmt_rgbx64;
typedef pixfmt_alpha_blend_rgb<blender_rgb48, rendering_buffer, 4, 1> pixfmt_xrgb64;
typedef pixfmt_alpha_blend_rgb<blender_bgr48, rendering_buffer, 4, 1> pixfmt_xbgr64;
typedef pixfmt_alpha_blend_rgb<blender_bgr48, rendering_buffer, 4, 0> pixfmt_bgrx64;
typedef pixfmt_alpha_blend_rgb<blender_rgb96, rendering_buffer, 4, 0> pixfmt_rgbx128;
typedef pixfmt_alpha_blend_rgb<blender_rgb96, rendering_buffer, 4, 1> pixfmt_xrgb128;
typedef pixfmt_alpha_blend_rgb<blender_bgr96, rendering_buffer, 4, 1> pixfmt_xbgr128;
typedef pixfmt_alpha_blend_rgb<blender_bgr96, rendering_buffer, 4, 0> pixfmt_bgrx128;
typedef pixfmt_alpha_blend_rgb<blender_rgb24_pre, rendering_buffer, 4, 0> pixfmt_rgbx32_pre;
typedef pixfmt_alpha_blend_rgb<blender_rgb24_pre, rendering_buffer, 4, 1> pixfmt_xrgb32_pre;
typedef pixfmt_alpha_blend_rgb<blender_bgr24_pre, rendering_buffer, 4, 1> pixfmt_xbgr32_pre;
typedef pixfmt_alpha_blend_rgb<blender_bgr24_pre, rendering_buffer, 4, 0> pixfmt_bgrx32_pre;
typedef pixfmt_alpha_blend_rgb<blender_srgb24_pre, rendering_buffer, 4, 0> pixfmt_srgbx32_pre;
typedef pixfmt_alpha_blend_rgb<blender_srgb24_pre, rendering_buffer, 4, 1> pixfmt_sxrgb32_pre;
typedef pixfmt_alpha_blend_rgb<blender_sbgr24_pre, rendering_buffer, 4, 1> pixfmt_sxbgr32_pre;
typedef pixfmt_alpha_blend_rgb<blender_sbgr24_pre, rendering_buffer, 4, 0> pixfmt_sbgrx32_pre;
typedef pixfmt_alpha_blend_rgb<blender_rgb48_pre, rendering_buffer, 4, 0> pixfmt_rgbx64_pre;
typedef pixfmt_alpha_blend_rgb<blender_rgb48_pre, rendering_buffer, 4, 1> pixfmt_xrgb64_pre;
typedef pixfmt_alpha_blend_rgb<blender_bgr48_pre, rendering_buffer, 4, 1> pixfmt_xbgr64_pre;
typedef pixfmt_alpha_blend_rgb<blender_bgr48_pre, rendering_buffer, 4, 0> pixfmt_bgrx64_pre;
typedef pixfmt_alpha_blend_rgb<blender_rgb96_pre, rendering_buffer, 4, 0> pixfmt_rgbx128_pre;
typedef pixfmt_alpha_blend_rgb<blender_rgb96_pre, rendering_buffer, 4, 1> pixfmt_xrgb128_pre;
typedef pixfmt_alpha_blend_rgb<blender_bgr96_pre, rendering_buffer, 4, 1> pixfmt_xbgr128_pre;
typedef pixfmt_alpha_blend_rgb<blender_bgr96_pre, rendering_buffer, 4, 0> pixfmt_bgrx128_pre;
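//-----------------------------------------------------------------------
// Illustrative usage sketch (buffer size, stride and names are arbitrary
// choices, not requirements of the library): a pixel format adapts a raw
// byte buffer, wrapped in a rendering_buffer, to the blending interface
// used by the renderers.
//
//     unsigned w = 320, h = 240;
//     agg::int8u* buf = new agg::int8u[w * h * 3];     // 3 bytes per pixel
//     agg::rendering_buffer rbuf(buf, w, h, w * 3);    // stride in bytes
//     agg::pixfmt_rgb24 pixf(rbuf);
//     pixf.copy_hline(0, 0, w, agg::rgba8(255, 0, 0)); // paint top row red
//     delete [] buf;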
//-----------------------------------------------------pixfmt_rgb24_gamma
template<class Gamma> class pixfmt_rgb24_gamma :
public pixfmt_alpha_blend_rgb<blender_rgb_gamma<rgba8, order_rgb, Gamma>, rendering_buffer, 3>
{
public:
pixfmt_rgb24_gamma(rendering_buffer& rb, const Gamma& g) :
pixfmt_alpha_blend_rgb<blender_rgb_gamma<rgba8, order_rgb, Gamma>, rendering_buffer, 3>(rb)
{
this->blender().gamma(g);
}
};
//-----------------------------------------------------pixfmt_srgb24_gamma
template<class Gamma> class pixfmt_srgb24_gamma :
public pixfmt_alpha_blend_rgb<blender_rgb_gamma<srgba8, order_rgb, Gamma>, rendering_buffer, 3>
{
public:
pixfmt_srgb24_gamma(rendering_buffer& rb, const Gamma& g) :
pixfmt_alpha_blend_rgb<blender_rgb_gamma<srgba8, order_rgb, Gamma>, rendering_buffer, 3>(rb)
{
this->blender().gamma(g);
}
};
//-----------------------------------------------------pixfmt_bgr24_gamma
template<class Gamma> class pixfmt_bgr24_gamma :
public pixfmt_alpha_blend_rgb<blender_rgb_gamma<rgba8, order_bgr, Gamma>, rendering_buffer, 3>
{
public:
pixfmt_bgr24_gamma(rendering_buffer& rb, const Gamma& g) :
pixfmt_alpha_blend_rgb<blender_rgb_gamma<rgba8, order_bgr, Gamma>, rendering_buffer, 3>(rb)
{
this->blender().gamma(g);
}
};
//-----------------------------------------------------pixfmt_sbgr24_gamma
template<class Gamma> class pixfmt_sbgr24_gamma :
public pixfmt_alpha_blend_rgb<blender_rgb_gamma<srgba8, order_bgr, Gamma>, rendering_buffer, 3>
{
public:
pixfmt_sbgr24_gamma(rendering_buffer& rb, const Gamma& g) :
pixfmt_alpha_blend_rgb<blender_rgb_gamma<srgba8, order_bgr, Gamma>, rendering_buffer, 3>(rb)
{
this->blender().gamma(g);
}
};
//-----------------------------------------------------pixfmt_rgb48_gamma
template<class Gamma> class pixfmt_rgb48_gamma :
public pixfmt_alpha_blend_rgb<blender_rgb_gamma<rgba16, order_rgb, Gamma>, rendering_buffer, 3>
{
public:
pixfmt_rgb48_gamma(rendering_buffer& rb, const Gamma& g) :
pixfmt_alpha_blend_rgb<blender_rgb_gamma<rgba16, order_rgb, Gamma>, rendering_buffer, 3>(rb)
{
this->blender().gamma(g);
}
};
//-----------------------------------------------------pixfmt_bgr48_gamma
template<class Gamma> class pixfmt_bgr48_gamma :
public pixfmt_alpha_blend_rgb<blender_rgb_gamma<rgba16, order_bgr, Gamma>, rendering_buffer, 3>
{
public:
pixfmt_bgr48_gamma(rendering_buffer& rb, const Gamma& g) :
pixfmt_alpha_blend_rgb<blender_rgb_gamma<rgba16, order_bgr, Gamma>, rendering_buffer, 3>(rb)
{
this->blender().gamma(g);
}
};
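//-----------------------------------------------------------------------
// Illustrative sketch: the Gamma parameter is typically a gamma_lut from
// agg_gamma_lut.h (not included by this header). Given a rendering_buffer
// rbuf attached to the pixel data:
//
//     typedef agg::gamma_lut<agg::int8u, agg::int8u> gamma_type;
//     gamma_type gamma(2.2);
//     agg::pixfmt_bgr24_gamma<gamma_type> pixf(rbuf, gamma);
//
// The blender converts both operands through gamma.dir() before
// interpolating and maps the result back with gamma.inv(), so blending
// happens in (approximately) linear light.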
}
#endif
| 0 |
D://workCode//uploadProject\awtk\3rd\agg | D://workCode//uploadProject\awtk\3rd\agg\include\agg_pixfmt_rgba.h | //----------------------------------------------------------------------------
// Anti-Grain Geometry - Version 2.4
// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
//
// Permission to copy, use, modify, sell and distribute this software
// is granted provided this copyright notice appears in all copies.
// This software is provided "as is" without express or implied
// warranty, and with no claim as to its suitability for any purpose.
//
//----------------------------------------------------------------------------
// Contact: mcseem@antigrain.com
// mcseemagg@yahoo.com
// http://www.antigrain.com
//----------------------------------------------------------------------------
//
// Adaptation for high precision colors has been sponsored by
// Liberty Technology Systems, Inc., visit http://lib-sys.com
//
// Liberty Technology Systems, Inc. is the provider of
// PostScript and PDF technology for software developers.
//
//----------------------------------------------------------------------------
#ifndef AGG_PIXFMT_RGBA_INCLUDED
#define AGG_PIXFMT_RGBA_INCLUDED
#include <string.h>
#include <math.h>
#include "agg_pixfmt_base.h"
#include "agg_rendering_buffer.h"
namespace agg
{
template<class T> inline T sd_min(T a, T b) { return (a < b) ? a : b; }
template<class T> inline T sd_max(T a, T b) { return (a > b) ? a : b; }
inline rgba & clip(rgba & c)
{
if (c.a > 1) c.a = 1; else if (c.a < 0) c.a = 0;
if (c.r > c.a) c.r = c.a; else if (c.r < 0) c.r = 0;
if (c.g > c.a) c.g = c.a; else if (c.g < 0) c.g = 0;
if (c.b > c.a) c.b = c.a; else if (c.b < 0) c.b = 0;
return c;
}
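// For example (illustrative values): an additive blend may leave a
// premultiplied pixel at (0.9, 0.2, 0.1, 0.7); clip() pulls red down to
// the alpha value, giving (0.7, 0.2, 0.1, 0.7), so no channel ever
// exceeds what full coverage at that alpha allows.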
//=========================================================multiplier_rgba
template<class ColorT, class Order>
struct multiplier_rgba
{
typedef ColorT color_type;
typedef typename color_type::value_type value_type;
//--------------------------------------------------------------------
static AGG_INLINE void premultiply(value_type* p)
{
value_type a = p[Order::A];
p[Order::R] = color_type::multiply(p[Order::R], a);
p[Order::G] = color_type::multiply(p[Order::G], a);
p[Order::B] = color_type::multiply(p[Order::B], a);
}
//--------------------------------------------------------------------
static AGG_INLINE void demultiply(value_type* p)
{
value_type a = p[Order::A];
p[Order::R] = color_type::demultiply(p[Order::R], a);
p[Order::G] = color_type::demultiply(p[Order::G], a);
p[Order::B] = color_type::demultiply(p[Order::B], a);
}
};
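// For example (illustrative values): a half-transparent pure red stored
// as plain 8-bit RGBA (255, 0, 0, 128) premultiplies to roughly
// (128, 0, 0, 128); demultiply() undoes that scaling for the colour
// channels (up to rounding) and leaves alpha untouched.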
//=====================================================apply_gamma_dir_rgba
template<class ColorT, class Order, class GammaLut>
class apply_gamma_dir_rgba
{
public:
typedef ColorT color_type;
typedef typename color_type::value_type value_type;
apply_gamma_dir_rgba(const GammaLut& gamma) : m_gamma(gamma) {}
AGG_INLINE void operator () (value_type* p)
{
p[Order::R] = m_gamma.dir(p[Order::R]);
p[Order::G] = m_gamma.dir(p[Order::G]);
p[Order::B] = m_gamma.dir(p[Order::B]);
}
private:
const GammaLut& m_gamma;
};
//=====================================================apply_gamma_inv_rgba
template<class ColorT, class Order, class GammaLut> class apply_gamma_inv_rgba
{
public:
typedef ColorT color_type;
typedef typename color_type::value_type value_type;
apply_gamma_inv_rgba(const GammaLut& gamma) : m_gamma(gamma) {}
AGG_INLINE void operator () (value_type* p)
{
p[Order::R] = m_gamma.inv(p[Order::R]);
p[Order::G] = m_gamma.inv(p[Order::G]);
p[Order::B] = m_gamma.inv(p[Order::B]);
}
private:
const GammaLut& m_gamma;
};
template<class ColorT, class Order>
struct conv_rgba_pre
{
typedef ColorT color_type;
typedef Order order_type;
typedef typename color_type::value_type value_type;
//--------------------------------------------------------------------
static AGG_INLINE void set_plain_color(value_type* p, color_type c)
{
c.premultiply();
p[Order::R] = c.r;
p[Order::G] = c.g;
p[Order::B] = c.b;
p[Order::A] = c.a;
}
//--------------------------------------------------------------------
static AGG_INLINE color_type get_plain_color(const value_type* p)
{
return color_type(
p[Order::R],
p[Order::G],
p[Order::B],
p[Order::A]).demultiply();
}
};
template<class ColorT, class Order>
struct conv_rgba_plain
{
typedef ColorT color_type;
typedef Order order_type;
typedef typename color_type::value_type value_type;
//--------------------------------------------------------------------
static AGG_INLINE void set_plain_color(value_type* p, color_type c)
{
p[Order::R] = c.r;
p[Order::G] = c.g;
p[Order::B] = c.b;
p[Order::A] = c.a;
}
//--------------------------------------------------------------------
static AGG_INLINE color_type get_plain_color(const value_type* p)
{
return color_type(
p[Order::R],
p[Order::G],
p[Order::B],
p[Order::A]);
}
};
//=============================================================blender_rgba
// Blends "plain" (i.e. non-premultiplied) colors into a premultiplied buffer.
template<class ColorT, class Order>
struct blender_rgba : conv_rgba_pre<ColorT, Order>
{
typedef ColorT color_type;
typedef Order order_type;
typedef typename color_type::value_type value_type;
typedef typename color_type::calc_type calc_type;
typedef typename color_type::long_type long_type;
// Blend pixels using the non-premultiplied form of Alvy-Ray Smith's
// compositing function. Since the render buffer is in fact premultiplied
// we omit the initial premultiplication and final demultiplication.
//--------------------------------------------------------------------
static AGG_INLINE void blend_pix(value_type* p,
value_type cr, value_type cg, value_type cb, value_type alpha, cover_type cover)
{
blend_pix(p, cr, cg, cb, color_type::mult_cover(alpha, cover));
}
//--------------------------------------------------------------------
static AGG_INLINE void blend_pix(value_type* p,
value_type cr, value_type cg, value_type cb, value_type alpha)
{
p[Order::R] = color_type::lerp(p[Order::R], cr, alpha);
p[Order::G] = color_type::lerp(p[Order::G], cg, alpha);
p[Order::B] = color_type::lerp(p[Order::B], cb, alpha);
p[Order::A] = color_type::prelerp(p[Order::A], alpha, alpha);
}
};
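// Written out in the SVG notation used by the compositing operators
// below (Sc = plain source colour, Sa = source alpha): lerp gives
//     Dca' = Dca + (Sc - Dca).Sa = Sc.Sa + Dca.(1 - Sa)
// and prelerp gives
//     Da'  = Da + Sa - Da.Sa
// which is exactly the src-over rule for a premultiplied destination.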
//========================================================blender_rgba_pre
// Blends premultiplied colors into a premultiplied buffer.
template<class ColorT, class Order>
struct blender_rgba_pre : conv_rgba_pre<ColorT, Order>
{
typedef ColorT color_type;
typedef Order order_type;
typedef typename color_type::value_type value_type;
typedef typename color_type::calc_type calc_type;
typedef typename color_type::long_type long_type;
// Blend pixels using the premultiplied form of Alvy-Ray Smith's
// compositing function.
//--------------------------------------------------------------------
static AGG_INLINE void blend_pix(value_type* p,
value_type cr, value_type cg, value_type cb, value_type alpha, cover_type cover)
{
blend_pix(p,
color_type::mult_cover(cr, cover),
color_type::mult_cover(cg, cover),
color_type::mult_cover(cb, cover),
color_type::mult_cover(alpha, cover));
}
//--------------------------------------------------------------------
static AGG_INLINE void blend_pix(value_type* p,
value_type cr, value_type cg, value_type cb, value_type alpha)
{
p[Order::R] = color_type::prelerp(p[Order::R], cr, alpha);
p[Order::G] = color_type::prelerp(p[Order::G], cg, alpha);
p[Order::B] = color_type::prelerp(p[Order::B], cb, alpha);
p[Order::A] = color_type::prelerp(p[Order::A], alpha, alpha);
}
};
//======================================================blender_rgba_plain
// Blends "plain" (non-premultiplied) colors into a plain (non-premultiplied) buffer.
template<class ColorT, class Order>
struct blender_rgba_plain : conv_rgba_plain<ColorT, Order>
{
typedef ColorT color_type;
typedef Order order_type;
typedef typename color_type::value_type value_type;
typedef typename color_type::calc_type calc_type;
typedef typename color_type::long_type long_type;
// Blend pixels using the non-premultiplied form of Alvy-Ray Smith's
// compositing function.
//--------------------------------------------------------------------
static AGG_INLINE void blend_pix(value_type* p,
value_type cr, value_type cg, value_type cb, value_type alpha, cover_type cover)
{
blend_pix(p, cr, cg, cb, color_type::mult_cover(alpha, cover));
}
//--------------------------------------------------------------------
static AGG_INLINE void blend_pix(value_type* p,
value_type cr, value_type cg, value_type cb, value_type alpha)
{
if (alpha > color_type::empty_value())
{
calc_type a = p[Order::A];
calc_type r = color_type::multiply(p[Order::R], a);
calc_type g = color_type::multiply(p[Order::G], a);
calc_type b = color_type::multiply(p[Order::B], a);
p[Order::R] = color_type::lerp(r, cr, alpha);
p[Order::G] = color_type::lerp(g, cg, alpha);
p[Order::B] = color_type::lerp(b, cb, alpha);
p[Order::A] = color_type::prelerp(a, alpha, alpha);
multiplier_rgba<ColorT, Order>::demultiply(p);
}
}
};
// SVG compositing operations.
// For specifications, see http://www.w3.org/TR/SVGCompositing/
//=========================================================comp_op_rgba_clear
template<class ColorT, class Order>
struct comp_op_rgba_clear : blender_base<ColorT, Order>
{
typedef ColorT color_type;
typedef typename color_type::value_type value_type;
using blender_base<ColorT, Order>::get;
using blender_base<ColorT, Order>::set;
// Dca' = 0
// Da' = 0
static AGG_INLINE void blend_pix(value_type* p,
value_type r, value_type g, value_type b, value_type a, cover_type cover)
{
if (cover >= cover_full)
{
p[0] = p[1] = p[2] = p[3] = color_type::empty_value();
}
else if (cover > cover_none)
{
set(p, get(p, cover_full - cover));
}
}
};
//===========================================================comp_op_rgba_src
template<class ColorT, class Order>
struct comp_op_rgba_src : blender_base<ColorT, Order>
{
typedef ColorT color_type;
typedef typename color_type::value_type value_type;
using blender_base<ColorT, Order>::get;
using blender_base<ColorT, Order>::set;
// Dca' = Sca
// Da' = Sa
static AGG_INLINE void blend_pix(value_type* p,
value_type r, value_type g, value_type b, value_type a, cover_type cover)
{
if (cover >= cover_full)
{
set(p, r, g, b, a);
}
else
{
rgba s = get(r, g, b, a, cover);
rgba d = get(p, cover_full - cover);
d.r += s.r;
d.g += s.g;
d.b += s.b;
d.a += s.a;
set(p, d);
}
}
};
//===========================================================comp_op_rgba_dst
template<class ColorT, class Order>
struct comp_op_rgba_dst : blender_base<ColorT, Order>
{
typedef ColorT color_type;
typedef typename color_type::value_type value_type;
// Dca' = Dca.Sa + Dca.(1 - Sa) = Dca
// Da' = Da.Sa + Da.(1 - Sa) = Da
static AGG_INLINE void blend_pix(value_type* p,
value_type r, value_type g, value_type b, value_type a, cover_type cover)
{
// Well, that was easy!
}
};
//======================================================comp_op_rgba_src_over
template<class ColorT, class Order>
struct comp_op_rgba_src_over : blender_base<ColorT, Order>
{
typedef ColorT color_type;
typedef typename color_type::value_type value_type;
using blender_base<ColorT, Order>::get;
using blender_base<ColorT, Order>::set;
// Dca' = Sca + Dca.(1 - Sa) = Dca + Sca - Dca.Sa
// Da' = Sa + Da - Sa.Da
static AGG_INLINE void blend_pix(value_type* p,
value_type r, value_type g, value_type b, value_type a, cover_type cover)
{
#if 1
blender_rgba_pre<ColorT, Order>::blend_pix(p, r, g, b, a, cover);
#else
rgba s = get(r, g, b, a, cover);
rgba d = get(p);
d.r += s.r - d.r * s.a;
d.g += s.g - d.g * s.a;
d.b += s.b - d.b * s.a;
d.a += s.a - d.a * s.a;
set(p, d);
#endif
}
};
//======================================================comp_op_rgba_dst_over
template<class ColorT, class Order>
struct comp_op_rgba_dst_over : blender_base<ColorT, Order>
{
typedef ColorT color_type;
typedef typename color_type::value_type value_type;
using blender_base<ColorT, Order>::get;
using blender_base<ColorT, Order>::set;
// Dca' = Dca + Sca.(1 - Da)
// Da' = Sa + Da - Sa.Da = Da + Sa.(1 - Da)
static AGG_INLINE void blend_pix(value_type* p,
value_type r, value_type g, value_type b, value_type a, cover_type cover)
{
rgba s = get(r, g, b, a, cover);
rgba d = get(p);
double d1a = 1 - d.a;
d.r += s.r * d1a;
d.g += s.g * d1a;
d.b += s.b * d1a;
d.a += s.a * d1a;
set(p, d);
}
};
//======================================================comp_op_rgba_src_in
template<class ColorT, class Order>
struct comp_op_rgba_src_in : blender_base<ColorT, Order>
{
typedef ColorT color_type;
typedef typename color_type::value_type value_type;
using blender_base<ColorT, Order>::get;
using blender_base<ColorT, Order>::set;
// Dca' = Sca.Da
// Da' = Sa.Da
static AGG_INLINE void blend_pix(value_type* p,
value_type r, value_type g, value_type b, value_type a, cover_type cover)
{
double da = ColorT::to_double(p[Order::A]);
if (da > 0)
{
rgba s = get(r, g, b, a, cover);
rgba d = get(p, cover_full - cover);
d.r += s.r * da;
d.g += s.g * da;
d.b += s.b * da;
d.a += s.a * da;
set(p, d);
}
}
};
//======================================================comp_op_rgba_dst_in
template<class ColorT, class Order>
struct comp_op_rgba_dst_in : blender_base<ColorT, Order>
{
typedef ColorT color_type;
typedef typename color_type::value_type value_type;
using blender_base<ColorT, Order>::get;
using blender_base<ColorT, Order>::set;
// Dca' = Dca.Sa
// Da' = Sa.Da
static AGG_INLINE void blend_pix(value_type* p,
value_type r, value_type g, value_type b, value_type a, cover_type cover)
{
double sa = ColorT::to_double(a);
rgba d = get(p, cover_full - cover);
rgba d2 = get(p, cover);
d.r += d2.r * sa;
d.g += d2.g * sa;
d.b += d2.b * sa;
d.a += d2.a * sa;
set(p, d);
}
};
//======================================================comp_op_rgba_src_out
template<class ColorT, class Order>
struct comp_op_rgba_src_out : blender_base<ColorT, Order>
{
typedef ColorT color_type;
typedef typename color_type::value_type value_type;
using blender_base<ColorT, Order>::get;
using blender_base<ColorT, Order>::set;
// Dca' = Sca.(1 - Da)
// Da' = Sa.(1 - Da)
static AGG_INLINE void blend_pix(value_type* p,
value_type r, value_type g, value_type b, value_type a, cover_type cover)
{
rgba s = get(r, g, b, a, cover);
rgba d = get(p, cover_full - cover);
double d1a = 1 - ColorT::to_double(p[Order::A]);
d.r += s.r * d1a;
d.g += s.g * d1a;
d.b += s.b * d1a;
d.a += s.a * d1a;
set(p, d);
}
};
//======================================================comp_op_rgba_dst_out
template<class ColorT, class Order>
struct comp_op_rgba_dst_out : blender_base<ColorT, Order>
{
typedef ColorT color_type;
typedef typename color_type::value_type value_type;
using blender_base<ColorT, Order>::get;
using blender_base<ColorT, Order>::set;
// Dca' = Dca.(1 - Sa)
// Da' = Da.(1 - Sa)
static AGG_INLINE void blend_pix(value_type* p,
value_type r, value_type g, value_type b, value_type a, cover_type cover)
{
rgba d = get(p, cover_full - cover);
rgba dc = get(p, cover);
double s1a = 1 - ColorT::to_double(a);
d.r += dc.r * s1a;
d.g += dc.g * s1a;
d.b += dc.b * s1a;
d.a += dc.a * s1a;
set(p, d);
}
};
//=====================================================comp_op_rgba_src_atop
template<class ColorT, class Order>
struct comp_op_rgba_src_atop : blender_base<ColorT, Order>
{
typedef ColorT color_type;
typedef typename color_type::value_type value_type;
using blender_base<ColorT, Order>::get;
using blender_base<ColorT, Order>::set;
// Dca' = Sca.Da + Dca.(1 - Sa)
// Da' = Da
static AGG_INLINE void blend_pix(value_type* p,
value_type r, value_type g, value_type b, value_type a, cover_type cover)
{
rgba s = get(r, g, b, a, cover);
rgba d = get(p);
double s1a = 1 - s.a;
d.r = s.r * d.a + d.r * s1a;
d.g = s.g * d.a + d.g * s1a;
d.b = s.b * d.a + d.b * s1a;
set(p, d);
}
};
//=====================================================comp_op_rgba_dst_atop
template<class ColorT, class Order>
struct comp_op_rgba_dst_atop : blender_base<ColorT, Order>
{
typedef ColorT color_type;
typedef typename color_type::value_type value_type;
using blender_base<ColorT, Order>::get;
using blender_base<ColorT, Order>::set;
// Dca' = Dca.Sa + Sca.(1 - Da)
// Da' = Sa
static AGG_INLINE void blend_pix(value_type* p,
value_type r, value_type g, value_type b, value_type a, cover_type cover)
{
rgba sc = get(r, g, b, a, cover);
rgba dc = get(p, cover);
rgba d = get(p, cover_full - cover);
double sa = ColorT::to_double(a);
double d1a = 1 - ColorT::to_double(p[Order::A]);
d.r += dc.r * sa + sc.r * d1a;
d.g += dc.g * sa + sc.g * d1a;
d.b += dc.b * sa + sc.b * d1a;
d.a += sc.a;
set(p, d);
}
};
//=========================================================comp_op_rgba_xor
template<class ColorT, class Order>
struct comp_op_rgba_xor : blender_base<ColorT, Order>
{
typedef ColorT color_type;
typedef typename color_type::value_type value_type;
using blender_base<ColorT, Order>::get;
using blender_base<ColorT, Order>::set;
// Dca' = Sca.(1 - Da) + Dca.(1 - Sa)
// Da' = Sa + Da - 2.Sa.Da
static AGG_INLINE void blend_pix(value_type* p,
value_type r, value_type g, value_type b, value_type a, cover_type cover)
{
rgba s = get(r, g, b, a, cover);
rgba d = get(p);
double s1a = 1 - s.a;
double d1a = 1 - ColorT::to_double(p[Order::A]);
d.r = s.r * d1a + d.r * s1a;
d.g = s.g * d1a + d.g * s1a;
d.b = s.b * d1a + d.b * s1a;
d.a = s.a + d.a - 2 * s.a * d.a;
set(p, d);
}
};
//=========================================================comp_op_rgba_plus
template<class ColorT, class Order>
struct comp_op_rgba_plus : blender_base<ColorT, Order>
{
typedef ColorT color_type;
typedef typename color_type::value_type value_type;
using blender_base<ColorT, Order>::get;
using blender_base<ColorT, Order>::set;
// Dca' = Sca + Dca
// Da' = Sa + Da
static AGG_INLINE void blend_pix(value_type* p,
value_type r, value_type g, value_type b, value_type a, cover_type cover)
{
rgba s = get(r, g, b, a, cover);
if (s.a > 0)
{
rgba d = get(p);
d.a = sd_min(d.a + s.a, 1.0);
d.r = sd_min(d.r + s.r, d.a);
d.g = sd_min(d.g + s.g, d.a);
d.b = sd_min(d.b + s.b, d.a);
set(p, clip(d));
}
}
};
//========================================================comp_op_rgba_minus
// Note: not included in SVG spec.
template<class ColorT, class Order>
struct comp_op_rgba_minus : blender_base<ColorT, Order>
{
typedef ColorT color_type;
typedef typename color_type::value_type value_type;
using blender_base<ColorT, Order>::get;
using blender_base<ColorT, Order>::set;
// Dca' = Dca - Sca
// Da' = 1 - (1 - Sa).(1 - Da) = Da + Sa - Sa.Da
static AGG_INLINE void blend_pix(value_type* p,
value_type r, value_type g, value_type b, value_type a, cover_type cover)
{
rgba s = get(r, g, b, a, cover);
if (s.a > 0)
{
rgba d = get(p);
d.a += s.a - s.a * d.a;
d.r = sd_max(d.r - s.r, 0.0);
d.g = sd_max(d.g - s.g, 0.0);
d.b = sd_max(d.b - s.b, 0.0);
set(p, clip(d));
}
}
};
//=====================================================comp_op_rgba_multiply
template<class ColorT, class Order>
struct comp_op_rgba_multiply : blender_base<ColorT, Order>
{
typedef ColorT color_type;
typedef typename color_type::value_type value_type;
using blender_base<ColorT, Order>::get;
using blender_base<ColorT, Order>::set;
// Dca' = Sca.Dca + Sca.(1 - Da) + Dca.(1 - Sa)
// Da' = Sa + Da - Sa.Da
static AGG_INLINE void blend_pix(value_type* p,
value_type r, value_type g, value_type b, value_type a, cover_type cover)
{
rgba s = get(r, g, b, a, cover);
if (s.a > 0)
{
rgba d = get(p);
double s1a = 1 - s.a;
double d1a = 1 - d.a;
d.r = s.r * d.r + s.r * d1a + d.r * s1a;
d.g = s.g * d.g + s.g * d1a + d.g * s1a;
d.b = s.b * d.b + s.b * d1a + d.b * s1a;
d.a += s.a - s.a * d.a;
set(p, clip(d));
}
}
};
//=====================================================comp_op_rgba_screen
template<class ColorT, class Order>
struct comp_op_rgba_screen : blender_base<ColorT, Order>
{
typedef ColorT color_type;
typedef typename color_type::value_type value_type;
using blender_base<ColorT, Order>::get;
using blender_base<ColorT, Order>::set;
// Dca' = Sca + Dca - Sca.Dca
// Da' = Sa + Da - Sa.Da
static AGG_INLINE void blend_pix(value_type* p,
value_type r, value_type g, value_type b, value_type a, cover_type cover)
{
rgba s = get(r, g, b, a, cover);
if (s.a > 0)
{
rgba d = get(p);
d.r += s.r - s.r * d.r;
d.g += s.g - s.g * d.g;
d.b += s.b - s.b * d.b;
d.a += s.a - s.a * d.a;
set(p, clip(d));
}
}
};
//=====================================================comp_op_rgba_overlay
template<class ColorT, class Order>
struct comp_op_rgba_overlay : blender_base<ColorT, Order>
{
typedef ColorT color_type;
typedef typename color_type::value_type value_type;
using blender_base<ColorT, Order>::get;
using blender_base<ColorT, Order>::set;
// if 2.Dca <= Da
// Dca' = 2.Sca.Dca + Sca.(1 - Da) + Dca.(1 - Sa)
// otherwise
// Dca' = Sa.Da - 2.(Da - Dca).(Sa - Sca) + Sca.(1 - Da) + Dca.(1 - Sa)
//
// Da' = Sa + Da - Sa.Da
static AGG_INLINE double calc(double dca, double sca, double da, double sa, double sada, double d1a, double s1a)
{
return (2 * dca <= da) ?
2 * sca * dca + sca * d1a + dca * s1a :
sada - 2 * (da - dca) * (sa - sca) + sca * d1a + dca * s1a;
}
static AGG_INLINE void blend_pix(value_type* p,
value_type r, value_type g, value_type b, value_type a, cover_type cover)
{
rgba s = get(r, g, b, a, cover);
if (s.a > 0)
{
rgba d = get(p);
double d1a = 1 - d.a;
double s1a = 1 - s.a;
double sada = s.a * d.a;
d.r = calc(d.r, s.r, d.a, s.a, sada, d1a, s1a);
d.g = calc(d.g, s.g, d.a, s.a, sada, d1a, s1a);
d.b = calc(d.b, s.b, d.a, s.a, sada, d1a, s1a);
d.a += s.a - s.a * d.a;
set(p, clip(d));
}
}
};
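// Sanity check of calc() above (illustrative): for fully opaque pixels
// (Sa = Da = 1, so s1a = d1a = 0 and sada = 1) the expression reduces to
//     Dca' = 2.Sca.Dca                      when 2.Dca <= 1
//     Dca' = 1 - 2.(1 - Dca).(1 - Sca)      otherwise
// which is the familiar overlay formula for plain colours.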
//=====================================================comp_op_rgba_darken
template<class ColorT, class Order>
struct comp_op_rgba_darken : blender_base<ColorT, Order>
{
typedef ColorT color_type;
typedef typename color_type::value_type value_type;
using blender_base<ColorT, Order>::get;
using blender_base<ColorT, Order>::set;
// Dca' = min(Sca.Da, Dca.Sa) + Sca.(1 - Da) + Dca.(1 - Sa)
// Da' = Sa + Da - Sa.Da
static AGG_INLINE void blend_pix(value_type* p,
value_type r, value_type g, value_type b, value_type a, cover_type cover)
{
rgba s = get(r, g, b, a, cover);
if (s.a > 0)
{
rgba d = get(p);
double d1a = 1 - d.a;
double s1a = 1 - s.a;
d.r = sd_min(s.r * d.a, d.r * s.a) + s.r * d1a + d.r * s1a;
d.g = sd_min(s.g * d.a, d.g * s.a) + s.g * d1a + d.g * s1a;
d.b = sd_min(s.b * d.a, d.b * s.a) + s.b * d1a + d.b * s1a;
d.a += s.a - s.a * d.a;
set(p, clip(d));
}
}
};
//=====================================================comp_op_rgba_lighten
template<class ColorT, class Order>
struct comp_op_rgba_lighten : blender_base<ColorT, Order>
{
typedef ColorT color_type;
typedef typename color_type::value_type value_type;
using blender_base<ColorT, Order>::get;
using blender_base<ColorT, Order>::set;
// Dca' = max(Sca.Da, Dca.Sa) + Sca.(1 - Da) + Dca.(1 - Sa)
// Da' = Sa + Da - Sa.Da
static AGG_INLINE void blend_pix(value_type* p,
value_type r, value_type g, value_type b, value_type a, cover_type cover)
{
rgba s = get(r, g, b, a, cover);
if (s.a > 0)
{
rgba d = get(p);
double d1a = 1 - d.a;
double s1a = 1 - s.a;
d.r = sd_max(s.r * d.a, d.r * s.a) + s.r * d1a + d.r * s1a;
d.g = sd_max(s.g * d.a, d.g * s.a) + s.g * d1a + d.g * s1a;
d.b = sd_max(s.b * d.a, d.b * s.a) + s.b * d1a + d.b * s1a;
d.a += s.a - s.a * d.a;
set(p, clip(d));
}
}
};
//=====================================================comp_op_rgba_color_dodge
template<class ColorT, class Order>
struct comp_op_rgba_color_dodge : blender_base<ColorT, Order>
{
typedef ColorT color_type;
typedef typename color_type::value_type value_type;
using blender_base<ColorT, Order>::get;
using blender_base<ColorT, Order>::set;
// if Sca == Sa and Dca == 0
// Dca' = Sca.(1 - Da) + Dca.(1 - Sa) = Sca.(1 - Da)
// otherwise if Sca == Sa
// Dca' = Sa.Da + Sca.(1 - Da) + Dca.(1 - Sa)
// otherwise if Sca < Sa
// Dca' = Sa.Da.min(1, (Dca/Da).Sa/(Sa - Sca)) + Sca.(1 - Da) + Dca.(1 - Sa)
//
// Da' = Sa + Da - Sa.Da
static AGG_INLINE double calc(double dca, double sca, double da, double sa, double sada, double d1a, double s1a)
{
if (sca < sa) return sada * sd_min(1.0, (dca / da) * sa / (sa - sca)) + sca * d1a + dca * s1a;
if (dca > 0) return sada + sca * d1a + dca * s1a;
return sca * d1a;
}
static AGG_INLINE void blend_pix(value_type* p,
value_type r, value_type g, value_type b, value_type a, cover_type cover)
{
rgba s = get(r, g, b, a, cover);
if (s.a > 0)
{
rgba d = get(p);
if (d.a > 0)
{
double sada = s.a * d.a;
double s1a = 1 - s.a;
double d1a = 1 - d.a;
d.r = calc(d.r, s.r, d.a, s.a, sada, d1a, s1a);
d.g = calc(d.g, s.g, d.a, s.a, sada, d1a, s1a);
d.b = calc(d.b, s.b, d.a, s.a, sada, d1a, s1a);
d.a += s.a - s.a * d.a;
set(p, clip(d));
}
else set(p, s);
}
}
};
//=====================================================comp_op_rgba_color_burn
template<class ColorT, class Order>
struct comp_op_rgba_color_burn : blender_base<ColorT, Order>
{
typedef ColorT color_type;
typedef typename color_type::value_type value_type;
using blender_base<ColorT, Order>::get;
using blender_base<ColorT, Order>::set;
// if Sca == 0 and Dca == Da
// Dca' = Sa.Da + Dca.(1 - Sa)
// otherwise if Sca == 0
// Dca' = Dca.(1 - Sa)
// otherwise if Sca > 0
// Dca' = Sa.Da.(1 - min(1, (1 - Dca/Da).Sa/Sca)) + Sca.(1 - Da) + Dca.(1 - Sa)
static AGG_INLINE double calc(double dca, double sca, double da, double sa, double sada, double d1a, double s1a)
{
if (sca > 0) return sada * (1 - sd_min(1.0, (1 - dca / da) * sa / sca)) + sca * d1a + dca * s1a;
if (dca > da) return sada + dca * s1a;
return dca * s1a;
}
static AGG_INLINE void blend_pix(value_type* p,
value_type r, value_type g, value_type b, value_type a, cover_type cover)
{
rgba s = get(r, g, b, a, cover);
if (s.a > 0)
{
rgba d = get(p);
if (d.a > 0)
{
double sada = s.a * d.a;
double s1a = 1 - s.a;
double d1a = 1 - d.a;
d.r = calc(d.r, s.r, d.a, s.a, sada, d1a, s1a);
d.g = calc(d.g, s.g, d.a, s.a, sada, d1a, s1a);
d.b = calc(d.b, s.b, d.a, s.a, sada, d1a, s1a);
d.a += s.a - sada;
set(p, clip(d));
}
else set(p, s);
}
}
};
//=====================================================comp_op_rgba_hard_light
template<class ColorT, class Order>
struct comp_op_rgba_hard_light : blender_base<ColorT, Order>
{
typedef ColorT color_type;
typedef typename color_type::value_type value_type;
using blender_base<ColorT, Order>::get;
using blender_base<ColorT, Order>::set;
// if 2.Sca < Sa
// Dca' = 2.Sca.Dca + Sca.(1 - Da) + Dca.(1 - Sa)
// otherwise
// Dca' = Sa.Da - 2.(Da - Dca).(Sa - Sca) + Sca.(1 - Da) + Dca.(1 - Sa)
//
// Da' = Sa + Da - Sa.Da
static AGG_INLINE double calc(double dca, double sca, double da, double sa, double sada, double d1a, double s1a)
{
return (2 * sca < sa) ?
2 * sca * dca + sca * d1a + dca * s1a :
sada - 2 * (da - dca) * (sa - sca) + sca * d1a + dca * s1a;
}
static AGG_INLINE void blend_pix(value_type* p,
value_type r, value_type g, value_type b, value_type a, cover_type cover)
{
rgba s = get(r, g, b, a, cover);
if (s.a > 0)
{
rgba d = get(p);
double d1a = 1 - d.a;
double s1a = 1 - s.a;
double sada = s.a * d.a;
d.r = calc(d.r, s.r, d.a, s.a, sada, d1a, s1a);
d.g = calc(d.g, s.g, d.a, s.a, sada, d1a, s1a);
d.b = calc(d.b, s.b, d.a, s.a, sada, d1a, s1a);
d.a += s.a - sada;
set(p, clip(d));
}
}
};
//=====================================================comp_op_rgba_soft_light
template<class ColorT, class Order>
struct comp_op_rgba_soft_light : blender_base<ColorT, Order>
{
typedef ColorT color_type;
typedef typename color_type::value_type value_type;
using blender_base<ColorT, Order>::get;
using blender_base<ColorT, Order>::set;
// if 2.Sca <= Sa
// Dca' = Dca.Sa - (Sa.Da - 2.Sca.Da).Dca.Sa.(Sa.Da - Dca.Sa) + Sca.(1 - Da) + Dca.(1 - Sa)
// otherwise if 2.Sca > Sa and 4.Dca <= Da
// Dca' = Dca.Sa + (2.Sca.Da - Sa.Da).((((16.Dca.Sa - 12).Dca.Sa + 4).Dca.Da) - Dca.Da) + Sca.(1 - Da) + Dca.(1 - Sa)
// otherwise if 2.Sca > Sa and 4.Dca > Da
// Dca' = Dca.Sa + (2.Sca.Da - Sa.Da).((Dca.Sa)^0.5 - Dca.Sa) + Sca.(1 - Da) + Dca.(1 - Sa)
//
// Da' = Sa + Da - Sa.Da
static AGG_INLINE double calc(double dca, double sca, double da, double sa, double sada, double d1a, double s1a)
{
double dcasa = dca * sa;
if (2 * sca <= sa) return dcasa - (sada - 2 * sca * da) * dcasa * (sada - dcasa) + sca * d1a + dca * s1a;
if (4 * dca <= da) return dcasa + (2 * sca * da - sada) * ((((16 * dcasa - 12) * dcasa + 4) * dca * da) - dca * da) + sca * d1a + dca * s1a;
return dcasa + (2 * sca * da - sada) * (sqrt(dcasa) - dcasa) + sca * d1a + dca * s1a;
}
static AGG_INLINE void blend_pix(value_type* p,
value_type r, value_type g, value_type b, value_type a, cover_type cover)
{
rgba s = get(r, g, b, a, cover);
if (s.a > 0)
{
rgba d = get(p);
if (d.a > 0)
{
double sada = s.a * d.a;
double s1a = 1 - s.a;
double d1a = 1 - d.a;
d.r = calc(d.r, s.r, d.a, s.a, sada, d1a, s1a);
d.g = calc(d.g, s.g, d.a, s.a, sada, d1a, s1a);
d.b = calc(d.b, s.b, d.a, s.a, sada, d1a, s1a);
d.a += s.a - sada;
set(p, clip(d));
}
else set(p, s);
}
}
};
//=====================================================comp_op_rgba_difference
template<class ColorT, class Order>
struct comp_op_rgba_difference : blender_base<ColorT, Order>
{
typedef ColorT color_type;
typedef typename color_type::value_type value_type;
using blender_base<ColorT, Order>::get;
using blender_base<ColorT, Order>::set;
// Dca' = Sca + Dca - 2.min(Sca.Da, Dca.Sa)
// Da' = Sa + Da - Sa.Da
static AGG_INLINE void blend_pix(value_type* p,
value_type r, value_type g, value_type b, value_type a, cover_type cover)
{
rgba s = get(r, g, b, a, cover);
if (s.a > 0)
{
rgba d = get(p);
d.r += s.r - 2 * sd_min(s.r * d.a, d.r * s.a);
d.g += s.g - 2 * sd_min(s.g * d.a, d.g * s.a);
d.b += s.b - 2 * sd_min(s.b * d.a, d.b * s.a);
d.a += s.a - s.a * d.a;
set(p, clip(d));
}
}
};
//=====================================================comp_op_rgba_exclusion
template<class ColorT, class Order>
struct comp_op_rgba_exclusion : blender_base<ColorT, Order>
{
typedef ColorT color_type;
typedef typename color_type::value_type value_type;
using blender_base<ColorT, Order>::get;
using blender_base<ColorT, Order>::set;
// Dca' = (Sca.Da + Dca.Sa - 2.Sca.Dca) + Sca.(1 - Da) + Dca.(1 - Sa)
// Da' = Sa + Da - Sa.Da
static AGG_INLINE void blend_pix(value_type* p,
value_type r, value_type g, value_type b, value_type a, cover_type cover)
{
rgba s = get(r, g, b, a, cover);
if (s.a > 0)
{
rgba d = get(p);
double d1a = 1 - d.a;
double s1a = 1 - s.a;
d.r = (s.r * d.a + d.r * s.a - 2 * s.r * d.r) + s.r * d1a + d.r * s1a;
d.g = (s.g * d.a + d.g * s.a - 2 * s.g * d.g) + s.g * d1a + d.g * s1a;
d.b = (s.b * d.a + d.b * s.a - 2 * s.b * d.b) + s.b * d1a + d.b * s1a;
d.a += s.a - s.a * d.a;
set(p, clip(d));
}
}
};
#if 0
//=====================================================comp_op_rgba_contrast
template<class ColorT, class Order> struct comp_op_rgba_contrast
{
typedef ColorT color_type;
typedef Order order_type;
typedef typename color_type::value_type value_type;
typedef typename color_type::calc_type calc_type;
typedef typename color_type::long_type long_type;
enum base_scale_e
{
base_shift = color_type::base_shift,
base_mask = color_type::base_mask
};
static AGG_INLINE void blend_pix(value_type* p,
unsigned sr, unsigned sg, unsigned sb,
unsigned sa, unsigned cover)
{
if (cover < 255)
{
sr = (sr * cover + 255) >> 8;
sg = (sg * cover + 255) >> 8;
sb = (sb * cover + 255) >> 8;
sa = (sa * cover + 255) >> 8;
}
long_type dr = p[Order::R];
long_type dg = p[Order::G];
long_type db = p[Order::B];
int da = p[Order::A];
long_type d2a = da >> 1;
unsigned s2a = sa >> 1;
int r = (int)((((dr - d2a) * int((sr - s2a)*2 + base_mask)) >> base_shift) + d2a);
int g = (int)((((dg - d2a) * int((sg - s2a)*2 + base_mask)) >> base_shift) + d2a);
int b = (int)((((db - d2a) * int((sb - s2a)*2 + base_mask)) >> base_shift) + d2a);
r = (r < 0) ? 0 : r;
g = (g < 0) ? 0 : g;
b = (b < 0) ? 0 : b;
p[Order::R] = (value_type)((r > da) ? da : r);
p[Order::G] = (value_type)((g > da) ? da : g);
p[Order::B] = (value_type)((b > da) ? da : b);
}
};
//=====================================================comp_op_rgba_invert
template<class ColorT, class Order> struct comp_op_rgba_invert
{
typedef ColorT color_type;
typedef Order order_type;
typedef typename color_type::value_type value_type;
typedef typename color_type::calc_type calc_type;
typedef typename color_type::long_type long_type;
enum base_scale_e
{
base_shift = color_type::base_shift,
base_mask = color_type::base_mask
};
// Dca' = (Da - Dca) * Sa + Dca.(1 - Sa)
// Da' = Sa + Da - Sa.Da
static AGG_INLINE void blend_pix(value_type* p,
unsigned sr, unsigned sg, unsigned sb,
unsigned sa, unsigned cover)
{
sa = (sa * cover + 255) >> 8;
if (sa)
{
calc_type da = p[Order::A];
calc_type dr = ((da - p[Order::R]) * sa + base_mask) >> base_shift;
calc_type dg = ((da - p[Order::G]) * sa + base_mask) >> base_shift;
calc_type db = ((da - p[Order::B]) * sa + base_mask) >> base_shift;
calc_type s1a = base_mask - sa;
p[Order::R] = (value_type)(dr + ((p[Order::R] * s1a + base_mask) >> base_shift));
p[Order::G] = (value_type)(dg + ((p[Order::G] * s1a + base_mask) >> base_shift));
p[Order::B] = (value_type)(db + ((p[Order::B] * s1a + base_mask) >> base_shift));
p[Order::A] = (value_type)(sa + da - ((sa * da + base_mask) >> base_shift));
}
}
};
//=================================================comp_op_rgba_invert_rgb
template<class ColorT, class Order> struct comp_op_rgba_invert_rgb
{
typedef ColorT color_type;
typedef Order order_type;
typedef typename color_type::value_type value_type;
typedef typename color_type::calc_type calc_type;
typedef typename color_type::long_type long_type;
enum base_scale_e
{
base_shift = color_type::base_shift,
base_mask = color_type::base_mask
};
// Dca' = (Da - Dca) * Sca + Dca.(1 - Sa)
// Da' = Sa + Da - Sa.Da
static AGG_INLINE void blend_pix(value_type* p,
unsigned sr, unsigned sg, unsigned sb,
unsigned sa, unsigned cover)
{
if (cover < 255)
{
sr = (sr * cover + 255) >> 8;
sg = (sg * cover + 255) >> 8;
sb = (sb * cover + 255) >> 8;
sa = (sa * cover + 255) >> 8;
}
if (sa)
{
calc_type da = p[Order::A];
calc_type dr = ((da - p[Order::R]) * sr + base_mask) >> base_shift;
calc_type dg = ((da - p[Order::G]) * sg + base_mask) >> base_shift;
calc_type db = ((da - p[Order::B]) * sb + base_mask) >> base_shift;
calc_type s1a = base_mask - sa;
p[Order::R] = (value_type)(dr + ((p[Order::R] * s1a + base_mask) >> base_shift));
p[Order::G] = (value_type)(dg + ((p[Order::G] * s1a + base_mask) >> base_shift));
p[Order::B] = (value_type)(db + ((p[Order::B] * s1a + base_mask) >> base_shift));
p[Order::A] = (value_type)(sa + da - ((sa * da + base_mask) >> base_shift));
}
}
};
#endif
//======================================================comp_op_table_rgba
template<class ColorT, class Order> struct comp_op_table_rgba
{
typedef typename ColorT::value_type value_type;
typedef typename ColorT::calc_type calc_type;
typedef void (*comp_op_func_type)(value_type* p,
value_type cr,
value_type cg,
value_type cb,
value_type ca,
cover_type cover);
static comp_op_func_type g_comp_op_func[];
};
//==========================================================g_comp_op_func
template<class ColorT, class Order>
typename comp_op_table_rgba<ColorT, Order>::comp_op_func_type
comp_op_table_rgba<ColorT, Order>::g_comp_op_func[] =
{
comp_op_rgba_clear <ColorT,Order>::blend_pix,
comp_op_rgba_src <ColorT,Order>::blend_pix,
comp_op_rgba_dst <ColorT,Order>::blend_pix,
comp_op_rgba_src_over <ColorT,Order>::blend_pix,
comp_op_rgba_dst_over <ColorT,Order>::blend_pix,
comp_op_rgba_src_in <ColorT,Order>::blend_pix,
comp_op_rgba_dst_in <ColorT,Order>::blend_pix,
comp_op_rgba_src_out <ColorT,Order>::blend_pix,
comp_op_rgba_dst_out <ColorT,Order>::blend_pix,
comp_op_rgba_src_atop <ColorT,Order>::blend_pix,
comp_op_rgba_dst_atop <ColorT,Order>::blend_pix,
comp_op_rgba_xor <ColorT,Order>::blend_pix,
comp_op_rgba_plus <ColorT,Order>::blend_pix,
//comp_op_rgba_minus <ColorT,Order>::blend_pix,
comp_op_rgba_multiply <ColorT,Order>::blend_pix,
comp_op_rgba_screen <ColorT,Order>::blend_pix,
comp_op_rgba_overlay <ColorT,Order>::blend_pix,
comp_op_rgba_darken <ColorT,Order>::blend_pix,
comp_op_rgba_lighten <ColorT,Order>::blend_pix,
comp_op_rgba_color_dodge<ColorT,Order>::blend_pix,
comp_op_rgba_color_burn <ColorT,Order>::blend_pix,
comp_op_rgba_hard_light <ColorT,Order>::blend_pix,
comp_op_rgba_soft_light <ColorT,Order>::blend_pix,
comp_op_rgba_difference <ColorT,Order>::blend_pix,
comp_op_rgba_exclusion <ColorT,Order>::blend_pix,
//comp_op_rgba_contrast <ColorT,Order>::blend_pix,
//comp_op_rgba_invert <ColorT,Order>::blend_pix,
//comp_op_rgba_invert_rgb <ColorT,Order>::blend_pix,
0
};
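// The function table above is indexed by the comp_op_e values declared
// below; the commented-out entries correspond to the operators that are
// also disabled in the enum, and the array ends with a zero entry.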
//==============================================================comp_op_e
enum comp_op_e
{
comp_op_clear, //----comp_op_clear
comp_op_src, //----comp_op_src
comp_op_dst, //----comp_op_dst
comp_op_src_over, //----comp_op_src_over
comp_op_dst_over, //----comp_op_dst_over
comp_op_src_in, //----comp_op_src_in
comp_op_dst_in, //----comp_op_dst_in
comp_op_src_out, //----comp_op_src_out
comp_op_dst_out, //----comp_op_dst_out
comp_op_src_atop, //----comp_op_src_atop
comp_op_dst_atop, //----comp_op_dst_atop
comp_op_xor, //----comp_op_xor
comp_op_plus, //----comp_op_plus
//comp_op_minus, //----comp_op_minus
comp_op_multiply, //----comp_op_multiply
comp_op_screen, //----comp_op_screen
comp_op_overlay, //----comp_op_overlay
comp_op_darken, //----comp_op_darken
comp_op_lighten, //----comp_op_lighten
comp_op_color_dodge, //----comp_op_color_dodge
comp_op_color_burn, //----comp_op_color_burn
comp_op_hard_light, //----comp_op_hard_light
comp_op_soft_light, //----comp_op_soft_light
comp_op_difference, //----comp_op_difference
comp_op_exclusion, //----comp_op_exclusion
//comp_op_contrast, //----comp_op_contrast
//comp_op_invert, //----comp_op_invert
//comp_op_invert_rgb, //----comp_op_invert_rgb
end_of_comp_op_e
};
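// Example (sketch, not part of the original library): selecting a composite
// operation at run time with pixfmt_custom_blend_rgba, which is defined
// further down in this file. "rbuf" is assumed to be an already attached
// agg::rendering_buffer.
//
//     typedef agg::comp_op_adaptor_rgba<agg::rgba8, agg::order_bgra> blender_t;
//     typedef agg::pixfmt_custom_blend_rgba<blender_t, agg::rendering_buffer> pixfmt_t;
//
//     pixfmt_t pixf(rbuf);                    // defaults to comp_op_src_over
//     pixf.comp_op(agg::comp_op_multiply);    // any comp_op_e value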
//====================================================comp_op_adaptor_rgba
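// Adapts the table-driven compositing functions to the blender interface
// expected by pixfmt_custom_blend_rgba: the plain source color is
// premultiplied by its alpha before the selected operator is invoked.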
template<class ColorT, class Order>
struct comp_op_adaptor_rgba
{
typedef ColorT color_type;
typedef Order order_type;
typedef typename color_type::value_type value_type;
typedef typename color_type::calc_type calc_type;
typedef typename color_type::long_type long_type;
static AGG_INLINE void blend_pix(unsigned op, value_type* p,
value_type r, value_type g, value_type b, value_type a, cover_type cover)
{
comp_op_table_rgba<ColorT, Order>::g_comp_op_func[op](p,
color_type::multiply(r, a),
color_type::multiply(g, a),
color_type::multiply(b, a),
a, cover);
}
};
//=========================================comp_op_adaptor_clip_to_dst_rgba
template<class ColorT, class Order>
struct comp_op_adaptor_clip_to_dst_rgba
{
typedef ColorT color_type;
typedef Order order_type;
typedef typename color_type::value_type value_type;
typedef typename color_type::calc_type calc_type;
typedef typename color_type::long_type long_type;
static AGG_INLINE void blend_pix(unsigned op, value_type* p,
value_type r, value_type g, value_type b, value_type a, cover_type cover)
{
r = color_type::multiply(r, a);
g = color_type::multiply(g, a);
b = color_type::multiply(b, a);
value_type da = p[Order::A];
comp_op_table_rgba<ColorT, Order>::g_comp_op_func[op](p,
color_type::multiply(r, da),
color_type::multiply(g, da),
color_type::multiply(b, da),
color_type::multiply(a, da), cover);
}
};
//================================================comp_op_adaptor_rgba_pre
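// As comp_op_adaptor_rgba, but the source color is assumed to be
// premultiplied already and is passed through unchanged.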
template<class ColorT, class Order>
struct comp_op_adaptor_rgba_pre
{
typedef ColorT color_type;
typedef Order order_type;
typedef typename color_type::value_type value_type;
typedef typename color_type::calc_type calc_type;
typedef typename color_type::long_type long_type;
static AGG_INLINE void blend_pix(unsigned op, value_type* p,
value_type r, value_type g, value_type b, value_type a, cover_type cover)
{
comp_op_table_rgba<ColorT, Order>::g_comp_op_func[op](p, r, g, b, a, cover);
}
};
//=====================================comp_op_adaptor_clip_to_dst_rgba_pre
template<class ColorT, class Order>
struct comp_op_adaptor_clip_to_dst_rgba_pre
{
typedef ColorT color_type;
typedef Order order_type;
typedef typename color_type::value_type value_type;
typedef typename color_type::calc_type calc_type;
typedef typename color_type::long_type long_type;
static AGG_INLINE void blend_pix(unsigned op, value_type* p,
value_type r, value_type g, value_type b, value_type a, cover_type cover)
{
value_type da = p[Order::A];
comp_op_table_rgba<ColorT, Order>::g_comp_op_func[op](p,
color_type::multiply(r, da),
color_type::multiply(g, da),
color_type::multiply(b, da),
color_type::multiply(a, da), cover);
}
};
//====================================================comp_op_adaptor_rgba_plain
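// For destinations that store plain (non-premultiplied) pixels: the
// destination pixel is premultiplied, blended via comp_op_adaptor_rgba
// and then demultiplied back.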
template<class ColorT, class Order>
struct comp_op_adaptor_rgba_plain
{
typedef ColorT color_type;
typedef Order order_type;
typedef typename color_type::value_type value_type;
typedef typename color_type::calc_type calc_type;
typedef typename color_type::long_type long_type;
static AGG_INLINE void blend_pix(unsigned op, value_type* p,
value_type r, value_type g, value_type b, value_type a, cover_type cover)
{
multiplier_rgba<ColorT, Order>::premultiply(p);
comp_op_adaptor_rgba<ColorT, Order>::blend_pix(op, p, r, g, b, a, cover);
multiplier_rgba<ColorT, Order>::demultiply(p);
}
};
//=========================================comp_op_adaptor_clip_to_dst_rgba_plain
template<class ColorT, class Order>
struct comp_op_adaptor_clip_to_dst_rgba_plain
{
typedef ColorT color_type;
typedef Order order_type;
typedef typename color_type::value_type value_type;
typedef typename color_type::calc_type calc_type;
typedef typename color_type::long_type long_type;
static AGG_INLINE void blend_pix(unsigned op, value_type* p,
value_type r, value_type g, value_type b, value_type a, cover_type cover)
{
multiplier_rgba<ColorT, Order>::premultiply(p);
comp_op_adaptor_clip_to_dst_rgba<ColorT, Order>::blend_pix(op, p, r, g, b, a, cover);
multiplier_rgba<ColorT, Order>::demultiply(p);
}
};
//=======================================================comp_adaptor_rgba
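// The comp_adaptor_* family mirrors comp_op_adaptor_*, but blends through
// a fixed BlenderPre type instead of the operator table; the "op" argument
// is accepted only for interface compatibility and is ignored.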
template<class BlenderPre>
struct comp_adaptor_rgba
{
typedef typename BlenderPre::color_type color_type;
typedef typename BlenderPre::order_type order_type;
typedef typename color_type::value_type value_type;
typedef typename color_type::calc_type calc_type;
typedef typename color_type::long_type long_type;
static AGG_INLINE void blend_pix(unsigned op, value_type* p,
value_type r, value_type g, value_type b, value_type a, cover_type cover)
{
BlenderPre::blend_pix(p,
color_type::multiply(r, a),
color_type::multiply(g, a),
color_type::multiply(b, a),
a, cover);
}
};
//==========================================comp_adaptor_clip_to_dst_rgba
template<class BlenderPre>
struct comp_adaptor_clip_to_dst_rgba
{
typedef typename BlenderPre::color_type color_type;
typedef typename BlenderPre::order_type order_type;
typedef typename color_type::value_type value_type;
typedef typename color_type::calc_type calc_type;
typedef typename color_type::long_type long_type;
static AGG_INLINE void blend_pix(unsigned op, value_type* p,
value_type r, value_type g, value_type b, value_type a, cover_type cover)
{
r = color_type::multiply(r, a);
g = color_type::multiply(g, a);
b = color_type::multiply(b, a);
value_type da = p[order_type::A];
BlenderPre::blend_pix(p,
color_type::multiply(r, da),
color_type::multiply(g, da),
color_type::multiply(b, da),
color_type::multiply(a, da), cover);
}
};
//=======================================================comp_adaptor_rgba_pre
template<class BlenderPre>
struct comp_adaptor_rgba_pre
{
typedef typename BlenderPre::color_type color_type;
typedef typename BlenderPre::order_type order_type;
typedef typename color_type::value_type value_type;
typedef typename color_type::calc_type calc_type;
typedef typename color_type::long_type long_type;
static AGG_INLINE void blend_pix(unsigned op, value_type* p,
value_type r, value_type g, value_type b, value_type a, cover_type cover)
{
BlenderPre::blend_pix(p, r, g, b, a, cover);
}
};
//======================================comp_adaptor_clip_to_dst_rgba_pre
template<class BlenderPre>
struct comp_adaptor_clip_to_dst_rgba_pre
{
typedef typename BlenderPre::color_type color_type;
typedef typename BlenderPre::order_type order_type;
typedef typename color_type::value_type value_type;
typedef typename color_type::calc_type calc_type;
typedef typename color_type::long_type long_type;
static AGG_INLINE void blend_pix(unsigned op, value_type* p,
value_type r, value_type g, value_type b, value_type a, cover_type cover)
{
unsigned da = p[order_type::A];
BlenderPre::blend_pix(p,
color_type::multiply(r, da),
color_type::multiply(g, da),
color_type::multiply(b, da),
color_type::multiply(a, da),
cover);
}
};
//=======================================================comp_adaptor_rgba_plain
template<class BlenderPre>
struct comp_adaptor_rgba_plain
{
typedef typename BlenderPre::color_type color_type;
typedef typename BlenderPre::order_type order_type;
typedef typename color_type::value_type value_type;
typedef typename color_type::calc_type calc_type;
typedef typename color_type::long_type long_type;
static AGG_INLINE void blend_pix(unsigned op, value_type* p,
value_type r, value_type g, value_type b, value_type a, cover_type cover)
{
multiplier_rgba<color_type, order_type>::premultiply(p);
comp_adaptor_rgba<BlenderPre>::blend_pix(op, p, r, g, b, a, cover);
multiplier_rgba<color_type, order_type>::demultiply(p);
}
};
//==========================================comp_adaptor_clip_to_dst_rgba_plain
template<class BlenderPre>
struct comp_adaptor_clip_to_dst_rgba_plain
{
typedef typename BlenderPre::color_type color_type;
typedef typename BlenderPre::order_type order_type;
typedef typename color_type::value_type value_type;
typedef typename color_type::calc_type calc_type;
typedef typename color_type::long_type long_type;
static AGG_INLINE void blend_pix(unsigned op, value_type* p,
value_type r, value_type g, value_type b, value_type a, cover_type cover)
{
multiplier_rgba<color_type, order_type>::premultiply(p);
comp_adaptor_clip_to_dst_rgba<BlenderPre>::blend_pix(op, p, r, g, b, a, cover);
multiplier_rgba<color_type, order_type>::demultiply(p);
}
};
//=================================================pixfmt_alpha_blend_rgba
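// Pixel format that stores four components per pixel in the channel order
// given by the blender's order_type and delegates all per-pixel blending
// to the Blender policy; pixels live in a rendering buffer of type RenBuf.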
template<class Blender, class RenBuf>
class pixfmt_alpha_blend_rgba
{
public:
typedef pixfmt_rgba_tag pixfmt_category;
typedef RenBuf rbuf_type;
typedef typename rbuf_type::row_data row_data;
typedef Blender blender_type;
typedef typename blender_type::color_type color_type;
typedef typename blender_type::order_type order_type;
typedef typename color_type::value_type value_type;
typedef typename color_type::calc_type calc_type;
enum
{
num_components = 4,
pix_step = 4,
pix_width = sizeof(value_type) * pix_step,
};
struct pixel_type
{
value_type c[num_components];
void set(value_type r, value_type g, value_type b, value_type a)
{
c[order_type::R] = r;
c[order_type::G] = g;
c[order_type::B] = b;
c[order_type::A] = a;
}
void set(const color_type& color)
{
set(color.r, color.g, color.b, color.a);
}
void get(value_type& r, value_type& g, value_type& b, value_type& a) const
{
r = c[order_type::R];
g = c[order_type::G];
b = c[order_type::B];
a = c[order_type::A];
}
color_type get() const
{
return color_type(
c[order_type::R],
c[order_type::G],
c[order_type::B],
c[order_type::A]);
}
pixel_type* next()
{
return (pixel_type*)(c + pix_step);
}
const pixel_type* next() const
{
return (const pixel_type*)(c + pix_step);
}
pixel_type* advance(int n)
{
return (pixel_type*)(c + n * pix_step);
}
const pixel_type* advance(int n) const
{
return (const pixel_type*)(c + n * pix_step);
}
};
private:
//--------------------------------------------------------------------
AGG_INLINE void blend_pix(pixel_type* p, const color_type& c, unsigned cover)
{
m_blender.blend_pix(p->c, c.r, c.g, c.b, c.a, cover);
}
//--------------------------------------------------------------------
AGG_INLINE void blend_pix(pixel_type* p, const color_type& c)
{
m_blender.blend_pix(p->c, c.r, c.g, c.b, c.a);
}
//--------------------------------------------------------------------
AGG_INLINE void copy_or_blend_pix(pixel_type* p, const color_type& c, unsigned cover)
{
if (!c.is_transparent())
{
if (c.is_opaque() && cover == cover_mask)
{
p->set(c.r, c.g, c.b, c.a);
}
else
{
m_blender.blend_pix(p->c, c.r, c.g, c.b, c.a, cover);
}
}
}
//--------------------------------------------------------------------
AGG_INLINE void copy_or_blend_pix(pixel_type* p, const color_type& c)
{
if (!c.is_transparent())
{
if (c.is_opaque())
{
p->set(c.r, c.g, c.b, c.a);
}
else
{
m_blender.blend_pix(p->c, c.r, c.g, c.b, c.a);
}
}
}
public:
//--------------------------------------------------------------------
pixfmt_alpha_blend_rgba() : m_rbuf(0) {}
explicit pixfmt_alpha_blend_rgba(rbuf_type& rb) : m_rbuf(&rb) {}
void attach(rbuf_type& rb) { m_rbuf = &rb; }
//--------------------------------------------------------------------
template<class PixFmt>
bool attach(PixFmt& pixf, int x1, int y1, int x2, int y2)
{
rect_i r(x1, y1, x2, y2);
if (r.clip(rect_i(0, 0, pixf.width()-1, pixf.height()-1)))
{
int stride = pixf.stride();
m_rbuf->attach(pixf.pix_ptr(r.x1, stride < 0 ? r.y2 : r.y1),
(r.x2 - r.x1) + 1,
(r.y2 - r.y1) + 1,
stride);
return true;
}
return false;
}
//--------------------------------------------------------------------
AGG_INLINE unsigned width() const { return m_rbuf->width(); }
AGG_INLINE unsigned height() const { return m_rbuf->height(); }
AGG_INLINE int stride() const { return m_rbuf->stride(); }
//--------------------------------------------------------------------
AGG_INLINE int8u* row_ptr(int y) { return m_rbuf->row_ptr(y); }
AGG_INLINE const int8u* row_ptr(int y) const { return m_rbuf->row_ptr(y); }
AGG_INLINE row_data row(int y) const { return m_rbuf->row(y); }
//--------------------------------------------------------------------
AGG_INLINE int8u* pix_ptr(int x, int y)
{
return m_rbuf->row_ptr(y) + sizeof(value_type) * (x * pix_step);
}
AGG_INLINE const int8u* pix_ptr(int x, int y) const
{
return m_rbuf->row_ptr(y) + sizeof(value_type) * (x * pix_step);
}
// Return pointer to pixel value, forcing row to be allocated.
AGG_INLINE pixel_type* pix_value_ptr(int x, int y, unsigned len)
{
return (pixel_type*)(m_rbuf->row_ptr(x, y, len) + sizeof(value_type) * (x * pix_step));
}
// Return pointer to pixel value, or null if row not allocated.
AGG_INLINE const pixel_type* pix_value_ptr(int x, int y) const
{
int8u* p = m_rbuf->row_ptr(y);
return p ? (pixel_type*)(p + sizeof(value_type) * (x * pix_step)) : 0;
}
// Get pixel pointer from raw buffer pointer.
AGG_INLINE static pixel_type* pix_value_ptr(void* p)
{
return (pixel_type*)p;
}
// Get pixel pointer from raw buffer pointer.
AGG_INLINE static const pixel_type* pix_value_ptr(const void* p)
{
return (const pixel_type*)p;
}
//--------------------------------------------------------------------
AGG_INLINE static void write_plain_color(void* p, color_type c)
{
blender_type::set_plain_color(pix_value_ptr(p)->c, c);
}
//--------------------------------------------------------------------
AGG_INLINE static color_type read_plain_color(const void* p)
{
return blender_type::get_plain_color(pix_value_ptr(p)->c);
}
//--------------------------------------------------------------------
AGG_INLINE static void make_pix(int8u* p, const color_type& c)
{
((pixel_type*)p)->set(c);
}
//--------------------------------------------------------------------
AGG_INLINE color_type pixel(int x, int y) const
{
if (const pixel_type* p = pix_value_ptr(x, y))
{
return p->get();
}
return color_type::no_color();
}
//--------------------------------------------------------------------
AGG_INLINE void copy_pixel(int x, int y, const color_type& c)
{
pix_value_ptr(x, y, 1)->set(c);
}
//--------------------------------------------------------------------
AGG_INLINE void blend_pixel(int x, int y, const color_type& c, int8u cover)
{
copy_or_blend_pix(pix_value_ptr(x, y, 1), c, cover);
}
//--------------------------------------------------------------------
AGG_INLINE void copy_hline(int x, int y,
unsigned len,
const color_type& c)
{
pixel_type v;
v.set(c);
pixel_type* p = pix_value_ptr(x, y, len);
do
{
*p = v;
p = p->next();
}
while (--len);
}
//--------------------------------------------------------------------
AGG_INLINE void copy_vline(int x, int y,
unsigned len,
const color_type& c)
{
pixel_type v;
v.set(c);
do
{
*pix_value_ptr(x, y++, 1) = v;
}
while (--len);
}
//--------------------------------------------------------------------
void blend_hline(int x, int y,
unsigned len,
const color_type& c,
int8u cover)
{
if (!c.is_transparent())
{
pixel_type* p = pix_value_ptr(x, y, len);
if (c.is_opaque() && cover == cover_mask)
{
pixel_type v;
v.set(c);
do
{
*p = v;
p = p->next();
}
while (--len);
}
else
{
if (cover == cover_mask)
{
do
{
blend_pix(p, c);
p = p->next();
}
while (--len);
}
else
{
do
{
blend_pix(p, c, cover);
p = p->next();
}
while (--len);
}
}
}
}
//--------------------------------------------------------------------
void blend_vline(int x, int y,
unsigned len,
const color_type& c,
int8u cover)
{
if (!c.is_transparent())
{
if (c.is_opaque() && cover == cover_mask)
{
pixel_type v;
v.set(c);
do
{
*pix_value_ptr(x, y++, 1) = v;
}
while (--len);
}
else
{
if (cover == cover_mask)
{
do
{
blend_pix(pix_value_ptr(x, y++, 1), c);
}
while (--len);
}
else
{
do
{
blend_pix(pix_value_ptr(x, y++, 1), c, cover);
}
while (--len);
}
}
}
}
//--------------------------------------------------------------------
void blend_solid_hspan(int x, int y,
unsigned len,
const color_type& c,
const int8u* covers)
{
if (!c.is_transparent())
{
pixel_type* p = pix_value_ptr(x, y, len);
do
{
if (c.is_opaque() && *covers == cover_mask)
{
p->set(c);
}
else
{
blend_pix(p, c, *covers);
}
p = p->next();
++covers;
}
while (--len);
}
}
//--------------------------------------------------------------------
void blend_solid_vspan(int x, int y,
unsigned len,
const color_type& c,
const int8u* covers)
{
if (!c.is_transparent())
{
do
{
pixel_type* p = pix_value_ptr(x, y++, 1);
if (c.is_opaque() && *covers == cover_mask)
{
p->set(c);
}
else
{
blend_pix(p, c, *covers);
}
++covers;
}
while (--len);
}
}
//--------------------------------------------------------------------
void copy_color_hspan(int x, int y,
unsigned len,
const color_type* colors)
{
pixel_type* p = pix_value_ptr(x, y, len);
do
{
p->set(*colors++);
p = p->next();
}
while (--len);
}
//--------------------------------------------------------------------
void copy_color_vspan(int x, int y,
unsigned len,
const color_type* colors)
{
do
{
pix_value_ptr(x, y++, 1)->set(*colors++);
}
while (--len);
}
//--------------------------------------------------------------------
void blend_color_hspan(int x, int y,
unsigned len,
const color_type* colors,
const int8u* covers,
int8u cover)
{
pixel_type* p = pix_value_ptr(x, y, len);
if (covers)
{
do
{
copy_or_blend_pix(p, *colors++, *covers++);
p = p->next();
}
while (--len);
}
else
{
if (cover == cover_mask)
{
do
{
copy_or_blend_pix(p, *colors++);
p = p->next();
}
while (--len);
}
else
{
do
{
copy_or_blend_pix(p, *colors++, cover);
p = p->next();
}
while (--len);
}
}
}
//--------------------------------------------------------------------
void blend_color_vspan(int x, int y,
unsigned len,
const color_type* colors,
const int8u* covers,
int8u cover)
{
if (covers)
{
do
{
copy_or_blend_pix(pix_value_ptr(x, y++, 1), *colors++, *covers++);
}
while (--len);
}
else
{
if (cover == cover_mask)
{
do
{
copy_or_blend_pix(pix_value_ptr(x, y++, 1), *colors++);
}
while (--len);
}
else
{
do
{
copy_or_blend_pix(pix_value_ptr(x, y++, 1), *colors++, cover);
}
while (--len);
}
}
}
//--------------------------------------------------------------------
template<class Function> void for_each_pixel(Function f)
{
for (unsigned y = 0; y < height(); ++y)
{
row_data r = m_rbuf->row(y);
if (r.ptr)
{
unsigned len = r.x2 - r.x1 + 1;
pixel_type* p = pix_value_ptr(r.x1, y, len);
do
{
f(p->c);
p = p->next();
}
while (--len);
}
}
}
//--------------------------------------------------------------------
void premultiply()
{
for_each_pixel(multiplier_rgba<color_type, order_type>::premultiply);
}
//--------------------------------------------------------------------
void demultiply()
{
for_each_pixel(multiplier_rgba<color_type, order_type>::demultiply);
}
//--------------------------------------------------------------------
template<class GammaLut> void apply_gamma_dir(const GammaLut& g)
{
for_each_pixel(apply_gamma_dir_rgba<color_type, order_type, GammaLut>(g));
}
//--------------------------------------------------------------------
template<class GammaLut> void apply_gamma_inv(const GammaLut& g)
{
for_each_pixel(apply_gamma_inv_rgba<color_type, order_type, GammaLut>(g));
}
//--------------------------------------------------------------------
template<class RenBuf2> void copy_from(const RenBuf2& from,
int xdst, int ydst,
int xsrc, int ysrc,
unsigned len)
{
if (const int8u* p = from.row_ptr(ysrc))
{
memmove(m_rbuf->row_ptr(xdst, ydst, len) + xdst * pix_width,
p + xsrc * pix_width,
len * pix_width);
}
}
//--------------------------------------------------------------------
// Blend from another RGBA surface.
template<class SrcPixelFormatRenderer>
void blend_from(const SrcPixelFormatRenderer& from,
int xdst, int ydst,
int xsrc, int ysrc,
unsigned len,
int8u cover)
{
typedef typename SrcPixelFormatRenderer::pixel_type src_pixel_type;
if (const src_pixel_type* psrc = from.pix_value_ptr(xsrc, ysrc))
{
pixel_type* pdst = pix_value_ptr(xdst, ydst, len);
int srcinc = 1;
int dstinc = 1;
if (xdst > xsrc)
{
psrc = psrc->advance(len - 1);
pdst = pdst->advance(len - 1);
srcinc = -1;
dstinc = -1;
}
if (cover == cover_mask)
{
do
{
copy_or_blend_pix(pdst, psrc->get());
psrc = psrc->advance(srcinc);
pdst = pdst->advance(dstinc);
}
while (--len);
}
else
{
do
{
copy_or_blend_pix(pdst, psrc->get(), cover);
psrc = psrc->advance(srcinc);
pdst = pdst->advance(dstinc);
}
while (--len);
}
}
}
//--------------------------------------------------------------------
// Combine single color with grayscale surface and blend.
template<class SrcPixelFormatRenderer>
void blend_from_color(const SrcPixelFormatRenderer& from,
const color_type& color,
int xdst, int ydst,
int xsrc, int ysrc,
unsigned len,
int8u cover)
{
typedef typename SrcPixelFormatRenderer::pixel_type src_pixel_type;
typedef typename SrcPixelFormatRenderer::color_type src_color_type;
if (const src_pixel_type* psrc = from.pix_value_ptr(xsrc, ysrc))
{
pixel_type* pdst = pix_value_ptr(xdst, ydst, len);
do
{
copy_or_blend_pix(pdst, color,
src_color_type::scale_cover(cover, psrc->c[0]));
psrc = psrc->next();
pdst = pdst->next();
}
while (--len);
}
}
//--------------------------------------------------------------------
// Blend from color table, using grayscale surface as indexes into table.
// Obviously, this only works for integer value types.
template<class SrcPixelFormatRenderer>
void blend_from_lut(const SrcPixelFormatRenderer& from,
const color_type* color_lut,
int xdst, int ydst,
int xsrc, int ysrc,
unsigned len,
int8u cover)
{
typedef typename SrcPixelFormatRenderer::pixel_type src_pixel_type;
if (const src_pixel_type* psrc = from.pix_value_ptr(xsrc, ysrc))
{
pixel_type* pdst = pix_value_ptr(xdst, ydst, len);
if (cover == cover_mask)
{
do
{
copy_or_blend_pix(pdst, color_lut[psrc->c[0]]);
psrc = psrc->next();
pdst = pdst->next();
}
while (--len);
}
else
{
do
{
copy_or_blend_pix(pdst, color_lut[psrc->c[0]], cover);
psrc = psrc->next();
pdst = pdst->next();
}
while (--len);
}
}
}
private:
rbuf_type* m_rbuf;
Blender m_blender;
};
//================================================pixfmt_custom_blend_rgba
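// Same interface as pixfmt_alpha_blend_rgba, but every blend is routed
// through a composite operation that can be selected at run time with
// comp_op(); the default operation is comp_op_src_over.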
template<class Blender, class RenBuf> class pixfmt_custom_blend_rgba
{
public:
typedef pixfmt_rgba_tag pixfmt_category;
typedef RenBuf rbuf_type;
typedef typename rbuf_type::row_data row_data;
typedef Blender blender_type;
typedef typename blender_type::color_type color_type;
typedef typename blender_type::order_type order_type;
typedef typename color_type::value_type value_type;
typedef typename color_type::calc_type calc_type;
enum
{
num_components = 4,
pix_step = 4,
pix_width = sizeof(value_type) * pix_step,
};
struct pixel_type
{
value_type c[num_components];
void set(value_type r, value_type g, value_type b, value_type a)
{
c[order_type::R] = r;
c[order_type::G] = g;
c[order_type::B] = b;
c[order_type::A] = a;
}
void set(const color_type& color)
{
set(color.r, color.g, color.b, color.a);
}
void get(value_type& r, value_type& g, value_type& b, value_type& a) const
{
r = c[order_type::R];
g = c[order_type::G];
b = c[order_type::B];
a = c[order_type::A];
}
color_type get() const
{
return color_type(
c[order_type::R],
c[order_type::G],
c[order_type::B],
c[order_type::A]);
}
pixel_type* next()
{
return (pixel_type*)(c + pix_step);
}
const pixel_type* next() const
{
return (const pixel_type*)(c + pix_step);
}
pixel_type* advance(int n)
{
return (pixel_type*)(c + n * pix_step);
}
const pixel_type* advance(int n) const
{
return (const pixel_type*)(c + n * pix_step);
}
};
private:
//--------------------------------------------------------------------
AGG_INLINE void blend_pix(pixel_type* p, const color_type& c, unsigned cover = cover_full)
{
m_blender.blend_pix(m_comp_op, p->c, c.r, c.g, c.b, c.a, cover);
}
//--------------------------------------------------------------------
AGG_INLINE void copy_or_blend_pix(pixel_type* p, const color_type& c, unsigned cover = cover_full)
{
if (!c.is_transparent())
{
if (c.is_opaque() && cover == cover_mask)
{
p->set(c.r, c.g, c.b, c.a);
}
else
{
blend_pix(p, c, cover);
}
}
}
public:
//--------------------------------------------------------------------
pixfmt_custom_blend_rgba() : m_rbuf(0), m_comp_op(comp_op_src_over) {}
explicit pixfmt_custom_blend_rgba(rbuf_type& rb, unsigned comp_op=comp_op_src_over) :
m_rbuf(&rb),
m_comp_op(comp_op)
{}
void attach(rbuf_type& rb) { m_rbuf = &rb; }
//--------------------------------------------------------------------
template<class PixFmt>
bool attach(PixFmt& pixf, int x1, int y1, int x2, int y2)
{
rect_i r(x1, y1, x2, y2);
if (r.clip(rect_i(0, 0, pixf.width()-1, pixf.height()-1)))
{
int stride = pixf.stride();
m_rbuf->attach(pixf.pix_ptr(r.x1, stride < 0 ? r.y2 : r.y1),
(r.x2 - r.x1) + 1,
(r.y2 - r.y1) + 1,
stride);
return true;
}
return false;
}
//--------------------------------------------------------------------
void comp_op(unsigned op) { m_comp_op = op; }
unsigned comp_op() const { return m_comp_op; }
//--------------------------------------------------------------------
AGG_INLINE unsigned width() const { return m_rbuf->width(); }
AGG_INLINE unsigned height() const { return m_rbuf->height(); }
AGG_INLINE int stride() const { return m_rbuf->stride(); }
//--------------------------------------------------------------------
AGG_INLINE int8u* row_ptr(int y) { return m_rbuf->row_ptr(y); }
AGG_INLINE const int8u* row_ptr(int y) const { return m_rbuf->row_ptr(y); }
AGG_INLINE row_data row(int y) const { return m_rbuf->row(y); }
//--------------------------------------------------------------------
AGG_INLINE int8u* pix_ptr(int x, int y)
{
return m_rbuf->row_ptr(y) + sizeof(value_type) * (x * pix_step);
}
AGG_INLINE const int8u* pix_ptr(int x, int y) const
{
return m_rbuf->row_ptr(y) + sizeof(value_type) * (x * pix_step);
}
// Return pointer to pixel value, forcing row to be allocated.
AGG_INLINE pixel_type* pix_value_ptr(int x, int y, unsigned len)
{
return (pixel_type*)(m_rbuf->row_ptr(x, y, len) + sizeof(value_type) * (x * pix_step));
}
// Return pointer to pixel value, or null if row not allocated.
AGG_INLINE const pixel_type* pix_value_ptr(int x, int y) const
{
int8u* p = m_rbuf->row_ptr(y);
return p ? (pixel_type*)(p + sizeof(value_type) * (x * pix_step)) : 0;
}
// Get pixel pointer from raw buffer pointer.
AGG_INLINE static pixel_type* pix_value_ptr(void* p)
{
return (pixel_type*)p;
}
// Get pixel pointer from raw buffer pointer.
AGG_INLINE static const pixel_type* pix_value_ptr(const void* p)
{
return (const pixel_type*)p;
}
//--------------------------------------------------------------------
AGG_INLINE static void make_pix(int8u* p, const color_type& c)
{
((pixel_type*)p)->set(c);
}
//--------------------------------------------------------------------
AGG_INLINE color_type pixel(int x, int y) const
{
if (const pixel_type* p = pix_value_ptr(x, y))
{
return p->get();
}
return color_type::no_color();
}
//--------------------------------------------------------------------
AGG_INLINE void copy_pixel(int x, int y, const color_type& c)
{
make_pix(pix_value_ptr(x, y, 1), c);
}
//--------------------------------------------------------------------
AGG_INLINE void blend_pixel(int x, int y, const color_type& c, int8u cover)
{
blend_pix(pix_value_ptr(x, y, 1), c, cover);
}
//--------------------------------------------------------------------
AGG_INLINE void copy_hline(int x, int y,
unsigned len,
const color_type& c)
{
pixel_type v;
v.set(c);
pixel_type* p = pix_value_ptr(x, y, len);
do
{
*p = v;
p = p->next();
}
while (--len);
}
//--------------------------------------------------------------------
AGG_INLINE void copy_vline(int x, int y,
unsigned len,
const color_type& c)
{
pixel_type v;
v.set(c);
do
{
*pix_value_ptr(x, y++, 1) = v;
}
while (--len);
}
//--------------------------------------------------------------------
void blend_hline(int x, int y, unsigned len,
const color_type& c, int8u cover)
{
pixel_type* p = pix_value_ptr(x, y, len);
do
{
blend_pix(p, c, cover);
p = p->next();
}
while (--len);
}
//--------------------------------------------------------------------
void blend_vline(int x, int y, unsigned len,
const color_type& c, int8u cover)
{
do
{
blend_pix(pix_value_ptr(x, y++, 1), c, cover);
}
while (--len);
}
//--------------------------------------------------------------------
void blend_solid_hspan(int x, int y, unsigned len,
const color_type& c, const int8u* covers)
{
pixel_type* p = pix_value_ptr(x, y, len);
do
{
blend_pix(p, c, *covers++);
p = p->next();
}
while (--len);
}
//--------------------------------------------------------------------
void blend_solid_vspan(int x, int y, unsigned len,
const color_type& c, const int8u* covers)
{
do
{
blend_pix(pix_value_ptr(x, y++, 1), c, *covers++);
}
while (--len);
}
//--------------------------------------------------------------------
void copy_color_hspan(int x, int y,
unsigned len,
const color_type* colors)
{
pixel_type* p = pix_value_ptr(x, y, len);
do
{
p->set(*colors++);
p = p->next();
}
while (--len);
}
//--------------------------------------------------------------------
void copy_color_vspan(int x, int y,
unsigned len,
const color_type* colors)
{
do
{
pix_value_ptr(x, y++, 1)->set(*colors++);
}
while (--len);
}
//--------------------------------------------------------------------
void blend_color_hspan(int x, int y, unsigned len,
const color_type* colors,
const int8u* covers,
int8u cover)
{
pixel_type* p = pix_value_ptr(x, y, len);
do
{
blend_pix(p, *colors++, covers ? *covers++ : cover);
p = p->next();
}
while (--len);
}
//--------------------------------------------------------------------
void blend_color_vspan(int x, int y, unsigned len,
const color_type* colors,
const int8u* covers,
int8u cover)
{
do
{
blend_pix(pix_value_ptr(x, y++, 1), *colors++, covers ? *covers++ : cover);
}
while (--len);
}
//--------------------------------------------------------------------
template<class Function> void for_each_pixel(Function f)
{
unsigned y;
for (y = 0; y < height(); ++y)
{
row_data r = m_rbuf->row(y);
if (r.ptr)
{
unsigned len = r.x2 - r.x1 + 1;
pixel_type* p = pix_value_ptr(r.x1, y, len);
do
{
f(p->c);
p = p->next();
}
while (--len);
}
}
}
//--------------------------------------------------------------------
void premultiply()
{
for_each_pixel(multiplier_rgba<color_type, order_type>::premultiply);
}
//--------------------------------------------------------------------
void demultiply()
{
for_each_pixel(multiplier_rgba<color_type, order_type>::demultiply);
}
//--------------------------------------------------------------------
template<class GammaLut> void apply_gamma_dir(const GammaLut& g)
{
for_each_pixel(apply_gamma_dir_rgba<color_type, order_type, GammaLut>(g));
}
//--------------------------------------------------------------------
template<class GammaLut> void apply_gamma_inv(const GammaLut& g)
{
for_each_pixel(apply_gamma_inv_rgba<color_type, order_type, GammaLut>(g));
}
//--------------------------------------------------------------------
template<class RenBuf2> void copy_from(const RenBuf2& from,
int xdst, int ydst,
int xsrc, int ysrc,
unsigned len)
{
if (const int8u* p = from.row_ptr(ysrc))
{
memmove(m_rbuf->row_ptr(xdst, ydst, len) + xdst * pix_width,
p + xsrc * pix_width,
len * pix_width);
}
}
//--------------------------------------------------------------------
// Blend from another RGBA surface.
template<class SrcPixelFormatRenderer>
void blend_from(const SrcPixelFormatRenderer& from,
int xdst, int ydst,
int xsrc, int ysrc,
unsigned len,
int8u cover)
{
typedef typename SrcPixelFormatRenderer::pixel_type src_pixel_type;
if (const src_pixel_type* psrc = from.pix_value_ptr(xsrc, ysrc))
{
pixel_type* pdst = pix_value_ptr(xdst, ydst, len);
int srcinc = 1;
int dstinc = 1;
if (xdst > xsrc)
{
psrc = psrc->advance(len - 1);
pdst = pdst->advance(len - 1);
srcinc = -1;
dstinc = -1;
}
do
{
blend_pix(pdst, psrc->get(), cover);
psrc = psrc->advance(srcinc);
pdst = pdst->advance(dstinc);
}
while (--len);
}
}
//--------------------------------------------------------------------
// Blend from single color, using grayscale surface as alpha channel.
template<class SrcPixelFormatRenderer>
void blend_from_color(const SrcPixelFormatRenderer& from,
const color_type& color,
int xdst, int ydst,
int xsrc, int ysrc,
unsigned len,
int8u cover)
{
typedef typename SrcPixelFormatRenderer::pixel_type src_pixel_type;
typedef typename SrcPixelFormatRenderer::color_type src_color_type;
if (const src_pixel_type* psrc = from.pix_value_ptr(xsrc, ysrc))
{
pixel_type* pdst = pix_value_ptr(xdst, ydst, len);
do
{
blend_pix(pdst, color,
src_color_type::scale_cover(cover, psrc->c[0]));
psrc = psrc->next();
pdst = pdst->next();
}
while (--len);
}
}
//--------------------------------------------------------------------
// Blend from color table, using grayscale surface as indexes into table.
// Obviously, this only works for integer value types.
template<class SrcPixelFormatRenderer>
void blend_from_lut(const SrcPixelFormatRenderer& from,
const color_type* color_lut,
int xdst, int ydst,
int xsrc, int ysrc,
unsigned len,
int8u cover)
{
typedef typename SrcPixelFormatRenderer::pixel_type src_pixel_type;
if (const src_pixel_type* psrc = from.pix_value_ptr(xsrc, ysrc))
{
pixel_type* pdst = pix_value_ptr(xdst, ydst, len);
do
{
blend_pix(pdst, color_lut[psrc->c[0]], cover);
psrc = psrc->next();
pdst = pdst->next();
}
while (--len);
}
}
private:
rbuf_type* m_rbuf;
Blender m_blender;
unsigned m_comp_op;
};
//-----------------------------------------------------------------------
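// Shorthand typedefs for the blenders and pixel formats below. The numeric
// suffix gives the bits per pixel: 32 uses rgba8 components, 64 uses rgba16
// and 128 uses rgba32 (floating-point) components. The "s" prefix selects
// the sRGB color type srgba8, "_pre" variants work on premultiplied buffers
// and "_plain" variants on non-premultiplied ones.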
typedef blender_rgba<rgba8, order_rgba> blender_rgba32;
typedef blender_rgba<rgba8, order_argb> blender_argb32;
typedef blender_rgba<rgba8, order_abgr> blender_abgr32;
typedef blender_rgba<rgba8, order_bgra> blender_bgra32;
typedef blender_rgba<srgba8, order_rgba> blender_srgba32;
typedef blender_rgba<srgba8, order_argb> blender_sargb32;
typedef blender_rgba<srgba8, order_abgr> blender_sabgr32;
typedef blender_rgba<srgba8, order_bgra> blender_sbgra32;
typedef blender_rgba_pre<rgba8, order_rgba> blender_rgba32_pre;
typedef blender_rgba_pre<rgba8, order_argb> blender_argb32_pre;
typedef blender_rgba_pre<rgba8, order_abgr> blender_abgr32_pre;
typedef blender_rgba_pre<rgba8, order_bgra> blender_bgra32_pre;
typedef blender_rgba_pre<srgba8, order_rgba> blender_srgba32_pre;
typedef blender_rgba_pre<srgba8, order_argb> blender_sargb32_pre;
typedef blender_rgba_pre<srgba8, order_abgr> blender_sabgr32_pre;
typedef blender_rgba_pre<srgba8, order_bgra> blender_sbgra32_pre;
typedef blender_rgba_plain<rgba8, order_rgba> blender_rgba32_plain;
typedef blender_rgba_plain<rgba8, order_argb> blender_argb32_plain;
typedef blender_rgba_plain<rgba8, order_abgr> blender_abgr32_plain;
typedef blender_rgba_plain<rgba8, order_bgra> blender_bgra32_plain;
typedef blender_rgba_plain<srgba8, order_rgba> blender_srgba32_plain;
typedef blender_rgba_plain<srgba8, order_argb> blender_sargb32_plain;
typedef blender_rgba_plain<srgba8, order_abgr> blender_sabgr32_plain;
typedef blender_rgba_plain<srgba8, order_bgra> blender_sbgra32_plain;
typedef blender_rgba<rgba16, order_rgba> blender_rgba64;
typedef blender_rgba<rgba16, order_argb> blender_argb64;
typedef blender_rgba<rgba16, order_abgr> blender_abgr64;
typedef blender_rgba<rgba16, order_bgra> blender_bgra64;
typedef blender_rgba_pre<rgba16, order_rgba> blender_rgba64_pre;
typedef blender_rgba_pre<rgba16, order_argb> blender_argb64_pre;
typedef blender_rgba_pre<rgba16, order_abgr> blender_abgr64_pre;
typedef blender_rgba_pre<rgba16, order_bgra> blender_bgra64_pre;
typedef blender_rgba_plain<rgba16, order_rgba> blender_rgba64_plain;
typedef blender_rgba_plain<rgba16, order_argb> blender_argb64_plain;
typedef blender_rgba_plain<rgba16, order_abgr> blender_abgr64_plain;
typedef blender_rgba_plain<rgba16, order_bgra> blender_bgra64_plain;
typedef blender_rgba<rgba32, order_rgba> blender_rgba128;
typedef blender_rgba<rgba32, order_argb> blender_argb128;
typedef blender_rgba<rgba32, order_abgr> blender_abgr128;
typedef blender_rgba<rgba32, order_bgra> blender_bgra128;
typedef blender_rgba_pre<rgba32, order_rgba> blender_rgba128_pre;
typedef blender_rgba_pre<rgba32, order_argb> blender_argb128_pre;
typedef blender_rgba_pre<rgba32, order_abgr> blender_abgr128_pre;
typedef blender_rgba_pre<rgba32, order_bgra> blender_bgra128_pre;
typedef blender_rgba_plain<rgba32, order_rgba> blender_rgba128_plain;
typedef blender_rgba_plain<rgba32, order_argb> blender_argb128_plain;
typedef blender_rgba_plain<rgba32, order_abgr> blender_abgr128_plain;
typedef blender_rgba_plain<rgba32, order_bgra> blender_bgra128_plain;
//-----------------------------------------------------------------------
typedef pixfmt_alpha_blend_rgba<blender_rgba32, rendering_buffer> pixfmt_rgba32;
typedef pixfmt_alpha_blend_rgba<blender_argb32, rendering_buffer> pixfmt_argb32;
typedef pixfmt_alpha_blend_rgba<blender_abgr32, rendering_buffer> pixfmt_abgr32;
typedef pixfmt_alpha_blend_rgba<blender_bgra32, rendering_buffer> pixfmt_bgra32;
typedef pixfmt_alpha_blend_rgba<blender_srgba32, rendering_buffer> pixfmt_srgba32;
typedef pixfmt_alpha_blend_rgba<blender_sargb32, rendering_buffer> pixfmt_sargb32;
typedef pixfmt_alpha_blend_rgba<blender_sabgr32, rendering_buffer> pixfmt_sabgr32;
typedef pixfmt_alpha_blend_rgba<blender_sbgra32, rendering_buffer> pixfmt_sbgra32;
typedef pixfmt_alpha_blend_rgba<blender_rgba32_pre, rendering_buffer> pixfmt_rgba32_pre;
typedef pixfmt_alpha_blend_rgba<blender_argb32_pre, rendering_buffer> pixfmt_argb32_pre;
typedef pixfmt_alpha_blend_rgba<blender_abgr32_pre, rendering_buffer> pixfmt_abgr32_pre;
typedef pixfmt_alpha_blend_rgba<blender_bgra32_pre, rendering_buffer> pixfmt_bgra32_pre;
typedef pixfmt_alpha_blend_rgba<blender_srgba32_pre, rendering_buffer> pixfmt_srgba32_pre;
typedef pixfmt_alpha_blend_rgba<blender_sargb32_pre, rendering_buffer> pixfmt_sargb32_pre;
typedef pixfmt_alpha_blend_rgba<blender_sabgr32_pre, rendering_buffer> pixfmt_sabgr32_pre;
typedef pixfmt_alpha_blend_rgba<blender_sbgra32_pre, rendering_buffer> pixfmt_sbgra32_pre;
typedef pixfmt_alpha_blend_rgba<blender_rgba32_plain, rendering_buffer> pixfmt_rgba32_plain;
typedef pixfmt_alpha_blend_rgba<blender_argb32_plain, rendering_buffer> pixfmt_argb32_plain;
typedef pixfmt_alpha_blend_rgba<blender_abgr32_plain, rendering_buffer> pixfmt_abgr32_plain;
typedef pixfmt_alpha_blend_rgba<blender_bgra32_plain, rendering_buffer> pixfmt_bgra32_plain;
typedef pixfmt_alpha_blend_rgba<blender_srgba32_plain, rendering_buffer> pixfmt_srgba32_plain;
typedef pixfmt_alpha_blend_rgba<blender_sargb32_plain, rendering_buffer> pixfmt_sargb32_plain;
typedef pixfmt_alpha_blend_rgba<blender_sabgr32_plain, rendering_buffer> pixfmt_sabgr32_plain;
typedef pixfmt_alpha_blend_rgba<blender_sbgra32_plain, rendering_buffer> pixfmt_sbgra32_plain;
typedef pixfmt_alpha_blend_rgba<blender_rgba64, rendering_buffer> pixfmt_rgba64;
typedef pixfmt_alpha_blend_rgba<blender_argb64, rendering_buffer> pixfmt_argb64;
typedef pixfmt_alpha_blend_rgba<blender_abgr64, rendering_buffer> pixfmt_abgr64;
typedef pixfmt_alpha_blend_rgba<blender_bgra64, rendering_buffer> pixfmt_bgra64;
typedef pixfmt_alpha_blend_rgba<blender_rgba64_pre, rendering_buffer> pixfmt_rgba64_pre;
typedef pixfmt_alpha_blend_rgba<blender_argb64_pre, rendering_buffer> pixfmt_argb64_pre;
typedef pixfmt_alpha_blend_rgba<blender_abgr64_pre, rendering_buffer> pixfmt_abgr64_pre;
typedef pixfmt_alpha_blend_rgba<blender_bgra64_pre, rendering_buffer> pixfmt_bgra64_pre;
typedef pixfmt_alpha_blend_rgba<blender_rgba64_plain, rendering_buffer> pixfmt_rgba64_plain;
typedef pixfmt_alpha_blend_rgba<blender_argb64_plain, rendering_buffer> pixfmt_argb64_plain;
typedef pixfmt_alpha_blend_rgba<blender_abgr64_plain, rendering_buffer> pixfmt_abgr64_plain;
typedef pixfmt_alpha_blend_rgba<blender_bgra64_plain, rendering_buffer> pixfmt_bgra64_plain;
typedef pixfmt_alpha_blend_rgba<blender_rgba128, rendering_buffer> pixfmt_rgba128;
typedef pixfmt_alpha_blend_rgba<blender_argb128, rendering_buffer> pixfmt_argb128;
typedef pixfmt_alpha_blend_rgba<blender_abgr128, rendering_buffer> pixfmt_abgr128;
typedef pixfmt_alpha_blend_rgba<blender_bgra128, rendering_buffer> pixfmt_bgra128;
typedef pixfmt_alpha_blend_rgba<blender_rgba128_pre, rendering_buffer> pixfmt_rgba128_pre;
typedef pixfmt_alpha_blend_rgba<blender_argb128_pre, rendering_buffer> pixfmt_argb128_pre;
typedef pixfmt_alpha_blend_rgba<blender_abgr128_pre, rendering_buffer> pixfmt_abgr128_pre;
typedef pixfmt_alpha_blend_rgba<blender_bgra128_pre, rendering_buffer> pixfmt_bgra128_pre;
typedef pixfmt_alpha_blend_rgba<blender_rgba128_plain, rendering_buffer> pixfmt_rgba128_plain;
typedef pixfmt_alpha_blend_rgba<blender_argb128_plain, rendering_buffer> pixfmt_argb128_plain;
typedef pixfmt_alpha_blend_rgba<blender_abgr128_plain, rendering_buffer> pixfmt_abgr128_plain;
typedef pixfmt_alpha_blend_rgba<blender_bgra128_plain, rendering_buffer> pixfmt_bgra128_plain;
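// Example (sketch, not part of the original library): attaching one of the
// pixel formats above to raw frame memory and blending a half-transparent
// horizontal line. "frame", "width" and "height" are assumptions of this
// sketch and must be provided by the application.
//
//     agg::int8u* frame = ...;                          // width * height * 4 bytes
//     agg::rendering_buffer rbuf(frame, width, height, width * 4);
//     agg::pixfmt_bgra32 pixf(rbuf);
//     pixf.blend_hline(10, 20, 100, agg::rgba8(255, 0, 0, 128), agg::cover_full);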
}
#endif
| 0 |
D://workCode//uploadProject\awtk\3rd\agg | D://workCode//uploadProject\awtk\3rd\agg\include\agg_pixfmt_rgb_packed.h | //----------------------------------------------------------------------------
// Anti-Grain Geometry - Version 2.4
// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
//
// Permission to copy, use, modify, sell and distribute this software
// is granted provided this copyright notice appears in all copies.
// This software is provided "as is" without express or implied
// warranty, and with no claim as to its suitability for any purpose.
//
//----------------------------------------------------------------------------
// Contact: mcseem@antigrain.com
// mcseemagg@yahoo.com
// http://www.antigrain.com
//----------------------------------------------------------------------------
//
// Adaptation for high precision colors has been sponsored by
// Liberty Technology Systems, Inc., visit http://lib-sys.com
//
// Liberty Technology Systems, Inc. is the provider of
// PostScript and PDF technology for software developers.
//
//----------------------------------------------------------------------------
#ifndef AGG_PIXFMT_RGB_PACKED_INCLUDED
#define AGG_PIXFMT_RGB_PACKED_INCLUDED
#include <string.h>
#include "agg_basics.h"
#include "agg_color_rgba.h"
#include "agg_rendering_buffer.h"
namespace agg
{
//=========================================================blender_rgb555
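// Blender for 16-bit 1-5-5-5 packed pixels: bit 15 is always set, bits
// 14-10 hold red, 9-5 green and 4-0 blue. blend_pix linearly interpolates
// each channel towards the source color by the source alpha.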
struct blender_rgb555
{
typedef rgba8 color_type;
typedef color_type::value_type value_type;
typedef color_type::calc_type calc_type;
typedef int16u pixel_type;
static AGG_INLINE void blend_pix(pixel_type* p,
unsigned cr, unsigned cg, unsigned cb,
unsigned alpha,
unsigned)
{
pixel_type rgb = *p;
calc_type r = (rgb >> 7) & 0xF8;
calc_type g = (rgb >> 2) & 0xF8;
calc_type b = (rgb << 3) & 0xF8;
*p = (pixel_type)
(((((cr - r) * alpha + (r << 8)) >> 1) & 0x7C00) |
((((cg - g) * alpha + (g << 8)) >> 6) & 0x03E0) |
(((cb - b) * alpha + (b << 8)) >> 11) | 0x8000);
}
static AGG_INLINE pixel_type make_pix(unsigned r, unsigned g, unsigned b)
{
return (pixel_type)(((r & 0xF8) << 7) |
((g & 0xF8) << 2) |
(b >> 3) | 0x8000);
}
static AGG_INLINE color_type make_color(pixel_type p)
{
return color_type((p >> 7) & 0xF8,
(p >> 2) & 0xF8,
(p << 3) & 0xF8);
}
};
//=====================================================blender_rgb555_pre
struct blender_rgb555_pre
{
typedef rgba8 color_type;
typedef color_type::value_type value_type;
typedef color_type::calc_type calc_type;
typedef int16u pixel_type;
static AGG_INLINE void blend_pix(pixel_type* p,
unsigned cr, unsigned cg, unsigned cb,
unsigned alpha,
unsigned cover)
{
alpha = color_type::base_mask - alpha;
pixel_type rgb = *p;
calc_type r = (rgb >> 7) & 0xF8;
calc_type g = (rgb >> 2) & 0xF8;
calc_type b = (rgb << 3) & 0xF8;
*p = (pixel_type)
((((r * alpha + cr * cover) >> 1) & 0x7C00) |
(((g * alpha + cg * cover) >> 6) & 0x03E0) |
((b * alpha + cb * cover) >> 11) | 0x8000);
}
static AGG_INLINE pixel_type make_pix(unsigned r, unsigned g, unsigned b)
{
return (pixel_type)(((r & 0xF8) << 7) |
((g & 0xF8) << 2) |
(b >> 3) | 0x8000);
}
static AGG_INLINE color_type make_color(pixel_type p)
{
return color_type((p >> 7) & 0xF8,
(p >> 2) & 0xF8,
(p << 3) & 0xF8);
}
};
//=====================================================blender_rgb555_gamma
template<class Gamma> class blender_rgb555_gamma
{
public:
typedef rgba8 color_type;
typedef color_type::value_type value_type;
typedef color_type::calc_type calc_type;
typedef int16u pixel_type;
typedef Gamma gamma_type;
blender_rgb555_gamma() : m_gamma(0) {}
void gamma(const gamma_type& g) { m_gamma = &g; }
AGG_INLINE void blend_pix(pixel_type* p,
unsigned cr, unsigned cg, unsigned cb,
unsigned alpha,
unsigned)
{
pixel_type rgb = *p;
calc_type r = m_gamma->dir((rgb >> 7) & 0xF8);
calc_type g = m_gamma->dir((rgb >> 2) & 0xF8);
calc_type b = m_gamma->dir((rgb << 3) & 0xF8);
*p = (pixel_type)
(((m_gamma->inv(((m_gamma->dir(cr) - r) * alpha + (r << 8)) >> 8) << 7) & 0x7C00) |
((m_gamma->inv(((m_gamma->dir(cg) - g) * alpha + (g << 8)) >> 8) << 2) & 0x03E0) |
(m_gamma->inv(((m_gamma->dir(cb) - b) * alpha + (b << 8)) >> 8) >> 3) | 0x8000);
}
static AGG_INLINE pixel_type make_pix(unsigned r, unsigned g, unsigned b)
{
return (pixel_type)(((r & 0xF8) << 7) |
((g & 0xF8) << 2) |
(b >> 3) | 0x8000);
}
static AGG_INLINE color_type make_color(pixel_type p)
{
return color_type((p >> 7) & 0xF8,
(p >> 2) & 0xF8,
(p << 3) & 0xF8);
}
private:
const Gamma* m_gamma;
};
//=========================================================blender_rgb565
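// Blender for 16-bit 5-6-5 packed pixels: bits 15-11 hold red, 10-5 green
// and 4-0 blue.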
struct blender_rgb565
{
typedef rgba8 color_type;
typedef color_type::value_type value_type;
typedef color_type::calc_type calc_type;
typedef int16u pixel_type;
static AGG_INLINE void blend_pix(pixel_type* p,
unsigned cr, unsigned cg, unsigned cb,
unsigned alpha,
unsigned)
{
pixel_type rgb = *p;
calc_type r = (rgb >> 8) & 0xF8;
calc_type g = (rgb >> 3) & 0xFC;
calc_type b = (rgb << 3) & 0xF8;
*p = (pixel_type)
(((((cr - r) * alpha + (r << 8)) ) & 0xF800) |
((((cg - g) * alpha + (g << 8)) >> 5) & 0x07E0) |
(((cb - b) * alpha + (b << 8)) >> 11));
}
static AGG_INLINE pixel_type make_pix(unsigned r, unsigned g, unsigned b)
{
return (pixel_type)(((r & 0xF8) << 8) | ((g & 0xFC) << 3) | (b >> 3));
}
static AGG_INLINE color_type make_color(pixel_type p)
{
return color_type((p >> 8) & 0xF8,
(p >> 3) & 0xFC,
(p << 3) & 0xF8);
}
};
//=====================================================blender_rgb565_pre
struct blender_rgb565_pre
{
typedef rgba8 color_type;
typedef color_type::value_type value_type;
typedef color_type::calc_type calc_type;
typedef int16u pixel_type;
static AGG_INLINE void blend_pix(pixel_type* p,
unsigned cr, unsigned cg, unsigned cb,
unsigned alpha,
unsigned cover)
{
alpha = color_type::base_mask - alpha;
pixel_type rgb = *p;
calc_type r = (rgb >> 8) & 0xF8;
calc_type g = (rgb >> 3) & 0xFC;
calc_type b = (rgb << 3) & 0xF8;
*p = (pixel_type)
((((r * alpha + cr * cover) ) & 0xF800) |
(((g * alpha + cg * cover) >> 5 ) & 0x07E0) |
((b * alpha + cb * cover) >> 11));
}
static AGG_INLINE pixel_type make_pix(unsigned r, unsigned g, unsigned b)
{
return (pixel_type)(((r & 0xF8) << 8) | ((g & 0xFC) << 3) | (b >> 3));
}
static AGG_INLINE color_type make_color(pixel_type p)
{
return color_type((p >> 8) & 0xF8,
(p >> 3) & 0xFC,
(p << 3) & 0xF8);
}
};
//=====================================================blender_rgb565_gamma
template<class Gamma> class blender_rgb565_gamma
{
public:
typedef rgba8 color_type;
typedef color_type::value_type value_type;
typedef color_type::calc_type calc_type;
typedef int16u pixel_type;
typedef Gamma gamma_type;
blender_rgb565_gamma() : m_gamma(0) {}
void gamma(const gamma_type& g) { m_gamma = &g; }
AGG_INLINE void blend_pix(pixel_type* p,
unsigned cr, unsigned cg, unsigned cb,
unsigned alpha,
unsigned)
{
pixel_type rgb = *p;
calc_type r = m_gamma->dir((rgb >> 8) & 0xF8);
calc_type g = m_gamma->dir((rgb >> 3) & 0xFC);
calc_type b = m_gamma->dir((rgb << 3) & 0xF8);
*p = (pixel_type)
(((m_gamma->inv(((m_gamma->dir(cr) - r) * alpha + (r << 8)) >> 8) << 8) & 0xF800) |
((m_gamma->inv(((m_gamma->dir(cg) - g) * alpha + (g << 8)) >> 8) << 3) & 0x07E0) |
(m_gamma->inv(((m_gamma->dir(cb) - b) * alpha + (b << 8)) >> 8) >> 3));
}
static AGG_INLINE pixel_type make_pix(unsigned r, unsigned g, unsigned b)
{
return (pixel_type)(((r & 0xF8) << 8) | ((g & 0xFC) << 3) | (b >> 3));
}
static AGG_INLINE color_type make_color(pixel_type p)
{
return color_type((p >> 8) & 0xF8,
(p >> 3) & 0xFC,
(p << 3) & 0xF8);
}
private:
const Gamma* m_gamma;
};
//=====================================================blender_rgbAAA
struct blender_rgbAAA
{
typedef rgba16 color_type;
typedef color_type::value_type value_type;
typedef color_type::calc_type calc_type;
typedef int32u pixel_type;
static AGG_INLINE void blend_pix(pixel_type* p,
unsigned cr, unsigned cg, unsigned cb,
unsigned alpha,
unsigned)
{
pixel_type rgb = *p;
calc_type r = (rgb >> 14) & 0xFFC0;
calc_type g = (rgb >> 4) & 0xFFC0;
calc_type b = (rgb << 6) & 0xFFC0;
*p = (pixel_type)
(((((cr - r) * alpha + (r << 16)) >> 2) & 0x3FF00000) |
((((cg - g) * alpha + (g << 16)) >> 12) & 0x000FFC00) |
(((cb - b) * alpha + (b << 16)) >> 22) | 0xC0000000);
}
static AGG_INLINE pixel_type make_pix(unsigned r, unsigned g, unsigned b)
{
return (pixel_type)(((r & 0xFFC0) << 14) |
((g & 0xFFC0) << 4) |
(b >> 6) | 0xC0000000);
}
static AGG_INLINE color_type make_color(pixel_type p)
{
return color_type((p >> 14) & 0xFFC0,
(p >> 4) & 0xFFC0,
(p << 6) & 0xFFC0);
}
};
//==================================================blender_rgbAAA_pre
struct blender_rgbAAA_pre
{
typedef rgba16 color_type;
typedef color_type::value_type value_type;
typedef color_type::calc_type calc_type;
typedef int32u pixel_type;
static AGG_INLINE void blend_pix(pixel_type* p,
unsigned cr, unsigned cg, unsigned cb,
unsigned alpha,
unsigned cover)
{
alpha = color_type::base_mask - alpha;
cover = (cover + 1) << (color_type::base_shift - 8);
pixel_type rgb = *p;
calc_type r = (rgb >> 14) & 0xFFC0;
calc_type g = (rgb >> 4) & 0xFFC0;
calc_type b = (rgb << 6) & 0xFFC0;
*p = (pixel_type)
((((r * alpha + cr * cover) >> 2) & 0x3FF00000) |
(((g * alpha + cg * cover) >> 12) & 0x000FFC00) |
((b * alpha + cb * cover) >> 22) | 0xC0000000);
}
static AGG_INLINE pixel_type make_pix(unsigned r, unsigned g, unsigned b)
{
return (pixel_type)(((r & 0xFFC0) << 14) |
((g & 0xFFC0) << 4) |
(b >> 6) | 0xC0000000);
}
static AGG_INLINE color_type make_color(pixel_type p)
{
return color_type((p >> 14) & 0xFFC0,
(p >> 4) & 0xFFC0,
(p << 6) & 0xFFC0);
}
};
//=================================================blender_rgbAAA_gamma
template<class Gamma> class blender_rgbAAA_gamma
{
public:
typedef rgba16 color_type;
typedef color_type::value_type value_type;
typedef color_type::calc_type calc_type;
typedef int32u pixel_type;
typedef Gamma gamma_type;
blender_rgbAAA_gamma() : m_gamma(0) {}
void gamma(const gamma_type& g) { m_gamma = &g; }
AGG_INLINE void blend_pix(pixel_type* p,
unsigned cr, unsigned cg, unsigned cb,
unsigned alpha,
unsigned)
{
pixel_type rgb = *p;
calc_type r = m_gamma->dir((rgb >> 14) & 0xFFC0);
calc_type g = m_gamma->dir((rgb >> 4) & 0xFFC0);
calc_type b = m_gamma->dir((rgb << 6) & 0xFFC0);
*p = (pixel_type)
(((m_gamma->inv(((m_gamma->dir(cr) - r) * alpha + (r << 16)) >> 16) << 14) & 0x3FF00000) |
((m_gamma->inv(((m_gamma->dir(cg) - g) * alpha + (g << 16)) >> 16) << 4 ) & 0x000FFC00) |
(m_gamma->inv(((m_gamma->dir(cb) - b) * alpha + (b << 16)) >> 16) >> 6 ) | 0xC0000000);
}
static AGG_INLINE pixel_type make_pix(unsigned r, unsigned g, unsigned b)
{
return (pixel_type)(((r & 0xFFC0) << 14) |
((g & 0xFFC0) << 4) |
(b >> 6) | 0xC0000000);
}
static AGG_INLINE color_type make_color(pixel_type p)
{
return color_type((p >> 14) & 0xFFC0,
(p >> 4) & 0xFFC0,
(p << 6) & 0xFFC0);
}
private:
const Gamma* m_gamma;
};
//=====================================================blender_bgrAAA
struct blender_bgrAAA
{
typedef rgba16 color_type;
typedef color_type::value_type value_type;
typedef color_type::calc_type calc_type;
typedef int32u pixel_type;
static AGG_INLINE void blend_pix(pixel_type* p,
unsigned cr, unsigned cg, unsigned cb,
unsigned alpha,
unsigned)
{
pixel_type bgr = *p;
calc_type b = (bgr >> 14) & 0xFFC0;
calc_type g = (bgr >> 4) & 0xFFC0;
calc_type r = (bgr << 6) & 0xFFC0;
*p = (pixel_type)
(((((cb - b) * alpha + (b << 16)) >> 2) & 0x3FF00000) |
((((cg - g) * alpha + (g << 16)) >> 12) & 0x000FFC00) |
(((cr - r) * alpha + (r << 16)) >> 22) | 0xC0000000);
}
static AGG_INLINE pixel_type make_pix(unsigned r, unsigned g, unsigned b)
{
return (pixel_type)(((b & 0xFFC0) << 14) |
((g & 0xFFC0) << 4) |
(r >> 6) | 0xC0000000);
}
static AGG_INLINE color_type make_color(pixel_type p)
{
return color_type((p << 6) & 0xFFC0,
(p >> 4) & 0xFFC0,
(p >> 14) & 0xFFC0);
}
};
//=================================================blender_bgrAAA_pre
struct blender_bgrAAA_pre
{
typedef rgba16 color_type;
typedef color_type::value_type value_type;
typedef color_type::calc_type calc_type;
typedef int32u pixel_type;
static AGG_INLINE void blend_pix(pixel_type* p,
unsigned cr, unsigned cg, unsigned cb,
unsigned alpha,
unsigned cover)
{
alpha = color_type::base_mask - alpha;
cover = (cover + 1) << (color_type::base_shift - 8);
pixel_type bgr = *p;
calc_type b = (bgr >> 14) & 0xFFC0;
calc_type g = (bgr >> 4) & 0xFFC0;
calc_type r = (bgr << 6) & 0xFFC0;
*p = (pixel_type)
((((b * alpha + cb * cover) >> 2) & 0x3FF00000) |
(((g * alpha + cg * cover) >> 12) & 0x000FFC00) |
((r * alpha + cr * cover) >> 22) | 0xC0000000);
}
static AGG_INLINE pixel_type make_pix(unsigned r, unsigned g, unsigned b)
{
return (pixel_type)(((b & 0xFFC0) << 14) |
((g & 0xFFC0) << 4) |
(r >> 6) | 0xC0000000);
}
static AGG_INLINE color_type make_color(pixel_type p)
{
return color_type((p << 6) & 0xFFC0,
(p >> 4) & 0xFFC0,
(p >> 14) & 0xFFC0);
}
};
//=================================================blender_bgrAAA_gamma
template<class Gamma> class blender_bgrAAA_gamma
{
public:
typedef rgba16 color_type;
typedef color_type::value_type value_type;
typedef color_type::calc_type calc_type;
typedef int32u pixel_type;
typedef Gamma gamma_type;
blender_bgrAAA_gamma() : m_gamma(0) {}
void gamma(const gamma_type& g) { m_gamma = &g; }
AGG_INLINE void blend_pix(pixel_type* p,
unsigned cr, unsigned cg, unsigned cb,
unsigned alpha,
unsigned)
{
pixel_type bgr = *p;
calc_type b = m_gamma->dir((bgr >> 14) & 0xFFC0);
calc_type g = m_gamma->dir((bgr >> 4) & 0xFFC0);
calc_type r = m_gamma->dir((bgr << 6) & 0xFFC0);
*p = (pixel_type)
(((m_gamma->inv(((m_gamma->dir(cb) - b) * alpha + (b << 16)) >> 16) << 14) & 0x3FF00000) |
((m_gamma->inv(((m_gamma->dir(cg) - g) * alpha + (g << 16)) >> 16) << 4 ) & 0x000FFC00) |
(m_gamma->inv(((m_gamma->dir(cr) - r) * alpha + (r << 16)) >> 16) >> 6 ) | 0xC0000000);
}
static AGG_INLINE pixel_type make_pix(unsigned r, unsigned g, unsigned b)
{
return (pixel_type)(((b & 0xFFC0) << 14) |
((g & 0xFFC0) << 4) |
(r >> 6) | 0xC0000000);
}
static AGG_INLINE color_type make_color(pixel_type p)
{
return color_type((p << 6) & 0xFFC0,
(p >> 4) & 0xFFC0,
(p >> 14) & 0xFFC0);
}
private:
const Gamma* m_gamma;
};
//=====================================================blender_rgbBBA
struct blender_rgbBBA
{
typedef rgba16 color_type;
typedef color_type::value_type value_type;
typedef color_type::calc_type calc_type;
typedef int32u pixel_type;
static AGG_INLINE void blend_pix(pixel_type* p,
unsigned cr, unsigned cg, unsigned cb,
unsigned alpha,
unsigned)
{
pixel_type rgb = *p;
calc_type r = (rgb >> 16) & 0xFFE0;
calc_type g = (rgb >> 5) & 0xFFE0;
calc_type b = (rgb << 6) & 0xFFC0;
*p = (pixel_type)
(((((cr - r) * alpha + (r << 16)) ) & 0xFFE00000) |
((((cg - g) * alpha + (g << 16)) >> 11) & 0x001FFC00) |
(((cb - b) * alpha + (b << 16)) >> 22));
}
static AGG_INLINE pixel_type make_pix(unsigned r, unsigned g, unsigned b)
{
return (pixel_type)(((r & 0xFFE0) << 16) | ((g & 0xFFE0) << 5) | (b >> 6));
}
static AGG_INLINE color_type make_color(pixel_type p)
{
return color_type((p >> 16) & 0xFFE0,
(p >> 5) & 0xFFE0,
(p << 6) & 0xFFC0);
}
};
//=================================================blender_rgbBBA_pre
struct blender_rgbBBA_pre
{
typedef rgba16 color_type;
typedef color_type::value_type value_type;
typedef color_type::calc_type calc_type;
typedef int32u pixel_type;
static AGG_INLINE void blend_pix(pixel_type* p,
unsigned cr, unsigned cg, unsigned cb,
unsigned alpha,
unsigned cover)
{
alpha = color_type::base_mask - alpha;
cover = (cover + 1) << (color_type::base_shift - 8);
pixel_type rgb = *p;
calc_type r = (rgb >> 16) & 0xFFE0;
calc_type g = (rgb >> 5) & 0xFFE0;
calc_type b = (rgb << 6) & 0xFFC0;
*p = (pixel_type)
((((r * alpha + cr * cover) ) & 0xFFE00000) |
(((g * alpha + cg * cover) >> 11) & 0x001FFC00) |
((b * alpha + cb * cover) >> 22));
}
static AGG_INLINE pixel_type make_pix(unsigned r, unsigned g, unsigned b)
{
return (pixel_type)(((r & 0xFFE0) << 16) | ((g & 0xFFE0) << 5) | (b >> 6));
}
static AGG_INLINE color_type make_color(pixel_type p)
{
return color_type((p >> 16) & 0xFFE0,
(p >> 5) & 0xFFE0,
(p << 6) & 0xFFC0);
}
};
//=================================================blender_rgbBBA_gamma
template<class Gamma> class blender_rgbBBA_gamma
{
public:
typedef rgba16 color_type;
typedef color_type::value_type value_type;
typedef color_type::calc_type calc_type;
typedef int32u pixel_type;
typedef Gamma gamma_type;
blender_rgbBBA_gamma() : m_gamma(0) {}
void gamma(const gamma_type& g) { m_gamma = &g; }
AGG_INLINE void blend_pix(pixel_type* p,
unsigned cr, unsigned cg, unsigned cb,
unsigned alpha,
unsigned)
{
pixel_type rgb = *p;
calc_type r = m_gamma->dir((rgb >> 16) & 0xFFE0);
calc_type g = m_gamma->dir((rgb >> 5) & 0xFFE0);
calc_type b = m_gamma->dir((rgb << 6) & 0xFFC0);
*p = (pixel_type)
(((m_gamma->inv(((m_gamma->dir(cr) - r) * alpha + (r << 16)) >> 16) << 16) & 0xFFE00000) |
((m_gamma->inv(((m_gamma->dir(cg) - g) * alpha + (g << 16)) >> 16) << 5 ) & 0x001FFC00) |
(m_gamma->inv(((m_gamma->dir(cb) - b) * alpha + (b << 16)) >> 16) >> 6 ));
}
static AGG_INLINE pixel_type make_pix(unsigned r, unsigned g, unsigned b)
{
return (pixel_type)(((r & 0xFFE0) << 16) | ((g & 0xFFE0) << 5) | (b >> 6));
}
static AGG_INLINE color_type make_color(pixel_type p)
{
return color_type((p >> 16) & 0xFFE0,
(p >> 5) & 0xFFE0,
(p << 6) & 0xFFC0);
}
private:
const Gamma* m_gamma;
};
//=====================================================blender_bgrABB
struct blender_bgrABB
{
typedef rgba16 color_type;
typedef color_type::value_type value_type;
typedef color_type::calc_type calc_type;
typedef int32u pixel_type;
static AGG_INLINE void blend_pix(pixel_type* p,
unsigned cr, unsigned cg, unsigned cb,
unsigned alpha,
unsigned)
{
pixel_type bgr = *p;
calc_type b = (bgr >> 16) & 0xFFC0;
calc_type g = (bgr >> 6) & 0xFFE0;
calc_type r = (bgr << 5) & 0xFFE0;
*p = (pixel_type)
(((((cb - b) * alpha + (b << 16)) ) & 0xFFC00000) |
((((cg - g) * alpha + (g << 16)) >> 10) & 0x003FF800) |
(((cr - r) * alpha + (r << 16)) >> 21));
}
static AGG_INLINE pixel_type make_pix(unsigned r, unsigned g, unsigned b)
{
return (pixel_type)(((b & 0xFFC0) << 16) | ((g & 0xFFE0) << 6) | (r >> 5));
}
static AGG_INLINE color_type make_color(pixel_type p)
{
return color_type((p << 5) & 0xFFE0,
(p >> 6) & 0xFFE0,
(p >> 16) & 0xFFC0);
}
};
//=================================================blender_bgrABB_pre
struct blender_bgrABB_pre
{
typedef rgba16 color_type;
typedef color_type::value_type value_type;
typedef color_type::calc_type calc_type;
typedef int32u pixel_type;
static AGG_INLINE void blend_pix(pixel_type* p,
unsigned cr, unsigned cg, unsigned cb,
unsigned alpha,
unsigned cover)
{
alpha = color_type::base_mask - alpha;
cover = (cover + 1) << (color_type::base_shift - 8);
pixel_type bgr = *p;
calc_type b = (bgr >> 16) & 0xFFC0;
calc_type g = (bgr >> 6) & 0xFFE0;
calc_type r = (bgr << 5) & 0xFFE0;
*p = (pixel_type)
((((b * alpha + cb * cover) ) & 0xFFC00000) |
(((g * alpha + cg * cover) >> 10) & 0x003FF800) |
((r * alpha + cr * cover) >> 21));
}
static AGG_INLINE pixel_type make_pix(unsigned r, unsigned g, unsigned b)
{
return (pixel_type)(((b & 0xFFC0) << 16) | ((g & 0xFFE0) << 6) | (r >> 5));
}
static AGG_INLINE color_type make_color(pixel_type p)
{
return color_type((p << 5) & 0xFFE0,
(p >> 6) & 0xFFE0,
(p >> 16) & 0xFFC0);
}
};
//=================================================blender_bgrABB_gamma
template<class Gamma> class blender_bgrABB_gamma
{
public:
typedef rgba16 color_type;
typedef color_type::value_type value_type;
typedef color_type::calc_type calc_type;
typedef int32u pixel_type;
typedef Gamma gamma_type;
blender_bgrABB_gamma() : m_gamma(0) {}
void gamma(const gamma_type& g) { m_gamma = &g; }
AGG_INLINE void blend_pix(pixel_type* p,
unsigned cr, unsigned cg, unsigned cb,
unsigned alpha,
unsigned)
{
pixel_type bgr = *p;
calc_type b = m_gamma->dir((bgr >> 16) & 0xFFC0);
calc_type g = m_gamma->dir((bgr >> 6) & 0xFFE0);
calc_type r = m_gamma->dir((bgr << 5) & 0xFFE0);
*p = (pixel_type)
(((m_gamma->inv(((m_gamma->dir(cb) - b) * alpha + (b << 16)) >> 16) << 16) & 0xFFC00000) |
((m_gamma->inv(((m_gamma->dir(cg) - g) * alpha + (g << 16)) >> 16) << 6 ) & 0x003FF800) |
(m_gamma->inv(((m_gamma->dir(cr) - r) * alpha + (r << 16)) >> 16) >> 5 ));
}
static AGG_INLINE pixel_type make_pix(unsigned r, unsigned g, unsigned b)
{
return (pixel_type)(((b & 0xFFC0) << 16) | ((g & 0xFFE0) << 6) | (r >> 5));
}
static AGG_INLINE color_type make_color(pixel_type p)
{
return color_type((p << 5) & 0xFFE0,
(p >> 6) & 0xFFE0,
(p >> 16) & 0xFFC0);
}
private:
const Gamma* m_gamma;
};
//===========================================pixfmt_alpha_blend_rgb_packed
template<class Blender, class RenBuf> class pixfmt_alpha_blend_rgb_packed
{
public:
typedef RenBuf rbuf_type;
typedef typename rbuf_type::row_data row_data;
typedef Blender blender_type;
typedef typename blender_type::color_type color_type;
typedef typename blender_type::pixel_type pixel_type;
typedef int order_type; // A fake one
typedef typename color_type::value_type value_type;
typedef typename color_type::calc_type calc_type;
enum base_scale_e
{
base_shift = color_type::base_shift,
base_scale = color_type::base_scale,
base_mask = color_type::base_mask,
pix_width = sizeof(pixel_type),
};
private:
//--------------------------------------------------------------------
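        // Scale the color's alpha by the coverage ((c.a * (cover + 1)) >> 8 keeps
        // the result in 0..base_mask), then either overwrite the pixel outright
        // when the result is fully opaque or hand it to the blender otherwise.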
AGG_INLINE void copy_or_blend_pix(pixel_type* p, const color_type& c, unsigned cover)
{
if (c.a)
{
calc_type alpha = (calc_type(c.a) * (cover + 1)) >> 8;
if(alpha == base_mask)
{
*p = m_blender.make_pix(c.r, c.g, c.b);
}
else
{
m_blender.blend_pix(p, c.r, c.g, c.b, alpha, cover);
}
}
}
public:
//--------------------------------------------------------------------
explicit pixfmt_alpha_blend_rgb_packed(rbuf_type& rb) : m_rbuf(&rb) {}
void attach(rbuf_type& rb) { m_rbuf = &rb; }
//--------------------------------------------------------------------
template<class PixFmt>
bool attach(PixFmt& pixf, int x1, int y1, int x2, int y2)
{
rect_i r(x1, y1, x2, y2);
if(r.clip(rect_i(0, 0, pixf.width()-1, pixf.height()-1)))
{
int stride = pixf.stride();
m_rbuf->attach(pixf.pix_ptr(r.x1, stride < 0 ? r.y2 : r.y1),
(r.x2 - r.x1) + 1,
(r.y2 - r.y1) + 1,
stride);
return true;
}
return false;
}
Blender& blender() { return m_blender; }
//--------------------------------------------------------------------
AGG_INLINE unsigned width() const { return m_rbuf->width(); }
AGG_INLINE unsigned height() const { return m_rbuf->height(); }
AGG_INLINE int stride() const { return m_rbuf->stride(); }
//--------------------------------------------------------------------
AGG_INLINE int8u* row_ptr(int y) { return m_rbuf->row_ptr(y); }
AGG_INLINE const int8u* row_ptr(int y) const { return m_rbuf->row_ptr(y); }
AGG_INLINE row_data row(int y) const { return m_rbuf->row(y); }
//--------------------------------------------------------------------
AGG_INLINE int8u* pix_ptr(int x, int y)
{
return m_rbuf->row_ptr(y) + x * pix_width;
}
AGG_INLINE const int8u* pix_ptr(int x, int y) const
{
return m_rbuf->row_ptr(y) + x * pix_width;
}
//--------------------------------------------------------------------
AGG_INLINE void make_pix(int8u* p, const color_type& c)
{
*(pixel_type*)p = m_blender.make_pix(c.r, c.g, c.b);
}
//--------------------------------------------------------------------
AGG_INLINE color_type pixel(int x, int y) const
{
return m_blender.make_color(((pixel_type*)m_rbuf->row_ptr(y))[x]);
}
//--------------------------------------------------------------------
AGG_INLINE void copy_pixel(int x, int y, const color_type& c)
{
((pixel_type*)
m_rbuf->row_ptr(x, y, 1))[x] =
m_blender.make_pix(c.r, c.g, c.b);
}
//--------------------------------------------------------------------
AGG_INLINE void blend_pixel(int x, int y, const color_type& c, int8u cover)
{
copy_or_blend_pix((pixel_type*)m_rbuf->row_ptr(x, y, 1) + x, c, cover);
}
//--------------------------------------------------------------------
AGG_INLINE void copy_hline(int x, int y,
unsigned len,
const color_type& c)
{
pixel_type* p = (pixel_type*)m_rbuf->row_ptr(x, y, len) + x;
pixel_type v = m_blender.make_pix(c.r, c.g, c.b);
do
{
*p++ = v;
}
while(--len);
}
//--------------------------------------------------------------------
AGG_INLINE void copy_vline(int x, int y,
unsigned len,
const color_type& c)
{
pixel_type v = m_blender.make_pix(c.r, c.g, c.b);
do
{
pixel_type* p = (pixel_type*)m_rbuf->row_ptr(x, y++, 1) + x;
*p = v;
}
while(--len);
}
//--------------------------------------------------------------------
void blend_hline(int x, int y,
unsigned len,
const color_type& c,
int8u cover)
{
if (c.a)
{
pixel_type* p = (pixel_type*)m_rbuf->row_ptr(x, y, len) + x;
calc_type alpha = (calc_type(c.a) * (cover + 1)) >> 8;
if(alpha == base_mask)
{
pixel_type v = m_blender.make_pix(c.r, c.g, c.b);
do
{
*p++ = v;
}
while(--len);
}
else
{
do
{
m_blender.blend_pix(p, c.r, c.g, c.b, alpha, cover);
++p;
}
while(--len);
}
}
}
//--------------------------------------------------------------------
void blend_vline(int x, int y,
unsigned len,
const color_type& c,
int8u cover)
{
if (c.a)
{
calc_type alpha = (calc_type(c.a) * (cover + 1)) >> 8;
if(alpha == base_mask)
{
pixel_type v = m_blender.make_pix(c.r, c.g, c.b);
do
{
((pixel_type*)m_rbuf->row_ptr(x, y++, 1))[x] = v;
}
while(--len);
}
else
{
do
{
m_blender.blend_pix(
(pixel_type*)m_rbuf->row_ptr(x, y++, 1),
c.r, c.g, c.b, alpha, cover);
}
while(--len);
}
}
}
//--------------------------------------------------------------------
void blend_solid_hspan(int x, int y,
unsigned len,
const color_type& c,
const int8u* covers)
{
pixel_type* p = (pixel_type*)m_rbuf->row_ptr(x, y, len) + x;
do
{
copy_or_blend_pix(p, c, *covers++);
++p;
}
while(--len);
}
//--------------------------------------------------------------------
void blend_solid_vspan(int x, int y,
unsigned len,
const color_type& c,
const int8u* covers)
{
do
{
copy_or_blend_pix((pixel_type*)m_rbuf->row_ptr(x, y++, 1) + x,
c, *covers++);
}
while(--len);
}
//--------------------------------------------------------------------
void copy_color_hspan(int x, int y,
unsigned len,
const color_type* colors)
{
pixel_type* p = (pixel_type*)m_rbuf->row_ptr(x, y, len) + x;
do
{
*p++ = m_blender.make_pix(colors->r, colors->g, colors->b);
++colors;
}
while(--len);
}
//--------------------------------------------------------------------
void copy_color_vspan(int x, int y,
unsigned len,
const color_type* colors)
{
do
{
pixel_type* p = (pixel_type*)m_rbuf->row_ptr(x, y++, 1) + x;
*p = m_blender.make_pix(colors->r, colors->g, colors->b);
++colors;
}
while(--len);
}
//--------------------------------------------------------------------
void blend_color_hspan(int x, int y,
unsigned len,
const color_type* colors,
const int8u* covers,
int8u cover)
{
pixel_type* p = (pixel_type*)m_rbuf->row_ptr(x, y, len) + x;
do
{
copy_or_blend_pix(p++, *colors++, covers ? *covers++ : cover);
}
while(--len);
}
//--------------------------------------------------------------------
void blend_color_vspan(int x, int y,
unsigned len,
const color_type* colors,
const int8u* covers,
int8u cover)
{
do
{
copy_or_blend_pix((pixel_type*)m_rbuf->row_ptr(x, y++, 1) + x,
*colors++, covers ? *covers++ : cover);
}
while(--len);
}
//--------------------------------------------------------------------
template<class RenBuf2>
void copy_from(const RenBuf2& from,
int xdst, int ydst,
int xsrc, int ysrc,
unsigned len)
{
const int8u* p = from.row_ptr(ysrc);
if(p)
{
memmove(m_rbuf->row_ptr(xdst, ydst, len) + xdst * pix_width,
p + xsrc * pix_width,
len * pix_width);
}
}
//--------------------------------------------------------------------
template<class SrcPixelFormatRenderer>
void blend_from(const SrcPixelFormatRenderer& from,
int xdst, int ydst,
int xsrc, int ysrc,
unsigned len,
int8u cover)
{
typedef typename SrcPixelFormatRenderer::order_type src_order;
const value_type* psrc = (const value_type*)from.row_ptr(ysrc);
if(psrc)
{
psrc += xsrc * 4;
pixel_type* pdst =
(pixel_type*)m_rbuf->row_ptr(xdst, ydst, len) + xdst;
do
{
value_type alpha = psrc[src_order::A];
if(alpha)
{
if(alpha == base_mask && cover == 255)
{
*pdst = m_blender.make_pix(psrc[src_order::R],
psrc[src_order::G],
psrc[src_order::B]);
}
else
{
m_blender.blend_pix(pdst,
psrc[src_order::R],
psrc[src_order::G],
psrc[src_order::B],
alpha,
cover);
}
}
psrc += 4;
++pdst;
}
while(--len);
}
}
//--------------------------------------------------------------------
template<class SrcPixelFormatRenderer>
void blend_from_color(const SrcPixelFormatRenderer& from,
const color_type& color,
int xdst, int ydst,
int xsrc, int ysrc,
unsigned len,
int8u cover)
{
typedef typename SrcPixelFormatRenderer::value_type src_value_type;
typedef typename SrcPixelFormatRenderer::color_type src_color_type;
const src_value_type* psrc = (src_value_type*)from.row_ptr(ysrc);
if(psrc)
{
psrc += xsrc * SrcPixelFormatRenderer::pix_step + SrcPixelFormatRenderer::pix_offset;
pixel_type* pdst =
(pixel_type*)m_rbuf->row_ptr(xdst, ydst, len) + xdst;
do
{
m_blender.blend_pix(pdst,
color.r, color.g, color.b, color.a,
cover);
psrc += SrcPixelFormatRenderer::pix_step;
++pdst;
}
while(--len);
}
}
//--------------------------------------------------------------------
template<class SrcPixelFormatRenderer>
void blend_from_lut(const SrcPixelFormatRenderer& from,
const color_type* color_lut,
int xdst, int ydst,
int xsrc, int ysrc,
unsigned len,
int8u cover)
{
typedef typename SrcPixelFormatRenderer::value_type src_value_type;
const src_value_type* psrc = (src_value_type*)from.row_ptr(ysrc);
if(psrc)
{
psrc += xsrc * SrcPixelFormatRenderer::pix_step + SrcPixelFormatRenderer::pix_offset;
pixel_type* pdst =
(pixel_type*)m_rbuf->row_ptr(xdst, ydst, len) + xdst;
do
{
const color_type& color = color_lut[*psrc];
m_blender.blend_pix(pdst,
color.r, color.g, color.b, color.a,
cover);
psrc += SrcPixelFormatRenderer::pix_step;
++pdst;
}
while(--len);
}
}
private:
rbuf_type* m_rbuf;
Blender m_blender;
};
typedef pixfmt_alpha_blend_rgb_packed<blender_rgb555, rendering_buffer> pixfmt_rgb555; //----pixfmt_rgb555
typedef pixfmt_alpha_blend_rgb_packed<blender_rgb565, rendering_buffer> pixfmt_rgb565; //----pixfmt_rgb565
typedef pixfmt_alpha_blend_rgb_packed<blender_rgb555_pre, rendering_buffer> pixfmt_rgb555_pre; //----pixfmt_rgb555_pre
typedef pixfmt_alpha_blend_rgb_packed<blender_rgb565_pre, rendering_buffer> pixfmt_rgb565_pre; //----pixfmt_rgb565_pre
typedef pixfmt_alpha_blend_rgb_packed<blender_rgbAAA, rendering_buffer> pixfmt_rgbAAA; //----pixfmt_rgbAAA
typedef pixfmt_alpha_blend_rgb_packed<blender_bgrAAA, rendering_buffer> pixfmt_bgrAAA; //----pixfmt_bgrAAA
typedef pixfmt_alpha_blend_rgb_packed<blender_rgbBBA, rendering_buffer> pixfmt_rgbBBA; //----pixfmt_rgbBBA
typedef pixfmt_alpha_blend_rgb_packed<blender_bgrABB, rendering_buffer> pixfmt_bgrABB; //----pixfmt_bgrABB
typedef pixfmt_alpha_blend_rgb_packed<blender_rgbAAA_pre, rendering_buffer> pixfmt_rgbAAA_pre; //----pixfmt_rgbAAA_pre
typedef pixfmt_alpha_blend_rgb_packed<blender_bgrAAA_pre, rendering_buffer> pixfmt_bgrAAA_pre; //----pixfmt_bgrAAA_pre
typedef pixfmt_alpha_blend_rgb_packed<blender_rgbBBA_pre, rendering_buffer> pixfmt_rgbBBA_pre; //----pixfmt_rgbBBA_pre
typedef pixfmt_alpha_blend_rgb_packed<blender_bgrABB_pre, rendering_buffer> pixfmt_bgrABB_pre; //----pixfmt_bgrABB_pre
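    // A minimal usage sketch (illustrative only; 'buf' is assumed to be a
    // caller-owned RGB565 framebuffer of w * h pixels, 2 bytes per pixel):
    //
    //     agg::rendering_buffer rbuf(buf, w, h, w * 2);
    //     agg::pixfmt_rgb565 pixf(rbuf);
    //     pixf.copy_hline(0, 0, w, agg::rgba8(255, 0, 0));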
//-----------------------------------------------------pixfmt_rgb555_gamma
template<class Gamma> class pixfmt_rgb555_gamma :
public pixfmt_alpha_blend_rgb_packed<blender_rgb555_gamma<Gamma>,
rendering_buffer>
{
public:
pixfmt_rgb555_gamma(rendering_buffer& rb, const Gamma& g) :
pixfmt_alpha_blend_rgb_packed<blender_rgb555_gamma<Gamma>,
rendering_buffer>(rb)
{
this->blender().gamma(g);
}
};
//-----------------------------------------------------pixfmt_rgb565_gamma
template<class Gamma> class pixfmt_rgb565_gamma :
public pixfmt_alpha_blend_rgb_packed<blender_rgb565_gamma<Gamma>, rendering_buffer>
{
public:
pixfmt_rgb565_gamma(rendering_buffer& rb, const Gamma& g) :
pixfmt_alpha_blend_rgb_packed<blender_rgb565_gamma<Gamma>, rendering_buffer>(rb)
{
this->blender().gamma(g);
}
};
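    // Example (illustrative only; assumes agg_gamma_lut.h is included):
    //
    //     agg::gamma_lut<> glut(1.8);
    //     agg::pixfmt_rgb565_gamma<agg::gamma_lut<> > pixf(rbuf, glut);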
//-----------------------------------------------------pixfmt_rgbAAA_gamma
template<class Gamma> class pixfmt_rgbAAA_gamma :
public pixfmt_alpha_blend_rgb_packed<blender_rgbAAA_gamma<Gamma>,
rendering_buffer>
{
public:
pixfmt_rgbAAA_gamma(rendering_buffer& rb, const Gamma& g) :
pixfmt_alpha_blend_rgb_packed<blender_rgbAAA_gamma<Gamma>,
rendering_buffer>(rb)
{
this->blender().gamma(g);
}
};
//-----------------------------------------------------pixfmt_bgrAAA_gamma
template<class Gamma> class pixfmt_bgrAAA_gamma :
public pixfmt_alpha_blend_rgb_packed<blender_bgrAAA_gamma<Gamma>,
rendering_buffer>
{
public:
pixfmt_bgrAAA_gamma(rendering_buffer& rb, const Gamma& g) :
pixfmt_alpha_blend_rgb_packed<blender_bgrAAA_gamma<Gamma>,
rendering_buffer>(rb)
{
this->blender().gamma(g);
}
};
//-----------------------------------------------------pixfmt_rgbBBA_gamma
template<class Gamma> class pixfmt_rgbBBA_gamma :
public pixfmt_alpha_blend_rgb_packed<blender_rgbBBA_gamma<Gamma>,
rendering_buffer>
{
public:
pixfmt_rgbBBA_gamma(rendering_buffer& rb, const Gamma& g) :
pixfmt_alpha_blend_rgb_packed<blender_rgbBBA_gamma<Gamma>,
rendering_buffer>(rb)
{
this->blender().gamma(g);
}
};
//-----------------------------------------------------pixfmt_bgrABB_gamma
template<class Gamma> class pixfmt_bgrABB_gamma :
public pixfmt_alpha_blend_rgb_packed<blender_bgrABB_gamma<Gamma>,
rendering_buffer>
{
public:
pixfmt_bgrABB_gamma(rendering_buffer& rb, const Gamma& g) :
pixfmt_alpha_blend_rgb_packed<blender_bgrABB_gamma<Gamma>,
rendering_buffer>(rb)
{
this->blender().gamma(g);
}
};
}
#endif
//----------------------------------------------------------------------------
// File: awtk/3rd/agg/include/agg_platform_support.h
//----------------------------------------------------------------------------
// Anti-Grain Geometry - Version 2.4
// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
//
// Permission to copy, use, modify, sell and distribute this software
// is granted provided this copyright notice appears in all copies.
// This software is provided "as is" without express or implied
// warranty, and with no claim as to its suitability for any purpose.
//
//----------------------------------------------------------------------------
// Contact: mcseem@antigrain.com
// mcseemagg@yahoo.com
// http://www.antigrain.com
//----------------------------------------------------------------------------
//
// class platform_support
//
// It's not a part of the AGG library; it's just a helper class to create
// interactive demo examples. Since the examples should not be too complex
// this class is provided to support some very basic interactive graphical
// functionality, such as putting the rendered image into the window, simple
// keyboard and mouse input, window resizing, setting the window title,
// and catching the "idle" events.
//
// The idea is to have a single header file that does not depend on any
// platform (I hate these endless #ifdef/#elif/#elif.../#endif) and a number
// of different implementations depending on the concrete platform.
// The most popular platforms are:
//
// Windows-32 API
// X-Window API
// SDL library (see http://www.libsdl.org/)
// MacOS C/C++ API
//
// This file does not include any system dependent .h files such as
// windows.h or X11.h, so your demo applications do not depend on the
// platform. The only file that can #include system dependent headers
// is the implementation file agg_platform_support.cpp. Different
// implementations are placed in different directories, such as
// ~/agg/src/platform/win32
// ~/agg/src/platform/sdl
// ~/agg/src/platform/X11
// and so on.
//
// All the system dependent stuff sits in the platform_specific
// class which is forward-declared here but not defined.
// The platform_support class has just a pointer to it and it's
// the responsibility of the implementation to create/delete it.
// This class being defined in the implementation file can have
// any platform dependent stuff such as HWND, X11 Window and so on.
//
//----------------------------------------------------------------------------
#ifndef AGG_PLATFORM_SUPPORT_INCLUDED
#define AGG_PLATFORM_SUPPORT_INCLUDED
#include "agg_basics.h"
#include "agg_rendering_buffer.h"
#include "agg_trans_viewport.h"
namespace agg
{
//----------------------------------------------------------window_flag_e
// These are flags used in method init(). Not all of them are
// applicable on different platforms, for example the win32_api
// cannot use a hardware buffer (window_hw_buffer).
// The implementation should simply ignore unsupported flags.
enum window_flag_e
{
window_resize = 1,
window_hw_buffer = 2,
window_keep_aspect_ratio = 4,
window_process_all_keys = 8
};
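    // In the AGG demo framework these flags are OR-ed together and passed to
    // platform_support::init(); that class is not part of this trimmed header,
    // but a typical call (illustrative only) looks like:
    //
    //     app.init(640, 480, agg::window_resize | agg::window_keep_aspect_ratio);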
//-----------------------------------------------------------pix_format_e
// Possible formats of the rendering buffer. Initially I thought that it's
// reasonable to create the buffer and the rendering functions in
// accordance with the native pixel format of the system because it
    // would have no overhead for pixel format conversion.
// But eventually I came to a conclusion that having a possibility to
// convert pixel formats on demand is a good idea. First, it was X11 where
    // there are lots of different formats and visuals and it would be great to
// render everything in, say, RGB-24 and display it automatically without
// any additional efforts. The second reason is to have a possibility to
// debug renderers for different pixel formats and colorspaces having only
// one computer and one system.
//
// This stuff is not included into the basic AGG functionality because the
// number of supported pixel formats (and/or colorspaces) can be great and
    // if one needs to add a new format it should be enough to add new
// rendering files without having to modify any existing ones (a general
    // principle of encapsulation and isolation).
//
    // Using a particular pixel format does not necessarily imply the need
    // for software conversion. For example, the win32 API can natively display
    // gray8, 15-bit RGB, 24-bit BGR, and 32-bit BGRA formats.
    // This list can be (and will be!) extended in the future.
enum pix_format_e
{
pix_format_undefined = 0, // By default. No conversions are applied
pix_format_bw, // 1 bit per color B/W
pix_format_gray8, // Simple 256 level grayscale
pix_format_sgray8, // Simple 256 level grayscale (sRGB)
pix_format_gray16, // Simple 65535 level grayscale
pix_format_gray32, // Grayscale, one 32-bit float per pixel
pix_format_rgb555, // 15 bit rgb. Depends on the byte ordering!
pix_format_rgb565, // 16 bit rgb. Depends on the byte ordering!
pix_format_rgbAAA, // 30 bit rgb. Depends on the byte ordering!
pix_format_rgbBBA, // 32 bit rgb. Depends on the byte ordering!
pix_format_bgrAAA, // 30 bit bgr. Depends on the byte ordering!
pix_format_bgrABB, // 32 bit bgr. Depends on the byte ordering!
pix_format_rgb24, // R-G-B, one byte per color component
pix_format_srgb24, // R-G-B, one byte per color component (sRGB)
pix_format_bgr24, // B-G-R, one byte per color component
pix_format_sbgr24, // B-G-R, native win32 BMP format (sRGB)
pix_format_rgba32, // R-G-B-A, one byte per color component
pix_format_srgba32, // R-G-B-A, one byte per color component (sRGB)
pix_format_argb32, // A-R-G-B, native MAC format
pix_format_sargb32, // A-R-G-B, native MAC format (sRGB)
pix_format_abgr32, // A-B-G-R, one byte per color component
pix_format_sabgr32, // A-B-G-R, one byte per color component (sRGB)
pix_format_bgra32, // B-G-R-A, native win32 BMP format
pix_format_sbgra32, // B-G-R-A, native win32 BMP format (sRGB)
pix_format_rgb48, // R-G-B, 16 bits per color component
        pix_format_bgr48,         // B-G-R, 16 bits per color component
pix_format_rgb96, // R-G-B, one 32-bit float per color component
pix_format_bgr96, // B-G-R, one 32-bit float per color component
        pix_format_rgba64,        // R-G-B-A, 16 bits per color component
        pix_format_argb64,        // A-R-G-B, 16 bits per color component (MAC byte order)
        pix_format_abgr64,        // A-B-G-R, 16 bits per color component
        pix_format_bgra64,        // B-G-R-A, 16 bits per color component (win32 BMP byte order)
pix_format_rgba128, // R-G-B-A, one 32-bit float per color component
pix_format_argb128, // A-R-G-B, one 32-bit float per color component
pix_format_abgr128, // A-B-G-R, one 32-bit float per color component
pix_format_bgra128, // B-G-R-A, one 32-bit float per color component
end_of_pix_formats
};
}
#endif
//----------------------------------------------------------------------------
// File: awtk/3rd/agg/include/agg_rasterizer_cells_aa.h
//----------------------------------------------------------------------------
// Anti-Grain Geometry - Version 2.4
// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
//
// Permission to copy, use, modify, sell and distribute this software
// is granted provided this copyright notice appears in all copies.
// This software is provided "as is" without express or implied
// warranty, and with no claim as to its suitability for any purpose.
//
//----------------------------------------------------------------------------
//
// The author gratefully acknowledges the support of David Turner,
// Robert Wilhelm, and Werner Lemberg - the authors of the FreeType
// library - in producing this work. See http://www.freetype.org for details.
//
//----------------------------------------------------------------------------
// Contact: mcseem@antigrain.com
// mcseemagg@yahoo.com
// http://www.antigrain.com
//----------------------------------------------------------------------------
//
// Adaptation for 32-bit screen coordinates has been sponsored by
// Liberty Technology Systems, Inc., visit http://lib-sys.com
//
// Liberty Technology Systems, Inc. is the provider of
// PostScript and PDF technology for software developers.
//
//----------------------------------------------------------------------------
#ifndef AGG_RASTERIZER_CELLS_AA_INCLUDED
#define AGG_RASTERIZER_CELLS_AA_INCLUDED
#include <string.h>
#include <math.h>
#include "agg_math.h"
#include "agg_array.h"
namespace agg
{
//-----------------------------------------------------rasterizer_cells_aa
// An internal class that implements the main rasterization algorithm.
    // Used in the rasterizer. Should not be used directly.
template<class Cell> class rasterizer_cells_aa
{
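        // Cells are stored in heap blocks of cell_block_size entries. The table
        // of block pointers grows by cell_block_pool entries at a time and is
        // capped at cell_block_limit blocks; add_curr_cell() silently drops
        // cells beyond that, which bounds the worst-case memory use.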
enum cell_block_scale_e
{
cell_block_shift = 12,
cell_block_size = 1 << cell_block_shift,
cell_block_mask = cell_block_size - 1,
cell_block_pool = 256,
cell_block_limit = 1024
};
struct sorted_y
{
unsigned start;
unsigned num;
};
public:
typedef Cell cell_type;
typedef rasterizer_cells_aa<Cell> self_type;
~rasterizer_cells_aa();
rasterizer_cells_aa();
void reset();
void style(const cell_type& style_cell);
void line(int x1, int y1, int x2, int y2);
int min_x() const { return m_min_x; }
int min_y() const { return m_min_y; }
int max_x() const { return m_max_x; }
int max_y() const { return m_max_y; }
void sort_cells();
unsigned total_cells() const
{
return m_num_cells;
}
unsigned scanline_num_cells(unsigned y) const
{
return m_sorted_y[y - m_min_y].num;
}
const cell_type* const* scanline_cells(unsigned y) const
{
return m_sorted_cells.data() + m_sorted_y[y - m_min_y].start;
}
bool sorted() const { return m_sorted; }
private:
rasterizer_cells_aa(const self_type&);
const self_type& operator = (const self_type&);
void set_curr_cell(int x, int y);
void add_curr_cell();
void render_hline(int ey, int x1, int y1, int x2, int y2);
void allocate_block();
private:
unsigned m_num_blocks;
unsigned m_max_blocks;
unsigned m_curr_block;
unsigned m_num_cells;
cell_type** m_cells;
cell_type* m_curr_cell_ptr;
pod_vector<cell_type*> m_sorted_cells;
pod_vector<sorted_y> m_sorted_y;
cell_type m_curr_cell;
cell_type m_style_cell;
int m_min_x;
int m_min_y;
int m_max_x;
int m_max_y;
bool m_sorted;
};
//------------------------------------------------------------------------
template<class Cell>
rasterizer_cells_aa<Cell>::~rasterizer_cells_aa()
{
if(m_num_blocks)
{
cell_type** ptr = m_cells + m_num_blocks - 1;
while(m_num_blocks--)
{
pod_allocator<cell_type>::deallocate(*ptr, cell_block_size);
ptr--;
}
pod_allocator<cell_type*>::deallocate(m_cells, m_max_blocks);
}
}
//------------------------------------------------------------------------
template<class Cell>
rasterizer_cells_aa<Cell>::rasterizer_cells_aa() :
m_num_blocks(0),
m_max_blocks(0),
m_curr_block(0),
m_num_cells(0),
m_cells(0),
m_curr_cell_ptr(0),
m_sorted_cells(),
m_sorted_y(),
m_min_x(0x7FFFFFFF),
m_min_y(0x7FFFFFFF),
m_max_x(-0x7FFFFFFF),
m_max_y(-0x7FFFFFFF),
m_sorted(false)
{
m_style_cell.initial();
m_curr_cell.initial();
}
//------------------------------------------------------------------------
template<class Cell>
void rasterizer_cells_aa<Cell>::reset()
{
m_num_cells = 0;
m_curr_block = 0;
m_curr_cell.initial();
m_style_cell.initial();
m_sorted = false;
m_min_x = 0x7FFFFFFF;
m_min_y = 0x7FFFFFFF;
m_max_x = -0x7FFFFFFF;
m_max_y = -0x7FFFFFFF;
}
//------------------------------------------------------------------------
template<class Cell>
AGG_INLINE void rasterizer_cells_aa<Cell>::add_curr_cell()
{
if(m_curr_cell.area | m_curr_cell.cover)
{
if((m_num_cells & cell_block_mask) == 0)
{
if(m_num_blocks >= cell_block_limit) return;
allocate_block();
}
*m_curr_cell_ptr++ = m_curr_cell;
++m_num_cells;
}
}
//------------------------------------------------------------------------
template<class Cell>
AGG_INLINE void rasterizer_cells_aa<Cell>::set_curr_cell(int x, int y)
{
if(m_curr_cell.not_equal(x, y, m_style_cell))
{
add_curr_cell();
m_curr_cell.style(m_style_cell);
m_curr_cell.x = x;
m_curr_cell.y = y;
m_curr_cell.cover = 0;
m_curr_cell.area = 0;
}
}
//------------------------------------------------------------------------
template<class Cell>
AGG_INLINE void rasterizer_cells_aa<Cell>::render_hline(int ey,
int x1, int y1,
int x2, int y2)
{
int ex1 = x1 >> poly_subpixel_shift;
int ex2 = x2 >> poly_subpixel_shift;
int fx1 = x1 & poly_subpixel_mask;
int fx2 = x2 & poly_subpixel_mask;
int delta, p, first, dx;
int incr, lift, mod, rem;
//trivial case. Happens often
if(y1 == y2)
{
set_curr_cell(ex2, ey);
return;
}
//everything is located in a single cell. That is easy!
if(ex1 == ex2)
{
delta = y2 - y1;
m_curr_cell.cover += delta;
m_curr_cell.area += (fx1 + fx2) * delta;
return;
}
//ok, we'll have to render a run of adjacent cells on the same
//hline...
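        //The remaining cells are walked with an integer DDA: 'lift' is the
        //y advance per full cell, 'mod'/'rem' carry the division remainder
        //so the rounding error never accumulates across the run.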
p = (poly_subpixel_scale - fx1) * (y2 - y1);
first = poly_subpixel_scale;
incr = 1;
dx = x2 - x1;
if(dx < 0)
{
p = fx1 * (y2 - y1);
first = 0;
incr = -1;
dx = -dx;
}
delta = p / dx;
mod = p % dx;
if(mod < 0)
{
delta--;
mod += dx;
}
m_curr_cell.cover += delta;
m_curr_cell.area += (fx1 + first) * delta;
ex1 += incr;
set_curr_cell(ex1, ey);
y1 += delta;
if(ex1 != ex2)
{
p = poly_subpixel_scale * (y2 - y1 + delta);
lift = p / dx;
rem = p % dx;
if (rem < 0)
{
lift--;
rem += dx;
}
mod -= dx;
while (ex1 != ex2)
{
delta = lift;
mod += rem;
if(mod >= 0)
{
mod -= dx;
delta++;
}
m_curr_cell.cover += delta;
m_curr_cell.area += poly_subpixel_scale * delta;
y1 += delta;
ex1 += incr;
set_curr_cell(ex1, ey);
}
}
delta = y2 - y1;
m_curr_cell.cover += delta;
m_curr_cell.area += (fx2 + poly_subpixel_scale - first) * delta;
}
//------------------------------------------------------------------------
template<class Cell>
AGG_INLINE void rasterizer_cells_aa<Cell>::style(const cell_type& style_cell)
{
m_style_cell.style(style_cell);
}
//------------------------------------------------------------------------
template<class Cell>
void rasterizer_cells_aa<Cell>::line(int x1, int y1, int x2, int y2)
{
enum dx_limit_e { dx_limit = 16384 << poly_subpixel_shift };
int dx = x2 - x1;
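        //Overly long lines are split in half recursively; rendering them
        //directly would overflow the fixed-point arithmetic in render_hline().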
if(dx >= dx_limit || dx <= -dx_limit)
{
int cx = (x1 + x2) >> 1;
int cy = (y1 + y2) >> 1;
line(x1, y1, cx, cy);
            line(cx, cy, x2, y2);
            return;
        }
int dy = y2 - y1;
int ex1 = x1 >> poly_subpixel_shift;
int ex2 = x2 >> poly_subpixel_shift;
int ey1 = y1 >> poly_subpixel_shift;
int ey2 = y2 >> poly_subpixel_shift;
int fy1 = y1 & poly_subpixel_mask;
int fy2 = y2 & poly_subpixel_mask;
int x_from, x_to;
int p, rem, mod, lift, delta, first, incr;
if(ex1 < m_min_x) m_min_x = ex1;
if(ex1 > m_max_x) m_max_x = ex1;
if(ey1 < m_min_y) m_min_y = ey1;
if(ey1 > m_max_y) m_max_y = ey1;
if(ex2 < m_min_x) m_min_x = ex2;
if(ex2 > m_max_x) m_max_x = ex2;
if(ey2 < m_min_y) m_min_y = ey2;
if(ey2 > m_max_y) m_max_y = ey2;
set_curr_cell(ex1, ey1);
//everything is on a single hline
if(ey1 == ey2)
{
render_hline(ey1, x1, fy1, x2, fy2);
return;
}
//Vertical line - we have to calculate start and end cells,
//and then - the common values of the area and coverage for
        //all cells of the line. We know there is exactly one cell per
        //hline, so we don't have to call render_hline().
incr = 1;
if(dx == 0)
{
int ex = x1 >> poly_subpixel_shift;
int two_fx = (x1 - (ex << poly_subpixel_shift)) << 1;
int area;
first = poly_subpixel_scale;
if(dy < 0)
{
first = 0;
incr = -1;
}
x_from = x1;
//render_hline(ey1, x_from, fy1, x_from, first);
delta = first - fy1;
m_curr_cell.cover += delta;
m_curr_cell.area += two_fx * delta;
ey1 += incr;
set_curr_cell(ex, ey1);
delta = first + first - poly_subpixel_scale;
area = two_fx * delta;
while(ey1 != ey2)
{
//render_hline(ey1, x_from, poly_subpixel_scale - first, x_from, first);
m_curr_cell.cover = delta;
m_curr_cell.area = area;
ey1 += incr;
set_curr_cell(ex, ey1);
}
//render_hline(ey1, x_from, poly_subpixel_scale - first, x_from, fy2);
delta = fy2 - poly_subpixel_scale + first;
m_curr_cell.cover += delta;
m_curr_cell.area += two_fx * delta;
return;
}
//ok, we have to render several hlines
p = (poly_subpixel_scale - fy1) * dx;
first = poly_subpixel_scale;
if(dy < 0)
{
p = fy1 * dx;
first = 0;
incr = -1;
dy = -dy;
}
delta = p / dy;
mod = p % dy;
if(mod < 0)
{
delta--;
mod += dy;
}
x_from = x1 + delta;
render_hline(ey1, x1, fy1, x_from, first);
ey1 += incr;
set_curr_cell(x_from >> poly_subpixel_shift, ey1);
if(ey1 != ey2)
{
p = poly_subpixel_scale * dx;
lift = p / dy;
rem = p % dy;
if(rem < 0)
{
lift--;
rem += dy;
}
mod -= dy;
while(ey1 != ey2)
{
delta = lift;
mod += rem;
if (mod >= 0)
{
mod -= dy;
delta++;
}
x_to = x_from + delta;
render_hline(ey1, x_from, poly_subpixel_scale - first, x_to, first);
x_from = x_to;
ey1 += incr;
set_curr_cell(x_from >> poly_subpixel_shift, ey1);
}
}
render_hline(ey1, x_from, poly_subpixel_scale - first, x2, fy2);
}
//------------------------------------------------------------------------
template<class Cell>
void rasterizer_cells_aa<Cell>::allocate_block()
{
if(m_curr_block >= m_num_blocks)
{
if(m_num_blocks >= m_max_blocks)
{
cell_type** new_cells =
pod_allocator<cell_type*>::allocate(m_max_blocks +
cell_block_pool);
if(m_cells)
{
memcpy(new_cells, m_cells, m_max_blocks * sizeof(cell_type*));
pod_allocator<cell_type*>::deallocate(m_cells, m_max_blocks);
}
m_cells = new_cells;
m_max_blocks += cell_block_pool;
}
m_cells[m_num_blocks++] =
pod_allocator<cell_type>::allocate(cell_block_size);
}
m_curr_cell_ptr = m_cells[m_curr_block++];
}
//------------------------------------------------------------------------
template <class T> static AGG_INLINE void swap_cells(T* a, T* b)
{
T temp = *a;
*a = *b;
*b = temp;
}
//------------------------------------------------------------------------
enum
{
qsort_threshold = 9
};
//------------------------------------------------------------------------
template<class Cell>
void qsort_cells(Cell** start, unsigned num)
{
Cell** stack[80];
Cell*** top;
Cell** limit;
Cell** base;
limit = start + num;
base = start;
top = stack;
for (;;)
{
int len = int(limit - base);
Cell** i;
Cell** j;
Cell** pivot;
if(len > qsort_threshold)
{
// we use base + len/2 as the pivot
pivot = base + len / 2;
swap_cells(base, pivot);
i = base + 1;
j = limit - 1;
// now ensure that *i <= *base <= *j
if((*j)->x < (*i)->x)
{
swap_cells(i, j);
}
if((*base)->x < (*i)->x)
{
swap_cells(base, i);
}
if((*j)->x < (*base)->x)
{
swap_cells(base, j);
}
for(;;)
{
int x = (*base)->x;
do i++; while( (*i)->x < x );
do j--; while( x < (*j)->x );
if(i > j)
{
break;
}
swap_cells(i, j);
}
swap_cells(base, j);
// now, push the largest sub-array
if(j - base > limit - i)
{
top[0] = base;
top[1] = j;
base = i;
}
else
{
top[0] = i;
top[1] = limit;
limit = j;
}
top += 2;
}
else
{
// the sub-array is small, perform insertion sort
j = base;
i = j + 1;
for(; i < limit; j = i, i++)
{
for(; j[1]->x < (*j)->x; j--)
{
swap_cells(j + 1, j);
if (j == base)
{
break;
}
}
}
if(top > stack)
{
top -= 2;
base = top[0];
limit = top[1];
}
else
{
break;
}
}
}
}
//------------------------------------------------------------------------
template<class Cell>
void rasterizer_cells_aa<Cell>::sort_cells()
{
if(m_sorted) return; //Perform sort only the first time.
add_curr_cell();
m_curr_cell.x = 0x7FFFFFFF;
m_curr_cell.y = 0x7FFFFFFF;
m_curr_cell.cover = 0;
m_curr_cell.area = 0;
if(m_num_cells == 0) return;
// DBG: Check to see if min/max works well.
//for(unsigned nc = 0; nc < m_num_cells; nc++)
//{
// cell_type* cell = m_cells[nc >> cell_block_shift] + (nc & cell_block_mask);
// if(cell->x < m_min_x ||
// cell->y < m_min_y ||
// cell->x > m_max_x ||
// cell->y > m_max_y)
// {
// cell = cell; // Breakpoint here
// }
//}
// Allocate the array of cell pointers
m_sorted_cells.allocate(m_num_cells, 16);
// Allocate and zero the Y array
m_sorted_y.allocate(m_max_y - m_min_y + 1, 16);
m_sorted_y.zero();
// Create the Y-histogram (count the numbers of cells for each Y)
cell_type** block_ptr = m_cells;
cell_type* cell_ptr;
unsigned nb = m_num_cells;
unsigned i;
while(nb)
{
cell_ptr = *block_ptr++;
i = (nb > cell_block_size) ? cell_block_size : nb;
nb -= i;
while(i--)
{
m_sorted_y[cell_ptr->y - m_min_y].start++;
++cell_ptr;
}
}
// Convert the Y-histogram into the array of starting indexes
unsigned start = 0;
for(i = 0; i < m_sorted_y.size(); i++)
{
unsigned v = m_sorted_y[i].start;
m_sorted_y[i].start = start;
start += v;
}
// Fill the cell pointer array sorted by Y
block_ptr = m_cells;
nb = m_num_cells;
while(nb)
{
cell_ptr = *block_ptr++;
i = (nb > cell_block_size) ? cell_block_size : nb;
nb -= i;
while(i--)
{
sorted_y& curr_y = m_sorted_y[cell_ptr->y - m_min_y];
m_sorted_cells[curr_y.start + curr_y.num] = cell_ptr;
++curr_y.num;
++cell_ptr;
}
}
// Finally arrange the X-arrays
for(i = 0; i < m_sorted_y.size(); i++)
{
const sorted_y& curr_y = m_sorted_y[i];
if(curr_y.num)
{
qsort_cells(m_sorted_cells.data() + curr_y.start, curr_y.num);
}
}
m_sorted = true;
}
//------------------------------------------------------scanline_hit_test
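    // A degenerate "scanline" that only records whether a given x coordinate
    // receives any coverage; rasterizer_scanline_aa::hit_test() feeds it to
    // sweep_scanline() to answer point-in-polygon queries.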
class scanline_hit_test
{
public:
scanline_hit_test(int x) : m_x(x), m_hit(false) {}
void reset_spans() {}
void finalize(int) {}
void add_cell(int x, int)
{
if(m_x == x) m_hit = true;
}
void add_span(int x, int len, int)
{
if(m_x >= x && m_x < x+len) m_hit = true;
}
unsigned num_spans() const { return 1; }
bool hit() const { return m_hit; }
private:
int m_x;
bool m_hit;
};
}
#endif
//----------------------------------------------------------------------------
// File: awtk/3rd/agg/include/agg_rasterizer_scanline_aa.h
//----------------------------------------------------------------------------
// Anti-Grain Geometry - Version 2.4
// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
//
// Permission to copy, use, modify, sell and distribute this software
// is granted provided this copyright notice appears in all copies.
// This software is provided "as is" without express or implied
// warranty, and with no claim as to its suitability for any purpose.
//
//----------------------------------------------------------------------------
//
// The author gratefully acknowledges the support of David Turner,
// Robert Wilhelm, and Werner Lemberg - the authors of the FreeType
// library - in producing this work. See http://www.freetype.org for details.
//
//----------------------------------------------------------------------------
// Contact: mcseem@antigrain.com
// mcseemagg@yahoo.com
// http://www.antigrain.com
//----------------------------------------------------------------------------
//
// Adaptation for 32-bit screen coordinates has been sponsored by
// Liberty Technology Systems, Inc., visit http://lib-sys.com
//
// Liberty Technology Systems, Inc. is the provider of
// PostScript and PDF technology for software developers.
//
//----------------------------------------------------------------------------
#ifndef AGG_RASTERIZER_SCANLINE_AA_INCLUDED
#define AGG_RASTERIZER_SCANLINE_AA_INCLUDED
#include "agg_rasterizer_cells_aa.h"
#include "agg_rasterizer_sl_clip.h"
#include "agg_rasterizer_scanline_aa_nogamma.h"
#include "agg_gamma_functions.h"
namespace agg
{
//==================================================rasterizer_scanline_aa
// Polygon rasterizer that is used to render filled polygons with
// high-quality Anti-Aliasing. Internally, by default, the class uses
// integer coordinates in format 24.8, i.e. 24 bits for integer part
// and 8 bits for fractional - see poly_subpixel_shift. This class can be
// used in the following way:
//
// 1. filling_rule(filling_rule_e ft) - optional.
//
// 2. gamma() - optional.
//
// 3. reset()
//
// 4. move_to(x, y) / line_to(x, y) - make the polygon. One can create
// more than one contour, but each contour must consist of at least 3
// vertices, i.e. move_to(x1, y1); line_to(x2, y2); line_to(x3, y3);
// is the absolute minimum of vertices that define a triangle.
    // The algorithm checks neither the number of vertices nor the
    // coincidence of their coordinates, but in the worst case it just
// won't draw anything.
    // The order of the vertices (clockwise or counterclockwise)
// is important when using the non-zero filling rule (fill_non_zero).
// In this case the vertex order of all the contours must be the same
// if you want your intersecting polygons to be without "holes".
    // You can actually use different vertex orders. If the contours do not
// intersect each other the order is not important anyway. If they do,
// contours with the same vertex order will be rendered without "holes"
// while the intersecting contours with different orders will have "holes".
//
// filling_rule() and gamma() can be called anytime before "sweeping".
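    //
    // A minimal sketch of the typical call sequence (illustrative only;
    // scanline_u8 and the scanline renderer come from other AGG headers):
    //
    //     agg::rasterizer_scanline_aa<> ras;
    //     agg::scanline_u8 sl;
    //     ras.move_to_d(10.0, 10.0);
    //     ras.line_to_d(100.0, 20.0);
    //     ras.line_to_d(50.0, 90.0);
    //     ras.close_polygon();
    //     agg::render_scanlines(ras, sl, ren);   // ren: e.g. renderer_scanline_aa_solid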
//------------------------------------------------------------------------
template<class Clip=rasterizer_sl_clip_int> class rasterizer_scanline_aa
{
enum status
{
status_initial,
status_move_to,
status_line_to,
status_closed
};
public:
typedef Clip clip_type;
typedef typename Clip::conv_type conv_type;
typedef typename Clip::coord_type coord_type;
enum aa_scale_e
{
aa_shift = 8,
aa_scale = 1 << aa_shift,
aa_mask = aa_scale - 1,
aa_scale2 = aa_scale * 2,
aa_mask2 = aa_scale2 - 1
};
//--------------------------------------------------------------------
rasterizer_scanline_aa() :
m_outline(),
m_clipper(),
m_filling_rule(fill_non_zero),
m_auto_close(true),
m_start_x(0),
m_start_y(0),
m_status(status_initial)
{
int i;
for(i = 0; i < aa_scale; i++) m_gamma[i] = i;
}
//--------------------------------------------------------------------
template<class GammaF>
rasterizer_scanline_aa(const GammaF& gamma_function) :
m_outline(),
m_clipper(m_outline),
m_filling_rule(fill_non_zero),
m_auto_close(true),
m_start_x(0),
m_start_y(0),
m_status(status_initial)
{
gamma(gamma_function);
}
//--------------------------------------------------------------------
void reset();
void reset_clipping();
void clip_box(double x1, double y1, double x2, double y2);
void filling_rule(filling_rule_e filling_rule);
void auto_close(bool flag) { m_auto_close = flag; }
//--------------------------------------------------------------------
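        // Build the coverage-to-alpha LUT: gamma_function is sampled on [0, 1]
        // and the result is mapped back to the 0..aa_mask range. Any functor
        // with double operator()(double) const - e.g. gamma_power from
        // agg_gamma_functions.h - can be used.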
template<class GammaF> void gamma(const GammaF& gamma_function)
{
int i;
for(i = 0; i < aa_scale; i++)
{
m_gamma[i] = uround(gamma_function(double(i) / aa_mask) * aa_mask);
}
}
//--------------------------------------------------------------------
unsigned apply_gamma(unsigned cover) const
{
return m_gamma[cover];
}
//--------------------------------------------------------------------
void move_to(int x, int y);
void line_to(int x, int y);
void move_to_d(double x, double y);
void line_to_d(double x, double y);
void close_polygon();
void add_vertex(double x, double y, unsigned cmd);
void edge(int x1, int y1, int x2, int y2);
void edge_d(double x1, double y1, double x2, double y2);
//-------------------------------------------------------------------
template<class VertexSource>
void add_path(VertexSource& vs, unsigned path_id=0)
{
double x;
double y;
unsigned cmd;
vs.rewind(path_id);
if(m_outline.sorted()) reset();
while(!is_stop(cmd = vs.vertex(&x, &y)))
{
add_vertex(x, y, cmd);
}
}
//--------------------------------------------------------------------
int min_x() const { return m_outline.min_x(); }
int min_y() const { return m_outline.min_y(); }
int max_x() const { return m_outline.max_x(); }
int max_y() const { return m_outline.max_y(); }
//--------------------------------------------------------------------
void sort();
bool rewind_scanlines();
bool navigate_scanline(int y);
//--------------------------------------------------------------------
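        // Convert the accumulated cell area (twice the covered area, in squared
        // subpixel units) into a coverage value in the 0..aa_mask range, fold it
        // for the even-odd rule if needed, and run it through the gamma LUT.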
AGG_INLINE unsigned calculate_alpha(int area) const
{
int cover = area >> (poly_subpixel_shift*2 + 1 - aa_shift);
if(cover < 0) cover = -cover;
if(m_filling_rule == fill_even_odd)
{
cover &= aa_mask2;
if(cover > aa_scale)
{
cover = aa_scale2 - cover;
}
}
if(cover > aa_mask) cover = aa_mask;
return m_gamma[cover];
}
//--------------------------------------------------------------------
template<class Scanline> bool sweep_scanline(Scanline& sl)
{
for(;;)
{
if(m_scan_y > m_outline.max_y()) return false;
sl.reset_spans();
unsigned num_cells = m_outline.scanline_num_cells(m_scan_y);
const cell_aa* const* cells = m_outline.scanline_cells(m_scan_y);
int cover = 0;
while(num_cells)
{
const cell_aa* cur_cell = *cells;
int x = cur_cell->x;
int area = cur_cell->area;
unsigned alpha;
cover += cur_cell->cover;
//accumulate all cells with the same X
while(--num_cells)
{
cur_cell = *++cells;
if(cur_cell->x != x) break;
area += cur_cell->area;
cover += cur_cell->cover;
}
if(area)
{
alpha = calculate_alpha((cover << (poly_subpixel_shift + 1)) - area);
if(alpha)
{
sl.add_cell(x, alpha);
}
x++;
}
if(num_cells && cur_cell->x > x)
{
alpha = calculate_alpha(cover << (poly_subpixel_shift + 1));
if(alpha)
{
sl.add_span(x, cur_cell->x - x, alpha);
}
}
}
if(sl.num_spans()) break;
++m_scan_y;
}
sl.finalize(m_scan_y);
++m_scan_y;
return true;
}
//--------------------------------------------------------------------
bool hit_test(int tx, int ty);
private:
//--------------------------------------------------------------------
// Disable copying
rasterizer_scanline_aa(const rasterizer_scanline_aa<Clip>&);
const rasterizer_scanline_aa<Clip>&
operator = (const rasterizer_scanline_aa<Clip>&);
private:
rasterizer_cells_aa<cell_aa> m_outline;
clip_type m_clipper;
int m_gamma[aa_scale];
filling_rule_e m_filling_rule;
bool m_auto_close;
coord_type m_start_x;
coord_type m_start_y;
unsigned m_status;
int m_scan_y;
};
//------------------------------------------------------------------------
template<class Clip>
void rasterizer_scanline_aa<Clip>::reset()
{
m_outline.reset();
m_status = status_initial;
}
//------------------------------------------------------------------------
template<class Clip>
void rasterizer_scanline_aa<Clip>::filling_rule(filling_rule_e filling_rule)
{
m_filling_rule = filling_rule;
}
//------------------------------------------------------------------------
template<class Clip>
void rasterizer_scanline_aa<Clip>::clip_box(double x1, double y1,
double x2, double y2)
{
reset();
m_clipper.clip_box(conv_type::upscale(x1), conv_type::upscale(y1),
conv_type::upscale(x2), conv_type::upscale(y2));
}
//------------------------------------------------------------------------
template<class Clip>
void rasterizer_scanline_aa<Clip>::reset_clipping()
{
reset();
m_clipper.reset_clipping();
}
//------------------------------------------------------------------------
template<class Clip>
void rasterizer_scanline_aa<Clip>::close_polygon()
{
if(m_status == status_line_to)
{
m_clipper.line_to(m_outline, m_start_x, m_start_y);
m_status = status_closed;
}
}
//------------------------------------------------------------------------
template<class Clip>
void rasterizer_scanline_aa<Clip>::move_to(int x, int y)
{
if(m_outline.sorted()) reset();
if(m_auto_close) close_polygon();
m_clipper.move_to(m_start_x = conv_type::downscale(x),
m_start_y = conv_type::downscale(y));
m_status = status_move_to;
}
//------------------------------------------------------------------------
template<class Clip>
void rasterizer_scanline_aa<Clip>::line_to(int x, int y)
{
m_clipper.line_to(m_outline,
conv_type::downscale(x),
conv_type::downscale(y));
m_status = status_line_to;
}
//------------------------------------------------------------------------
template<class Clip>
void rasterizer_scanline_aa<Clip>::move_to_d(double x, double y)
{
if(m_outline.sorted()) reset();
if(m_auto_close) close_polygon();
m_clipper.move_to(m_start_x = conv_type::upscale(x),
m_start_y = conv_type::upscale(y));
m_status = status_move_to;
}
//------------------------------------------------------------------------
template<class Clip>
void rasterizer_scanline_aa<Clip>::line_to_d(double x, double y)
{
m_clipper.line_to(m_outline,
conv_type::upscale(x),
conv_type::upscale(y));
m_status = status_line_to;
}
//------------------------------------------------------------------------
template<class Clip>
void rasterizer_scanline_aa<Clip>::add_vertex(double x, double y, unsigned cmd)
{
if(is_move_to(cmd))
{
move_to_d(x, y);
}
else
if(is_vertex(cmd))
{
line_to_d(x, y);
}
else
if(is_close(cmd))
{
close_polygon();
}
}
//------------------------------------------------------------------------
template<class Clip>
void rasterizer_scanline_aa<Clip>::edge(int x1, int y1, int x2, int y2)
{
if(m_outline.sorted()) reset();
m_clipper.move_to(conv_type::downscale(x1), conv_type::downscale(y1));
m_clipper.line_to(m_outline,
conv_type::downscale(x2),
conv_type::downscale(y2));
m_status = status_move_to;
}
//------------------------------------------------------------------------
template<class Clip>
void rasterizer_scanline_aa<Clip>::edge_d(double x1, double y1,
double x2, double y2)
{
if(m_outline.sorted()) reset();
m_clipper.move_to(conv_type::upscale(x1), conv_type::upscale(y1));
m_clipper.line_to(m_outline,
conv_type::upscale(x2),
conv_type::upscale(y2));
m_status = status_move_to;
}
//------------------------------------------------------------------------
template<class Clip>
void rasterizer_scanline_aa<Clip>::sort()
{
if(m_auto_close) close_polygon();
m_outline.sort_cells();
}
//------------------------------------------------------------------------
template<class Clip>
AGG_INLINE bool rasterizer_scanline_aa<Clip>::rewind_scanlines()
{
if(m_auto_close) close_polygon();
m_outline.sort_cells();
if(m_outline.total_cells() == 0)
{
return false;
}
m_scan_y = m_outline.min_y();
return true;
}
//------------------------------------------------------------------------
template<class Clip>
AGG_INLINE bool rasterizer_scanline_aa<Clip>::navigate_scanline(int y)
{
if(m_auto_close) close_polygon();
m_outline.sort_cells();
if(m_outline.total_cells() == 0 ||
y < m_outline.min_y() ||
y > m_outline.max_y())
{
return false;
}
m_scan_y = y;
return true;
}
//------------------------------------------------------------------------
template<class Clip>
bool rasterizer_scanline_aa<Clip>::hit_test(int tx, int ty)
{
if(!navigate_scanline(ty)) return false;
scanline_hit_test sl(tx);
sweep_scanline(sl);
return sl.hit();
}
}
#endif
| 0 |
D://workCode//uploadProject\awtk\3rd\agg | D://workCode//uploadProject\awtk\3rd\agg\include\agg_rasterizer_scanline_aa_nogamma.h | //----------------------------------------------------------------------------
// Anti-Grain Geometry - Version 2.4
// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
//
// Permission to copy, use, modify, sell and distribute this software
// is granted provided this copyright notice appears in all copies.
// This software is provided "as is" without express or implied
// warranty, and with no claim as to its suitability for any purpose.
//
//----------------------------------------------------------------------------
//
// The author gratefully acknowledges the support of David Turner,
// Robert Wilhelm, and Werner Lemberg - the authors of the FreeType
// library - in producing this work. See http://www.freetype.org for details.
//
//----------------------------------------------------------------------------
// Contact: mcseem@antigrain.com
// mcseemagg@yahoo.com
// http://www.antigrain.com
//----------------------------------------------------------------------------
//
// Adaptation for 32-bit screen coordinates has been sponsored by
// Liberty Technology Systems, Inc., visit http://lib-sys.com
//
// Liberty Technology Systems, Inc. is the provider of
// PostScript and PDF technology for software developers.
//
//----------------------------------------------------------------------------
#ifndef AGG_RASTERIZER_SCANLINE_AA_NOGAMMA_INCLUDED
#define AGG_RASTERIZER_SCANLINE_AA_NOGAMMA_INCLUDED
#include "agg_rasterizer_cells_aa.h"
#include "agg_rasterizer_sl_clip.h"
namespace agg
{
//-----------------------------------------------------------------cell_aa
// A pixel cell. There are no constructors defined; this is intentional,
// in order to avoid extra overhead when allocating an
// array of cells.
struct cell_aa
{
int x;
int y;
int cover;
int area;
void initial()
{
x = 0x7FFFFFFF;
y = 0x7FFFFFFF;
cover = 0;
area = 0;
}
void style(const cell_aa&) {}
int not_equal(int ex, int ey, const cell_aa&) const
{
return (ex - x) | (ey - y);
}
};
//==================================================rasterizer_scanline_aa_nogamma
// Polygon rasterizer that is used to render filled polygons with
// high-quality Anti-Aliasing. Internally, by default, the class uses
// integer coordinates in format 24.8, i.e. 24 bits for integer part
// and 8 bits for fractional - see poly_subpixel_shift. This class can be
// used in the following way:
//
// 1. filling_rule(filling_rule_e ft) - optional.
//
// 2. gamma() - not applicable here: this no-gamma variant uses coverage values directly.
//
// 3. reset()
//
// 4. move_to(x, y) / line_to(x, y) - make the polygon. One can create
// more than one contour, but each contour must consist of at least 3
// vertices, i.e. move_to(x1, y1); line_to(x2, y2); line_to(x3, y3);
// is the absolute minimum of vertices that define a triangle.
// The algorithm checks neither the number of vertices nor the
// coincidence of their coordinates, but in the worst case it just
// won't draw anything.
// The order of the vertices (clockwise or counterclockwise)
// is important when using the non-zero filling rule (fill_non_zero).
// In this case the vertex order of all the contours must be the same
// if you want your intersecting polygons to be without "holes".
// You can actually use different vertex orders. If the contours do not
// intersect each other, the order is not important anyway. If they do,
// contours with the same vertex order will be rendered without "holes"
// while the intersecting contours with different orders will have "holes".
//
// filling_rule() can be called anytime before "sweeping".
//------------------------------------------------------------------------
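// An illustrative usage sketch of the steps above; it is not part of the
// original AGG source. Here buf, width, height and stride stand for an
// application-provided frame buffer, and rendering_buffer, pixfmt_rgba32,
// scanline_u8, rgba8 and render_scanlines_aa_solid come from other AGG
// headers that are assumed to be available. In the default 24.8 format a
// pixel coordinate such as 10.5 is stored as 10.5 * 256 = 2688.
//
//     agg::rendering_buffer rbuf(buf, width, height, stride);
//     agg::pixfmt_rgba32 pixf(rbuf);
//     agg::renderer_base<agg::pixfmt_rgba32> rb(pixf);
//
//     agg::rasterizer_scanline_aa_nogamma<> ras;
//     agg::scanline_u8 sl;
//
//     ras.filling_rule(agg::fill_even_odd);   // step 1, optional
//     ras.reset();                            // step 3
//     ras.move_to_d(10.5, 10.5);              // step 4: at least 3 vertices
//     ras.line_to_d(100.0, 20.0);
//     ras.line_to_d(50.0, 90.0);
//     ras.close_polygon();
//
//     // "Sweeping" the scanlines through a solid-color renderer:
//     agg::render_scanlines_aa_solid(ras, sl, rb, agg::rgba8(0, 0, 0));
//------------------------------------------------------------------------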
template<class Clip=rasterizer_sl_clip_int> class rasterizer_scanline_aa_nogamma
{
enum status
{
status_initial,
status_move_to,
status_line_to,
status_closed
};
public:
typedef Clip clip_type;
typedef typename Clip::conv_type conv_type;
typedef typename Clip::coord_type coord_type;
enum aa_scale_e
{
aa_shift = 8,
aa_scale = 1 << aa_shift,
aa_mask = aa_scale - 1,
aa_scale2 = aa_scale * 2,
aa_mask2 = aa_scale2 - 1
};
//--------------------------------------------------------------------
rasterizer_scanline_aa_nogamma() :
m_outline(),
m_clipper(),
m_filling_rule(fill_non_zero),
m_auto_close(true),
m_start_x(0),
m_start_y(0),
m_status(status_initial)
{
}
//--------------------------------------------------------------------
void reset();
void reset_clipping();
void clip_box(double x1, double y1, double x2, double y2);
void filling_rule(filling_rule_e filling_rule);
void auto_close(bool flag) { m_auto_close = flag; }
//--------------------------------------------------------------------
unsigned apply_gamma(unsigned cover) const
{
return cover;
}
//--------------------------------------------------------------------
void move_to(int x, int y);
void line_to(int x, int y);
void move_to_d(double x, double y);
void line_to_d(double x, double y);
void close_polygon();
void add_vertex(double x, double y, unsigned cmd);
void edge(int x1, int y1, int x2, int y2);
void edge_d(double x1, double y1, double x2, double y2);
//-------------------------------------------------------------------
template<class VertexSource>
void add_path(VertexSource& vs, unsigned path_id=0)
{
double x;
double y;
unsigned cmd;
vs.rewind(path_id);
if(m_outline.sorted()) reset();
while(!is_stop(cmd = vs.vertex(&x, &y)))
{
add_vertex(x, y, cmd);
}
}
//--------------------------------------------------------------------
int min_x() const { return m_outline.min_x(); }
int min_y() const { return m_outline.min_y(); }
int max_x() const { return m_outline.max_x(); }
int max_y() const { return m_outline.max_y(); }
//--------------------------------------------------------------------
void sort();
bool rewind_scanlines();
bool navigate_scanline(int y);
//--------------------------------------------------------------------
AGG_INLINE unsigned calculate_alpha(int area) const
{
int cover = area >> (poly_subpixel_shift*2 + 1 - aa_shift);
if(cover < 0) cover = -cover;
if(m_filling_rule == fill_even_odd)
{
cover &= aa_mask2;
if(cover > aa_scale)
{
cover = aa_scale2 - cover;
}
}
if(cover > aa_mask) cover = aa_mask;
return cover;
}
//--------------------------------------------------------------------
template<class Scanline> bool sweep_scanline(Scanline& sl)
{
for(;;)
{
if(m_scan_y > m_outline.max_y()) return false;
sl.reset_spans();
unsigned num_cells = m_outline.scanline_num_cells(m_scan_y);
const cell_aa* const* cells = m_outline.scanline_cells(m_scan_y);
int cover = 0;
while(num_cells)
{
const cell_aa* cur_cell = *cells;
int x = cur_cell->x;
int area = cur_cell->area;
unsigned alpha;
cover += cur_cell->cover;
//accumulate all cells with the same X
while(--num_cells)
{
cur_cell = *++cells;
if(cur_cell->x != x) break;
area += cur_cell->area;
cover += cur_cell->cover;
}
if(area)
{
alpha = calculate_alpha((cover << (poly_subpixel_shift + 1)) - area);
if(alpha)
{
sl.add_cell(x, alpha);
}
x++;
}
if(num_cells && cur_cell->x > x)
{
alpha = calculate_alpha(cover << (poly_subpixel_shift + 1));
if(alpha)
{
sl.add_span(x, cur_cell->x - x, alpha);
}
}
}
if(sl.num_spans()) break;
++m_scan_y;
}
sl.finalize(m_scan_y);
++m_scan_y;
return true;
}
//--------------------------------------------------------------------
bool hit_test(int tx, int ty);
private:
//--------------------------------------------------------------------
// Disable copying
rasterizer_scanline_aa_nogamma(const rasterizer_scanline_aa_nogamma<Clip>&);
const rasterizer_scanline_aa_nogamma<Clip>&
operator = (const rasterizer_scanline_aa_nogamma<Clip>&);
private:
rasterizer_cells_aa<cell_aa> m_outline;
clip_type m_clipper;
filling_rule_e m_filling_rule;
bool m_auto_close;
coord_type m_start_x;
coord_type m_start_y;
unsigned m_status;
int m_scan_y;
};
//------------------------------------------------------------------------
template<class Clip>
void rasterizer_scanline_aa_nogamma<Clip>::reset()
{
m_outline.reset();
m_status = status_initial;
}
//------------------------------------------------------------------------
template<class Clip>
void rasterizer_scanline_aa_nogamma<Clip>::filling_rule(filling_rule_e filling_rule)
{
m_filling_rule = filling_rule;
}
//------------------------------------------------------------------------
template<class Clip>
void rasterizer_scanline_aa_nogamma<Clip>::clip_box(double x1, double y1,
double x2, double y2)
{
reset();
m_clipper.clip_box(conv_type::upscale(x1), conv_type::upscale(y1),
conv_type::upscale(x2), conv_type::upscale(y2));
}
//------------------------------------------------------------------------
template<class Clip>
void rasterizer_scanline_aa_nogamma<Clip>::reset_clipping()
{
reset();
m_clipper.reset_clipping();
}
//------------------------------------------------------------------------
template<class Clip>
void rasterizer_scanline_aa_nogamma<Clip>::close_polygon()
{
if(m_status == status_line_to)
{
m_clipper.line_to(m_outline, m_start_x, m_start_y);
m_status = status_closed;
}
}
//------------------------------------------------------------------------
template<class Clip>
void rasterizer_scanline_aa_nogamma<Clip>::move_to(int x, int y)
{
if(m_outline.sorted()) reset();
if(m_auto_close) close_polygon();
m_clipper.move_to(m_start_x = conv_type::downscale(x),
m_start_y = conv_type::downscale(y));
m_status = status_move_to;
}
//------------------------------------------------------------------------
template<class Clip>
void rasterizer_scanline_aa_nogamma<Clip>::line_to(int x, int y)
{
m_clipper.line_to(m_outline,
conv_type::downscale(x),
conv_type::downscale(y));
m_status = status_line_to;
}
//------------------------------------------------------------------------
template<class Clip>
void rasterizer_scanline_aa_nogamma<Clip>::move_to_d(double x, double y)
{
if(m_outline.sorted()) reset();
if(m_auto_close) close_polygon();
m_clipper.move_to(m_start_x = conv_type::upscale(x),
m_start_y = conv_type::upscale(y));
m_status = status_move_to;
}
//------------------------------------------------------------------------
template<class Clip>
void rasterizer_scanline_aa_nogamma<Clip>::line_to_d(double x, double y)
{
m_clipper.line_to(m_outline,
conv_type::upscale(x),
conv_type::upscale(y));
m_status = status_line_to;
}
//------------------------------------------------------------------------
template<class Clip>
void rasterizer_scanline_aa_nogamma<Clip>::add_vertex(double x, double y, unsigned cmd)
{
if(is_move_to(cmd))
{
move_to_d(x, y);
}
else
if(is_vertex(cmd))
{
line_to_d(x, y);
}
else
if(is_close(cmd))
{
close_polygon();
}
}
//------------------------------------------------------------------------
template<class Clip>
void rasterizer_scanline_aa_nogamma<Clip>::edge(int x1, int y1, int x2, int y2)
{
if(m_outline.sorted()) reset();
m_clipper.move_to(conv_type::downscale(x1), conv_type::downscale(y1));
m_clipper.line_to(m_outline,
conv_type::downscale(x2),
conv_type::downscale(y2));
m_status = status_move_to;
}
//------------------------------------------------------------------------
template<class Clip>
void rasterizer_scanline_aa_nogamma<Clip>::edge_d(double x1, double y1,
double x2, double y2)
{
if(m_outline.sorted()) reset();
m_clipper.move_to(conv_type::upscale(x1), conv_type::upscale(y1));
m_clipper.line_to(m_outline,
conv_type::upscale(x2),
conv_type::upscale(y2));
m_status = status_move_to;
}
//------------------------------------------------------------------------
template<class Clip>
void rasterizer_scanline_aa_nogamma<Clip>::sort()
{
if(m_auto_close) close_polygon();
m_outline.sort_cells();
}
//------------------------------------------------------------------------
template<class Clip>
AGG_INLINE bool rasterizer_scanline_aa_nogamma<Clip>::rewind_scanlines()
{
if(m_auto_close) close_polygon();
m_outline.sort_cells();
if(m_outline.total_cells() == 0)
{
return false;
}
m_scan_y = m_outline.min_y();
return true;
}
//------------------------------------------------------------------------
template<class Clip>
AGG_INLINE bool rasterizer_scanline_aa_nogamma<Clip>::navigate_scanline(int y)
{
if(m_auto_close) close_polygon();
m_outline.sort_cells();
if(m_outline.total_cells() == 0 ||
y < m_outline.min_y() ||
y > m_outline.max_y())
{
return false;
}
m_scan_y = y;
return true;
}
//------------------------------------------------------------------------
template<class Clip>
bool rasterizer_scanline_aa_nogamma<Clip>::hit_test(int tx, int ty)
{
if(!navigate_scanline(ty)) return false;
scanline_hit_test sl(tx);
sweep_scanline(sl);
return sl.hit();
}
}
#endif
| 0 |
D://workCode//uploadProject\awtk\3rd\agg | D://workCode//uploadProject\awtk\3rd\agg\include\agg_rasterizer_sl_clip.h | //----------------------------------------------------------------------------
// Anti-Grain Geometry - Version 2.4
// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
//
// Permission to copy, use, modify, sell and distribute this software
// is granted provided this copyright notice appears in all copies.
// This software is provided "as is" without express or implied
// warranty, and with no claim as to its suitability for any purpose.
//
//----------------------------------------------------------------------------
// Contact: mcseem@antigrain.com
// mcseemagg@yahoo.com
// http://www.antigrain.com
//----------------------------------------------------------------------------
#ifndef AGG_RASTERIZER_SL_CLIP_INCLUDED
#define AGG_RASTERIZER_SL_CLIP_INCLUDED
#include "agg_clip_liang_barsky.h"
namespace agg
{
//--------------------------------------------------------poly_max_coord_e
enum poly_max_coord_e
{
poly_max_coord = (1 << 30) - 1 //----poly_max_coord
};
//------------------------------------------------------------ras_conv_int
struct ras_conv_int
{
typedef int coord_type;
static AGG_INLINE int mul_div(double a, double b, double c)
{
return iround(a * b / c);
}
static int xi(int v) { return v; }
static int yi(int v) { return v; }
static int upscale(double v) { return iround(v * poly_subpixel_scale); }
static int downscale(int v) { return v; }
};
//--------------------------------------------------------ras_conv_int_sat
struct ras_conv_int_sat
{
typedef int coord_type;
static AGG_INLINE int mul_div(double a, double b, double c)
{
return saturation<poly_max_coord>::iround(a * b / c);
}
static int xi(int v) { return v; }
static int yi(int v) { return v; }
static int upscale(double v)
{
return saturation<poly_max_coord>::iround(v * poly_subpixel_scale);
}
static int downscale(int v) { return v; }
};
//---------------------------------------------------------ras_conv_int_3x
struct ras_conv_int_3x
{
typedef int coord_type;
static AGG_INLINE int mul_div(double a, double b, double c)
{
return iround(a * b / c);
}
static int xi(int v) { return v * 3; }
static int yi(int v) { return v; }
static int upscale(double v) { return iround(v * poly_subpixel_scale); }
static int downscale(int v) { return v; }
};
//-----------------------------------------------------------ras_conv_dbl
struct ras_conv_dbl
{
typedef double coord_type;
static AGG_INLINE double mul_div(double a, double b, double c)
{
return a * b / c;
}
static int xi(double v) { return iround(v * poly_subpixel_scale); }
static int yi(double v) { return iround(v * poly_subpixel_scale); }
static double upscale(double v) { return v; }
static double downscale(int v) { return v / double(poly_subpixel_scale); }
};
//--------------------------------------------------------ras_conv_dbl_3x
struct ras_conv_dbl_3x
{
typedef double coord_type;
static AGG_INLINE double mul_div(double a, double b, double c)
{
return a * b / c;
}
static int xi(double v) { return iround(v * poly_subpixel_scale * 3); }
static int yi(double v) { return iround(v * poly_subpixel_scale); }
static double upscale(double v) { return v; }
static double downscale(int v) { return v / double(poly_subpixel_scale); }
};
//------------------------------------------------------rasterizer_sl_clip
template<class Conv> class rasterizer_sl_clip
{
public:
typedef Conv conv_type;
typedef typename Conv::coord_type coord_type;
typedef rect_base<coord_type> rect_type;
//--------------------------------------------------------------------
rasterizer_sl_clip() :
m_clip_box(0,0,0,0),
m_x1(0),
m_y1(0),
m_f1(0),
m_clipping(false)
{}
//--------------------------------------------------------------------
void reset_clipping()
{
m_clipping = false;
}
//--------------------------------------------------------------------
void clip_box(coord_type x1, coord_type y1, coord_type x2, coord_type y2)
{
m_clip_box = rect_type(x1, y1, x2, y2);
m_clip_box.normalize();
m_clipping = true;
}
//--------------------------------------------------------------------
void move_to(coord_type x1, coord_type y1)
{
m_x1 = x1;
m_y1 = y1;
if(m_clipping) m_f1 = clipping_flags(x1, y1, m_clip_box);
}
private:
//------------------------------------------------------------------------
template<class Rasterizer>
AGG_INLINE void line_clip_y(Rasterizer& ras,
coord_type x1, coord_type y1,
coord_type x2, coord_type y2,
unsigned f1, unsigned f2) const
{
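// Keep only the vertical clipping flags of both endpoints
// (flag 2: y > clip.y2, flag 8: y < clip.y1).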
f1 &= 10;
f2 &= 10;
if((f1 | f2) == 0)
{
// Fully visible
ras.line(Conv::xi(x1), Conv::yi(y1), Conv::xi(x2), Conv::yi(y2));
}
else
{
if(f1 == f2)
{
// Invisible by Y
return;
}
coord_type tx1 = x1;
coord_type ty1 = y1;
coord_type tx2 = x2;
coord_type ty2 = y2;
if(f1 & 8) // y1 < clip.y1
{
tx1 = x1 + Conv::mul_div(m_clip_box.y1-y1, x2-x1, y2-y1);
ty1 = m_clip_box.y1;
}
if(f1 & 2) // y1 > clip.y2
{
tx1 = x1 + Conv::mul_div(m_clip_box.y2-y1, x2-x1, y2-y1);
ty1 = m_clip_box.y2;
}
if(f2 & 8) // y2 < clip.y1
{
tx2 = x1 + Conv::mul_div(m_clip_box.y1-y1, x2-x1, y2-y1);
ty2 = m_clip_box.y1;
}
if(f2 & 2) // y2 > clip.y2
{
tx2 = x1 + Conv::mul_div(m_clip_box.y2-y1, x2-x1, y2-y1);
ty2 = m_clip_box.y2;
}
ras.line(Conv::xi(tx1), Conv::yi(ty1),
Conv::xi(tx2), Conv::yi(ty2));
}
}
public:
//--------------------------------------------------------------------
template<class Rasterizer>
void line_to(Rasterizer& ras, coord_type x2, coord_type y2)
{
if(m_clipping)
{
unsigned f2 = clipping_flags(x2, y2, m_clip_box);
if((m_f1 & 10) == (f2 & 10) && (m_f1 & 10) != 0)
{
// Invisible by Y
m_x1 = x2;
m_y1 = y2;
m_f1 = f2;
return;
}
coord_type x1 = m_x1;
coord_type y1 = m_y1;
unsigned f1 = m_f1;
coord_type y3, y4;
unsigned f3, f4;
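// Dispatch on the horizontal clipping flags of both endpoints
// (flag 1: x > clip.x2, flag 4: x < clip.x1); the vertical flags
// are handled by line_clip_y().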
switch(((f1 & 5) << 1) | (f2 & 5))
{
case 0: // Visible by X
line_clip_y(ras, x1, y1, x2, y2, f1, f2);
break;
case 1: // x2 > clip.x2
y3 = y1 + Conv::mul_div(m_clip_box.x2-x1, y2-y1, x2-x1);
f3 = clipping_flags_y(y3, m_clip_box);
line_clip_y(ras, x1, y1, m_clip_box.x2, y3, f1, f3);
line_clip_y(ras, m_clip_box.x2, y3, m_clip_box.x2, y2, f3, f2);
break;
case 2: // x1 > clip.x2
y3 = y1 + Conv::mul_div(m_clip_box.x2-x1, y2-y1, x2-x1);
f3 = clipping_flags_y(y3, m_clip_box);
line_clip_y(ras, m_clip_box.x2, y1, m_clip_box.x2, y3, f1, f3);
line_clip_y(ras, m_clip_box.x2, y3, x2, y2, f3, f2);
break;
case 3: // x1 > clip.x2 && x2 > clip.x2
line_clip_y(ras, m_clip_box.x2, y1, m_clip_box.x2, y2, f1, f2);
break;
case 4: // x2 < clip.x1
y3 = y1 + Conv::mul_div(m_clip_box.x1-x1, y2-y1, x2-x1);
f3 = clipping_flags_y(y3, m_clip_box);
line_clip_y(ras, x1, y1, m_clip_box.x1, y3, f1, f3);
line_clip_y(ras, m_clip_box.x1, y3, m_clip_box.x1, y2, f3, f2);
break;
case 6: // x1 > clip.x2 && x2 < clip.x1
y3 = y1 + Conv::mul_div(m_clip_box.x2-x1, y2-y1, x2-x1);
y4 = y1 + Conv::mul_div(m_clip_box.x1-x1, y2-y1, x2-x1);
f3 = clipping_flags_y(y3, m_clip_box);
f4 = clipping_flags_y(y4, m_clip_box);
line_clip_y(ras, m_clip_box.x2, y1, m_clip_box.x2, y3, f1, f3);
line_clip_y(ras, m_clip_box.x2, y3, m_clip_box.x1, y4, f3, f4);
line_clip_y(ras, m_clip_box.x1, y4, m_clip_box.x1, y2, f4, f2);
break;
case 8: // x1 < clip.x1
y3 = y1 + Conv::mul_div(m_clip_box.x1-x1, y2-y1, x2-x1);
f3 = clipping_flags_y(y3, m_clip_box);
line_clip_y(ras, m_clip_box.x1, y1, m_clip_box.x1, y3, f1, f3);
line_clip_y(ras, m_clip_box.x1, y3, x2, y2, f3, f2);
break;
case 9: // x1 < clip.x1 && x2 > clip.x2
y3 = y1 + Conv::mul_div(m_clip_box.x1-x1, y2-y1, x2-x1);
y4 = y1 + Conv::mul_div(m_clip_box.x2-x1, y2-y1, x2-x1);
f3 = clipping_flags_y(y3, m_clip_box);
f4 = clipping_flags_y(y4, m_clip_box);
line_clip_y(ras, m_clip_box.x1, y1, m_clip_box.x1, y3, f1, f3);
line_clip_y(ras, m_clip_box.x1, y3, m_clip_box.x2, y4, f3, f4);
line_clip_y(ras, m_clip_box.x2, y4, m_clip_box.x2, y2, f4, f2);
break;
case 12: // x1 < clip.x1 && x2 < clip.x1
line_clip_y(ras, m_clip_box.x1, y1, m_clip_box.x1, y2, f1, f2);
break;
}
m_f1 = f2;
}
else
{
ras.line(Conv::xi(m_x1), Conv::yi(m_y1),
Conv::xi(x2), Conv::yi(y2));
}
m_x1 = x2;
m_y1 = y2;
}
private:
rect_type m_clip_box;
coord_type m_x1;
coord_type m_y1;
unsigned m_f1;
bool m_clipping;
};
//---------------------------------------------------rasterizer_sl_no_clip
class rasterizer_sl_no_clip
{
public:
typedef ras_conv_int conv_type;
typedef int coord_type;
rasterizer_sl_no_clip() : m_x1(0), m_y1(0) {}
void reset_clipping() {}
void clip_box(coord_type x1, coord_type y1, coord_type x2, coord_type y2) {}
void move_to(coord_type x1, coord_type y1) { m_x1 = x1; m_y1 = y1; }
template<class Rasterizer>
void line_to(Rasterizer& ras, coord_type x2, coord_type y2)
{
ras.line(m_x1, m_y1, x2, y2);
m_x1 = x2;
m_y1 = y2;
}
private:
int m_x1, m_y1;
};
// -----rasterizer_sl_clip_int
// -----rasterizer_sl_clip_int_sat
// -----rasterizer_sl_clip_int_3x
// -----rasterizer_sl_clip_dbl
// -----rasterizer_sl_clip_dbl_3x
//------------------------------------------------------------------------
typedef rasterizer_sl_clip<ras_conv_int> rasterizer_sl_clip_int;
typedef rasterizer_sl_clip<ras_conv_int_sat> rasterizer_sl_clip_int_sat;
typedef rasterizer_sl_clip<ras_conv_int_3x> rasterizer_sl_clip_int_3x;
typedef rasterizer_sl_clip<ras_conv_dbl> rasterizer_sl_clip_dbl;
typedef rasterizer_sl_clip<ras_conv_dbl_3x> rasterizer_sl_clip_dbl_3x;
}
#endif
| 0 |
D://workCode//uploadProject\awtk\3rd\agg | D://workCode//uploadProject\awtk\3rd\agg\include\agg_renderer_base.h | //----------------------------------------------------------------------------
// Anti-Grain Geometry - Version 2.4
// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
//
// Permission to copy, use, modify, sell and distribute this software
// is granted provided this copyright notice appears in all copies.
// This software is provided "as is" without express or implied
// warranty, and with no claim as to its suitability for any purpose.
//
//----------------------------------------------------------------------------
// Contact: mcseem@antigrain.com
// mcseemagg@yahoo.com
// http://www.antigrain.com
//----------------------------------------------------------------------------
//
// class renderer_base
//
//----------------------------------------------------------------------------
#ifndef AGG_RENDERER_BASE_INCLUDED
#define AGG_RENDERER_BASE_INCLUDED
#include "agg_basics.h"
#include "agg_rendering_buffer.h"
namespace agg
{
//-----------------------------------------------------------renderer_base
template<class PixelFormat> class renderer_base
{
public:
typedef PixelFormat pixfmt_type;
typedef typename pixfmt_type::color_type color_type;
typedef typename pixfmt_type::row_data row_data;
//--------------------------------------------------------------------
renderer_base() : m_ren(0), m_clip_box(1, 1, 0, 0) {}
explicit renderer_base(pixfmt_type& ren) :
m_ren(&ren),
m_clip_box(0, 0, ren.width() - 1, ren.height() - 1)
{}
void attach(pixfmt_type& ren)
{
m_ren = &ren;
m_clip_box = rect_i(0, 0, ren.width() - 1, ren.height() - 1);
}
//--------------------------------------------------------------------
const pixfmt_type& ren() const { return *m_ren; }
pixfmt_type& ren() { return *m_ren; }
//--------------------------------------------------------------------
unsigned width() const { return m_ren->width(); }
unsigned height() const { return m_ren->height(); }
//--------------------------------------------------------------------
bool clip_box(int x1, int y1, int x2, int y2)
{
rect_i cb(x1, y1, x2, y2);
cb.normalize();
if(cb.clip(rect_i(0, 0, width() - 1, height() - 1)))
{
m_clip_box = cb;
return true;
}
m_clip_box.x1 = 1;
m_clip_box.y1 = 1;
m_clip_box.x2 = 0;
m_clip_box.y2 = 0;
return false;
}
//--------------------------------------------------------------------
void reset_clipping(bool visibility)
{
if(visibility)
{
m_clip_box.x1 = 0;
m_clip_box.y1 = 0;
m_clip_box.x2 = width() - 1;
m_clip_box.y2 = height() - 1;
}
else
{
m_clip_box.x1 = 1;
m_clip_box.y1 = 1;
m_clip_box.x2 = 0;
m_clip_box.y2 = 0;
}
}
//--------------------------------------------------------------------
void clip_box_naked(int x1, int y1, int x2, int y2)
{
m_clip_box.x1 = x1;
m_clip_box.y1 = y1;
m_clip_box.x2 = x2;
m_clip_box.y2 = y2;
}
//--------------------------------------------------------------------
bool inbox(int x, int y) const
{
return x >= m_clip_box.x1 && y >= m_clip_box.y1 &&
x <= m_clip_box.x2 && y <= m_clip_box.y2;
}
//--------------------------------------------------------------------
const rect_i& clip_box() const { return m_clip_box; }
int xmin() const { return m_clip_box.x1; }
int ymin() const { return m_clip_box.y1; }
int xmax() const { return m_clip_box.x2; }
int ymax() const { return m_clip_box.y2; }
//--------------------------------------------------------------------
const rect_i& bounding_clip_box() const { return m_clip_box; }
int bounding_xmin() const { return m_clip_box.x1; }
int bounding_ymin() const { return m_clip_box.y1; }
int bounding_xmax() const { return m_clip_box.x2; }
int bounding_ymax() const { return m_clip_box.y2; }
//--------------------------------------------------------------------
void clear(const color_type& c)
{
unsigned y;
if(width())
{
for(y = 0; y < height(); y++)
{
m_ren->copy_hline(0, y, width(), c);
}
}
}
//--------------------------------------------------------------------
void fill(const color_type& c)
{
unsigned y;
if(width())
{
for(y = 0; y < height(); y++)
{
m_ren->blend_hline(0, y, width(), c, cover_mask);
}
}
}
//--------------------------------------------------------------------
void copy_pixel(int x, int y, const color_type& c)
{
if(inbox(x, y))
{
m_ren->copy_pixel(x, y, c);
}
}
//--------------------------------------------------------------------
void blend_pixel(int x, int y, const color_type& c, cover_type cover)
{
if(inbox(x, y))
{
m_ren->blend_pixel(x, y, c, cover);
}
}
//--------------------------------------------------------------------
color_type pixel(int x, int y) const
{
return inbox(x, y) ?
m_ren->pixel(x, y) :
color_type::no_color();
}
//--------------------------------------------------------------------
void copy_hline(int x1, int y, int x2, const color_type& c)
{
if(x1 > x2) { int t = x2; x2 = x1; x1 = t; }
if(y > ymax()) return;
if(y < ymin()) return;
if(x1 > xmax()) return;
if(x2 < xmin()) return;
if(x1 < xmin()) x1 = xmin();
if(x2 > xmax()) x2 = xmax();
m_ren->copy_hline(x1, y, x2 - x1 + 1, c);
}
//--------------------------------------------------------------------
void copy_vline(int x, int y1, int y2, const color_type& c)
{
if(y1 > y2) { int t = y2; y2 = y1; y1 = t; }
if(x > xmax()) return;
if(x < xmin()) return;
if(y1 > ymax()) return;
if(y2 < ymin()) return;
if(y1 < ymin()) y1 = ymin();
if(y2 > ymax()) y2 = ymax();
m_ren->copy_vline(x, y1, y2 - y1 + 1, c);
}
//--------------------------------------------------------------------
void blend_hline(int x1, int y, int x2,
const color_type& c, cover_type cover)
{
if(x1 > x2) { int t = x2; x2 = x1; x1 = t; }
if(y > ymax()) return;
if(y < ymin()) return;
if(x1 > xmax()) return;
if(x2 < xmin()) return;
if(x1 < xmin()) x1 = xmin();
if(x2 > xmax()) x2 = xmax();
m_ren->blend_hline(x1, y, x2 - x1 + 1, c, cover);
}
//--------------------------------------------------------------------
void blend_vline(int x, int y1, int y2,
const color_type& c, cover_type cover)
{
if(y1 > y2) { int t = y2; y2 = y1; y1 = t; }
if(x > xmax()) return;
if(x < xmin()) return;
if(y1 > ymax()) return;
if(y2 < ymin()) return;
if(y1 < ymin()) y1 = ymin();
if(y2 > ymax()) y2 = ymax();
m_ren->blend_vline(x, y1, y2 - y1 + 1, c, cover);
}
//--------------------------------------------------------------------
void copy_bar(int x1, int y1, int x2, int y2, const color_type& c)
{
rect_i rc(x1, y1, x2, y2);
rc.normalize();
if(rc.clip(clip_box()))
{
int y;
for(y = rc.y1; y <= rc.y2; y++)
{
m_ren->copy_hline(rc.x1, y, unsigned(rc.x2 - rc.x1 + 1), c);
}
}
}
//--------------------------------------------------------------------
void blend_bar(int x1, int y1, int x2, int y2,
const color_type& c, cover_type cover)
{
rect_i rc(x1, y1, x2, y2);
rc.normalize();
if(rc.clip(clip_box()))
{
int y;
for(y = rc.y1; y <= rc.y2; y++)
{
m_ren->blend_hline(rc.x1,
y,
unsigned(rc.x2 - rc.x1 + 1),
c,
cover);
}
}
}
//--------------------------------------------------------------------
void blend_solid_hspan(int x, int y, int len,
const color_type& c,
const cover_type* covers)
{
if(y > ymax()) return;
if(y < ymin()) return;
if(x < xmin())
{
len -= xmin() - x;
if(len <= 0) return;
covers += xmin() - x;
x = xmin();
}
if(x + len > xmax())
{
len = xmax() - x + 1;
if(len <= 0) return;
}
m_ren->blend_solid_hspan(x, y, len, c, covers);
}
//--------------------------------------------------------------------
void blend_solid_vspan(int x, int y, int len,
const color_type& c,
const cover_type* covers)
{
if(x > xmax()) return;
if(x < xmin()) return;
if(y < ymin())
{
len -= ymin() - y;
if(len <= 0) return;
covers += ymin() - y;
y = ymin();
}
if(y + len > ymax())
{
len = ymax() - y + 1;
if(len <= 0) return;
}
m_ren->blend_solid_vspan(x, y, len, c, covers);
}
//--------------------------------------------------------------------
void copy_color_hspan(int x, int y, int len, const color_type* colors)
{
if(y > ymax()) return;
if(y < ymin()) return;
if(x < xmin())
{
int d = xmin() - x;
len -= d;
if(len <= 0) return;
colors += d;
x = xmin();
}
if(x + len > xmax())
{
len = xmax() - x + 1;
if(len <= 0) return;
}
m_ren->copy_color_hspan(x, y, len, colors);
}
//--------------------------------------------------------------------
void copy_color_vspan(int x, int y, int len, const color_type* colors)
{
if(x > xmax()) return;
if(x < xmin()) return;
if(y < ymin())
{
int d = ymin() - y;
len -= d;
if(len <= 0) return;
colors += d;
y = ymin();
}
if(y + len > ymax())
{
len = ymax() - y + 1;
if(len <= 0) return;
}
m_ren->copy_color_vspan(x, y, len, colors);
}
//--------------------------------------------------------------------
void blend_color_hspan(int x, int y, int len,
const color_type* colors,
const cover_type* covers,
cover_type cover = agg::cover_full)
{
if(y > ymax()) return;
if(y < ymin()) return;
if(x < xmin())
{
int d = xmin() - x;
len -= d;
if(len <= 0) return;
if(covers) covers += d;
colors += d;
x = xmin();
}
if(x + len > xmax())
{
len = xmax() - x + 1;
if(len <= 0) return;
}
m_ren->blend_color_hspan(x, y, len, colors, covers, cover);
}
//--------------------------------------------------------------------
void blend_color_vspan(int x, int y, int len,
const color_type* colors,
const cover_type* covers,
cover_type cover = agg::cover_full)
{
if(x > xmax()) return;
if(x < xmin()) return;
if(y < ymin())
{
int d = ymin() - y;
len -= d;
if(len <= 0) return;
if(covers) covers += d;
colors += d;
y = ymin();
}
if(y + len > ymax())
{
len = ymax() - y + 1;
if(len <= 0) return;
}
m_ren->blend_color_vspan(x, y, len, colors, covers, cover);
}
//--------------------------------------------------------------------
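// Intersect the destination rectangle with the clip box and the source
// rectangle with the source dimensions; the returned rect carries the
// width (x2) and height (y2) of the area that remains to be copied.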
rect_i clip_rect_area(rect_i& dst, rect_i& src, int wsrc, int hsrc) const
{
rect_i rc(0,0,0,0);
rect_i cb = clip_box();
++cb.x2;
++cb.y2;
if(src.x1 < 0)
{
dst.x1 -= src.x1;
src.x1 = 0;
}
if(src.y1 < 0)
{
dst.y1 -= src.y1;
src.y1 = 0;
}
if(src.x2 > wsrc) src.x2 = wsrc;
if(src.y2 > hsrc) src.y2 = hsrc;
if(dst.x1 < cb.x1)
{
src.x1 += cb.x1 - dst.x1;
dst.x1 = cb.x1;
}
if(dst.y1 < cb.y1)
{
src.y1 += cb.y1 - dst.y1;
dst.y1 = cb.y1;
}
if(dst.x2 > cb.x2) dst.x2 = cb.x2;
if(dst.y2 > cb.y2) dst.y2 = cb.y2;
rc.x2 = dst.x2 - dst.x1;
rc.y2 = dst.y2 - dst.y1;
if(rc.x2 > src.x2 - src.x1) rc.x2 = src.x2 - src.x1;
if(rc.y2 > src.y2 - src.y1) rc.y2 = src.y2 - src.y1;
return rc;
}
//--------------------------------------------------------------------
template<class RenBuf>
void copy_from(const RenBuf& src,
const rect_i* rect_src_ptr = 0,
int dx = 0,
int dy = 0)
{
rect_i rsrc(0, 0, src.width(), src.height());
if(rect_src_ptr)
{
rsrc.x1 = rect_src_ptr->x1;
rsrc.y1 = rect_src_ptr->y1;
rsrc.x2 = rect_src_ptr->x2 + 1;
rsrc.y2 = rect_src_ptr->y2 + 1;
}
// Version with xdst, ydst (absolute positioning)
//rect_i rdst(xdst, ydst, xdst + rsrc.x2 - rsrc.x1, ydst + rsrc.y2 - rsrc.y1);
// Version with dx, dy (relative positioning)
rect_i rdst(rsrc.x1 + dx, rsrc.y1 + dy, rsrc.x2 + dx, rsrc.y2 + dy);
rect_i rc = clip_rect_area(rdst, rsrc, src.width(), src.height());
if(rc.x2 > 0)
{
int incy = 1;
if(rdst.y1 > rsrc.y1)
{
rsrc.y1 += rc.y2 - 1;
rdst.y1 += rc.y2 - 1;
incy = -1;
}
while(rc.y2 > 0)
{
m_ren->copy_from(src,
rdst.x1, rdst.y1,
rsrc.x1, rsrc.y1,
rc.x2);
rdst.y1 += incy;
rsrc.y1 += incy;
--rc.y2;
}
}
}
//--------------------------------------------------------------------
template<class SrcPixelFormatRenderer>
void blend_from(const SrcPixelFormatRenderer& src,
const rect_i* rect_src_ptr = 0,
int dx = 0,
int dy = 0,
cover_type cover = agg::cover_full)
{
rect_i rsrc(0, 0, src.width(), src.height());
if(rect_src_ptr)
{
rsrc.x1 = rect_src_ptr->x1;
rsrc.y1 = rect_src_ptr->y1;
rsrc.x2 = rect_src_ptr->x2 + 1;
rsrc.y2 = rect_src_ptr->y2 + 1;
}
// Version with xdst, ydst (absolute positioning)
//rect_i rdst(xdst, ydst, xdst + rsrc.x2 - rsrc.x1, ydst + rsrc.y2 - rsrc.y1);
// Version with dx, dy (relative positioning)
rect_i rdst(rsrc.x1 + dx, rsrc.y1 + dy, rsrc.x2 + dx, rsrc.y2 + dy);
rect_i rc = clip_rect_area(rdst, rsrc, src.width(), src.height());
if(rc.x2 > 0)
{
int incy = 1;
if(rdst.y1 > rsrc.y1)
{
rsrc.y1 += rc.y2 - 1;
rdst.y1 += rc.y2 - 1;
incy = -1;
}
while(rc.y2 > 0)
{
typename SrcPixelFormatRenderer::row_data rw = src.row(rsrc.y1);
if(rw.ptr)
{
int x1src = rsrc.x1;
int x1dst = rdst.x1;
int len = rc.x2;
if(rw.x1 > x1src)
{
x1dst += rw.x1 - x1src;
len -= rw.x1 - x1src;
x1src = rw.x1;
}
if(len > 0)
{
if(x1src + len-1 > rw.x2)
{
len -= x1src + len - rw.x2 - 1;
}
if(len > 0)
{
m_ren->blend_from(src,
x1dst, rdst.y1,
x1src, rsrc.y1,
len,
cover);
}
}
}
rdst.y1 += incy;
rsrc.y1 += incy;
--rc.y2;
}
}
}
//--------------------------------------------------------------------
template<class SrcPixelFormatRenderer>
void blend_from_color(const SrcPixelFormatRenderer& src,
const color_type& color,
const rect_i* rect_src_ptr = 0,
int dx = 0,
int dy = 0,
cover_type cover = agg::cover_full)
{
rect_i rsrc(0, 0, src.width(), src.height());
if(rect_src_ptr)
{
rsrc.x1 = rect_src_ptr->x1;
rsrc.y1 = rect_src_ptr->y1;
rsrc.x2 = rect_src_ptr->x2 + 1;
rsrc.y2 = rect_src_ptr->y2 + 1;
}
// Version with xdst, ydst (absolute positioning)
//rect_i rdst(xdst, ydst, xdst + rsrc.x2 - rsrc.x1, ydst + rsrc.y2 - rsrc.y1);
// Version with dx, dy (relative positioning)
rect_i rdst(rsrc.x1 + dx, rsrc.y1 + dy, rsrc.x2 + dx, rsrc.y2 + dy);
rect_i rc = clip_rect_area(rdst, rsrc, src.width(), src.height());
if(rc.x2 > 0)
{
int incy = 1;
if(rdst.y1 > rsrc.y1)
{
rsrc.y1 += rc.y2 - 1;
rdst.y1 += rc.y2 - 1;
incy = -1;
}
while(rc.y2 > 0)
{
typename SrcPixelFormatRenderer::row_data rw = src.row(rsrc.y1);
if(rw.ptr)
{
int x1src = rsrc.x1;
int x1dst = rdst.x1;
int len = rc.x2;
if(rw.x1 > x1src)
{
x1dst += rw.x1 - x1src;
len -= rw.x1 - x1src;
x1src = rw.x1;
}
if(len > 0)
{
if(x1src + len-1 > rw.x2)
{
len -= x1src + len - rw.x2 - 1;
}
if(len > 0)
{
m_ren->blend_from_color(src,
color,
x1dst, rdst.y1,
x1src, rsrc.y1,
len,
cover);
}
}
}
rdst.y1 += incy;
rsrc.y1 += incy;
--rc.y2;
}
}
}
//--------------------------------------------------------------------
template<class SrcPixelFormatRenderer>
void blend_from_lut(const SrcPixelFormatRenderer& src,
const color_type* color_lut,
const rect_i* rect_src_ptr = 0,
int dx = 0,
int dy = 0,
cover_type cover = agg::cover_full)
{
rect_i rsrc(0, 0, src.width(), src.height());
if(rect_src_ptr)
{
rsrc.x1 = rect_src_ptr->x1;
rsrc.y1 = rect_src_ptr->y1;
rsrc.x2 = rect_src_ptr->x2 + 1;
rsrc.y2 = rect_src_ptr->y2 + 1;
}
// Version with xdst, ydst (absolute positioning)
//rect_i rdst(xdst, ydst, xdst + rsrc.x2 - rsrc.x1, ydst + rsrc.y2 - rsrc.y1);
// Version with dx, dy (relative positioning)
rect_i rdst(rsrc.x1 + dx, rsrc.y1 + dy, rsrc.x2 + dx, rsrc.y2 + dy);
rect_i rc = clip_rect_area(rdst, rsrc, src.width(), src.height());
if(rc.x2 > 0)
{
int incy = 1;
if(rdst.y1 > rsrc.y1)
{
rsrc.y1 += rc.y2 - 1;
rdst.y1 += rc.y2 - 1;
incy = -1;
}
while(rc.y2 > 0)
{
typename SrcPixelFormatRenderer::row_data rw = src.row(rsrc.y1);
if(rw.ptr)
{
int x1src = rsrc.x1;
int x1dst = rdst.x1;
int len = rc.x2;
if(rw.x1 > x1src)
{
x1dst += rw.x1 - x1src;
len -= rw.x1 - x1src;
x1src = rw.x1;
}
if(len > 0)
{
if(x1src + len-1 > rw.x2)
{
len -= x1src + len - rw.x2 - 1;
}
if(len > 0)
{
m_ren->blend_from_lut(src,
color_lut,
x1dst, rdst.y1,
x1src, rsrc.y1,
len,
cover);
}
}
}
rdst.y1 += incy;
rsrc.y1 += incy;
--rc.y2;
}
}
}
private:
pixfmt_type* m_ren;
rect_i m_clip_box;
};
}
#endif
| 0 |
D://workCode//uploadProject\awtk\3rd\agg | D://workCode//uploadProject\awtk\3rd\agg\include\agg_renderer_scanline.h | //----------------------------------------------------------------------------
// Anti-Grain Geometry - Version 2.4
// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
//
// Permission to copy, use, modify, sell and distribute this software
// is granted provided this copyright notice appears in all copies.
// This software is provided "as is" without express or implied
// warranty, and with no claim as to its suitability for any purpose.
//
//----------------------------------------------------------------------------
// Contact: mcseem@antigrain.com
// mcseemagg@yahoo.com
// http://www.antigrain.com
//----------------------------------------------------------------------------
#ifndef AGG_RENDERER_SCANLINE_INCLUDED
#define AGG_RENDERER_SCANLINE_INCLUDED
#include "agg_basics.h"
#include "agg_renderer_base.h"
namespace agg
{
//================================================render_scanline_aa_solid
template<class Scanline, class BaseRenderer, class ColorT>
void render_scanline_aa_solid(const Scanline& sl,
BaseRenderer& ren,
const ColorT& color)
{
int y = sl.y();
unsigned num_spans = sl.num_spans();
typename Scanline::const_iterator span = sl.begin();
for(;;)
{
int x = span->x;
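// A negative span length denotes a solid run: its length is -len and
// a single cover value (*covers) applies to the whole run.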
if(span->len > 0)
{
ren.blend_solid_hspan(x, y, (unsigned)span->len,
color,
span->covers);
}
else
{
ren.blend_hline(x, y, (unsigned)(x - span->len - 1),
color,
*(span->covers));
}
if(--num_spans == 0) break;
++span;
}
}
//===============================================render_scanlines_aa_solid
template<class Rasterizer, class Scanline,
class BaseRenderer, class ColorT>
void render_scanlines_aa_solid(Rasterizer& ras, Scanline& sl,
BaseRenderer& ren, const ColorT& color)
{
if(ras.rewind_scanlines())
{
// Explicitly convert "color" to the BaseRenderer color type.
// For example, it can be called with color type "rgba", while
// "rgba8" is needed. Otherwise it will be implicitly
// converted in the loop many times.
//----------------------
typename BaseRenderer::color_type ren_color = color;
sl.reset(ras.min_x(), ras.max_x());
while(ras.sweep_scanline(sl))
{
//render_scanline_aa_solid(sl, ren, ren_color);
// This code is equivalent to the above call (copy/paste).
// It's just a "manual" optimization for old compilers,
// like Microsoft Visual C++ v6.0
//-------------------------------
int y = sl.y();
unsigned num_spans = sl.num_spans();
typename Scanline::const_iterator span = sl.begin();
for(;;)
{
int x = span->x;
if(span->len > 0)
{
ren.blend_solid_hspan(x, y, (unsigned)span->len,
ren_color,
span->covers);
}
else
{
ren.blend_hline(x, y, (unsigned)(x - span->len - 1),
ren_color,
*(span->covers));
}
if(--num_spans == 0) break;
++span;
}
}
}
}
//==============================================renderer_scanline_aa_solid
template<class BaseRenderer> class renderer_scanline_aa_solid
{
public:
typedef BaseRenderer base_ren_type;
typedef typename base_ren_type::color_type color_type;
//--------------------------------------------------------------------
renderer_scanline_aa_solid() : m_ren(0) {}
explicit renderer_scanline_aa_solid(base_ren_type& ren) : m_ren(&ren) {}
void attach(base_ren_type& ren)
{
m_ren = &ren;
}
//--------------------------------------------------------------------
void color(const color_type& c) { m_color = c; }
const color_type& color() const { return m_color; }
//--------------------------------------------------------------------
void prepare() {}
//--------------------------------------------------------------------
template<class Scanline> void render(const Scanline& sl)
{
render_scanline_aa_solid(sl, *m_ren, m_color);
}
private:
base_ren_type* m_ren;
color_type m_color;
};
//======================================================render_scanline_aa
template<class Scanline, class BaseRenderer,
class SpanAllocator, class SpanGenerator>
void render_scanline_aa(const Scanline& sl, BaseRenderer& ren,
SpanAllocator& alloc, SpanGenerator& span_gen)
{
int y = sl.y();
unsigned num_spans = sl.num_spans();
typename Scanline::const_iterator span = sl.begin();
for(;;)
{
int x = span->x;
int len = span->len;
const typename Scanline::cover_type* covers = span->covers;
if(len < 0) len = -len;
typename BaseRenderer::color_type* colors = alloc.allocate(len);
span_gen.generate(colors, x, y, len);
ren.blend_color_hspan(x, y, len, colors,
(span->len < 0) ? 0 : covers, *covers);
if(--num_spans == 0) break;
++span;
}
}
//=====================================================render_scanlines_aa
template<class Rasterizer, class Scanline, class BaseRenderer,
class SpanAllocator, class SpanGenerator>
void render_scanlines_aa(Rasterizer& ras, Scanline& sl, BaseRenderer& ren,
SpanAllocator& alloc, SpanGenerator& span_gen)
{
if(ras.rewind_scanlines())
{
sl.reset(ras.min_x(), ras.max_x());
span_gen.prepare();
while(ras.sweep_scanline(sl))
{
render_scanline_aa(sl, ren, alloc, span_gen);
}
}
}
//====================================================renderer_scanline_aa
template<class BaseRenderer, class SpanAllocator, class SpanGenerator>
class renderer_scanline_aa
{
public:
typedef BaseRenderer base_ren_type;
typedef SpanAllocator alloc_type;
typedef SpanGenerator span_gen_type;
//--------------------------------------------------------------------
renderer_scanline_aa() : m_ren(0), m_alloc(0), m_span_gen(0) {}
renderer_scanline_aa(base_ren_type& ren,
alloc_type& alloc,
span_gen_type& span_gen) :
m_ren(&ren),
m_alloc(&alloc),
m_span_gen(&span_gen)
{}
void attach(base_ren_type& ren,
alloc_type& alloc,
span_gen_type& span_gen)
{
m_ren = &ren;
m_alloc = &alloc;
m_span_gen = &span_gen;
}
//--------------------------------------------------------------------
void prepare() { m_span_gen->prepare(); }
//--------------------------------------------------------------------
template<class Scanline> void render(const Scanline& sl)
{
render_scanline_aa(sl, *m_ren, *m_alloc, *m_span_gen);
}
private:
base_ren_type* m_ren;
alloc_type* m_alloc;
span_gen_type* m_span_gen;
};
//===============================================render_scanline_bin_solid
template<class Scanline, class BaseRenderer, class ColorT>
void render_scanline_bin_solid(const Scanline& sl,
BaseRenderer& ren,
const ColorT& color)
{
unsigned num_spans = sl.num_spans();
typename Scanline::const_iterator span = sl.begin();
for(;;)
{
ren.blend_hline(span->x,
sl.y(),
span->x - 1 + ((span->len < 0) ?
-span->len :
span->len),
color,
cover_full);
if(--num_spans == 0) break;
++span;
}
}
//==============================================render_scanlines_bin_solid
template<class Rasterizer, class Scanline,
class BaseRenderer, class ColorT>
void render_scanlines_bin_solid(Rasterizer& ras, Scanline& sl,
BaseRenderer& ren, const ColorT& color)
{
if(ras.rewind_scanlines())
{
// Explicitly convert "color" to the BaseRenderer color type.
// For example, it can be called with color type "rgba", while
// "rgba8" is needed. Otherwise it will be implicitly
// converted in the loop many times.
//----------------------
typename BaseRenderer::color_type ren_color(color);
sl.reset(ras.min_x(), ras.max_x());
while(ras.sweep_scanline(sl))
{
//render_scanline_bin_solid(sl, ren, ren_color);
// This code is equivalent to the above call (copy/paste).
// It's just a "manual" optimization for old compilers,
// like Microsoft Visual C++ v6.0
//-------------------------------
unsigned num_spans = sl.num_spans();
typename Scanline::const_iterator span = sl.begin();
for(;;)
{
ren.blend_hline(span->x,
sl.y(),
span->x - 1 + ((span->len < 0) ?
-span->len :
span->len),
ren_color,
cover_full);
if(--num_spans == 0) break;
++span;
}
}
}
}
//=============================================renderer_scanline_bin_solid
template<class BaseRenderer> class renderer_scanline_bin_solid
{
public:
typedef BaseRenderer base_ren_type;
typedef typename base_ren_type::color_type color_type;
//--------------------------------------------------------------------
renderer_scanline_bin_solid() : m_ren(0) {}
explicit renderer_scanline_bin_solid(base_ren_type& ren) : m_ren(&ren) {}
void attach(base_ren_type& ren)
{
m_ren = &ren;
}
//--------------------------------------------------------------------
void color(const color_type& c) { m_color = c; }
const color_type& color() const { return m_color; }
//--------------------------------------------------------------------
void prepare() {}
//--------------------------------------------------------------------
template<class Scanline> void render(const Scanline& sl)
{
render_scanline_bin_solid(sl, *m_ren, m_color);
}
private:
base_ren_type* m_ren;
color_type m_color;
};
//======================================================render_scanline_bin
template<class Scanline, class BaseRenderer,
class SpanAllocator, class SpanGenerator>
void render_scanline_bin(const Scanline& sl, BaseRenderer& ren,
SpanAllocator& alloc, SpanGenerator& span_gen)
{
int y = sl.y();
unsigned num_spans = sl.num_spans();
typename Scanline::const_iterator span = sl.begin();
for(;;)
{
int x = span->x;
int len = span->len;
if(len < 0) len = -len;
typename BaseRenderer::color_type* colors = alloc.allocate(len);
span_gen.generate(colors, x, y, len);
ren.blend_color_hspan(x, y, len, colors, 0, cover_full);
if(--num_spans == 0) break;
++span;
}
}
//=====================================================render_scanlines_bin
template<class Rasterizer, class Scanline, class BaseRenderer,
class SpanAllocator, class SpanGenerator>
void render_scanlines_bin(Rasterizer& ras, Scanline& sl, BaseRenderer& ren,
SpanAllocator& alloc, SpanGenerator& span_gen)
{
if(ras.rewind_scanlines())
{
sl.reset(ras.min_x(), ras.max_x());
span_gen.prepare();
while(ras.sweep_scanline(sl))
{
render_scanline_bin(sl, ren, alloc, span_gen);
}
}
}
//====================================================renderer_scanline_bin
template<class BaseRenderer, class SpanAllocator, class SpanGenerator>
class renderer_scanline_bin
{
public:
typedef BaseRenderer base_ren_type;
typedef SpanAllocator alloc_type;
typedef SpanGenerator span_gen_type;
//--------------------------------------------------------------------
renderer_scanline_bin() : m_ren(0), m_alloc(0), m_span_gen(0) {}
renderer_scanline_bin(base_ren_type& ren,
alloc_type& alloc,
span_gen_type& span_gen) :
m_ren(&ren),
m_alloc(&alloc),
m_span_gen(&span_gen)
{}
void attach(base_ren_type& ren,
alloc_type& alloc,
span_gen_type& span_gen)
{
m_ren = &ren;
m_alloc = &alloc;
m_span_gen = &span_gen;
}
//--------------------------------------------------------------------
void prepare() { m_span_gen->prepare(); }
//--------------------------------------------------------------------
template<class Scanline> void render(const Scanline& sl)
{
render_scanline_bin(sl, *m_ren, *m_alloc, *m_span_gen);
}
private:
base_ren_type* m_ren;
alloc_type* m_alloc;
span_gen_type* m_span_gen;
};
//========================================================render_scanlines
template<class Rasterizer, class Scanline, class Renderer>
void render_scanlines(Rasterizer& ras, Scanline& sl, Renderer& ren)
{
if(ras.rewind_scanlines())
{
sl.reset(ras.min_x(), ras.max_x());
ren.prepare();
while(ras.sweep_scanline(sl))
{
ren.render(sl);
}
}
}
//========================================================render_all_paths
template<class Rasterizer, class Scanline, class Renderer,
class VertexSource, class ColorStorage, class PathId>
void render_all_paths(Rasterizer& ras,
Scanline& sl,
Renderer& r,
VertexSource& vs,
const ColorStorage& as,
const PathId& path_id,
unsigned num_paths)
{
for(unsigned i = 0; i < num_paths; i++)
{
ras.reset();
ras.add_path(vs, path_id[i]);
r.color(as[i]);
render_scanlines(ras, sl, r);
}
}
//=============================================render_scanlines_compound
template<class Rasterizer,
class ScanlineAA,
class ScanlineBin,
class BaseRenderer,
class SpanAllocator,
class StyleHandler>
void render_scanlines_compound(Rasterizer& ras,
ScanlineAA& sl_aa,
ScanlineBin& sl_bin,
BaseRenderer& ren,
SpanAllocator& alloc,
StyleHandler& sh)
{
if(ras.rewind_scanlines())
{
int min_x = ras.min_x();
int len = ras.max_x() - min_x + 2;
sl_aa.reset(min_x, ras.max_x());
sl_bin.reset(min_x, ras.max_x());
typedef typename BaseRenderer::color_type color_type;
color_type* color_span = alloc.allocate(len * 2);
color_type* mix_buffer = color_span + len;
unsigned num_spans;
unsigned num_styles;
unsigned style;
bool solid;
while((num_styles = ras.sweep_styles()) > 0)
{
typename ScanlineAA::const_iterator span_aa;
if(num_styles == 1)
{
// Optimization for a single style. Happens often
//-------------------------
if(ras.sweep_scanline(sl_aa, 0))
{
style = ras.style(0);
if(sh.is_solid(style))
{
// Just solid fill
//-----------------------
render_scanline_aa_solid(sl_aa, ren, sh.color(style));
}
else
{
// Arbitrary span generator
//-----------------------
span_aa = sl_aa.begin();
num_spans = sl_aa.num_spans();
for(;;)
{
len = span_aa->len;
sh.generate_span(color_span,
span_aa->x,
sl_aa.y(),
len,
style);
ren.blend_color_hspan(span_aa->x,
sl_aa.y(),
span_aa->len,
color_span,
span_aa->covers);
if(--num_spans == 0) break;
++span_aa;
}
}
}
}
else
{
if(ras.sweep_scanline(sl_bin, -1))
{
// Clear the spans of the mix_buffer
//--------------------
typename ScanlineBin::const_iterator span_bin = sl_bin.begin();
num_spans = sl_bin.num_spans();
for(;;)
{
memset(mix_buffer + span_bin->x - min_x,
0,
span_bin->len * sizeof(color_type));
if(--num_spans == 0) break;
++span_bin;
}
unsigned i;
for(i = 0; i < num_styles; i++)
{
style = ras.style(i);
solid = sh.is_solid(style);
if(ras.sweep_scanline(sl_aa, i))
{
color_type* colors;
color_type* cspan;
typename ScanlineAA::cover_type* covers;
span_aa = sl_aa.begin();
num_spans = sl_aa.num_spans();
if(solid)
{
// Just solid fill
//-----------------------
for(;;)
{
color_type c = sh.color(style);
len = span_aa->len;
colors = mix_buffer + span_aa->x - min_x;
covers = span_aa->covers;
do
{
if(*covers == cover_full)
{
*colors = c;
}
else
{
colors->add(c, *covers);
}
++colors;
++covers;
}
while(--len);
if(--num_spans == 0) break;
++span_aa;
}
}
else
{
// Arbitrary span generator
//-----------------------
for(;;)
{
len = span_aa->len;
colors = mix_buffer + span_aa->x - min_x;
cspan = color_span;
sh.generate_span(cspan,
span_aa->x,
sl_aa.y(),
len,
style);
covers = span_aa->covers;
do
{
if(*covers == cover_full)
{
*colors = *cspan;
}
else
{
colors->add(*cspan, *covers);
}
++cspan;
++colors;
++covers;
}
while(--len);
if(--num_spans == 0) break;
++span_aa;
}
}
}
}
// Emit the blended result as a color hspan
//-------------------------
span_bin = sl_bin.begin();
num_spans = sl_bin.num_spans();
for(;;)
{
ren.blend_color_hspan(span_bin->x,
sl_bin.y(),
span_bin->len,
mix_buffer + span_bin->x - min_x,
0,
cover_full);
if(--num_spans == 0) break;
++span_bin;
}
} // if(ras.sweep_scanline(sl_bin, -1))
} // if(num_styles == 1) ... else
} // while((num_styles = ras.sweep_styles()) > 0)
} // if(ras.rewind_scanlines())
}
//=======================================render_scanlines_compound_layered
template<class Rasterizer,
class ScanlineAA,
class BaseRenderer,
class SpanAllocator,
class StyleHandler>
void render_scanlines_compound_layered(Rasterizer& ras,
ScanlineAA& sl_aa,
BaseRenderer& ren,
SpanAllocator& alloc,
StyleHandler& sh)
{
if(ras.rewind_scanlines())
{
int min_x = ras.min_x();
int len = ras.max_x() - min_x + 2;
sl_aa.reset(min_x, ras.max_x());
typedef typename BaseRenderer::color_type color_type;
color_type* color_span = alloc.allocate(len * 2);
color_type* mix_buffer = color_span + len;
cover_type* cover_buffer = ras.allocate_cover_buffer(len);
unsigned num_spans;
unsigned num_styles;
unsigned style;
bool solid;
while((num_styles = ras.sweep_styles()) > 0)
{
typename ScanlineAA::const_iterator span_aa;
if(num_styles == 1)
{
// Optimization for a single style. Happens often
//-------------------------
if(ras.sweep_scanline(sl_aa, 0))
{
style = ras.style(0);
if(sh.is_solid(style))
{
// Just solid fill
//-----------------------
render_scanline_aa_solid(sl_aa, ren, sh.color(style));
}
else
{
// Arbitrary span generator
//-----------------------
span_aa = sl_aa.begin();
num_spans = sl_aa.num_spans();
for(;;)
{
len = span_aa->len;
sh.generate_span(color_span,
span_aa->x,
sl_aa.y(),
len,
style);
ren.blend_color_hspan(span_aa->x,
sl_aa.y(),
span_aa->len,
color_span,
span_aa->covers);
if(--num_spans == 0) break;
++span_aa;
}
}
}
}
else
{
int sl_start = ras.scanline_start();
unsigned sl_len = ras.scanline_length();
if(sl_len)
{
memset(mix_buffer + sl_start - min_x,
0,
sl_len * sizeof(color_type));
memset(cover_buffer + sl_start - min_x,
0,
sl_len * sizeof(cover_type));
int sl_y = 0x7FFFFFFF;
unsigned i;
for(i = 0; i < num_styles; i++)
{
style = ras.style(i);
solid = sh.is_solid(style);
if(ras.sweep_scanline(sl_aa, i))
{
unsigned cover;
color_type* colors;
color_type* cspan;
cover_type* src_covers;
cover_type* dst_covers;
span_aa = sl_aa.begin();
num_spans = sl_aa.num_spans();
sl_y = sl_aa.y();
if(solid)
{
// Just solid fill
//-----------------------
for(;;)
{
color_type c = sh.color(style);
len = span_aa->len;
colors = mix_buffer + span_aa->x - min_x;
src_covers = span_aa->covers;
dst_covers = cover_buffer + span_aa->x - min_x;
do
{
cover = *src_covers;
if(*dst_covers + cover > cover_full)
{
cover = cover_full - *dst_covers;
}
if(cover)
{
colors->add(c, cover);
*dst_covers += cover;
}
++colors;
++src_covers;
++dst_covers;
}
while(--len);
if(--num_spans == 0) break;
++span_aa;
}
}
else
{
// Arbitrary span generator
//-----------------------
for(;;)
{
len = span_aa->len;
colors = mix_buffer + span_aa->x - min_x;
cspan = color_span;
sh.generate_span(cspan,
span_aa->x,
sl_aa.y(),
len,
style);
src_covers = span_aa->covers;
dst_covers = cover_buffer + span_aa->x - min_x;
do
{
cover = *src_covers;
if(*dst_covers + cover > cover_full)
{
cover = cover_full - *dst_covers;
}
if(cover)
{
colors->add(*cspan, cover);
*dst_covers += cover;
}
++cspan;
++colors;
++src_covers;
++dst_covers;
}
while(--len);
if(--num_spans == 0) break;
++span_aa;
}
}
}
}
ren.blend_color_hspan(sl_start,
sl_y,
sl_len,
mix_buffer + sl_start - min_x,
0,
cover_full);
} //if(sl_len)
} //if(num_styles == 1) ... else
} //while((num_styles = ras.sweep_styles()) > 0)
} //if(ras.rewind_scanlines())
}
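    //------------------------------------------------------------------------
    // The two compound renderers above only require the StyleHandler to expose
    // is_solid(style), color(style) and generate_span(span, x, y, len, style).
    // A minimal hypothetical handler (a sketch, not part of AGG) that maps each
    // style id to one solid rgba8 color could look like this:
    //
    //     struct solid_styles
    //     {
    //         agg::rgba8 colors[2];                        // one color per style id
    //         bool is_solid(unsigned) const { return true; }
    //         const agg::rgba8& color(unsigned style) const { return colors[style]; }
    //         void generate_span(agg::rgba8*, int, int, unsigned, unsigned) {}
    //     };
    //
    // Together with a compound rasterizer, an AA scanline, a binary scanline,
    // a base renderer and a span allocator, such a handler is passed as the
    // last argument of render_scanlines_compound().
    //------------------------------------------------------------------------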
}
#endif
| 0 |
D://workCode//uploadProject\awtk\3rd\agg | D://workCode//uploadProject\awtk\3rd\agg\include\agg_rendering_buffer.h | //----------------------------------------------------------------------------
// Anti-Grain Geometry - Version 2.4
// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
//
// Permission to copy, use, modify, sell and distribute this software
// is granted provided this copyright notice appears in all copies.
// This software is provided "as is" without express or implied
// warranty, and with no claim as to its suitability for any purpose.
//
//----------------------------------------------------------------------------
// Contact: mcseem@antigrain.com
// mcseemagg@yahoo.com
// http://www.antigrain.com
//----------------------------------------------------------------------------
//
// class rendering_buffer
//
//----------------------------------------------------------------------------
#ifndef AGG_RENDERING_BUFFER_INCLUDED
#define AGG_RENDERING_BUFFER_INCLUDED
#include "agg_array.h"
namespace agg
{
//===========================================================row_accessor
template<class T> class row_accessor
{
public:
typedef const_row_info<T> row_data;
//-------------------------------------------------------------------
row_accessor() :
m_buf(0),
m_start(0),
m_width(0),
m_height(0),
m_stride(0)
{
}
//--------------------------------------------------------------------
row_accessor(T* buf, unsigned width, unsigned height, int stride) :
m_buf(0),
m_start(0),
m_width(0),
m_height(0),
m_stride(0)
{
attach(buf, width, height, stride);
}
//--------------------------------------------------------------------
void attach(T* buf, unsigned width, unsigned height, int stride)
{
m_buf = m_start = buf;
m_width = width;
m_height = height;
m_stride = stride;
if(stride < 0)
{
m_start = m_buf - int(height - 1) * stride;
}
}
//--------------------------------------------------------------------
AGG_INLINE T* buf() { return m_buf; }
AGG_INLINE const T* buf() const { return m_buf; }
AGG_INLINE unsigned width() const { return m_width; }
AGG_INLINE unsigned height() const { return m_height; }
AGG_INLINE int stride() const { return m_stride; }
AGG_INLINE unsigned stride_abs() const
{
return (m_stride < 0) ? unsigned(-m_stride) : unsigned(m_stride);
}
//--------------------------------------------------------------------
AGG_INLINE T* row_ptr(int, int y, unsigned)
{
return m_start + y * m_stride;
}
AGG_INLINE T* row_ptr(int y) { return m_start + y * m_stride; }
AGG_INLINE const T* row_ptr(int y) const { return m_start + y * m_stride; }
AGG_INLINE row_data row (int y) const
{
return row_data(0, m_width-1, row_ptr(y));
}
//--------------------------------------------------------------------
template<class RenBuf>
void copy_from(const RenBuf& src)
{
unsigned h = height();
if(src.height() < h) h = src.height();
unsigned l = stride_abs();
if(src.stride_abs() < l) l = src.stride_abs();
l *= sizeof(T);
unsigned y;
unsigned w = width();
for (y = 0; y < h; y++)
{
memcpy(row_ptr(0, y, w), src.row_ptr(y), l);
}
}
//--------------------------------------------------------------------
void clear(T value)
{
unsigned y;
unsigned w = width();
unsigned stride = stride_abs();
for(y = 0; y < height(); y++)
{
T* p = row_ptr(0, y, w);
unsigned x;
for(x = 0; x < stride; x++)
{
*p++ = value;
}
}
}
private:
//--------------------------------------------------------------------
        T* m_buf; // Pointer to the rendering buffer
T* m_start; // Pointer to first pixel depending on stride
unsigned m_width; // Width in pixels
unsigned m_height; // Height in pixels
int m_stride; // Number of bytes per row. Can be < 0
};
//==========================================================row_ptr_cache
template<class T> class row_ptr_cache
{
public:
typedef const_row_info<T> row_data;
//-------------------------------------------------------------------
row_ptr_cache() :
m_buf(0),
m_rows(),
m_width(0),
m_height(0),
m_stride(0)
{
}
//--------------------------------------------------------------------
row_ptr_cache(T* buf, unsigned width, unsigned height, int stride) :
m_buf(0),
m_rows(),
m_width(0),
m_height(0),
m_stride(0)
{
attach(buf, width, height, stride);
}
//--------------------------------------------------------------------
void attach(T* buf, unsigned width, unsigned height, int stride)
{
m_buf = buf;
m_width = width;
m_height = height;
m_stride = stride;
if(height > m_rows.size())
{
m_rows.resize(height);
}
T* row_ptr = m_buf;
if(stride < 0)
{
row_ptr = m_buf - int(height - 1) * stride;
}
T** rows = &m_rows[0];
while(height--)
{
*rows++ = row_ptr;
row_ptr += stride;
}
}
//--------------------------------------------------------------------
AGG_INLINE T* buf() { return m_buf; }
AGG_INLINE const T* buf() const { return m_buf; }
AGG_INLINE unsigned width() const { return m_width; }
AGG_INLINE unsigned height() const { return m_height; }
AGG_INLINE int stride() const { return m_stride; }
AGG_INLINE unsigned stride_abs() const
{
return (m_stride < 0) ? unsigned(-m_stride) : unsigned(m_stride);
}
//--------------------------------------------------------------------
AGG_INLINE T* row_ptr(int, int y, unsigned)
{
return m_rows[y];
}
AGG_INLINE T* row_ptr(int y) { return m_rows[y]; }
AGG_INLINE const T* row_ptr(int y) const { return m_rows[y]; }
AGG_INLINE row_data row (int y) const
{
return row_data(0, m_width-1, m_rows[y]);
}
//--------------------------------------------------------------------
T const* const* rows() const { return &m_rows[0]; }
//--------------------------------------------------------------------
template<class RenBuf>
void copy_from(const RenBuf& src)
{
unsigned h = height();
if(src.height() < h) h = src.height();
unsigned l = stride_abs();
if(src.stride_abs() < l) l = src.stride_abs();
l *= sizeof(T);
unsigned y;
unsigned w = width();
for (y = 0; y < h; y++)
{
memcpy(row_ptr(0, y, w), src.row_ptr(y), l);
}
}
//--------------------------------------------------------------------
void clear(T value)
{
unsigned y;
unsigned w = width();
unsigned stride = stride_abs();
for(y = 0; y < height(); y++)
{
T* p = row_ptr(0, y, w);
unsigned x;
for(x = 0; x < stride; x++)
{
*p++ = value;
}
}
}
private:
//--------------------------------------------------------------------
        T* m_buf; // Pointer to the rendering buffer
pod_array<T*> m_rows; // Pointers to each row of the buffer
unsigned m_width; // Width in pixels
unsigned m_height; // Height in pixels
int m_stride; // Number of bytes per row. Can be < 0
};
//========================================================rendering_buffer
//
// The definition of the main type for accessing the rows in the frame
// buffer. It provides functionality to navigate to the rows in a
// rectangular matrix, from top to bottom or from bottom to top depending
// on stride.
//
// row_accessor is cheap to create/destroy, but performs one multiplication
// when calling row_ptr().
//
    // row_ptr_cache creates an array of pointers to rows, so access
    // via row_ptr() may be faster, but it requires a memory allocation
    // on creation. For example, on typical Intel Pentium hardware
    // row_ptr_cache speeds up span_image_filter_rgb_nn by up to 10%.
    //
    // The typedef below is used only in shorthand typedefs such as
    // pixfmt_rgba32 and can be redefined in agg_config.h.
    // In real applications you can use either class, depending on your needs.
//------------------------------------------------------------------------
#ifdef AGG_RENDERING_BUFFER
typedef AGG_RENDERING_BUFFER rendering_buffer;
#else
// typedef row_ptr_cache<int8u> rendering_buffer;
typedef row_accessor<int8u> rendering_buffer;
#endif
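    //------------------------------------------------------------------------
    // A minimal attach() sketch (the buffer layout is an assumption: 4 bytes
    // per pixel, e.g. an RGBA32 frame buffer). With a negative stride the
    // buffer is treated as bottom-up, and row_ptr(0) still returns the
    // visually topmost row.
    //
    //     agg::int8u* pixels = new agg::int8u[width * height * 4];
    //     agg::rendering_buffer rbuf;
    //     rbuf.attach(pixels, width, height, width * 4);           // top-down
    //     //rbuf.attach(pixels, width, height, -int(width * 4));   // bottom-up
    //     agg::int8u* top_row = rbuf.row_ptr(0);
    //------------------------------------------------------------------------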
}
#endif
| 0 |
D://workCode//uploadProject\awtk\3rd\agg | D://workCode//uploadProject\awtk\3rd\agg\include\agg_scanline_u.h | //----------------------------------------------------------------------------
// Anti-Grain Geometry - Version 2.4
// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
//
// Permission to copy, use, modify, sell and distribute this software
// is granted provided this copyright notice appears in all copies.
// This software is provided "as is" without express or implied
// warranty, and with no claim as to its suitability for any purpose.
//
//----------------------------------------------------------------------------
// Contact: mcseem@antigrain.com
// mcseemagg@yahoo.com
// http://www.antigrain.com
//----------------------------------------------------------------------------
//
// Adaptation for 32-bit screen coordinates (scanline32_u) has been sponsored by
// Liberty Technology Systems, Inc., visit http://lib-sys.com
//
// Liberty Technology Systems, Inc. is the provider of
// PostScript and PDF technology for software developers.
//
//----------------------------------------------------------------------------
#ifndef AGG_SCANLINE_U_INCLUDED
#define AGG_SCANLINE_U_INCLUDED
#include "agg_array.h"
namespace agg
{
//=============================================================scanline_u8
//
// Unpacked scanline container class
//
// This class is used to transfer data from a scanline rasterizer
    // to the rendering buffer. It's organized very simply. The class stores
    // information about horizontal spans to render into a pixel-map buffer.
    // Each span has a starting X, a length, and an array of bytes that determine
    // the cover values for each pixel.
    // Before using this class you should know the minimal and maximal pixel
    // coordinates of your scanline. The usage protocol is:
    // 1. reset(min_x, max_x)
    // 2. add_cell() / add_span() - accumulate the scanline.
    //    When forming one scanline the next X coordinate must always be greater
    //    than the last stored one, i.e. it works only with ordered coordinates.
    // 3. Call finalize(y) and render the scanline.
    // 4. Call reset_spans() to prepare for the next scanline
    //    (a short accumulation sketch follows this comment block).
    //
    // 5. Rendering:
//
// Scanline provides an iterator class that allows you to extract
// the spans and the cover values for each pixel. Be aware that clipping
// has not been done yet, so you should perform it yourself.
// Use scanline_u8::iterator to render spans:
//-------------------------------------------------------------------------
//
// int y = sl.y(); // Y-coordinate of the scanline
//
// ************************************
// ...Perform vertical clipping here...
// ************************************
//
// scanline_u8::const_iterator span = sl.begin();
//
    //  unsigned char* row = m_rbuf->row(y); // The address of the beginning
// // of the current row
//
// unsigned num_spans = sl.num_spans(); // Number of spans. It's guaranteed that
// // num_spans is always greater than 0.
//
// do
// {
// const scanline_u8::cover_type* covers =
// span->covers; // The array of the cover values
//
// int num_pix = span->len; // Number of pixels of the span.
// // Always greater than 0, still it's
// // better to use "int" instead of
// // "unsigned" because it's more
// // convenient for clipping
// int x = span->x;
//
// **************************************
// ...Perform horizontal clipping here...
    //  ...you have x, covers, and num_pix...
// **************************************
//
// unsigned char* dst = row + x; // Calculate the start address of the row.
// // In this case we assume a simple
// // grayscale image 1-byte per pixel.
// do
// {
    //      *dst++ = *covers++;  // Hypothetical rendering.
// }
// while(--num_pix);
//
// ++span;
// }
// while(--num_spans); // num_spans cannot be 0, so this loop is quite safe
//------------------------------------------------------------------------
//
// The question is: why should we accumulate the whole scanline when we
// could render just separate spans when they're ready?
    // That's because using the scanline is generally faster. When it consists
    // of more than one span the conditions for the processor cache are
    // better, because switching between two different areas of memory
    // (which can be very large) occurs less frequently.
//------------------------------------------------------------------------
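    //
    // A short accumulation sketch for one scanline (the cover values here are
    // hypothetical; in real code they come from a rasterizer):
    //
    //     agg::scanline_u8 sl;
    //     sl.reset(min_x, max_x);       // once per bounding box
    //     sl.add_cell(10, 128);         // single pixel, half cover
    //     sl.add_span(11, 5, 255);      // five fully covered pixels
    //     sl.finalize(y);               // ...render the scanline...
    //     sl.reset_spans();             // prepare for the next y
    //------------------------------------------------------------------------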
class scanline_u8
{
public:
typedef scanline_u8 self_type;
typedef int8u cover_type;
typedef int16 coord_type;
//--------------------------------------------------------------------
struct span
{
coord_type x;
coord_type len;
cover_type* covers;
};
typedef span* iterator;
typedef const span* const_iterator;
//--------------------------------------------------------------------
scanline_u8() :
m_min_x(0),
m_last_x(0x7FFFFFF0),
m_cur_span(0)
{}
//--------------------------------------------------------------------
void reset(int min_x, int max_x)
{
unsigned max_len = max_x - min_x + 2;
if(max_len > m_spans.size())
{
m_spans.resize(max_len);
m_covers.resize(max_len);
}
m_last_x = 0x7FFFFFF0;
m_min_x = min_x;
m_cur_span = &m_spans[0];
}
//--------------------------------------------------------------------
void add_cell(int x, unsigned cover)
{
x -= m_min_x;
m_covers[x] = (cover_type)cover;
if(x == m_last_x+1)
{
m_cur_span->len++;
}
else
{
m_cur_span++;
m_cur_span->x = (coord_type)(x + m_min_x);
m_cur_span->len = 1;
m_cur_span->covers = &m_covers[x];
}
m_last_x = x;
}
//--------------------------------------------------------------------
void add_cells(int x, unsigned len, const cover_type* covers)
{
x -= m_min_x;
memcpy(&m_covers[x], covers, len * sizeof(cover_type));
if(x == m_last_x+1)
{
m_cur_span->len += (coord_type)len;
}
else
{
m_cur_span++;
m_cur_span->x = (coord_type)(x + m_min_x);
m_cur_span->len = (coord_type)len;
m_cur_span->covers = &m_covers[x];
}
m_last_x = x + len - 1;
}
//--------------------------------------------------------------------
void add_span(int x, unsigned len, unsigned cover)
{
x -= m_min_x;
memset(&m_covers[x], cover, len);
if(x == m_last_x+1)
{
m_cur_span->len += (coord_type)len;
}
else
{
m_cur_span++;
m_cur_span->x = (coord_type)(x + m_min_x);
m_cur_span->len = (coord_type)len;
m_cur_span->covers = &m_covers[x];
}
m_last_x = x + len - 1;
}
//--------------------------------------------------------------------
void finalize(int y)
{
m_y = y;
}
//--------------------------------------------------------------------
void reset_spans()
{
m_last_x = 0x7FFFFFF0;
m_cur_span = &m_spans[0];
}
//--------------------------------------------------------------------
int y() const { return m_y; }
unsigned num_spans() const { return unsigned(m_cur_span - &m_spans[0]); }
const_iterator begin() const { return &m_spans[1]; }
iterator begin() { return &m_spans[1]; }
private:
scanline_u8(const self_type&);
const self_type& operator = (const self_type&);
private:
int m_min_x;
int m_last_x;
int m_y;
pod_array<cover_type> m_covers;
pod_array<span> m_spans;
span* m_cur_span;
};
//==========================================================scanline_u8_am
//
// The scanline container with alpha-masking
//
//------------------------------------------------------------------------
template<class AlphaMask>
class scanline_u8_am : public scanline_u8
{
public:
typedef scanline_u8 base_type;
typedef AlphaMask alpha_mask_type;
typedef base_type::cover_type cover_type;
typedef base_type::coord_type coord_type;
scanline_u8_am() : base_type(), m_alpha_mask(0) {}
scanline_u8_am(AlphaMask& am) : base_type(), m_alpha_mask(&am) {}
//--------------------------------------------------------------------
void finalize(int span_y)
{
base_type::finalize(span_y);
if(m_alpha_mask)
{
typename base_type::iterator span = base_type::begin();
unsigned count = base_type::num_spans();
do
{
m_alpha_mask->combine_hspan(span->x,
base_type::y(),
span->covers,
span->len);
++span;
}
while(--count);
}
}
private:
AlphaMask* m_alpha_mask;
};
//===========================================================scanline32_u8
class scanline32_u8
{
public:
typedef scanline32_u8 self_type;
typedef int8u cover_type;
typedef int32 coord_type;
//--------------------------------------------------------------------
struct span
{
span() {}
span(coord_type x_, coord_type len_, cover_type* covers_) :
x(x_), len(len_), covers(covers_) {}
coord_type x;
coord_type len;
cover_type* covers;
};
typedef pod_bvector<span, 4> span_array_type;
//--------------------------------------------------------------------
class const_iterator
{
public:
const_iterator(const span_array_type& spans) :
m_spans(spans),
m_span_idx(0)
{}
const span& operator*() const { return m_spans[m_span_idx]; }
const span* operator->() const { return &m_spans[m_span_idx]; }
void operator ++ () { ++m_span_idx; }
private:
const span_array_type& m_spans;
unsigned m_span_idx;
};
//--------------------------------------------------------------------
class iterator
{
public:
iterator(span_array_type& spans) :
m_spans(spans),
m_span_idx(0)
{}
span& operator*() { return m_spans[m_span_idx]; }
span* operator->() { return &m_spans[m_span_idx]; }
void operator ++ () { ++m_span_idx; }
private:
span_array_type& m_spans;
unsigned m_span_idx;
};
//--------------------------------------------------------------------
scanline32_u8() :
m_min_x(0),
m_last_x(0x7FFFFFF0),
m_covers()
{}
//--------------------------------------------------------------------
void reset(int min_x, int max_x)
{
unsigned max_len = max_x - min_x + 2;
if(max_len > m_covers.size())
{
m_covers.resize(max_len);
}
m_last_x = 0x7FFFFFF0;
m_min_x = min_x;
m_spans.remove_all();
}
//--------------------------------------------------------------------
void add_cell(int x, unsigned cover)
{
x -= m_min_x;
m_covers[x] = cover_type(cover);
if(x == m_last_x+1)
{
m_spans.last().len++;
}
else
{
m_spans.add(span(coord_type(x + m_min_x), 1, &m_covers[x]));
}
m_last_x = x;
}
//--------------------------------------------------------------------
void add_cells(int x, unsigned len, const cover_type* covers)
{
x -= m_min_x;
memcpy(&m_covers[x], covers, len * sizeof(cover_type));
if(x == m_last_x+1)
{
m_spans.last().len += coord_type(len);
}
else
{
m_spans.add(span(coord_type(x + m_min_x),
coord_type(len),
&m_covers[x]));
}
m_last_x = x + len - 1;
}
//--------------------------------------------------------------------
void add_span(int x, unsigned len, unsigned cover)
{
x -= m_min_x;
memset(&m_covers[x], cover, len);
if(x == m_last_x+1)
{
m_spans.last().len += coord_type(len);
}
else
{
m_spans.add(span(coord_type(x + m_min_x),
coord_type(len),
&m_covers[x]));
}
m_last_x = x + len - 1;
}
//--------------------------------------------------------------------
void finalize(int y)
{
m_y = y;
}
//--------------------------------------------------------------------
void reset_spans()
{
m_last_x = 0x7FFFFFF0;
m_spans.remove_all();
}
//--------------------------------------------------------------------
int y() const { return m_y; }
unsigned num_spans() const { return m_spans.size(); }
const_iterator begin() const { return const_iterator(m_spans); }
iterator begin() { return iterator(m_spans); }
private:
scanline32_u8(const self_type&);
const self_type& operator = (const self_type&);
private:
int m_min_x;
int m_last_x;
int m_y;
pod_array<cover_type> m_covers;
span_array_type m_spans;
};
//========================================================scanline32_u8_am
//
// The scanline container with alpha-masking
//
//------------------------------------------------------------------------
template<class AlphaMask>
class scanline32_u8_am : public scanline32_u8
{
public:
typedef scanline32_u8 base_type;
typedef AlphaMask alpha_mask_type;
typedef base_type::cover_type cover_type;
typedef base_type::coord_type coord_type;
scanline32_u8_am() : base_type(), m_alpha_mask(0) {}
scanline32_u8_am(AlphaMask& am) : base_type(), m_alpha_mask(&am) {}
//--------------------------------------------------------------------
void finalize(int span_y)
{
base_type::finalize(span_y);
if(m_alpha_mask)
{
typename base_type::iterator span = base_type::begin();
unsigned count = base_type::num_spans();
do
{
m_alpha_mask->combine_hspan(span->x,
base_type::y(),
span->covers,
span->len);
++span;
}
while(--count);
}
}
private:
AlphaMask* m_alpha_mask;
};
}
#endif
| 0 |
D://workCode//uploadProject\awtk\3rd\agg | D://workCode//uploadProject\awtk\3rd\agg\include\agg_shorten_path.h | //----------------------------------------------------------------------------
// Anti-Grain Geometry - Version 2.4
// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
//
// Permission to copy, use, modify, sell and distribute this software
// is granted provided this copyright notice appears in all copies.
// This software is provided "as is" without express or implied
// warranty, and with no claim as to its suitability for any purpose.
//
//----------------------------------------------------------------------------
// Contact: mcseem@antigrain.com
// mcseemagg@yahoo.com
// http://www.antigrain.com
//----------------------------------------------------------------------------
#ifndef AGG_SHORTEN_PATH_INCLUDED
#define AGG_SHORTEN_PATH_INCLUDED
#include "agg_basics.h"
#include "agg_vertex_sequence.h"
namespace agg
{
//===========================================================shorten_path
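    //
    // Shortens a vertex sequence by the distance 's' measured from its end:
    // trailing vertices whose segments fit entirely into 's' are removed and
    // the new last vertex is interpolated along the remaining segment. If
    // fewer than two vertices remain, the path is cleared.
    //------------------------------------------------------------------------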
template<class VertexSequence>
void shorten_path(VertexSequence& vs, double s, unsigned closed = 0)
{
typedef typename VertexSequence::value_type vertex_type;
if(s > 0.0 && vs.size() > 1)
{
double d;
int n = int(vs.size() - 2);
while(n)
{
d = vs[n].dist;
if(d > s) break;
vs.remove_last();
s -= d;
--n;
}
if(vs.size() < 2)
{
vs.remove_all();
}
else
{
n = vs.size() - 1;
vertex_type& prev = vs[n-1];
vertex_type& last = vs[n];
d = (prev.dist - s) / prev.dist;
double x = prev.x + (last.x - prev.x) * d;
double y = prev.y + (last.y - prev.y) * d;
last.x = x;
last.y = y;
if(!prev(last)) vs.remove_last();
vs.close(closed != 0);
}
}
}
}
#endif
| 0 |
D://workCode//uploadProject\awtk\3rd\agg | D://workCode//uploadProject\awtk\3rd\agg\include\agg_span_allocator.h | //----------------------------------------------------------------------------
// Anti-Grain Geometry - Version 2.4
// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
//
// Permission to copy, use, modify, sell and distribute this software
// is granted provided this copyright notice appears in all copies.
// This software is provided "as is" without express or implied
// warranty, and with no claim as to its suitability for any purpose.
//
//----------------------------------------------------------------------------
// Contact: mcseem@antigrain.com
// mcseemagg@yahoo.com
// http://www.antigrain.com
//----------------------------------------------------------------------------
#ifndef AGG_SPAN_ALLOCATOR_INCLUDED
#define AGG_SPAN_ALLOCATOR_INCLUDED
#include "agg_array.h"
namespace agg
{
//----------------------------------------------------------span_allocator
template<class ColorT> class span_allocator
{
public:
typedef ColorT color_type;
//--------------------------------------------------------------------
AGG_INLINE color_type* allocate(unsigned span_len)
{
if(span_len > m_span.size())
{
// To reduce the number of reallocs we align the
// span_len to 256 color elements.
// Well, I just like this number and it looks reasonable.
//-----------------------
m_span.resize(((span_len + 255) >> 8) << 8);
}
return &m_span[0];
}
AGG_INLINE color_type* span() { return &m_span[0]; }
AGG_INLINE unsigned max_span_len() const { return m_span.size(); }
private:
pod_array<color_type> m_span;
};
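    //------------------------------------------------------------------------
    // Typical use (a sketch; rgba8 is assumed, declared in agg_color_rgba.h):
    // a span generator asks the allocator for a scratch buffer once per span
    // and fills it with colors before handing it to blend_color_hspan().
    //
    //     agg::span_allocator<agg::rgba8> sa;
    //     agg::rgba8* colors = sa.allocate(len);   // grows in 256-element steps
    //     // ...fill colors[0..len-1]...
    //------------------------------------------------------------------------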
}
#endif
| 0 |
D://workCode//uploadProject\awtk\3rd\agg | D://workCode//uploadProject\awtk\3rd\agg\include\agg_span_image_filter.h | //----------------------------------------------------------------------------
// Anti-Grain Geometry - Version 2.4
// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
//
// Permission to copy, use, modify, sell and distribute this software
// is granted provided this copyright notice appears in all copies.
// This software is provided "as is" without express or implied
// warranty, and with no claim as to its suitability for any purpose.
//
//----------------------------------------------------------------------------
// Contact: mcseem@antigrain.com
// mcseemagg@yahoo.com
// http://www.antigrain.com
//----------------------------------------------------------------------------
//
// Image transformations with filtering. Span generator base class
//
//----------------------------------------------------------------------------
#ifndef AGG_SPAN_IMAGE_FILTER_INCLUDED
#define AGG_SPAN_IMAGE_FILTER_INCLUDED
#include "agg_basics.h"
#include "agg_image_filters.h"
#include "agg_span_interpolator_linear.h"
namespace agg
{
//-------------------------------------------------------span_image_filter
template<class Source, class Interpolator> class span_image_filter
{
public:
typedef Source source_type;
typedef Interpolator interpolator_type;
//--------------------------------------------------------------------
span_image_filter() {}
span_image_filter(source_type& src,
interpolator_type& interpolator,
image_filter_lut* filter) :
m_src(&src),
m_interpolator(&interpolator),
m_filter(filter),
m_dx_dbl(0.5),
m_dy_dbl(0.5),
m_dx_int(image_subpixel_scale / 2),
m_dy_int(image_subpixel_scale / 2)
{}
void attach(source_type& v) { m_src = &v; }
//--------------------------------------------------------------------
source_type& source() { return *m_src; }
const source_type& source() const { return *m_src; }
const image_filter_lut& filter() const { return *m_filter; }
int filter_dx_int() const { return m_dx_int; }
int filter_dy_int() const { return m_dy_int; }
double filter_dx_dbl() const { return m_dx_dbl; }
double filter_dy_dbl() const { return m_dy_dbl; }
//--------------------------------------------------------------------
void interpolator(interpolator_type& v) { m_interpolator = &v; }
void filter(image_filter_lut& v) { m_filter = &v; }
void filter_offset(double dx, double dy)
{
m_dx_dbl = dx;
m_dy_dbl = dy;
m_dx_int = iround(dx * image_subpixel_scale);
m_dy_int = iround(dy * image_subpixel_scale);
}
void filter_offset(double d) { filter_offset(d, d); }
//--------------------------------------------------------------------
interpolator_type& interpolator() { return *m_interpolator; }
//--------------------------------------------------------------------
void prepare() {}
//--------------------------------------------------------------------
private:
source_type* m_src;
interpolator_type* m_interpolator;
image_filter_lut* m_filter;
double m_dx_dbl;
double m_dy_dbl;
unsigned m_dx_int;
unsigned m_dy_int;
};
//==============================================span_image_resample_affine
template<class Source>
class span_image_resample_affine :
public span_image_filter<Source, span_interpolator_linear<trans_affine> >
{
public:
typedef Source source_type;
typedef span_interpolator_linear<trans_affine> interpolator_type;
typedef span_image_filter<source_type, interpolator_type> base_type;
//--------------------------------------------------------------------
span_image_resample_affine() :
m_scale_limit(200.0),
m_blur_x(1.0),
m_blur_y(1.0)
{}
//--------------------------------------------------------------------
span_image_resample_affine(source_type& src,
interpolator_type& inter,
image_filter_lut& filter) :
base_type(src, inter, &filter),
m_scale_limit(200.0),
m_blur_x(1.0),
m_blur_y(1.0)
{}
//--------------------------------------------------------------------
int scale_limit() const { return uround(m_scale_limit); }
void scale_limit(int v) { m_scale_limit = v; }
//--------------------------------------------------------------------
double blur_x() const { return m_blur_x; }
double blur_y() const { return m_blur_y; }
void blur_x(double v) { m_blur_x = v; }
void blur_y(double v) { m_blur_y = v; }
void blur(double v) { m_blur_x = m_blur_y = v; }
//--------------------------------------------------------------------
void prepare()
{
double scale_x;
double scale_y;
base_type::interpolator().transformer().scaling_abs(&scale_x, &scale_y);
if(scale_x * scale_y > m_scale_limit)
{
scale_x = scale_x * m_scale_limit / (scale_x * scale_y);
scale_y = scale_y * m_scale_limit / (scale_x * scale_y);
}
if(scale_x < 1) scale_x = 1;
if(scale_y < 1) scale_y = 1;
if(scale_x > m_scale_limit) scale_x = m_scale_limit;
if(scale_y > m_scale_limit) scale_y = m_scale_limit;
scale_x *= m_blur_x;
scale_y *= m_blur_y;
if(scale_x < 1) scale_x = 1;
if(scale_y < 1) scale_y = 1;
m_rx = uround( scale_x * double(image_subpixel_scale));
m_rx_inv = uround(1.0/scale_x * double(image_subpixel_scale));
m_ry = uround( scale_y * double(image_subpixel_scale));
m_ry_inv = uround(1.0/scale_y * double(image_subpixel_scale));
}
protected:
int m_rx;
int m_ry;
int m_rx_inv;
int m_ry_inv;
private:
double m_scale_limit;
double m_blur_x;
double m_blur_y;
};
//=====================================================span_image_resample
template<class Source, class Interpolator>
class span_image_resample :
public span_image_filter<Source, Interpolator>
{
public:
typedef Source source_type;
typedef Interpolator interpolator_type;
typedef span_image_filter<source_type, interpolator_type> base_type;
//--------------------------------------------------------------------
span_image_resample() :
m_scale_limit(20),
m_blur_x(image_subpixel_scale),
m_blur_y(image_subpixel_scale)
{}
//--------------------------------------------------------------------
span_image_resample(source_type& src,
interpolator_type& inter,
image_filter_lut& filter) :
base_type(src, inter, &filter),
m_scale_limit(20),
m_blur_x(image_subpixel_scale),
m_blur_y(image_subpixel_scale)
{}
//--------------------------------------------------------------------
int scale_limit() const { return m_scale_limit; }
void scale_limit(int v) { m_scale_limit = v; }
//--------------------------------------------------------------------
double blur_x() const { return double(m_blur_x) / double(image_subpixel_scale); }
double blur_y() const { return double(m_blur_y) / double(image_subpixel_scale); }
void blur_x(double v) { m_blur_x = uround(v * double(image_subpixel_scale)); }
void blur_y(double v) { m_blur_y = uround(v * double(image_subpixel_scale)); }
void blur(double v) { m_blur_x =
m_blur_y = uround(v * double(image_subpixel_scale)); }
protected:
AGG_INLINE void adjust_scale(int* rx, int* ry)
{
if(*rx < image_subpixel_scale) *rx = image_subpixel_scale;
if(*ry < image_subpixel_scale) *ry = image_subpixel_scale;
if(*rx > image_subpixel_scale * m_scale_limit)
{
*rx = image_subpixel_scale * m_scale_limit;
}
if(*ry > image_subpixel_scale * m_scale_limit)
{
*ry = image_subpixel_scale * m_scale_limit;
}
*rx = (*rx * m_blur_x) >> image_subpixel_shift;
*ry = (*ry * m_blur_y) >> image_subpixel_shift;
if(*rx < image_subpixel_scale) *rx = image_subpixel_scale;
if(*ry < image_subpixel_scale) *ry = image_subpixel_scale;
}
int m_scale_limit;
int m_blur_x;
int m_blur_y;
};
}
#endif
| 0 |
D://workCode//uploadProject\awtk\3rd\agg | D://workCode//uploadProject\awtk\3rd\agg\include\agg_span_image_filter_rgb.h | //----------------------------------------------------------------------------
// Anti-Grain Geometry - Version 2.4
// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
//
// Permission to copy, use, modify, sell and distribute this software
// is granted provided this copyright notice appears in all copies.
// This software is provided "as is" without express or implied
// warranty, and with no claim as to its suitability for any purpose.
//
//----------------------------------------------------------------------------
// Contact: mcseem@antigrain.com
// mcseemagg@yahoo.com
// http://www.antigrain.com
//----------------------------------------------------------------------------
//
// Adaptation for high precision colors has been sponsored by
// Liberty Technology Systems, Inc., visit http://lib-sys.com
//
// Liberty Technology Systems, Inc. is the provider of
// PostScript and PDF technology for software developers.
//
//----------------------------------------------------------------------------
#ifndef AGG_SPAN_IMAGE_FILTER_RGB_INCLUDED
#define AGG_SPAN_IMAGE_FILTER_RGB_INCLUDED
#include "agg_basics.h"
#include "agg_color_rgba.h"
#include "agg_span_image_filter.h"
namespace agg
{
//===============================================span_image_filter_rgb_nn
template<class Source, class Interpolator>
class span_image_filter_rgb_nn :
public span_image_filter<Source, Interpolator>
{
public:
typedef Source source_type;
typedef typename source_type::color_type color_type;
typedef typename source_type::order_type order_type;
typedef Interpolator interpolator_type;
typedef span_image_filter<source_type, interpolator_type> base_type;
typedef typename color_type::value_type value_type;
typedef typename color_type::calc_type calc_type;
typedef typename color_type::long_type long_type;
//--------------------------------------------------------------------
span_image_filter_rgb_nn() {}
span_image_filter_rgb_nn(source_type& src,
interpolator_type& inter) :
base_type(src, inter, 0)
{}
//--------------------------------------------------------------------
void generate(color_type* span, int x, int y, unsigned len)
{
base_type::interpolator().begin(x + base_type::filter_dx_dbl(),
y + base_type::filter_dy_dbl(), len);
do
{
base_type::interpolator().coordinates(&x, &y);
const value_type* fg_ptr = (const value_type*)
base_type::source().span(x >> image_subpixel_shift,
y >> image_subpixel_shift,
1);
span->r = fg_ptr[order_type::R];
span->g = fg_ptr[order_type::G];
span->b = fg_ptr[order_type::B];
span->a = color_type::full_value();
++span;
++base_type::interpolator();
} while(--len);
}
};
//==========================================span_image_filter_rgb_bilinear
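    //
    // Bilinear filtering: every output pixel is a weighted sum of the 2x2 block
    // of source texels around the subpixel position. With dx = x_hr & mask and
    // dy = y_hr & mask the four weights are
    //     (scale - dx) * (scale - dy),  dx * (scale - dy),
    //     (scale - dx) * dy,            dx * dy,
    // where scale = image_subpixel_scale; the accumulated sums are then
    // downshifted by 2 * image_subpixel_shift to normalize them.
    //------------------------------------------------------------------------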
template<class Source, class Interpolator>
class span_image_filter_rgb_bilinear :
public span_image_filter<Source, Interpolator>
{
public:
typedef Source source_type;
typedef typename source_type::color_type color_type;
typedef typename source_type::order_type order_type;
typedef Interpolator interpolator_type;
typedef span_image_filter<source_type, interpolator_type> base_type;
typedef typename color_type::value_type value_type;
typedef typename color_type::calc_type calc_type;
typedef typename color_type::long_type long_type;
//--------------------------------------------------------------------
span_image_filter_rgb_bilinear() {}
span_image_filter_rgb_bilinear(source_type& src,
interpolator_type& inter) :
base_type(src, inter, 0)
{}
//--------------------------------------------------------------------
void generate(color_type* span, int x, int y, unsigned len)
{
base_type::interpolator().begin(x + base_type::filter_dx_dbl(),
y + base_type::filter_dy_dbl(), len);
long_type fg[3];
const value_type *fg_ptr;
do
{
int x_hr;
int y_hr;
base_type::interpolator().coordinates(&x_hr, &y_hr);
x_hr -= base_type::filter_dx_int();
y_hr -= base_type::filter_dy_int();
int x_lr = x_hr >> image_subpixel_shift;
int y_lr = y_hr >> image_subpixel_shift;
unsigned weight;
fg[0] = fg[1] = fg[2] = 0;
x_hr &= image_subpixel_mask;
y_hr &= image_subpixel_mask;
fg_ptr = (const value_type*)base_type::source().span(x_lr, y_lr, 2);
weight = (image_subpixel_scale - x_hr) *
(image_subpixel_scale - y_hr);
fg[0] += weight * *fg_ptr++;
fg[1] += weight * *fg_ptr++;
fg[2] += weight * *fg_ptr;
fg_ptr = (const value_type*)base_type::source().next_x();
weight = x_hr * (image_subpixel_scale - y_hr);
fg[0] += weight * *fg_ptr++;
fg[1] += weight * *fg_ptr++;
fg[2] += weight * *fg_ptr;
fg_ptr = (const value_type*)base_type::source().next_y();
weight = (image_subpixel_scale - x_hr) * y_hr;
fg[0] += weight * *fg_ptr++;
fg[1] += weight * *fg_ptr++;
fg[2] += weight * *fg_ptr;
fg_ptr = (const value_type*)base_type::source().next_x();
weight = x_hr * y_hr;
fg[0] += weight * *fg_ptr++;
fg[1] += weight * *fg_ptr++;
fg[2] += weight * *fg_ptr;
span->r = color_type::downshift(fg[order_type::R], image_subpixel_shift * 2);
span->g = color_type::downshift(fg[order_type::G], image_subpixel_shift * 2);
span->b = color_type::downshift(fg[order_type::B], image_subpixel_shift * 2);
span->a = color_type::full_value();
++span;
++base_type::interpolator();
} while(--len);
}
};
//=====================================span_image_filter_rgb_bilinear_clip
template<class Source, class Interpolator>
class span_image_filter_rgb_bilinear_clip :
public span_image_filter<Source, Interpolator>
{
public:
typedef Source source_type;
typedef typename source_type::color_type color_type;
typedef typename source_type::order_type order_type;
typedef Interpolator interpolator_type;
typedef span_image_filter<source_type, interpolator_type> base_type;
typedef typename color_type::value_type value_type;
typedef typename color_type::calc_type calc_type;
typedef typename color_type::long_type long_type;
//--------------------------------------------------------------------
span_image_filter_rgb_bilinear_clip() {}
span_image_filter_rgb_bilinear_clip(source_type& src,
const color_type& back_color,
interpolator_type& inter) :
base_type(src, inter, 0),
m_back_color(back_color)
{}
const color_type& background_color() const { return m_back_color; }
void background_color(const color_type& v) { m_back_color = v; }
//--------------------------------------------------------------------
void generate(color_type* span, int x, int y, unsigned len)
{
base_type::interpolator().begin(x + base_type::filter_dx_dbl(),
y + base_type::filter_dy_dbl(), len);
long_type fg[3];
long_type src_alpha;
value_type back_r = m_back_color.r;
value_type back_g = m_back_color.g;
value_type back_b = m_back_color.b;
value_type back_a = m_back_color.a;
const value_type *fg_ptr;
int maxx = base_type::source().width() - 1;
int maxy = base_type::source().height() - 1;
do
{
int x_hr;
int y_hr;
base_type::interpolator().coordinates(&x_hr, &y_hr);
x_hr -= base_type::filter_dx_int();
y_hr -= base_type::filter_dy_int();
int x_lr = x_hr >> image_subpixel_shift;
int y_lr = y_hr >> image_subpixel_shift;
unsigned weight;
if(x_lr >= 0 && y_lr >= 0 &&
x_lr < maxx && y_lr < maxy)
{
fg[0] = fg[1] = fg[2] = 0;
x_hr &= image_subpixel_mask;
y_hr &= image_subpixel_mask;
fg_ptr = (const value_type*)
base_type::source().row_ptr(y_lr) + x_lr + x_lr + x_lr;
weight = (image_subpixel_scale - x_hr) *
(image_subpixel_scale - y_hr);
fg[0] += weight * *fg_ptr++;
fg[1] += weight * *fg_ptr++;
fg[2] += weight * *fg_ptr++;
weight = x_hr * (image_subpixel_scale - y_hr);
fg[0] += weight * *fg_ptr++;
fg[1] += weight * *fg_ptr++;
fg[2] += weight * *fg_ptr++;
++y_lr;
fg_ptr = (const value_type*)
base_type::source().row_ptr(y_lr) + x_lr + x_lr + x_lr;
weight = (image_subpixel_scale - x_hr) * y_hr;
fg[0] += weight * *fg_ptr++;
fg[1] += weight * *fg_ptr++;
fg[2] += weight * *fg_ptr++;
weight = x_hr * y_hr;
fg[0] += weight * *fg_ptr++;
fg[1] += weight * *fg_ptr++;
fg[2] += weight * *fg_ptr++;
fg[0] = color_type::downshift(fg[0], image_subpixel_shift * 2);
fg[1] = color_type::downshift(fg[1], image_subpixel_shift * 2);
fg[2] = color_type::downshift(fg[2], image_subpixel_shift * 2);
src_alpha = color_type::full_value();
}
else
{
if(x_lr < -1 || y_lr < -1 ||
x_lr > maxx || y_lr > maxy)
{
fg[order_type::R] = back_r;
fg[order_type::G] = back_g;
fg[order_type::B] = back_b;
src_alpha = back_a;
}
else
{
fg[0] = fg[1] = fg[2] = src_alpha = 0;
x_hr &= image_subpixel_mask;
y_hr &= image_subpixel_mask;
weight = (image_subpixel_scale - x_hr) *
(image_subpixel_scale - y_hr);
if(x_lr >= 0 && y_lr >= 0 &&
x_lr <= maxx && y_lr <= maxy)
{
fg_ptr = (const value_type*)
base_type::source().row_ptr(y_lr) + x_lr + x_lr + x_lr;
fg[0] += weight * *fg_ptr++;
fg[1] += weight * *fg_ptr++;
fg[2] += weight * *fg_ptr++;
src_alpha += weight * color_type::full_value();
}
else
{
fg[order_type::R] += back_r * weight;
fg[order_type::G] += back_g * weight;
fg[order_type::B] += back_b * weight;
src_alpha += back_a * weight;
}
x_lr++;
weight = x_hr * (image_subpixel_scale - y_hr);
if(x_lr >= 0 && y_lr >= 0 &&
x_lr <= maxx && y_lr <= maxy)
{
fg_ptr = (const value_type*)
base_type::source().row_ptr(y_lr) + x_lr + x_lr + x_lr;
fg[0] += weight * *fg_ptr++;
fg[1] += weight * *fg_ptr++;
fg[2] += weight * *fg_ptr++;
src_alpha += weight * color_type::full_value();
}
else
{
fg[order_type::R] += back_r * weight;
fg[order_type::G] += back_g * weight;
fg[order_type::B] += back_b * weight;
src_alpha += back_a * weight;
}
x_lr--;
y_lr++;
weight = (image_subpixel_scale - x_hr) * y_hr;
if(x_lr >= 0 && y_lr >= 0 &&
x_lr <= maxx && y_lr <= maxy)
{
fg_ptr = (const value_type*)
base_type::source().row_ptr(y_lr) + x_lr + x_lr + x_lr;
fg[0] += weight * *fg_ptr++;
fg[1] += weight * *fg_ptr++;
fg[2] += weight * *fg_ptr++;
src_alpha += weight * color_type::full_value();
}
else
{
fg[order_type::R] += back_r * weight;
fg[order_type::G] += back_g * weight;
fg[order_type::B] += back_b * weight;
src_alpha += back_a * weight;
}
x_lr++;
weight = x_hr * y_hr;
if(x_lr >= 0 && y_lr >= 0 &&
x_lr <= maxx && y_lr <= maxy)
{
fg_ptr = (const value_type*)
base_type::source().row_ptr(y_lr) + x_lr + x_lr + x_lr;
fg[0] += weight * *fg_ptr++;
fg[1] += weight * *fg_ptr++;
fg[2] += weight * *fg_ptr++;
src_alpha += weight * color_type::full_value();
}
else
{
fg[order_type::R] += back_r * weight;
fg[order_type::G] += back_g * weight;
fg[order_type::B] += back_b * weight;
src_alpha += back_a * weight;
}
fg[0] = color_type::downshift(fg[0], image_subpixel_shift * 2);
fg[1] = color_type::downshift(fg[1], image_subpixel_shift * 2);
fg[2] = color_type::downshift(fg[2], image_subpixel_shift * 2);
src_alpha = color_type::downshift(src_alpha, image_subpixel_shift * 2);
}
}
span->r = (value_type)fg[order_type::R];
span->g = (value_type)fg[order_type::G];
span->b = (value_type)fg[order_type::B];
span->a = (value_type)src_alpha;
++span;
++base_type::interpolator();
} while(--len);
}
private:
color_type m_back_color;
};
//===============================================span_image_filter_rgb_2x2
template<class Source, class Interpolator>
class span_image_filter_rgb_2x2 :
public span_image_filter<Source, Interpolator>
{
public:
typedef Source source_type;
typedef typename source_type::color_type color_type;
typedef typename source_type::order_type order_type;
typedef Interpolator interpolator_type;
typedef span_image_filter<source_type, interpolator_type> base_type;
typedef typename color_type::value_type value_type;
typedef typename color_type::calc_type calc_type;
typedef typename color_type::long_type long_type;
//--------------------------------------------------------------------
span_image_filter_rgb_2x2() {}
span_image_filter_rgb_2x2(source_type& src,
interpolator_type& inter,
image_filter_lut& filter) :
base_type(src, inter, &filter)
{}
//--------------------------------------------------------------------
void generate(color_type* span, int x, int y, unsigned len)
{
base_type::interpolator().begin(x + base_type::filter_dx_dbl(),
y + base_type::filter_dy_dbl(), len);
long_type fg[3];
const value_type *fg_ptr;
const int16* weight_array = base_type::filter().weight_array() +
((base_type::filter().diameter()/2 - 1) <<
image_subpixel_shift);
do
{
int x_hr;
int y_hr;
base_type::interpolator().coordinates(&x_hr, &y_hr);
x_hr -= base_type::filter_dx_int();
y_hr -= base_type::filter_dy_int();
int x_lr = x_hr >> image_subpixel_shift;
int y_lr = y_hr >> image_subpixel_shift;
unsigned weight;
fg[0] = fg[1] = fg[2] = 0;
x_hr &= image_subpixel_mask;
y_hr &= image_subpixel_mask;
fg_ptr = (const value_type*)base_type::source().span(x_lr, y_lr, 2);
weight = (weight_array[x_hr + image_subpixel_scale] *
weight_array[y_hr + image_subpixel_scale] +
image_filter_scale / 2) >>
image_filter_shift;
fg[0] += weight * *fg_ptr++;
fg[1] += weight * *fg_ptr++;
fg[2] += weight * *fg_ptr;
fg_ptr = (const value_type*)base_type::source().next_x();
weight = (weight_array[x_hr] *
weight_array[y_hr + image_subpixel_scale] +
image_filter_scale / 2) >>
image_filter_shift;
fg[0] += weight * *fg_ptr++;
fg[1] += weight * *fg_ptr++;
fg[2] += weight * *fg_ptr;
fg_ptr = (const value_type*)base_type::source().next_y();
weight = (weight_array[x_hr + image_subpixel_scale] *
weight_array[y_hr] +
image_filter_scale / 2) >>
image_filter_shift;
fg[0] += weight * *fg_ptr++;
fg[1] += weight * *fg_ptr++;
fg[2] += weight * *fg_ptr;
fg_ptr = (const value_type*)base_type::source().next_x();
weight = (weight_array[x_hr] *
weight_array[y_hr] +
image_filter_scale / 2) >>
image_filter_shift;
fg[0] += weight * *fg_ptr++;
fg[1] += weight * *fg_ptr++;
fg[2] += weight * *fg_ptr;
fg[0] = color_type::downshift(fg[0], image_filter_shift);
fg[1] = color_type::downshift(fg[1], image_filter_shift);
fg[2] = color_type::downshift(fg[2], image_filter_shift);
if(fg[order_type::R] > color_type::full_value()) fg[order_type::R] = color_type::full_value();
if(fg[order_type::G] > color_type::full_value()) fg[order_type::G] = color_type::full_value();
if(fg[order_type::B] > color_type::full_value()) fg[order_type::B] = color_type::full_value();
span->r = (value_type)fg[order_type::R];
span->g = (value_type)fg[order_type::G];
span->b = (value_type)fg[order_type::B];
span->a = color_type::full_value();
++span;
++base_type::interpolator();
} while(--len);
}
};
//===================================================span_image_filter_rgb
template<class Source, class Interpolator>
class span_image_filter_rgb :
public span_image_filter<Source, Interpolator>
{
public:
typedef Source source_type;
typedef typename source_type::color_type color_type;
typedef typename source_type::order_type order_type;
typedef Interpolator interpolator_type;
typedef span_image_filter<source_type, interpolator_type> base_type;
typedef typename color_type::value_type value_type;
typedef typename color_type::calc_type calc_type;
typedef typename color_type::long_type long_type;
//--------------------------------------------------------------------
span_image_filter_rgb() {}
span_image_filter_rgb(source_type& src,
interpolator_type& inter,
image_filter_lut& filter) :
base_type(src, inter, &filter)
{}
//--------------------------------------------------------------------
void generate(color_type* span, int x, int y, unsigned len)
{
base_type::interpolator().begin(x + base_type::filter_dx_dbl(),
y + base_type::filter_dy_dbl(), len);
long_type fg[3];
const value_type *fg_ptr;
unsigned diameter = base_type::filter().diameter();
int start = base_type::filter().start();
const int16* weight_array = base_type::filter().weight_array();
int x_count;
int weight_y;
do
{
base_type::interpolator().coordinates(&x, &y);
x -= base_type::filter_dx_int();
y -= base_type::filter_dy_int();
int x_hr = x;
int y_hr = y;
int x_lr = x_hr >> image_subpixel_shift;
int y_lr = y_hr >> image_subpixel_shift;
fg[0] = fg[1] = fg[2] = 0;
int x_fract = x_hr & image_subpixel_mask;
unsigned y_count = diameter;
y_hr = image_subpixel_mask - (y_hr & image_subpixel_mask);
fg_ptr = (const value_type*)base_type::source().span(x_lr + start,
y_lr + start,
diameter);
for(;;)
{
x_count = diameter;
weight_y = weight_array[y_hr];
x_hr = image_subpixel_mask - x_fract;
for(;;)
{
int weight = (weight_y * weight_array[x_hr] +
image_filter_scale / 2) >>
image_filter_shift;
fg[0] += weight * *fg_ptr++;
fg[1] += weight * *fg_ptr++;
fg[2] += weight * *fg_ptr;
if(--x_count == 0) break;
x_hr += image_subpixel_scale;
fg_ptr = (const value_type*)base_type::source().next_x();
}
if(--y_count == 0) break;
y_hr += image_subpixel_scale;
fg_ptr = (const value_type*)base_type::source().next_y();
}
fg[0] = color_type::downshift(fg[0], image_filter_shift);
fg[1] = color_type::downshift(fg[1], image_filter_shift);
fg[2] = color_type::downshift(fg[2], image_filter_shift);
if(fg[0] < 0) fg[0] = 0;
if(fg[1] < 0) fg[1] = 0;
if(fg[2] < 0) fg[2] = 0;
if(fg[order_type::R] > color_type::full_value()) fg[order_type::R] = color_type::full_value();
if(fg[order_type::G] > color_type::full_value()) fg[order_type::G] = color_type::full_value();
if(fg[order_type::B] > color_type::full_value()) fg[order_type::B] = color_type::full_value();
span->r = (value_type)fg[order_type::R];
span->g = (value_type)fg[order_type::G];
span->b = (value_type)fg[order_type::B];
span->a = color_type::full_value();
++span;
++base_type::interpolator();
} while(--len);
}
};
//==========================================span_image_resample_rgb_affine
template<class Source>
class span_image_resample_rgb_affine :
public span_image_resample_affine<Source>
{
public:
typedef Source source_type;
typedef typename source_type::color_type color_type;
typedef typename source_type::order_type order_type;
typedef span_image_resample_affine<source_type> base_type;
typedef typename base_type::interpolator_type interpolator_type;
typedef typename color_type::value_type value_type;
typedef typename color_type::long_type long_type;
enum base_scale_e
{
downscale_shift = image_filter_shift
};
//--------------------------------------------------------------------
span_image_resample_rgb_affine() {}
span_image_resample_rgb_affine(source_type& src,
interpolator_type& inter,
image_filter_lut& filter) :
base_type(src, inter, filter)
{}
//--------------------------------------------------------------------
void generate(color_type* span, int x, int y, unsigned len)
{
base_type::interpolator().begin(x + base_type::filter_dx_dbl(),
y + base_type::filter_dy_dbl(), len);
long_type fg[3];
int diameter = base_type::filter().diameter();
int filter_scale = diameter << image_subpixel_shift;
int radius_x = (diameter * base_type::m_rx) >> 1;
int radius_y = (diameter * base_type::m_ry) >> 1;
int len_x_lr =
(diameter * base_type::m_rx + image_subpixel_mask) >>
image_subpixel_shift;
const int16* weight_array = base_type::filter().weight_array();
do
{
base_type::interpolator().coordinates(&x, &y);
x += base_type::filter_dx_int() - radius_x;
y += base_type::filter_dy_int() - radius_y;
fg[0] = fg[1] = fg[2] = 0;
int y_lr = y >> image_subpixel_shift;
int y_hr = ((image_subpixel_mask - (y & image_subpixel_mask)) *
base_type::m_ry_inv) >>
image_subpixel_shift;
int total_weight = 0;
int x_lr = x >> image_subpixel_shift;
int x_hr = ((image_subpixel_mask - (x & image_subpixel_mask)) *
base_type::m_rx_inv) >>
image_subpixel_shift;
int x_hr2 = x_hr;
const value_type* fg_ptr =
(const value_type*)base_type::source().span(x_lr, y_lr, len_x_lr);
for(;;)
{
int weight_y = weight_array[y_hr];
x_hr = x_hr2;
for(;;)
{
int weight = (weight_y * weight_array[x_hr] +
image_filter_scale / 2) >>
downscale_shift;
fg[0] += *fg_ptr++ * weight;
fg[1] += *fg_ptr++ * weight;
fg[2] += *fg_ptr * weight;
total_weight += weight;
x_hr += base_type::m_rx_inv;
if(x_hr >= filter_scale) break;
fg_ptr = (const value_type*)base_type::source().next_x();
}
y_hr += base_type::m_ry_inv;
if(y_hr >= filter_scale) break;
fg_ptr = (const value_type*)base_type::source().next_y();
}
fg[0] /= total_weight;
fg[1] /= total_weight;
fg[2] /= total_weight;
if(fg[0] < 0) fg[0] = 0;
if(fg[1] < 0) fg[1] = 0;
if(fg[2] < 0) fg[2] = 0;
if(fg[order_type::R] > color_type::full_value()) fg[order_type::R] = color_type::full_value();
if(fg[order_type::G] > color_type::full_value()) fg[order_type::G] = color_type::full_value();
if(fg[order_type::B] > color_type::full_value()) fg[order_type::B] = color_type::full_value();
span->r = (value_type)fg[order_type::R];
span->g = (value_type)fg[order_type::G];
span->b = (value_type)fg[order_type::B];
span->a = color_type::full_value();
++span;
++base_type::interpolator();
} while(--len);
}
};
//=================================================span_image_resample_rgb
template<class Source, class Interpolator>
class span_image_resample_rgb :
public span_image_resample<Source, Interpolator>
{
public:
typedef Source source_type;
typedef typename source_type::color_type color_type;
typedef typename source_type::order_type order_type;
typedef Interpolator interpolator_type;
typedef span_image_resample<source_type, interpolator_type> base_type;
typedef typename color_type::value_type value_type;
typedef typename color_type::long_type long_type;
enum base_scale_e
{
downscale_shift = image_filter_shift
};
//--------------------------------------------------------------------
span_image_resample_rgb() {}
span_image_resample_rgb(source_type& src,
interpolator_type& inter,
image_filter_lut& filter) :
base_type(src, inter, filter)
{}
//--------------------------------------------------------------------
void generate(color_type* span, int x, int y, unsigned len)
{
base_type::interpolator().begin(x + base_type::filter_dx_dbl(),
y + base_type::filter_dy_dbl(), len);
long_type fg[3];
int diameter = base_type::filter().diameter();
int filter_scale = diameter << image_subpixel_shift;
const int16* weight_array = base_type::filter().weight_array();
do
{
int rx;
int ry;
int rx_inv = image_subpixel_scale;
int ry_inv = image_subpixel_scale;
base_type::interpolator().coordinates(&x, &y);
base_type::interpolator().local_scale(&rx, &ry);
base_type::adjust_scale(&rx, &ry);
rx_inv = image_subpixel_scale * image_subpixel_scale / rx;
ry_inv = image_subpixel_scale * image_subpixel_scale / ry;
int radius_x = (diameter * rx) >> 1;
int radius_y = (diameter * ry) >> 1;
int len_x_lr =
(diameter * rx + image_subpixel_mask) >>
image_subpixel_shift;
x += base_type::filter_dx_int() - radius_x;
y += base_type::filter_dy_int() - radius_y;
fg[0] = fg[1] = fg[2] = 0;
int y_lr = y >> image_subpixel_shift;
int y_hr = ((image_subpixel_mask - (y & image_subpixel_mask)) *
ry_inv) >>
image_subpixel_shift;
int total_weight = 0;
int x_lr = x >> image_subpixel_shift;
int x_hr = ((image_subpixel_mask - (x & image_subpixel_mask)) *
rx_inv) >>
image_subpixel_shift;
int x_hr2 = x_hr;
const value_type* fg_ptr =
(const value_type*)base_type::source().span(x_lr, y_lr, len_x_lr);
for(;;)
{
int weight_y = weight_array[y_hr];
x_hr = x_hr2;
for(;;)
{
int weight = (weight_y * weight_array[x_hr] +
image_filter_scale / 2) >>
downscale_shift;
fg[0] += *fg_ptr++ * weight;
fg[1] += *fg_ptr++ * weight;
fg[2] += *fg_ptr * weight;
total_weight += weight;
x_hr += rx_inv;
if(x_hr >= filter_scale) break;
fg_ptr = (const value_type*)base_type::source().next_x();
}
y_hr += ry_inv;
if(y_hr >= filter_scale) break;
fg_ptr = (const value_type*)base_type::source().next_y();
}
fg[0] /= total_weight;
fg[1] /= total_weight;
fg[2] /= total_weight;
if(fg[0] < 0) fg[0] = 0;
if(fg[1] < 0) fg[1] = 0;
if(fg[2] < 0) fg[2] = 0;
if(fg[order_type::R] > color_type::full_value()) fg[order_type::R] = color_type::full_value();
if(fg[order_type::G] > color_type::full_value()) fg[order_type::G] = color_type::full_value();
if(fg[order_type::B] > color_type::full_value()) fg[order_type::B] = color_type::full_value();
span->r = (value_type)fg[order_type::R];
span->g = (value_type)fg[order_type::G];
span->b = (value_type)fg[order_type::B];
span->a = color_type::full_value();
++span;
++base_type::interpolator();
} while(--len);
}
};
}
#endif
| 0 |
D://workCode//uploadProject\awtk\3rd\agg | D://workCode//uploadProject\awtk\3rd\agg\include\agg_span_image_filter_rgba.h | //----------------------------------------------------------------------------
// Anti-Grain Geometry - Version 2.4
// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
//
// Permission to copy, use, modify, sell and distribute this software
// is granted provided this copyright notice appears in all copies.
// This software is provided "as is" without express or implied
// warranty, and with no claim as to its suitability for any purpose.
//
//----------------------------------------------------------------------------
// Contact: mcseem@antigrain.com
// mcseemagg@yahoo.com
// http://www.antigrain.com
//----------------------------------------------------------------------------
//
// Adaptation for high precision colors has been sponsored by
// Liberty Technology Systems, Inc., visit http://lib-sys.com
//
// Liberty Technology Systems, Inc. is the provider of
// PostScript and PDF technology for software developers.
//
//----------------------------------------------------------------------------
#ifndef AGG_SPAN_IMAGE_FILTER_RGBA_INCLUDED
#define AGG_SPAN_IMAGE_FILTER_RGBA_INCLUDED
#include "agg_basics.h"
#include "agg_color_rgba.h"
#include "agg_span_image_filter.h"
namespace agg
{
//==============================================span_image_filter_rgba_nn
template<class Source, class Interpolator>
class span_image_filter_rgba_nn :
public span_image_filter<Source, Interpolator>
{
public:
typedef Source source_type;
typedef typename source_type::color_type color_type;
typedef typename source_type::order_type order_type;
typedef Interpolator interpolator_type;
typedef span_image_filter<source_type, interpolator_type> base_type;
typedef typename color_type::value_type value_type;
typedef typename color_type::calc_type calc_type;
typedef typename color_type::long_type long_type;
//--------------------------------------------------------------------
span_image_filter_rgba_nn() {}
span_image_filter_rgba_nn(source_type& src,
interpolator_type& inter) :
base_type(src, inter, 0)
{}
//--------------------------------------------------------------------
void generate(color_type* span, int x, int y, unsigned len)
{
base_type::interpolator().begin(x + base_type::filter_dx_dbl(),
y + base_type::filter_dy_dbl(), len);
do
{
base_type::interpolator().coordinates(&x, &y);
const value_type* fg_ptr = (const value_type*)
base_type::source().span(x >> image_subpixel_shift,
y >> image_subpixel_shift,
1);
span->r = fg_ptr[order_type::R];
span->g = fg_ptr[order_type::G];
span->b = fg_ptr[order_type::B];
span->a = fg_ptr[order_type::A];
++span;
++base_type::interpolator();
} while(--len);
}
};
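    //
    // Example (an illustrative sketch, not part of AGG itself): every span
    // generator in this header is plugged into the scanline pipeline in the
    // same way. The names img_pixf, mtx, ras, sl and renb are assumed to be
    // set up elsewhere; mtx must map destination pixels back to source
    // pixels (i.e. it is the inverted display transform).
    //
    //     typedef agg::image_accessor_clone<agg::pixfmt_rgba32> img_source_type;
    //     typedef agg::span_interpolator_linear<>               interpolator_type;
    //
    //     interpolator_type inter(mtx);
    //     img_source_type   img_src(img_pixf);
    //     agg::span_image_filter_rgba_nn<img_source_type, interpolator_type>
    //         sg(img_src, inter);
    //     agg::span_allocator<agg::rgba8> sa;
    //     agg::render_scanlines_aa(ras, sl, renb, sa, sg);
    //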
//=========================================span_image_filter_rgba_bilinear
template<class Source, class Interpolator>
class span_image_filter_rgba_bilinear :
public span_image_filter<Source, Interpolator>
{
public:
typedef Source source_type;
typedef typename source_type::color_type color_type;
typedef typename source_type::order_type order_type;
typedef Interpolator interpolator_type;
typedef span_image_filter<source_type, interpolator_type> base_type;
typedef typename color_type::value_type value_type;
typedef typename color_type::calc_type calc_type;
typedef typename color_type::long_type long_type;
//--------------------------------------------------------------------
span_image_filter_rgba_bilinear() {}
span_image_filter_rgba_bilinear(source_type& src,
interpolator_type& inter) :
base_type(src, inter, 0)
{}
//--------------------------------------------------------------------
void generate(color_type* span, int x, int y, unsigned len)
{
base_type::interpolator().begin(x + base_type::filter_dx_dbl(),
y + base_type::filter_dy_dbl(), len);
long_type fg[4];
const value_type *fg_ptr;
do
{
int x_hr;
int y_hr;
base_type::interpolator().coordinates(&x_hr, &y_hr);
x_hr -= base_type::filter_dx_int();
y_hr -= base_type::filter_dy_int();
int x_lr = x_hr >> image_subpixel_shift;
int y_lr = y_hr >> image_subpixel_shift;
unsigned weight;
fg[0] =
fg[1] =
fg[2] =
fg[3] = image_subpixel_scale * image_subpixel_scale / 2;
x_hr &= image_subpixel_mask;
y_hr &= image_subpixel_mask;
fg_ptr = (const value_type*)base_type::source().span(x_lr, y_lr, 2);
weight = (image_subpixel_scale - x_hr) *
(image_subpixel_scale - y_hr);
fg[0] += weight * *fg_ptr++;
fg[1] += weight * *fg_ptr++;
fg[2] += weight * *fg_ptr++;
fg[3] += weight * *fg_ptr;
fg_ptr = (const value_type*)base_type::source().next_x();
weight = x_hr * (image_subpixel_scale - y_hr);
fg[0] += weight * *fg_ptr++;
fg[1] += weight * *fg_ptr++;
fg[2] += weight * *fg_ptr++;
fg[3] += weight * *fg_ptr;
fg_ptr = (const value_type*)base_type::source().next_y();
weight = (image_subpixel_scale - x_hr) * y_hr;
fg[0] += weight * *fg_ptr++;
fg[1] += weight * *fg_ptr++;
fg[2] += weight * *fg_ptr++;
fg[3] += weight * *fg_ptr;
fg_ptr = (const value_type*)base_type::source().next_x();
weight = x_hr * y_hr;
fg[0] += weight * *fg_ptr++;
fg[1] += weight * *fg_ptr++;
fg[2] += weight * *fg_ptr++;
fg[3] += weight * *fg_ptr;
span->r = value_type(color_type::downshift(fg[order_type::R], image_subpixel_shift * 2));
span->g = value_type(color_type::downshift(fg[order_type::G], image_subpixel_shift * 2));
span->b = value_type(color_type::downshift(fg[order_type::B], image_subpixel_shift * 2));
span->a = value_type(color_type::downshift(fg[order_type::A], image_subpixel_shift * 2));
++span;
++base_type::interpolator();
} while(--len);
}
};
//====================================span_image_filter_rgba_bilinear_clip
template<class Source, class Interpolator>
class span_image_filter_rgba_bilinear_clip :
public span_image_filter<Source, Interpolator>
{
public:
typedef Source source_type;
typedef typename source_type::color_type color_type;
typedef typename source_type::order_type order_type;
typedef Interpolator interpolator_type;
typedef span_image_filter<source_type, interpolator_type> base_type;
typedef typename color_type::value_type value_type;
typedef typename color_type::calc_type calc_type;
typedef typename color_type::long_type long_type;
//--------------------------------------------------------------------
span_image_filter_rgba_bilinear_clip() {}
span_image_filter_rgba_bilinear_clip(source_type& src,
const color_type& back_color,
interpolator_type& inter) :
base_type(src, inter, 0),
m_back_color(back_color)
{}
const color_type& background_color() const { return m_back_color; }
void background_color(const color_type& v) { m_back_color = v; }
//--------------------------------------------------------------------
void generate(color_type* span, int x, int y, unsigned len)
{
base_type::interpolator().begin(x + base_type::filter_dx_dbl(),
y + base_type::filter_dy_dbl(), len);
long_type fg[4];
value_type back_r = m_back_color.r;
value_type back_g = m_back_color.g;
value_type back_b = m_back_color.b;
value_type back_a = m_back_color.a;
const value_type *fg_ptr;
int maxx = base_type::source().width() - 1;
int maxy = base_type::source().height() - 1;
do
{
int x_hr;
int y_hr;
base_type::interpolator().coordinates(&x_hr, &y_hr);
x_hr -= base_type::filter_dx_int();
y_hr -= base_type::filter_dy_int();
int x_lr = x_hr >> image_subpixel_shift;
int y_lr = y_hr >> image_subpixel_shift;
unsigned weight;
if(x_lr >= 0 && y_lr >= 0 &&
x_lr < maxx && y_lr < maxy)
{
fg[0] = fg[1] = fg[2] = fg[3] = 0;
x_hr &= image_subpixel_mask;
y_hr &= image_subpixel_mask;
fg_ptr = (const value_type*)
base_type::source().row_ptr(y_lr) + (x_lr << 2);
weight = (image_subpixel_scale - x_hr) *
(image_subpixel_scale - y_hr);
fg[0] += weight * *fg_ptr++;
fg[1] += weight * *fg_ptr++;
fg[2] += weight * *fg_ptr++;
fg[3] += weight * *fg_ptr++;
weight = x_hr * (image_subpixel_scale - y_hr);
fg[0] += weight * *fg_ptr++;
fg[1] += weight * *fg_ptr++;
fg[2] += weight * *fg_ptr++;
fg[3] += weight * *fg_ptr++;
++y_lr;
fg_ptr = (const value_type*)
base_type::source().row_ptr(y_lr) + (x_lr << 2);
weight = (image_subpixel_scale - x_hr) * y_hr;
fg[0] += weight * *fg_ptr++;
fg[1] += weight * *fg_ptr++;
fg[2] += weight * *fg_ptr++;
fg[3] += weight * *fg_ptr++;
weight = x_hr * y_hr;
fg[0] += weight * *fg_ptr++;
fg[1] += weight * *fg_ptr++;
fg[2] += weight * *fg_ptr++;
fg[3] += weight * *fg_ptr++;
fg[0] = color_type::downshift(fg[0], image_subpixel_shift * 2);
fg[1] = color_type::downshift(fg[1], image_subpixel_shift * 2);
fg[2] = color_type::downshift(fg[2], image_subpixel_shift * 2);
fg[3] = color_type::downshift(fg[3], image_subpixel_shift * 2);
}
else
{
if(x_lr < -1 || y_lr < -1 ||
x_lr > maxx || y_lr > maxy)
{
fg[order_type::R] = back_r;
fg[order_type::G] = back_g;
fg[order_type::B] = back_b;
fg[order_type::A] = back_a;
}
else
{
fg[0] = fg[1] = fg[2] = fg[3] = 0;
x_hr &= image_subpixel_mask;
y_hr &= image_subpixel_mask;
weight = (image_subpixel_scale - x_hr) *
(image_subpixel_scale - y_hr);
if(x_lr >= 0 && y_lr >= 0 &&
x_lr <= maxx && y_lr <= maxy)
{
fg_ptr = (const value_type*)
base_type::source().row_ptr(y_lr) + (x_lr << 2);
fg[0] += weight * *fg_ptr++;
fg[1] += weight * *fg_ptr++;
fg[2] += weight * *fg_ptr++;
fg[3] += weight * *fg_ptr++;
}
else
{
fg[order_type::R] += back_r * weight;
fg[order_type::G] += back_g * weight;
fg[order_type::B] += back_b * weight;
fg[order_type::A] += back_a * weight;
}
x_lr++;
weight = x_hr * (image_subpixel_scale - y_hr);
if(x_lr >= 0 && y_lr >= 0 &&
x_lr <= maxx && y_lr <= maxy)
{
fg_ptr = (const value_type*)
base_type::source().row_ptr(y_lr) + (x_lr << 2);
fg[0] += weight * *fg_ptr++;
fg[1] += weight * *fg_ptr++;
fg[2] += weight * *fg_ptr++;
fg[3] += weight * *fg_ptr++;
}
else
{
fg[order_type::R] += back_r * weight;
fg[order_type::G] += back_g * weight;
fg[order_type::B] += back_b * weight;
fg[order_type::A] += back_a * weight;
}
x_lr--;
y_lr++;
weight = (image_subpixel_scale - x_hr) * y_hr;
if(x_lr >= 0 && y_lr >= 0 &&
x_lr <= maxx && y_lr <= maxy)
{
fg_ptr = (const value_type*)
base_type::source().row_ptr(y_lr) + (x_lr << 2);
fg[0] += weight * *fg_ptr++;
fg[1] += weight * *fg_ptr++;
fg[2] += weight * *fg_ptr++;
fg[3] += weight * *fg_ptr++;
}
else
{
fg[order_type::R] += back_r * weight;
fg[order_type::G] += back_g * weight;
fg[order_type::B] += back_b * weight;
fg[order_type::A] += back_a * weight;
}
x_lr++;
weight = x_hr * y_hr;
if(x_lr >= 0 && y_lr >= 0 &&
x_lr <= maxx && y_lr <= maxy)
{
fg_ptr = (const value_type*)
base_type::source().row_ptr(y_lr) + (x_lr << 2);
fg[0] += weight * *fg_ptr++;
fg[1] += weight * *fg_ptr++;
fg[2] += weight * *fg_ptr++;
fg[3] += weight * *fg_ptr++;
}
else
{
fg[order_type::R] += back_r * weight;
fg[order_type::G] += back_g * weight;
fg[order_type::B] += back_b * weight;
fg[order_type::A] += back_a * weight;
}
fg[0] = color_type::downshift(fg[0], image_subpixel_shift * 2);
fg[1] = color_type::downshift(fg[1], image_subpixel_shift * 2);
fg[2] = color_type::downshift(fg[2], image_subpixel_shift * 2);
fg[3] = color_type::downshift(fg[3], image_subpixel_shift * 2);
}
}
span->r = (value_type)fg[order_type::R];
span->g = (value_type)fg[order_type::G];
span->b = (value_type)fg[order_type::B];
span->a = (value_type)fg[order_type::A];
++span;
++base_type::interpolator();
} while(--len);
}
private:
color_type m_back_color;
};
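    //
    // Note (illustrative, following the AGG demos): unlike the accessor-based
    // generators, the _clip variant takes the source pixel format directly
    // and blends the supplied background color wherever the filter footprint
    // falls outside the image:
    //
    //     agg::span_image_filter_rgba_bilinear_clip<agg::pixfmt_rgba32,
    //                                               interpolator_type>
    //         sg(img_pixf, agg::rgba(0, 0, 0, 0), inter);
    //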
//==============================================span_image_filter_rgba_2x2
template<class Source, class Interpolator>
class span_image_filter_rgba_2x2 :
public span_image_filter<Source, Interpolator>
{
public:
typedef Source source_type;
typedef typename source_type::color_type color_type;
typedef typename source_type::order_type order_type;
typedef Interpolator interpolator_type;
typedef span_image_filter<source_type, interpolator_type> base_type;
typedef typename color_type::value_type value_type;
typedef typename color_type::calc_type calc_type;
typedef typename color_type::long_type long_type;
//--------------------------------------------------------------------
span_image_filter_rgba_2x2() {}
span_image_filter_rgba_2x2(source_type& src,
interpolator_type& inter,
image_filter_lut& filter) :
base_type(src, inter, &filter)
{}
//--------------------------------------------------------------------
void generate(color_type* span, int x, int y, unsigned len)
{
base_type::interpolator().begin(x + base_type::filter_dx_dbl(),
y + base_type::filter_dy_dbl(), len);
long_type fg[4];
const value_type *fg_ptr;
const int16* weight_array = base_type::filter().weight_array() +
((base_type::filter().diameter()/2 - 1) <<
image_subpixel_shift);
do
{
int x_hr;
int y_hr;
base_type::interpolator().coordinates(&x_hr, &y_hr);
x_hr -= base_type::filter_dx_int();
y_hr -= base_type::filter_dy_int();
int x_lr = x_hr >> image_subpixel_shift;
int y_lr = y_hr >> image_subpixel_shift;
unsigned weight;
fg[0] = fg[1] = fg[2] = fg[3] = 0;
x_hr &= image_subpixel_mask;
y_hr &= image_subpixel_mask;
fg_ptr = (const value_type*)base_type::source().span(x_lr, y_lr, 2);
weight = (weight_array[x_hr + image_subpixel_scale] *
weight_array[y_hr + image_subpixel_scale] +
image_filter_scale / 2) >>
image_filter_shift;
fg[0] += weight * *fg_ptr++;
fg[1] += weight * *fg_ptr++;
fg[2] += weight * *fg_ptr++;
fg[3] += weight * *fg_ptr;
fg_ptr = (const value_type*)base_type::source().next_x();
weight = (weight_array[x_hr] *
weight_array[y_hr + image_subpixel_scale] +
image_filter_scale / 2) >>
image_filter_shift;
fg[0] += weight * *fg_ptr++;
fg[1] += weight * *fg_ptr++;
fg[2] += weight * *fg_ptr++;
fg[3] += weight * *fg_ptr;
fg_ptr = (const value_type*)base_type::source().next_y();
weight = (weight_array[x_hr + image_subpixel_scale] *
weight_array[y_hr] +
image_filter_scale / 2) >>
image_filter_shift;
fg[0] += weight * *fg_ptr++;
fg[1] += weight * *fg_ptr++;
fg[2] += weight * *fg_ptr++;
fg[3] += weight * *fg_ptr;
fg_ptr = (const value_type*)base_type::source().next_x();
weight = (weight_array[x_hr] *
weight_array[y_hr] +
image_filter_scale / 2) >>
image_filter_shift;
fg[0] += weight * *fg_ptr++;
fg[1] += weight * *fg_ptr++;
fg[2] += weight * *fg_ptr++;
fg[3] += weight * *fg_ptr;
fg[0] = color_type::downshift(fg[0], image_filter_shift);
fg[1] = color_type::downshift(fg[1], image_filter_shift);
fg[2] = color_type::downshift(fg[2], image_filter_shift);
fg[3] = color_type::downshift(fg[3], image_filter_shift);
if(fg[order_type::A] > color_type::full_value()) fg[order_type::A] = color_type::full_value();
if(fg[order_type::R] > fg[order_type::A]) fg[order_type::R] = fg[order_type::A];
if(fg[order_type::G] > fg[order_type::A]) fg[order_type::G] = fg[order_type::A];
if(fg[order_type::B] > fg[order_type::A]) fg[order_type::B] = fg[order_type::A];
span->r = (value_type)fg[order_type::R];
span->g = (value_type)fg[order_type::G];
span->b = (value_type)fg[order_type::B];
span->a = (value_type)fg[order_type::A];
++span;
++base_type::interpolator();
} while(--len);
}
};
//==================================================span_image_filter_rgba
template<class Source, class Interpolator>
class span_image_filter_rgba :
public span_image_filter<Source, Interpolator>
{
public:
typedef Source source_type;
typedef typename source_type::color_type color_type;
typedef typename source_type::order_type order_type;
typedef Interpolator interpolator_type;
typedef span_image_filter<source_type, interpolator_type> base_type;
typedef typename color_type::value_type value_type;
typedef typename color_type::calc_type calc_type;
typedef typename color_type::long_type long_type;
//--------------------------------------------------------------------
span_image_filter_rgba() {}
span_image_filter_rgba(source_type& src,
interpolator_type& inter,
image_filter_lut& filter) :
base_type(src, inter, &filter)
{}
//--------------------------------------------------------------------
void generate(color_type* span, int x, int y, unsigned len)
{
base_type::interpolator().begin(x + base_type::filter_dx_dbl(),
y + base_type::filter_dy_dbl(), len);
long_type fg[4];
const value_type *fg_ptr;
unsigned diameter = base_type::filter().diameter();
int start = base_type::filter().start();
const int16* weight_array = base_type::filter().weight_array();
int x_count;
int weight_y;
do
{
base_type::interpolator().coordinates(&x, &y);
x -= base_type::filter_dx_int();
y -= base_type::filter_dy_int();
int x_hr = x;
int y_hr = y;
int x_lr = x_hr >> image_subpixel_shift;
int y_lr = y_hr >> image_subpixel_shift;
fg[0] = fg[1] = fg[2] = fg[3] = 0;
int x_fract = x_hr & image_subpixel_mask;
unsigned y_count = diameter;
y_hr = image_subpixel_mask - (y_hr & image_subpixel_mask);
fg_ptr = (const value_type*)base_type::source().span(x_lr + start,
y_lr + start,
diameter);
for(;;)
{
x_count = diameter;
weight_y = weight_array[y_hr];
x_hr = image_subpixel_mask - x_fract;
for(;;)
{
int weight = (weight_y * weight_array[x_hr] +
image_filter_scale / 2) >>
image_filter_shift;
fg[0] += weight * *fg_ptr++;
fg[1] += weight * *fg_ptr++;
fg[2] += weight * *fg_ptr++;
fg[3] += weight * *fg_ptr;
if(--x_count == 0) break;
x_hr += image_subpixel_scale;
fg_ptr = (const value_type*)base_type::source().next_x();
}
if(--y_count == 0) break;
y_hr += image_subpixel_scale;
fg_ptr = (const value_type*)base_type::source().next_y();
}
fg[0] = color_type::downshift(fg[0], image_filter_shift);
fg[1] = color_type::downshift(fg[1], image_filter_shift);
fg[2] = color_type::downshift(fg[2], image_filter_shift);
fg[3] = color_type::downshift(fg[3], image_filter_shift);
if(fg[0] < 0) fg[0] = 0;
if(fg[1] < 0) fg[1] = 0;
if(fg[2] < 0) fg[2] = 0;
if(fg[3] < 0) fg[3] = 0;
if(fg[order_type::A] > color_type::full_value()) fg[order_type::A] = color_type::full_value();
if(fg[order_type::R] > fg[order_type::A]) fg[order_type::R] = fg[order_type::A];
if(fg[order_type::G] > fg[order_type::A]) fg[order_type::G] = fg[order_type::A];
if(fg[order_type::B] > fg[order_type::A]) fg[order_type::B] = fg[order_type::A];
span->r = (value_type)fg[order_type::R];
span->g = (value_type)fg[order_type::G];
span->b = (value_type)fg[order_type::B];
span->a = (value_type)fg[order_type::A];
++span;
++base_type::interpolator();
} while(--len);
}
};
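    //
    // Example (a hypothetical sketch, reusing the typedefs from the sketch
    // above): the generic filter is driven by an image_filter_lut, and
    // agg_image_filters.h provides ready-made kernels that can be loaded
    // into it:
    //
    //     agg::image_filter<agg::image_filter_bicubic> filter;
    //     agg::span_image_filter_rgba<img_source_type, interpolator_type>
    //         sg(img_src, inter, filter);
    //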
//========================================span_image_resample_rgba_affine
template<class Source>
class span_image_resample_rgba_affine :
public span_image_resample_affine<Source>
{
public:
typedef Source source_type;
typedef typename source_type::color_type color_type;
typedef typename source_type::order_type order_type;
typedef span_image_resample_affine<source_type> base_type;
typedef typename base_type::interpolator_type interpolator_type;
typedef typename color_type::value_type value_type;
typedef typename color_type::long_type long_type;
enum base_scale_e
{
downscale_shift = image_filter_shift
};
//--------------------------------------------------------------------
span_image_resample_rgba_affine() {}
span_image_resample_rgba_affine(source_type& src,
interpolator_type& inter,
image_filter_lut& filter) :
base_type(src, inter, filter)
{}
//--------------------------------------------------------------------
void generate(color_type* span, int x, int y, unsigned len)
{
base_type::interpolator().begin(x + base_type::filter_dx_dbl(),
y + base_type::filter_dy_dbl(), len);
long_type fg[4];
int diameter = base_type::filter().diameter();
int filter_scale = diameter << image_subpixel_shift;
int radius_x = (diameter * base_type::m_rx) >> 1;
int radius_y = (diameter * base_type::m_ry) >> 1;
int len_x_lr =
(diameter * base_type::m_rx + image_subpixel_mask) >>
image_subpixel_shift;
const int16* weight_array = base_type::filter().weight_array();
do
{
base_type::interpolator().coordinates(&x, &y);
x += base_type::filter_dx_int() - radius_x;
y += base_type::filter_dy_int() - radius_y;
fg[0] = fg[1] = fg[2] = fg[3] = 0;
int y_lr = y >> image_subpixel_shift;
int y_hr = ((image_subpixel_mask - (y & image_subpixel_mask)) *
base_type::m_ry_inv) >>
image_subpixel_shift;
int total_weight = 0;
int x_lr = x >> image_subpixel_shift;
int x_hr = ((image_subpixel_mask - (x & image_subpixel_mask)) *
base_type::m_rx_inv) >>
image_subpixel_shift;
int x_hr2 = x_hr;
const value_type* fg_ptr =
(const value_type*)base_type::source().span(x_lr, y_lr, len_x_lr);
for(;;)
{
int weight_y = weight_array[y_hr];
x_hr = x_hr2;
for(;;)
{
int weight = (weight_y * weight_array[x_hr] +
image_filter_scale / 2) >>
downscale_shift;
fg[0] += *fg_ptr++ * weight;
fg[1] += *fg_ptr++ * weight;
fg[2] += *fg_ptr++ * weight;
fg[3] += *fg_ptr++ * weight;
total_weight += weight;
x_hr += base_type::m_rx_inv;
if(x_hr >= filter_scale) break;
fg_ptr = (const value_type*)base_type::source().next_x();
}
y_hr += base_type::m_ry_inv;
if(y_hr >= filter_scale) break;
fg_ptr = (const value_type*)base_type::source().next_y();
}
fg[0] /= total_weight;
fg[1] /= total_weight;
fg[2] /= total_weight;
fg[3] /= total_weight;
if(fg[0] < 0) fg[0] = 0;
if(fg[1] < 0) fg[1] = 0;
if(fg[2] < 0) fg[2] = 0;
if(fg[3] < 0) fg[3] = 0;
if(fg[order_type::A] > color_type::full_value()) fg[order_type::A] = color_type::full_value();
if(fg[order_type::R] > fg[order_type::A]) fg[order_type::R] = fg[order_type::A];
if(fg[order_type::G] > fg[order_type::A]) fg[order_type::G] = fg[order_type::A];
if(fg[order_type::B] > fg[order_type::A]) fg[order_type::B] = fg[order_type::A];
span->r = (value_type)fg[order_type::R];
span->g = (value_type)fg[order_type::G];
span->b = (value_type)fg[order_type::B];
span->a = (value_type)fg[order_type::A];
++span;
++base_type::interpolator();
} while(--len);
}
};
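    //
    // Example (an illustrative sketch): the affine resampler is constructed
    // like the ordinary filters, but its interpolator type is fixed by the
    // base class to span_interpolator_linear<trans_affine>; img_src and
    // img_mtx are assumed as in the earlier sketches.
    //
    //     agg::span_interpolator_linear<agg::trans_affine> inter(img_mtx);
    //     agg::image_filter<agg::image_filter_hanning> filter;
    //     agg::span_image_resample_rgba_affine<img_source_type>
    //         sg(img_src, inter, filter);
    //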
//==============================================span_image_resample_rgba
template<class Source, class Interpolator>
class span_image_resample_rgba :
public span_image_resample<Source, Interpolator>
{
public:
typedef Source source_type;
typedef typename source_type::color_type color_type;
typedef typename source_type::order_type order_type;
typedef Interpolator interpolator_type;
typedef span_image_resample<source_type, interpolator_type> base_type;
typedef typename color_type::value_type value_type;
typedef typename color_type::long_type long_type;
enum base_scale_e
{
downscale_shift = image_filter_shift
};
//--------------------------------------------------------------------
span_image_resample_rgba() {}
span_image_resample_rgba(source_type& src,
interpolator_type& inter,
image_filter_lut& filter) :
base_type(src, inter, filter)
{}
//--------------------------------------------------------------------
void generate(color_type* span, int x, int y, unsigned len)
{
base_type::interpolator().begin(x + base_type::filter_dx_dbl(),
y + base_type::filter_dy_dbl(), len);
long_type fg[4];
int diameter = base_type::filter().diameter();
int filter_scale = diameter << image_subpixel_shift;
const int16* weight_array = base_type::filter().weight_array();
do
{
int rx;
int ry;
int rx_inv = image_subpixel_scale;
int ry_inv = image_subpixel_scale;
base_type::interpolator().coordinates(&x, &y);
base_type::interpolator().local_scale(&rx, &ry);
base_type::adjust_scale(&rx, &ry);
rx_inv = image_subpixel_scale * image_subpixel_scale / rx;
ry_inv = image_subpixel_scale * image_subpixel_scale / ry;
int radius_x = (diameter * rx) >> 1;
int radius_y = (diameter * ry) >> 1;
int len_x_lr =
(diameter * rx + image_subpixel_mask) >>
image_subpixel_shift;
x += base_type::filter_dx_int() - radius_x;
y += base_type::filter_dy_int() - radius_y;
fg[0] = fg[1] = fg[2] = fg[3] = 0;
int y_lr = y >> image_subpixel_shift;
int y_hr = ((image_subpixel_mask - (y & image_subpixel_mask)) *
ry_inv) >>
image_subpixel_shift;
int total_weight = 0;
int x_lr = x >> image_subpixel_shift;
int x_hr = ((image_subpixel_mask - (x & image_subpixel_mask)) *
rx_inv) >>
image_subpixel_shift;
int x_hr2 = x_hr;
const value_type* fg_ptr =
(const value_type*)base_type::source().span(x_lr, y_lr, len_x_lr);
for(;;)
{
int weight_y = weight_array[y_hr];
x_hr = x_hr2;
for(;;)
{
int weight = (weight_y * weight_array[x_hr] +
image_filter_scale / 2) >>
downscale_shift;
fg[0] += *fg_ptr++ * weight;
fg[1] += *fg_ptr++ * weight;
fg[2] += *fg_ptr++ * weight;
fg[3] += *fg_ptr++ * weight;
total_weight += weight;
x_hr += rx_inv;
if(x_hr >= filter_scale) break;
fg_ptr = (const value_type*)base_type::source().next_x();
}
y_hr += ry_inv;
if(y_hr >= filter_scale) break;
fg_ptr = (const value_type*)base_type::source().next_y();
}
fg[0] /= total_weight;
fg[1] /= total_weight;
fg[2] /= total_weight;
fg[3] /= total_weight;
if(fg[0] < 0) fg[0] = 0;
if(fg[1] < 0) fg[1] = 0;
if(fg[2] < 0) fg[2] = 0;
if(fg[3] < 0) fg[3] = 0;
if(fg[order_type::A] > color_type::full_value()) fg[order_type::A] = color_type::full_value();
                if(fg[order_type::R] > fg[order_type::A]) fg[order_type::R] = fg[order_type::A];
                if(fg[order_type::G] > fg[order_type::A]) fg[order_type::G] = fg[order_type::A];
                if(fg[order_type::B] > fg[order_type::A]) fg[order_type::B] = fg[order_type::A];
span->r = (value_type)fg[order_type::R];
span->g = (value_type)fg[order_type::G];
span->b = (value_type)fg[order_type::B];
span->a = (value_type)fg[order_type::A];
++span;
++base_type::interpolator();
} while(--len);
}
};
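    //
    // Note: both resampling generators above integrate the kernel over the
    // whole source footprint (diameter * rx by diameter * ry subpixels) and
    // divide by the accumulated total_weight, which is what makes them
    // suitable for strong down-scaling where a fixed-size kernel would skip
    // source pixels. The non-affine variant additionally asks the
    // interpolator for a local scale at every output pixel, so it is meant
    // for interpolators that provide local_scale(), such as the perspective
    // interpolators in agg_span_interpolator_persp.h.
    //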
}
#endif
| 0 |
D://workCode//uploadProject\awtk\3rd\agg | D://workCode//uploadProject\awtk\3rd\agg\include\agg_span_interpolator_linear.h | //----------------------------------------------------------------------------
// Anti-Grain Geometry - Version 2.4
// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
//
// Permission to copy, use, modify, sell and distribute this software
// is granted provided this copyright notice appears in all copies.
// This software is provided "as is" without express or implied
// warranty, and with no claim as to its suitability for any purpose.
//
//----------------------------------------------------------------------------
// Contact: mcseem@antigrain.com
// mcseemagg@yahoo.com
// http://www.antigrain.com
//----------------------------------------------------------------------------
#ifndef AGG_SPAN_INTERPOLATOR_LINEAR_INCLUDED
#define AGG_SPAN_INTERPOLATOR_LINEAR_INCLUDED
#include "agg_basics.h"
#include "agg_dda_line.h"
#include "agg_trans_affine.h"
namespace agg
{
//================================================span_interpolator_linear
template<class Transformer = trans_affine, unsigned SubpixelShift = 8>
class span_interpolator_linear
{
public:
typedef Transformer trans_type;
enum subpixel_scale_e
{
subpixel_shift = SubpixelShift,
subpixel_scale = 1 << subpixel_shift
};
//--------------------------------------------------------------------
span_interpolator_linear() {}
span_interpolator_linear(trans_type& trans) : m_trans(&trans) {}
span_interpolator_linear(trans_type& trans,
double x, double y, unsigned len) :
m_trans(&trans)
{
begin(x, y, len);
}
//----------------------------------------------------------------
const trans_type& transformer() const { return *m_trans; }
void transformer(trans_type& trans) { m_trans = &trans; }
//----------------------------------------------------------------
void begin(double x, double y, unsigned len)
{
double tx;
double ty;
tx = x;
ty = y;
m_trans->transform(&tx, &ty);
int x1 = iround(tx * subpixel_scale);
int y1 = iround(ty * subpixel_scale);
tx = x + len;
ty = y;
m_trans->transform(&tx, &ty);
int x2 = iround(tx * subpixel_scale);
int y2 = iround(ty * subpixel_scale);
m_li_x = dda2_line_interpolator(x1, x2, len);
m_li_y = dda2_line_interpolator(y1, y2, len);
}
//----------------------------------------------------------------
void resynchronize(double xe, double ye, unsigned len)
{
m_trans->transform(&xe, &ye);
m_li_x = dda2_line_interpolator(m_li_x.y(), iround(xe * subpixel_scale), len);
m_li_y = dda2_line_interpolator(m_li_y.y(), iround(ye * subpixel_scale), len);
}
//----------------------------------------------------------------
void operator++()
{
++m_li_x;
++m_li_y;
}
//----------------------------------------------------------------
void coordinates(int* x, int* y) const
{
*x = m_li_x.y();
*y = m_li_y.y();
}
private:
trans_type* m_trans;
dda2_line_interpolator m_li_x;
dda2_line_interpolator m_li_y;
};
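    //
    // Example (an illustrative sketch): the interpolator walks a span of
    // "len" pixels and reports the transformed coordinates in units of
    // 1/subpixel_scale (1/256 by default):
    //
    //     agg::trans_affine mtx;            // filled in elsewhere
    //     agg::span_interpolator_linear<> inter(mtx);
    //     inter.begin(x + 0.5, y + 0.5, len);
    //     do
    //     {
    //         int sx, sy;
    //         inter.coordinates(&sx, &sy);
    //         // sx >> 8 and sy >> 8 are the integer source coordinates
    //         ++inter;
    //     } while(--len);
    //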
//=====================================span_interpolator_linear_subdiv
template<class Transformer = trans_affine, unsigned SubpixelShift = 8>
class span_interpolator_linear_subdiv
{
public:
typedef Transformer trans_type;
enum subpixel_scale_e
{
subpixel_shift = SubpixelShift,
subpixel_scale = 1 << subpixel_shift
};
//----------------------------------------------------------------
span_interpolator_linear_subdiv() :
m_subdiv_shift(4),
m_subdiv_size(1 << m_subdiv_shift),
m_subdiv_mask(m_subdiv_size - 1) {}
span_interpolator_linear_subdiv(trans_type& trans,
unsigned subdiv_shift = 4) :
m_subdiv_shift(subdiv_shift),
m_subdiv_size(1 << m_subdiv_shift),
m_subdiv_mask(m_subdiv_size - 1),
m_trans(&trans) {}
span_interpolator_linear_subdiv(trans_type& trans,
double x, double y, unsigned len,
unsigned subdiv_shift = 4) :
m_subdiv_shift(subdiv_shift),
m_subdiv_size(1 << m_subdiv_shift),
m_subdiv_mask(m_subdiv_size - 1),
m_trans(&trans)
{
begin(x, y, len);
}
//----------------------------------------------------------------
const trans_type& transformer() const { return *m_trans; }
        void transformer(trans_type& trans) { m_trans = &trans; }
//----------------------------------------------------------------
unsigned subdiv_shift() const { return m_subdiv_shift; }
void subdiv_shift(unsigned shift)
{
m_subdiv_shift = shift;
m_subdiv_size = 1 << m_subdiv_shift;
m_subdiv_mask = m_subdiv_size - 1;
}
//----------------------------------------------------------------
void begin(double x, double y, unsigned len)
{
double tx;
double ty;
m_pos = 1;
m_src_x = iround(x * subpixel_scale) + subpixel_scale;
m_src_y = y;
m_len = len;
if(len > m_subdiv_size) len = m_subdiv_size;
tx = x;
ty = y;
m_trans->transform(&tx, &ty);
int x1 = iround(tx * subpixel_scale);
int y1 = iround(ty * subpixel_scale);
tx = x + len;
ty = y;
m_trans->transform(&tx, &ty);
m_li_x = dda2_line_interpolator(x1, iround(tx * subpixel_scale), len);
m_li_y = dda2_line_interpolator(y1, iround(ty * subpixel_scale), len);
}
//----------------------------------------------------------------
void operator++()
{
++m_li_x;
++m_li_y;
if(m_pos >= m_subdiv_size)
{
unsigned len = m_len;
if(len > m_subdiv_size) len = m_subdiv_size;
double tx = double(m_src_x) / double(subpixel_scale) + len;
double ty = m_src_y;
m_trans->transform(&tx, &ty);
m_li_x = dda2_line_interpolator(m_li_x.y(), iround(tx * subpixel_scale), len);
m_li_y = dda2_line_interpolator(m_li_y.y(), iround(ty * subpixel_scale), len);
m_pos = 0;
}
m_src_x += subpixel_scale;
++m_pos;
--m_len;
}
//----------------------------------------------------------------
void coordinates(int* x, int* y) const
{
*x = m_li_x.y();
*y = m_li_y.y();
}
private:
unsigned m_subdiv_shift;
unsigned m_subdiv_size;
unsigned m_subdiv_mask;
trans_type* m_trans;
dda2_line_interpolator m_li_x;
dda2_line_interpolator m_li_y;
int m_src_x;
double m_src_y;
unsigned m_pos;
unsigned m_len;
};
}
#endif
| 0 |
D://workCode//uploadProject\awtk\3rd\agg | D://workCode//uploadProject\awtk\3rd\agg\include\agg_trans_affine.h | //----------------------------------------------------------------------------
// Anti-Grain Geometry - Version 2.4
// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
//
// Permission to copy, use, modify, sell and distribute this software
// is granted provided this copyright notice appears in all copies.
// This software is provided "as is" without express or implied
// warranty, and with no claim as to its suitability for any purpose.
//
//----------------------------------------------------------------------------
// Contact: mcseem@antigrain.com
// mcseemagg@yahoo.com
// http://www.antigrain.com
//----------------------------------------------------------------------------
//
// Affine transformation classes.
//
//----------------------------------------------------------------------------
#ifndef AGG_TRANS_AFFINE_INCLUDED
#define AGG_TRANS_AFFINE_INCLUDED
#include <math.h>
#include "agg_basics.h"
namespace agg
{
const double affine_epsilon = 1e-14;
//============================================================trans_affine
//
// See Implementation agg_trans_affine.cpp
//
    // Affine transformations are linear transformations in Cartesian coordinates
// (strictly speaking not only in Cartesian, but for the beginning we will
// think so). They are rotation, scaling, translation and skewing.
// After any affine transformation a line segment remains a line segment
// and it will never become a curve.
//
// There will be no math about matrix calculations, since it has been
// described many times. Ask yourself a very simple question:
// "why do we need to understand and use some matrix stuff instead of just
// rotating, scaling and so on". The answers are:
//
// 1. Any combination of transformations can be done by only 4 multiplications
// and 4 additions in floating point.
    // 2. One matrix transformation is equivalent to a number of consecutive
// discrete transformations, i.e. the matrix "accumulates" all transformations
// in the order of their settings. Suppose we have 4 transformations:
// * rotate by 30 degrees,
// * scale X to 2.0,
// * scale Y to 1.5,
// * move to (100, 100).
// The result will depend on the order of these transformations,
    // and the advantage of the matrix is that the sequence of discrete calls:
// rotate(30), scaleX(2.0), scaleY(1.5), move(100,100)
// will have exactly the same result as the following matrix transformations:
//
// affine_matrix m;
// m *= rotate_matrix(30);
// m *= scaleX_matrix(2.0);
// m *= scaleY_matrix(1.5);
// m *= move_matrix(100,100);
//
// m.transform_my_point_at_last(x, y);
//
    // What is the good of it? In real life we will set up the matrix only once
    // and then transform many points, not to mention the convenience of setting
    // any combination of transformations.
//
    // So, how to use it? Very easily, almost literally as shown above. Well,
    // not quite; let us write a correct example:
//
// agg::trans_affine m;
// m *= agg::trans_affine_rotation(30.0 * 3.1415926 / 180.0);
// m *= agg::trans_affine_scaling(2.0, 1.5);
// m *= agg::trans_affine_translation(100.0, 100.0);
// m.transform(&x, &y);
//
    // The affine matrix is all you need to perform any linear transformation,
    // but all transformations have an origin point of (0,0). This means that we
    // need two translations if we want to rotate something around (100,100):
//
// m *= agg::trans_affine_translation(-100.0, -100.0); // move to (0,0)
// m *= agg::trans_affine_rotation(30.0 * 3.1415926 / 180.0); // rotate
// m *= agg::trans_affine_translation(100.0, 100.0); // move back to (100,100)
//----------------------------------------------------------------------
struct trans_affine
{
double sx, shy, shx, sy, tx, ty;
//------------------------------------------ Construction
// Identity matrix
trans_affine() :
sx(1.0), shy(0.0), shx(0.0), sy(1.0), tx(0.0), ty(0.0)
{}
// Custom matrix. Usually used in derived classes
trans_affine(double v0, double v1, double v2,
double v3, double v4, double v5) :
sx(v0), shy(v1), shx(v2), sy(v3), tx(v4), ty(v5)
{}
// Custom matrix from m[6]
explicit trans_affine(const double* m) :
sx(m[0]), shy(m[1]), shx(m[2]), sy(m[3]), tx(m[4]), ty(m[5])
{}
// Rectangle to a parallelogram.
trans_affine(double x1, double y1, double x2, double y2,
const double* parl)
{
rect_to_parl(x1, y1, x2, y2, parl);
}
// Parallelogram to a rectangle.
trans_affine(const double* parl,
double x1, double y1, double x2, double y2)
{
parl_to_rect(parl, x1, y1, x2, y2);
}
// Arbitrary parallelogram transformation.
trans_affine(const double* src, const double* dst)
{
parl_to_parl(src, dst);
}
        //---------------------------------- Parallelogram transformations
        // Transform a parallelogram to another one. src and dst are
        // pointers to arrays of three points (double[6], x1,y1,...) that
        // identify three corners of the parallelograms, assuming an implicit
        // fourth point. The arguments are arrays of double[6] mapped
// to x1,y1, x2,y2, x3,y3 where the coordinates are:
// *-----------------*
// / (x3,y3)/
// / /
// /(x1,y1) (x2,y2)/
// *-----------------*
const trans_affine& parl_to_parl(const double* src,
const double* dst);
const trans_affine& rect_to_parl(double x1, double y1,
double x2, double y2,
const double* parl);
const trans_affine& parl_to_rect(const double* parl,
double x1, double y1,
double x2, double y2);
//------------------------------------------ Operations
// Reset - load an identity matrix
const trans_affine& reset();
// Direct transformations operations
const trans_affine& translate(double x, double y);
const trans_affine& rotate(double a);
const trans_affine& scale(double s);
const trans_affine& scale(double x, double y);
// Multiply matrix to another one
const trans_affine& multiply(const trans_affine& m);
// Multiply "m" to "this" and assign the result to "this"
const trans_affine& premultiply(const trans_affine& m);
// Multiply matrix to inverse of another one
const trans_affine& multiply_inv(const trans_affine& m);
// Multiply inverse of "m" to "this" and assign the result to "this"
const trans_affine& premultiply_inv(const trans_affine& m);
// Invert matrix. Do not try to invert degenerate matrices,
// there's no check for validity. If you set scale to 0 and
// then try to invert matrix, expect unpredictable result.
const trans_affine& invert();
// Mirroring around X
const trans_affine& flip_x();
// Mirroring around Y
const trans_affine& flip_y();
//------------------------------------------- Load/Store
// Store matrix to an array [6] of double
void store_to(double* m) const
{
*m++ = sx; *m++ = shy; *m++ = shx; *m++ = sy; *m++ = tx; *m++ = ty;
}
// Load matrix from an array [6] of double
const trans_affine& load_from(const double* m)
{
sx = *m++; shy = *m++; shx = *m++; sy = *m++; tx = *m++; ty = *m++;
return *this;
}
//------------------------------------------- Operators
// Multiply the matrix by another one
const trans_affine& operator *= (const trans_affine& m)
{
return multiply(m);
}
// Multiply the matrix by inverse of another one
const trans_affine& operator /= (const trans_affine& m)
{
return multiply_inv(m);
}
// Multiply the matrix by another one and return
        // the result in a separate matrix.
trans_affine operator * (const trans_affine& m) const
{
return trans_affine(*this).multiply(m);
}
// Multiply the matrix by inverse of another one
        // and return the result in a separate matrix.
trans_affine operator / (const trans_affine& m) const
{
return trans_affine(*this).multiply_inv(m);
}
// Calculate and return the inverse matrix
trans_affine operator ~ () const
{
trans_affine ret = *this;
return ret.invert();
}
// Equal operator with default epsilon
bool operator == (const trans_affine& m) const
{
return is_equal(m, affine_epsilon);
}
// Not Equal operator with default epsilon
bool operator != (const trans_affine& m) const
{
return !is_equal(m, affine_epsilon);
}
//-------------------------------------------- Transformations
// Direct transformation of x and y
void transform(double* x, double* y) const;
// Direct transformation of x and y, 2x2 matrix only, no translation
void transform_2x2(double* x, double* y) const;
// Inverse transformation of x and y. It works slower than the
// direct transformation. For massive operations it's better to
// invert() the matrix and then use direct transformations.
void inverse_transform(double* x, double* y) const;
//-------------------------------------------- Auxiliary
// Calculate the determinant of matrix
double determinant() const
{
return sx * sy - shy * shx;
}
// Calculate the reciprocal of the determinant
double determinant_reciprocal() const
{
return 1.0 / (sx * sy - shy * shx);
}
// Get the average scale (by X and Y).
// Basically used to calculate the approximation_scale when
        // decomposing curves into line segments.
double scale() const;
// Check to see if the matrix is not degenerate
bool is_valid(double epsilon = affine_epsilon) const;
// Check to see if it's an identity matrix
bool is_identity(double epsilon = affine_epsilon) const;
// Check to see if two matrices are equal
bool is_equal(const trans_affine& m, double epsilon = affine_epsilon) const;
// Determine the major parameters. Use with caution considering
// possible degenerate cases.
double rotation() const;
void translation(double* dx, double* dy) const;
void scaling(double* x, double* y) const;
void scaling_abs(double* x, double* y) const;
};
//------------------------------------------------------------------------
inline void trans_affine::transform(double* x, double* y) const
{
double tmp = *x;
*x = tmp * sx + *y * shx + tx;
*y = tmp * shy + *y * sy + ty;
}
//------------------------------------------------------------------------
inline void trans_affine::transform_2x2(double* x, double* y) const
{
double tmp = *x;
*x = tmp * sx + *y * shx;
*y = tmp * shy + *y * sy;
}
//------------------------------------------------------------------------
inline void trans_affine::inverse_transform(double* x, double* y) const
{
double d = determinant_reciprocal();
double a = (*x - tx) * d;
double b = (*y - ty) * d;
*x = a * sy - b * shx;
*y = b * sx - a * shy;
}
//------------------------------------------------------------------------
inline double trans_affine::scale() const
{
double x = 0.707106781 * sx + 0.707106781 * shx;
double y = 0.707106781 * shy + 0.707106781 * sy;
return sqrt(x*x + y*y);
}
//------------------------------------------------------------------------
inline const trans_affine& trans_affine::translate(double x, double y)
{
tx += x;
ty += y;
return *this;
}
//------------------------------------------------------------------------
inline const trans_affine& trans_affine::rotate(double a)
{
double ca = cos(a);
double sa = sin(a);
double t0 = sx * ca - shy * sa;
double t2 = shx * ca - sy * sa;
double t4 = tx * ca - ty * sa;
shy = sx * sa + shy * ca;
sy = shx * sa + sy * ca;
ty = tx * sa + ty * ca;
sx = t0;
shx = t2;
tx = t4;
return *this;
}
//------------------------------------------------------------------------
inline const trans_affine& trans_affine::scale(double x, double y)
{
double mm0 = x; // Possible hint for the optimizer
double mm3 = y;
sx *= mm0;
shx *= mm0;
tx *= mm0;
shy *= mm3;
sy *= mm3;
ty *= mm3;
return *this;
}
//------------------------------------------------------------------------
inline const trans_affine& trans_affine::scale(double s)
{
double m = s; // Possible hint for the optimizer
sx *= m;
shx *= m;
tx *= m;
shy *= m;
sy *= m;
ty *= m;
return *this;
}
//------------------------------------------------------------------------
inline const trans_affine& trans_affine::premultiply(const trans_affine& m)
{
trans_affine t = m;
return *this = t.multiply(*this);
}
//------------------------------------------------------------------------
inline const trans_affine& trans_affine::multiply_inv(const trans_affine& m)
{
trans_affine t = m;
t.invert();
return multiply(t);
}
//------------------------------------------------------------------------
inline const trans_affine& trans_affine::premultiply_inv(const trans_affine& m)
{
trans_affine t = m;
t.invert();
return *this = t.multiply(*this);
}
//------------------------------------------------------------------------
inline void trans_affine::scaling_abs(double* x, double* y) const
{
// Used to calculate scaling coefficients in image resampling.
// When there is considerable shear this method gives us much
// better estimation than just sx, sy.
*x = sqrt(sx * sx + shx * shx);
*y = sqrt(shy * shy + sy * sy);
}
//====================================================trans_affine_rotation
// Rotation matrix. sin() and cos() are calculated twice for the same angle.
// There's no harm because the performance of sin()/cos() is very good on all
// modern processors. Besides, this operation is not going to be invoked too
// often.
class trans_affine_rotation : public trans_affine
{
public:
trans_affine_rotation(double a) :
trans_affine(cos(a), sin(a), -sin(a), cos(a), 0.0, 0.0)
{}
};
//====================================================trans_affine_scaling
// Scaling matrix. x, y - scale coefficients by X and Y respectively
class trans_affine_scaling : public trans_affine
{
public:
trans_affine_scaling(double x, double y) :
trans_affine(x, 0.0, 0.0, y, 0.0, 0.0)
{}
trans_affine_scaling(double s) :
trans_affine(s, 0.0, 0.0, s, 0.0, 0.0)
{}
};
//================================================trans_affine_translation
// Translation matrix
class trans_affine_translation : public trans_affine
{
public:
trans_affine_translation(double x, double y) :
trans_affine(1.0, 0.0, 0.0, 1.0, x, y)
{}
};
//====================================================trans_affine_skewing
    // Skewing (shear) matrix
class trans_affine_skewing : public trans_affine
{
public:
trans_affine_skewing(double x, double y) :
trans_affine(1.0, tan(y), tan(x), 1.0, 0.0, 0.0)
{}
};
//===============================================trans_affine_line_segment
// Rotate, Scale and Translate, associating 0...dist with line segment
// x1,y1,x2,y2
class trans_affine_line_segment : public trans_affine
{
public:
trans_affine_line_segment(double x1, double y1, double x2, double y2,
double dist)
{
double dx = x2 - x1;
double dy = y2 - y1;
if(dist > 0.0)
{
multiply(trans_affine_scaling(sqrt(dx * dx + dy * dy) / dist));
}
multiply(trans_affine_rotation(atan2(dy, dx)));
multiply(trans_affine_translation(x1, y1));
}
};
//============================================trans_affine_reflection_unit
// Reflection matrix. Reflect coordinates across the line through
// the origin containing the unit vector (ux, uy).
// Contributed by John Horigan
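    // In matrix form this is the standard reflection R = 2*u*u^T - I for the
    // unit vector u = (ux, uy), i.e. [2*ux*ux-1, 2*ux*uy; 2*ux*uy, 2*uy*uy-1]
    // with no translation, which is exactly what the constructor below builds.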
class trans_affine_reflection_unit : public trans_affine
{
public:
trans_affine_reflection_unit(double ux, double uy) :
trans_affine(2.0 * ux * ux - 1.0,
2.0 * ux * uy,
2.0 * ux * uy,
2.0 * uy * uy - 1.0,
0.0, 0.0)
{}
};
//=================================================trans_affine_reflection
// Reflection matrix. Reflect coordinates across the line through
// the origin at the angle a or containing the non-unit vector (x, y).
// Contributed by John Horigan
class trans_affine_reflection : public trans_affine_reflection_unit
{
public:
trans_affine_reflection(double a) :
trans_affine_reflection_unit(cos(a), sin(a))
{}
trans_affine_reflection(double x, double y) :
trans_affine_reflection_unit(x / sqrt(x * x + y * y), y / sqrt(x * x + y * y))
{}
};
}
#endif
| 0 |
D://workCode//uploadProject\awtk\3rd\agg | D://workCode//uploadProject\awtk\3rd\agg\include\agg_trans_viewport.h | //----------------------------------------------------------------------------
// Anti-Grain Geometry - Version 2.4
// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
//
// Permission to copy, use, modify, sell and distribute this software
// is granted provided this copyright notice appears in all copies.
// This software is provided "as is" without express or implied
// warranty, and with no claim as to its suitability for any purpose.
//
//----------------------------------------------------------------------------
// Contact: mcseem@antigrain.com
// mcseemagg@yahoo.com
// http://www.antigrain.com
//----------------------------------------------------------------------------
//
// Viewport transformer - simple orthogonal conversions from world coordinates
// to screen (device) ones.
//
//----------------------------------------------------------------------------
#ifndef AGG_TRANS_VIEWPORT_INCLUDED
#define AGG_TRANS_VIEWPORT_INCLUDED
#include <string.h>
#include "agg_trans_affine.h"
namespace agg
{
enum aspect_ratio_e
{
aspect_ratio_stretch,
aspect_ratio_meet,
aspect_ratio_slice
};
//----------------------------------------------------------trans_viewport
class trans_viewport
{
public:
//-------------------------------------------------------------------
trans_viewport() :
m_world_x1(0.0),
m_world_y1(0.0),
m_world_x2(1.0),
m_world_y2(1.0),
m_device_x1(0.0),
m_device_y1(0.0),
m_device_x2(1.0),
m_device_y2(1.0),
m_aspect(aspect_ratio_stretch),
m_is_valid(true),
m_align_x(0.5),
m_align_y(0.5),
m_wx1(0.0),
m_wy1(0.0),
m_wx2(1.0),
m_wy2(1.0),
m_dx1(0.0),
m_dy1(0.0),
m_kx(1.0),
m_ky(1.0)
{}
//-------------------------------------------------------------------
void preserve_aspect_ratio(double alignx,
double aligny,
aspect_ratio_e aspect)
{
m_align_x = alignx;
m_align_y = aligny;
m_aspect = aspect;
update();
}
//-------------------------------------------------------------------
void device_viewport(double x1, double y1, double x2, double y2)
{
m_device_x1 = x1;
m_device_y1 = y1;
m_device_x2 = x2;
m_device_y2 = y2;
update();
}
//-------------------------------------------------------------------
void world_viewport(double x1, double y1, double x2, double y2)
{
m_world_x1 = x1;
m_world_y1 = y1;
m_world_x2 = x2;
m_world_y2 = y2;
update();
}
//-------------------------------------------------------------------
void device_viewport(double* x1, double* y1, double* x2, double* y2) const
{
*x1 = m_device_x1;
*y1 = m_device_y1;
*x2 = m_device_x2;
*y2 = m_device_y2;
}
//-------------------------------------------------------------------
void world_viewport(double* x1, double* y1, double* x2, double* y2) const
{
*x1 = m_world_x1;
*y1 = m_world_y1;
*x2 = m_world_x2;
*y2 = m_world_y2;
}
//-------------------------------------------------------------------
void world_viewport_actual(double* x1, double* y1,
double* x2, double* y2) const
{
*x1 = m_wx1;
*y1 = m_wy1;
*x2 = m_wx2;
*y2 = m_wy2;
}
//-------------------------------------------------------------------
bool is_valid() const { return m_is_valid; }
double align_x() const { return m_align_x; }
double align_y() const { return m_align_y; }
aspect_ratio_e aspect_ratio() const { return m_aspect; }
//-------------------------------------------------------------------
void transform(double* x, double* y) const
{
*x = (*x - m_wx1) * m_kx + m_dx1;
*y = (*y - m_wy1) * m_ky + m_dy1;
}
//-------------------------------------------------------------------
void transform_scale_only(double* x, double* y) const
{
*x *= m_kx;
*y *= m_ky;
}
//-------------------------------------------------------------------
void inverse_transform(double* x, double* y) const
{
*x = (*x - m_dx1) / m_kx + m_wx1;
*y = (*y - m_dy1) / m_ky + m_wy1;
}
//-------------------------------------------------------------------
void inverse_transform_scale_only(double* x, double* y) const
{
*x /= m_kx;
*y /= m_ky;
}
//-------------------------------------------------------------------
double device_dx() const { return m_dx1 - m_wx1 * m_kx; }
double device_dy() const { return m_dy1 - m_wy1 * m_ky; }
//-------------------------------------------------------------------
double scale_x() const
{
return m_kx;
}
//-------------------------------------------------------------------
double scale_y() const
{
return m_ky;
}
//-------------------------------------------------------------------
double scale() const
{
return (m_kx + m_ky) * 0.5;
}
//-------------------------------------------------------------------
trans_affine to_affine() const
{
trans_affine mtx = trans_affine_translation(-m_wx1, -m_wy1);
mtx *= trans_affine_scaling(m_kx, m_ky);
mtx *= trans_affine_translation(m_dx1, m_dy1);
return mtx;
}
//-------------------------------------------------------------------
trans_affine to_affine_scale_only() const
{
return trans_affine_scaling(m_kx, m_ky);
}
//-------------------------------------------------------------------
unsigned byte_size() const
{
return sizeof(*this);
}
void serialize(int8u* ptr) const
{
memcpy(ptr, this, sizeof(*this));
}
void deserialize(const int8u* ptr)
{
memcpy(this, ptr, sizeof(*this));
}
private:
void update();
double m_world_x1;
double m_world_y1;
double m_world_x2;
double m_world_y2;
double m_device_x1;
double m_device_y1;
double m_device_x2;
double m_device_y2;
aspect_ratio_e m_aspect;
bool m_is_valid;
double m_align_x;
double m_align_y;
double m_wx1;
double m_wy1;
double m_wx2;
double m_wy2;
double m_dx1;
double m_dy1;
double m_kx;
double m_ky;
};
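    //
    // Example (an illustrative sketch): fit a world rectangle into a device
    // rectangle, preserving the aspect ratio and centering the result, then
    // hand the mapping to the rest of the pipeline as an affine matrix:
    //
    //     agg::trans_viewport vp;
    //     vp.preserve_aspect_ratio(0.5, 0.5, agg::aspect_ratio_meet);
    //     vp.world_viewport(x1, y1, x2, y2);
    //     vp.device_viewport(0.0, 0.0, width, height);
    //     agg::trans_affine mtx = vp.to_affine();
    //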
//-----------------------------------------------------------------------
inline void trans_viewport::update()
{
const double epsilon = 1e-30;
if(fabs(m_world_x1 - m_world_x2) < epsilon ||
fabs(m_world_y1 - m_world_y2) < epsilon ||
fabs(m_device_x1 - m_device_x2) < epsilon ||
fabs(m_device_y1 - m_device_y2) < epsilon)
{
m_wx1 = m_world_x1;
m_wy1 = m_world_y1;
m_wx2 = m_world_x1 + 1.0;
            m_wy2 = m_world_y1 + 1.0;
m_dx1 = m_device_x1;
m_dy1 = m_device_y1;
m_kx = 1.0;
m_ky = 1.0;
m_is_valid = false;
return;
}
double world_x1 = m_world_x1;
double world_y1 = m_world_y1;
double world_x2 = m_world_x2;
double world_y2 = m_world_y2;
double device_x1 = m_device_x1;
double device_y1 = m_device_y1;
double device_x2 = m_device_x2;
double device_y2 = m_device_y2;
if(m_aspect != aspect_ratio_stretch)
{
double d;
m_kx = (device_x2 - device_x1) / (world_x2 - world_x1);
m_ky = (device_y2 - device_y1) / (world_y2 - world_y1);
if((m_aspect == aspect_ratio_meet) == (m_kx < m_ky))
{
d = (world_y2 - world_y1) * m_ky / m_kx;
world_y1 += (world_y2 - world_y1 - d) * m_align_y;
world_y2 = world_y1 + d;
}
else
{
d = (world_x2 - world_x1) * m_kx / m_ky;
world_x1 += (world_x2 - world_x1 - d) * m_align_x;
world_x2 = world_x1 + d;
}
}
m_wx1 = world_x1;
m_wy1 = world_y1;
m_wx2 = world_x2;
m_wy2 = world_y2;
m_dx1 = device_x1;
m_dy1 = device_y1;
m_kx = (device_x2 - device_x1) / (world_x2 - world_x1);
m_ky = (device_y2 - device_y1) / (world_y2 - world_y1);
m_is_valid = true;
}
}
#endif
| 0 |
D://workCode//uploadProject\awtk\3rd\agg | D://workCode//uploadProject\awtk\3rd\agg\include\agg_vcgen_stroke.h | //----------------------------------------------------------------------------
// Anti-Grain Geometry - Version 2.4
// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
//
// Permission to copy, use, modify, sell and distribute this software
// is granted provided this copyright notice appears in all copies.
// This software is provided "as is" without express or implied
// warranty, and with no claim as to its suitability for any purpose.
//
//----------------------------------------------------------------------------
// Contact: mcseem@antigrain.com
// mcseemagg@yahoo.com
// http://www.antigrain.com
//----------------------------------------------------------------------------
#ifndef AGG_VCGEN_STROKE_INCLUDED
#define AGG_VCGEN_STROKE_INCLUDED
#include "agg_math_stroke.h"
namespace agg
{
//============================================================vcgen_stroke
//
// See Implementation agg_vcgen_stroke.cpp
// Stroke generator
//
//------------------------------------------------------------------------
class vcgen_stroke
{
enum status_e
{
initial,
ready,
cap1,
cap2,
outline1,
close_first,
outline2,
out_vertices,
end_poly1,
end_poly2,
stop
};
public:
typedef vertex_sequence<vertex_dist, 6> vertex_storage;
typedef pod_bvector<point_d, 6> coord_storage;
vcgen_stroke();
void line_cap(line_cap_e lc) { m_stroker.line_cap(lc); }
void line_join(line_join_e lj) { m_stroker.line_join(lj); }
void inner_join(inner_join_e ij) { m_stroker.inner_join(ij); }
line_cap_e line_cap() const { return m_stroker.line_cap(); }
line_join_e line_join() const { return m_stroker.line_join(); }
inner_join_e inner_join() const { return m_stroker.inner_join(); }
void width(double w) { m_stroker.width(w); }
void miter_limit(double ml) { m_stroker.miter_limit(ml); }
void miter_limit_theta(double t) { m_stroker.miter_limit_theta(t); }
void inner_miter_limit(double ml) { m_stroker.inner_miter_limit(ml); }
void approximation_scale(double as) { m_stroker.approximation_scale(as); }
double width() const { return m_stroker.width(); }
double miter_limit() const { return m_stroker.miter_limit(); }
double inner_miter_limit() const { return m_stroker.inner_miter_limit(); }
double approximation_scale() const { return m_stroker.approximation_scale(); }
void shorten(double s) { m_shorten = s; }
double shorten() const { return m_shorten; }
// Vertex Generator Interface
void remove_all();
void add_vertex(double x, double y, unsigned cmd);
// Vertex Source Interface
void rewind(unsigned path_id);
unsigned vertex(double* x, double* y);
private:
vcgen_stroke(const vcgen_stroke&);
const vcgen_stroke& operator = (const vcgen_stroke&);
math_stroke<coord_storage> m_stroker;
vertex_storage m_src_vertices;
coord_storage m_out_vertices;
double m_shorten;
unsigned m_closed;
status_e m_status;
status_e m_prev_status;
unsigned m_src_vertex;
unsigned m_out_vertex;
};
}
#endif
| 0 |
D://workCode//uploadProject\awtk\3rd\agg | D://workCode//uploadProject\awtk\3rd\agg\include\agg_vertex_sequence.h | //----------------------------------------------------------------------------
// Anti-Grain Geometry - Version 2.4
// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
//
// Permission to copy, use, modify, sell and distribute this software
// is granted provided this copyright notice appears in all copies.
// This software is provided "as is" without express or implied
// warranty, and with no claim as to its suitability for any purpose.
//
//----------------------------------------------------------------------------
// Contact: mcseem@antigrain.com
// mcseemagg@yahoo.com
// http://www.antigrain.com
//----------------------------------------------------------------------------
//
// vertex_sequence container and vertex_dist struct
//
//----------------------------------------------------------------------------
#ifndef AGG_VERTEX_SEQUENCE_INCLUDED
#define AGG_VERTEX_SEQUENCE_INCLUDED
#include "agg_basics.h"
#include "agg_array.h"
#include "agg_math.h"
namespace agg
{
//----------------------------------------------------------vertex_sequence
// Modified agg::pod_bvector. The data is interpreted as a sequence
// of vertices. It means that the type T must expose:
//
// bool T::operator() (const T& val)
//
// that is called every time a new vertex is added. The main purpose of this
// operator is to calculate some values while the vertex is being added and to
// return true if the vertex fits some criteria, or false if it doesn't. In the
// latter case the new vertex is not added.
//
// A simple example is filtering out coinciding vertices while calculating the
// distance between the current vertex and the previous one:
//
// struct vertex_dist
// {
// double x;
// double y;
// double dist;
//
// vertex_dist() {}
// vertex_dist(double x_, double y_) :
// x(x_),
// y(y_),
// dist(0.0)
// {
// }
//
// bool operator () (const vertex_dist& val)
// {
// return (dist = calc_distance(x, y, val.x, val.y)) > EPSILON;
// }
// };
//
// Function close() calls this operator and removes the last vertex if
// necessary.
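//
// Usage sketch (illustrative only, not part of the original header):
//
// vertex_sequence<vertex_dist, 6> seq;
// seq.add(vertex_dist(0.0, 0.0));
// seq.add(vertex_dist(0.0, 0.0)); // coincides with the previous vertex...
// seq.add(vertex_dist(10.0, 0.0)); // ...so it is dropped when this one arrives
// seq.close(true); // finally drops a trailing vertex coinciding with the first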
//------------------------------------------------------------------------
template<class T, unsigned S=6>
class vertex_sequence : public pod_bvector<T, S>
{
public:
typedef pod_bvector<T, S> base_type;
void add(const T& val);
void modify_last(const T& val);
void close(bool closed);
};
//------------------------------------------------------------------------
template<class T, unsigned S>
void vertex_sequence<T, S>::add(const T& val)
{
if(base_type::size() > 1)
{
if(!(*this)[base_type::size() - 2]((*this)[base_type::size() - 1]))
{
base_type::remove_last();
}
}
base_type::add(val);
}
//------------------------------------------------------------------------
template<class T, unsigned S>
void vertex_sequence<T, S>::modify_last(const T& val)
{
base_type::remove_last();
add(val);
}
//------------------------------------------------------------------------
template<class T, unsigned S>
void vertex_sequence<T, S>::close(bool closed)
{
while(base_type::size() > 1)
{
if((*this)[base_type::size() - 2]((*this)[base_type::size() - 1])) break;
T t = (*this)[base_type::size() - 1];
base_type::remove_last();
modify_last(t);
}
if(closed)
{
while(base_type::size() > 1)
{
if((*this)[base_type::size() - 1]((*this)[0])) break;
base_type::remove_last();
}
}
}
//-------------------------------------------------------------vertex_dist
// Vertex (x, y) with the distance to the next one. For a closed polygon the
// last vertex stores the distance to the first point; for a polyline it
// stores 0.0.
struct vertex_dist
{
double x;
double y;
double dist;
vertex_dist() {}
vertex_dist(double x_, double y_) :
x(x_),
y(y_),
dist(0.0)
{
}
bool operator () (const vertex_dist& val)
{
bool ret = (dist = calc_distance(x, y, val.x, val.y)) > vertex_dist_epsilon;
if(!ret) dist = 1.0 / vertex_dist_epsilon;
return ret;
}
};
//--------------------------------------------------------vertex_dist_cmd
// Same as the above but with an additional "command" value
struct vertex_dist_cmd : public vertex_dist
{
unsigned cmd;
vertex_dist_cmd() {}
vertex_dist_cmd(double x_, double y_, unsigned cmd_) :
vertex_dist(x_, y_),
cmd(cmd_)
{
}
};
}
#endif
| 0 |
D://workCode//uploadProject\awtk\3rd\agge | D://workCode//uploadProject\awtk\3rd\agge\agge\bitmap.h | #pragma once
#include "pixel.h"
#include "tools.h"
namespace agge {
template <typename PixelT>
struct pixel_bpp {};
template <typename PixelT, typename RawBitmapT>
class bitmap : public RawBitmapT {
public:
typedef PixelT pixel;
public:
bitmap(count_t width, count_t height, count_t stride, uint8_t* data);
bitmap(count_t width, count_t height, count_t stride, count_t flags, uint8_t* data);
bitmap(count_t width, count_t height, count_t stride, count_t flags, count_t orientation, uint8_t* data);
pixel* row_ptr(count_t y);
const pixel* row_ptr(count_t y) const;
};
template <>
struct pixel_bpp<pixel32_rgba> {
static const bits_per_pixel bpp = bpp32;
};
template <>
struct pixel_bpp<pixel32_abgr> {
static const bits_per_pixel bpp = bpp32;
};
template <>
struct pixel_bpp<pixel32_bgra> {
static const bits_per_pixel bpp = bpp32;
};
template <>
struct pixel_bpp<pixel32_argb> {
static const bits_per_pixel bpp = bpp32;
};
template <>
struct pixel_bpp<pixel24_rgb> {
static const bits_per_pixel bpp = bpp24;
};
template <>
struct pixel_bpp<pixel24_bgr> {
static const bits_per_pixel bpp = bpp24;
};
template <>
struct pixel_bpp<pixel16_rgb565> {
static const bits_per_pixel bpp = bpp16;
};
template <>
struct pixel_bpp<pixel16_bgr565> {
static const bits_per_pixel bpp = bpp16;
};
template <>
struct pixel_bpp<uint8_t> {
static const bits_per_pixel bpp = bpp8;
};
template <>
struct pixel_bpp<pixel8> {
static const bits_per_pixel bpp = bpp8;
};
template <typename PixelT, typename RawBitmapT>
inline bitmap<PixelT, RawBitmapT>::bitmap(count_t width, count_t height, count_t stride, uint8_t* data)
: RawBitmapT(width, height, stride, 0, pixel_bpp<PixelT>::bpp, data) {
}
template <typename PixelT, typename RawBitmapT>
inline bitmap<PixelT, RawBitmapT>::bitmap(count_t width, count_t height, count_t stride, count_t flags, uint8_t* data)
: RawBitmapT(width, height, stride, flags, pixel_bpp<PixelT>::bpp, data) {
}
template <typename PixelT, typename RawBitmapT>
inline bitmap<PixelT, RawBitmapT>::bitmap(count_t width, count_t height, count_t stride, count_t flags, count_t orientation, uint8_t* data)
: RawBitmapT(width, height, stride, flags, orientation, pixel_bpp<PixelT>::bpp, data) {
}
template <typename PixelT, typename RawBitmapT>
inline typename bitmap<PixelT, RawBitmapT>::pixel* bitmap<PixelT, RawBitmapT>::row_ptr(count_t y) {
return static_cast<pixel*>(RawBitmapT::row_ptr(y));
}
template <typename PixelT, typename RawBitmapT>
inline const typename bitmap<PixelT, RawBitmapT>::pixel* bitmap<PixelT, RawBitmapT>::row_ptr(
count_t y) const {
return static_cast<const pixel*>(RawBitmapT::row_ptr(y));
}
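// copy() transfers a width x height block of pixels from (src_x, src_y) in src
// to (dest_x, dest_y) in dest. Negative source or destination origins shrink
// and shift the block accordingly, and the block is clamped to the dimensions
// of both bitmaps. Pixels are assigned directly, so the two bitmaps are
// expected to use the same (or assignment-compatible) pixel type.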
template <typename SrcBitmapT, typename DestBitmapT>
inline void copy(const SrcBitmapT& src, int src_x, int src_y, DestBitmapT& dest, int dest_x,
int dest_y, count_t width, count_t height) {
if (src_x < 0) width += src_x, dest_x -= src_x, src_x = 0;
if (src_y < 0) height += src_y, dest_y -= src_y, src_y = 0;
if (dest_x < 0) width += dest_x, src_x -= dest_x, dest_x = 0;
if (dest_y < 0) height += dest_y, src_y -= dest_y, dest_y = 0;
width = agge_min(width, agge_min(src.width() - src_x, dest.width() - dest_x));
height = agge_min(height, agge_min(src.height() - src_y, dest.height() - dest_y));
for (count_t y = 0; y < height; ++y) {
const typename SrcBitmapT::pixel* src_pixel = src.row_ptr(y + src_y) + src_x;
typename DestBitmapT::pixel* dest_pixel = dest.row_ptr(y + dest_y) + dest_x;
for (count_t i = 0; i < width; ++i, ++src_pixel, ++dest_pixel) *dest_pixel = *src_pixel;
}
}
} // namespace agge
| 0 |
D://workCode//uploadProject\awtk\3rd\agge | D://workCode//uploadProject\awtk\3rd\agge\agge\blenders_generic.h | #pragma once
#include "pixel.h"
namespace agge {
template <typename PixelT>
class blender_solid_color_rgb {
public:
typedef PixelT pixel;
typedef uint8_t cover_type;
public:
blender_solid_color_rgb(uint8_t r, uint8_t g, uint8_t b, uint8_t a = 0xFF);
void operator()(pixel* pixels, int x, int y, count_t n) const;
void operator()(pixel* pixels, int x, int y, count_t n, const cover_type* covers) const;
private:
uint8_t _r, _g, _b, _a;
};
template <typename PixelT>
inline blender_solid_color_rgb<PixelT>::blender_solid_color_rgb(uint8_t r, uint8_t g, uint8_t b,
uint8_t a)
: _r(r), _g(g), _b(b), _a(a) {
}
template <typename PixelT>
inline void blender_solid_color_rgb<PixelT>::operator()(pixel* pixels, int /*x*/, int /*y*/,
count_t n) const {
pixel32_rgba p(_r, _g, _b, _a);
for (; n; --n, ++pixels) {
pixel_blend<PixelT, pixel32_rgba>(*pixels, p, p.a);
}
}
template <typename PixelT>
inline void blender_solid_color_rgb<PixelT>::operator()(pixel* pixels, int /*x*/, int /*y*/,
count_t n, const cover_type* covers) const {
pixel32_rgba p(_r, _g, _b, _a);
for (; n; --n, ++pixels, ++covers) {
uint8_t a = (_a * covers[0]) >> 8;
pixel_blend<PixelT, pixel32_rgba>(*pixels, p, a);
}
}
} // namespace agge
| 0 |
D://workCode//uploadProject\awtk\3rd\agge | D://workCode//uploadProject\awtk\3rd\agge\agge\blender_linear_gradient.h | #pragma once
#include <cassert>
#include <cstdio>
#include "pixel.h"
namespace agge {
template <typename PixelT>
class blender_linear_gradient {
public:
typedef PixelT pixel;
typedef uint8_t cover_type;
public:
blender_linear_gradient(float sx, float sy, float ex, float ey, pixel32_rgba sc, pixel32_rgba ec);
void operator()(pixel* pixels, int x, int y, count_t n, const cover_type* covers) const;
bool gradient(float factor, pixel32_rgba& c) const;
bool get_color(int x, int y, pixel32_rgba& c) const;
private:
float _sx;
float _sy;
float _ex;
float _ey;
float _dx;
float _dy;
float _dot_product_1;
float _xw;
float _yw;
pixel32_rgba _sc;
pixel32_rgba _ec;
};
template <typename PixelT>
inline blender_linear_gradient<PixelT>::blender_linear_gradient(float sx, float sy, float ex, float ey,
pixel32_rgba sc, pixel32_rgba ec) : _sx(sx), _sy(sy), _ex(ex), _ey(ey), _sc(sc), _ec(ec) {
if(sx == ex && sy == ey) {
assert(!"invalid params");
_ex = _sx + 1;
_ey = _sy + 1;
}
_dx = ex - sx;
_dy = ey - sy;
_dot_product_1 = 1/(_dx * _dx + _dy * _dy);
}
template <typename PixelT>
inline bool blender_linear_gradient<PixelT>::gradient(float factor, pixel32_rgba& c) const {
if(factor <= 0) {
c = _sc;
} else if(factor >= 1.0f) {
c = _ec;
} else {
c.r = _sc.r + (_ec.r - _sc.r) * factor;
c.g = _sc.g + (_ec.g - _sc.g) * factor;
c.b = _sc.b + (_ec.b - _sc.b) * factor;
c.a = _sc.a + (_ec.a - _sc.a) * factor;
}
return true;
}
template <typename PixelT>
inline bool blender_linear_gradient<PixelT>::get_color(int x, int y, pixel32_rgba& c) const {
if(_sx == _ex && _sy == _ey) {
c = _sc;
return true;
} else if(_sx == _ex) {
return this->gradient((y - _sy)/_dy, c);
} else if(_sy == _ey) {
return this->gradient((x - _sx)/_dx, c);
} else {
//https://github.com/SFML/SFML/wiki/Source:-Color-Gradient
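// General case: project the point onto the gradient axis. The interpolation
// factor is the normalized dot product ((x, y) - start) . (end - start) /
// |end - start|^2, which gradient() clamps to the start/end colors outside
// the [0, 1] range.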
// if(x < _sx || y < _sy) {
// c = _sc;
// } else if(x > _ex || y > _ey) {
// c = _ec;
// } else {
float dot_product = (x - _sx) * _dx + (y - _sy) * _dy;
float factor = dot_product * _dot_product_1;
return this->gradient(factor, c);
// }
}
return true;
}
template <typename PixelT>
inline void blender_linear_gradient<PixelT>::operator()(pixel* pixels, int x, int y,
count_t n, const cover_type* covers) const {
pixel32_rgba p(0, 0, 0, 0);
for (; n; --n, ++pixels, ++covers) {
if(this->get_color(x++, y, p)) {
uint8_t a = (*covers * p.a) >> 8;
pixel_blend<PixelT, pixel32_rgba>(*pixels, p, a);
}
}
}
} // namespace agge
| 0 |
D://workCode//uploadProject\awtk\3rd\agge | D://workCode//uploadProject\awtk\3rd\agge\agge\blender_radial_gradient.h | #pragma once
#include "pixel.h"
namespace agge {
template <typename PixelT>
class blender_radial_gradient {
public:
typedef PixelT pixel;
typedef uint8_t cover_type;
public:
blender_radial_gradient(float cx, float cy, float inr, float outr, pixel32_rgba sc, pixel32_rgba ec);
void operator()(pixel* pixels, int x, int y, count_t n, const cover_type* covers) const;
bool gradient(float factor, pixel32_rgba& c) const;
bool get_color(int x, int y, pixel32_rgba& c) const;
private:
float _cx;
float _cy;
float _inr;
float _outr;
pixel32_rgba _sc;
pixel32_rgba _ec;
};
template <typename PixelT>
inline blender_radial_gradient<PixelT>::blender_radial_gradient(float cx, float cy, float inr, float outr,
pixel32_rgba sc, pixel32_rgba ec) : _cx(cx), _cy(cy), _inr(inr), _outr(outr), _sc(sc), _ec(ec) {
}
template <typename PixelT>
inline bool blender_radial_gradient<PixelT>::gradient(float factor, pixel32_rgba& c) const {
if(factor <= 0) {
c = _sc;
} else if(factor >= 1.0f) {
c = _ec;
} else {
c.r = _sc.r + (_ec.r - _sc.r) * factor;
c.g = _sc.g + (_ec.g - _sc.g) * factor;
c.b = _sc.b + (_ec.b - _sc.b) * factor;
c.a = _sc.a + (_ec.a - _sc.a) * factor;
}
return true;
}
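// get_color() interpolates by the distance from the gradient center: points
// within the inner radius get the start color, points beyond the outer radius
// get the end color, and the ring in between is blended linearly.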
template <typename PixelT>
inline bool blender_radial_gradient<PixelT>::get_color(int x, int y, pixel32_rgba& c) const {
float dx = x - _cx;
float dy = y - _cy;
float r = sqrt(dx * dx + dy * dy);
if(r <= _inr) {
c = _sc;
} else if(r >= _outr) {
c = _ec;
} else {
float factor = (r - _inr)/(_outr - _inr);
this->gradient(factor, c);
}
return true;
}
template <typename PixelT>
inline void blender_radial_gradient<PixelT>::operator()(pixel* pixels, int x, int y,
count_t n, const cover_type* covers) const {
pixel32_rgba p(0, 0, 0, 0);
for (; n; --n, ++pixels, ++covers) {
if(this->get_color(x++, y, p)) {
uint8_t a = (*covers * p.a) >> 8;
pixel_blend<PixelT, pixel32_rgba>(*pixels, p, a);
}
}
}
} // namespace agge
| 0 |
D://workCode//uploadProject\awtk\3rd\agge | D://workCode//uploadProject\awtk\3rd\agge\agge\clipper.h | #pragma once
#include "math.h"
#include "types.h"
namespace agge
{
enum clipping_flags {
x1_clipped_shift = 2,
x2_clipped_shift = 0,
y1_clipped_shift = 3,
y2_clipped_shift = 1,
x1_clipped = 1 << x1_clipped_shift,
x2_clipped = 1 << x2_clipped_shift,
y1_clipped = 1 << y1_clipped_shift,
y2_clipped = 1 << y2_clipped_shift,
x_clipped = x1_clipped | x2_clipped,
y_clipped = y1_clipped | y2_clipped
};
template <typename T>
class clipper
{
public:
typedef T coord_type;
public:
clipper();
void reset();
void set(const rect<T> &window);
void move_to(T x, T y);
template <typename LinesSinkT>
void line_to(LinesSinkT &sink, T x, T y);
private:
template <typename LinesSinkT>
void line_clip_y(LinesSinkT &sink, T x1, T y1, T x2, T y2, int f1, int f2) const;
private:
rect<T> _window;
T _x1, _y1;
int _f1;
bool _enabled;
};
template <typename T>
inline int clipping_y(T y, const rect<T> &window)
{ return ((y < window.y1) << y1_clipped_shift) | ((y > window.y2) << y2_clipped_shift); }
template <typename T>
inline int clipping(T x, T y, const rect<T> &window)
{ return ((x < window.x1) << x1_clipped_shift) | ((x > window.x2) << x2_clipped_shift) | clipping_y(y, window); }
template <typename T>
inline clipper<T>::clipper()
: _enabled(false)
{
this->_x1 = 0;
this->_y1 = 0;
this->_f1 = 0;
this->_window.x1 = 0;
this->_window.x2 = 0;
this->_window.y1 = 0;
this->_window.y2 = 0;
}
template <typename T>
inline void clipper<T>::reset()
{ _enabled = false; }
template <typename T>
inline void clipper<T>::set(const rect<T> &window)
{
_window = window;
_enabled = true;
_f1 = clipping(_x1, _y1, _window);
}
template <typename T>
inline void clipper<T>::move_to(T x, T y)
{
_x1 = x;
_y1 = y;
_f1 = clipping(x, y, _window);
}
template <typename T>
template <typename LinesSinkT>
inline void clipper<T>::line_to(LinesSinkT &sink, T x2, T y2)
{
if (_enabled)
{
const int f2 = clipping(x2, y2, _window);
int f3, f4;
T y3, y4;
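// Dispatch on the x-clipping flags of both endpoints: _f1 holds the flags of
// the current point, f2 (shifted left by one) those of the new point. Each
// case splits the segment at the window's x bounds; the resulting pieces are
// then clipped against the y bounds by line_clip_y().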
switch ((_f1 & x_clipped) | (f2 & x_clipped) << 1)
{
case 0 | 0:
line_clip_y(sink, _x1, _y1, x2, y2, _f1, f2);
break;
case 0 | x2_clipped << 1:
y3 = _y1 + muldiv(_window.x2 - _x1, y2 - _y1, x2 - _x1);
f3 = clipping_y(y3, _window);
line_clip_y(sink, _x1, _y1, _window.x2, y3, _f1, f3);
line_clip_y(sink, _window.x2, y3, _window.x2, y2, f3, f2);
break;
case x2_clipped | 0:
y3 = _y1 + muldiv(_window.x2 - _x1, y2 - _y1, x2 - _x1);
f3 = clipping_y(y3, _window);
line_clip_y(sink, _window.x2, _y1, _window.x2, y3, _f1, f3);
line_clip_y(sink, _window.x2, y3, x2, y2, f3, f2);
break;
case x2_clipped | x2_clipped << 1:
line_clip_y(sink, _window.x2, _y1, _window.x2, y2, _f1, f2);
break;
case 0 | x1_clipped << 1:
y3 = _y1 + muldiv(_window.x1 - _x1, y2 - _y1, x2 - _x1);
f3 = clipping_y(y3, _window);
line_clip_y(sink, _x1, _y1, _window.x1, y3, _f1, f3);
line_clip_y(sink, _window.x1, y3, _window.x1, y2, f3, f2);
break;
case x2_clipped | x1_clipped << 1:
y3 = _y1 + muldiv(_window.x2 - _x1, y2 - _y1, x2 - _x1);
y4 = _y1 + muldiv(_window.x1 - _x1, y2 - _y1, x2 - _x1);
f3 = clipping_y(y3, _window);
f4 = clipping_y(y4, _window);
line_clip_y(sink, _window.x2, _y1, _window.x2, y3, _f1, f3);
line_clip_y(sink, _window.x2, y3, _window.x1, y4, f3, f4);
line_clip_y(sink, _window.x1, y4, _window.x1, y2, f4, f2);
break;
case x1_clipped | 0:
y3 = _y1 + muldiv(_window.x1 - _x1, y2 - _y1, x2 - _x1);
f3 = clipping_y(y3, _window);
line_clip_y(sink, _window.x1, _y1, _window.x1, y3, _f1, f3);
line_clip_y(sink, _window.x1, y3, x2, y2, f3, f2);
break;
case x1_clipped | x2_clipped << 1:
y3 = _y1 + muldiv(_window.x1 - _x1, y2 - _y1, x2 - _x1);
y4 = _y1 + muldiv(_window.x2 - _x1, y2 - _y1, x2 - _x1);
f3 = clipping_y(y3, _window);
f4 = clipping_y(y4, _window);
line_clip_y(sink, _window.x1, _y1, _window.x1, y3, _f1, f3);
line_clip_y(sink, _window.x1, y3, _window.x2, y4, f3, f4);
line_clip_y(sink, _window.x2, y4, _window.x2, y2, f4, f2);
break;
case x1_clipped | x1_clipped << 1:
line_clip_y(sink, _window.x1, _y1, _window.x1, y2, _f1, f2);
break;
}
_f1 = f2;
}
else
{
sink.line(_x1, _y1, x2, y2);
}
_x1 = x2;
_y1 = y2;
}
template <typename T>
template <typename LinesSinkT>
inline void clipper<T>::line_clip_y(LinesSinkT &sink, T x1, T y1, T x2, T y2, int f1, int f2) const
{
f1 &= y_clipped;
f2 &= y_clipped;
if (f1 | f2)
{
if (f1 == f2)
return;
coord_type tx1 = x1;
coord_type ty1 = y1;
coord_type tx2 = x2;
coord_type ty2 = y2;
if (f1 & y1_clipped)
{
tx1 = x1 + muldiv(_window.y1 - y1, x2 - x1, y2 - y1);
ty1 = _window.y1;
}
if (f1 & y2_clipped)
{
tx1 = x1 + muldiv(_window.y2 - y1, x2 - x1, y2 - y1);
ty1 = _window.y2;
}
if (f2 & y1_clipped)
{
tx2 = x1 + muldiv(_window.y1 - y1, x2 - x1, y2 - y1);
ty2 = _window.y1;
}
if (f2 & y2_clipped)
{
tx2 = x1 + muldiv(_window.y2 - y1, x2 - x1, y2 - y1);
ty2 = _window.y2;
}
sink.line(tx1, ty1, tx2, ty2);
}
else
{
sink.line(x1, y1, x2, y2);
}
}
}
| 0 |
D://workCode//uploadProject\awtk\3rd\agge | D://workCode//uploadProject\awtk\3rd\agge\agge\config.h | #pragma once
#if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86))
#define AGGE_INLINE __forceinline
#elif defined(__GNUC__) && (defined(__x86_64) || defined(__i386))
#define AGGE_INLINE __attribute__((always_inline)) inline
#else
#define AGGE_INLINE inline
#endif
#if defined(_M_IX86) || defined(__i386) || defined(_M_X64) || defined(__x86_64__)
#define AGGE_ARCH_INTEL
#elif defined(_M_ARM)
#define AGGE_ARCH_ARM _M_ARM
#elif defined(__arm__)
#if defined(__ARM_ARCH_7__)
#define AGGE_ARCH_ARM 7
#elif defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6T2__)
#define AGGE_ARCH_ARM 6
#elif defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5T__)
#define AGGE_ARCH_ARM 5
#else
#define AGGE_ARCH_ARM 1
#endif
#else
#define AGGE_ARCH_GENERIC
#endif
#if defined(__ANDROID__)
#define AGGE_PLATFORM_LINUX
#define AGGE_PLATFORM_ANDROID
#elif defined(__linux__)
#define AGGE_PLATFORM_LINUX
#elif defined(__APPLE__)
#define AGGE_PLATFORM_APPLE
#elif defined(_WIN32)
#define AGGE_PLATFORM_WINDOWS
#else
#define AGGE_PLATFORM_GENERIC
#endif
| 0 |
D://workCode//uploadProject\awtk\3rd\agge | D://workCode//uploadProject\awtk\3rd\agge\agge\filling_rules.h | #pragma once
#include "vector_rasterizer.h"
namespace agge
{
template <agge::uint8_t area_factor_shift = vector_rasterizer::_1_shift>
struct winding
{
uint8_t operator ()(int area) const;
};
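// Non-zero winding fill rule: the signed, fixed-point coverage coming from the
// scanline sweep is scaled down, its absolute value taken and the result
// clamped to the maximum cover value (vector_rasterizer::_1_mask).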
template <agge::uint8_t area_factor_shift>
inline uint8_t winding<area_factor_shift>::operator ()(int area) const
{
area >>= 1 + area_factor_shift;
if (area < 0)
area = -area;
if (area > vector_rasterizer::_1_mask)
area = vector_rasterizer::_1_mask;
return static_cast<uint8_t>(area);
}
}
| 0 |
D://workCode//uploadProject\awtk\3rd\agge | D://workCode//uploadProject\awtk\3rd\agge\agge\math.h | #pragma once
#include "types.h"
namespace agge
{
extern const real_t distance_epsilon;
extern const real_t pi;
template <typename T>
struct limits
{
static T resolution();
};
real_t sqrt(real_t x);
real_t sin(real_t a);
real_t cos(real_t a);
inline int iround(real_t v)
{ return static_cast<int>(v > real_t() ? v + static_cast<real_t>(0.5) : v - static_cast<real_t>(0.5)); }
inline int muldiv(int a, int b, int c)
{ return static_cast<int>(static_cast<long long>(a) * b / c); }
inline real_t muldiv(real_t a, real_t b, real_t c)
{ return a * b / c; }
inline real_t distance(real_t ax, real_t ay, real_t bx, real_t by)
{
bx -= ax;
by -= ay;
return sqrt(bx * bx + by * by);
}
inline real_t distance(const point_r &lhs, const point_r &rhs)
{ return distance(lhs.x, lhs.y, rhs.x, rhs.y); }
template <typename CoordT>
inline CoordT distance(const point<CoordT> &a, const point<CoordT> &b)
{ return distance(a.x, a.y, b.x, b.y); }
template <typename CoordT>
inline point<CoordT> operator +(const point<CoordT> &lhs, const agge_vector<CoordT> &rhs)
{
const point<CoordT> result = { lhs.x + rhs.dx, lhs.y + rhs.dy };
return result;
}
template <typename CoordT>
inline agge_vector<CoordT> operator -(const point<CoordT> &lhs, const point<CoordT> &rhs)
{
const agge_vector<CoordT> result = { lhs.x - rhs.x, lhs.y - rhs.y };
return result;
}
template <typename CoordT>
inline agge_vector<CoordT> operator *(CoordT lhs, const agge_vector<CoordT> &rhs)
{
const agge_vector<CoordT> result = { lhs * rhs.dx, lhs * rhs.dy };
return result;
}
template <typename CoordT>
inline agge_vector<CoordT> operator *(const agge_vector<CoordT> &lhs, CoordT rhs)
{
const agge_vector<CoordT> result = { rhs * lhs.dx, rhs * lhs.dy };
return result;
}
}
| 0 |
D://workCode//uploadProject\awtk\3rd\agge | D://workCode//uploadProject\awtk\3rd\agge\agge\memory.h | #pragma once
#include "types.h"
namespace agge
{
template <typename T>
void memset(T *buffer, T value, count_t count);
class raw_memory_object : noncopyable
{
public:
raw_memory_object();
~raw_memory_object();
template <typename T>
T *get(count_t size);
private:
uint8_t *_buffer;
count_t _size;
};
inline raw_memory_object::raw_memory_object()
: _buffer(0), _size(0)
{ }
inline raw_memory_object::~raw_memory_object()
{ delete []_buffer; }
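// get<T>() returns a scratch buffer of at least 'size' elements. The buffer is
// reallocated (and zero-filled) only when it has to grow; the old contents are
// discarded rather than copied, so callers must not expect data to survive
// between calls.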
template <typename T>
inline T *raw_memory_object::get(count_t size)
{
size *= sizeof(T);
size /= sizeof(uint8_t);
if (size > _size)
{
uint8_t *buffer = new uint8_t[size];
delete []_buffer;
_buffer = buffer;
_size = size;
memset(buffer, uint8_t(), size);
}
return reinterpret_cast<T *>(_buffer);
}
template <typename T>
inline void memset(T *buffer, T value, count_t count)
{
if (0 == (count & ~0x03))
{
// Short fills (count < 4) are unrolled; the remaining count is tested
// explicitly because count_t is unsigned and a chained "if (count--)" would
// wrap past zero and write too many elements for counts of 0 and 1.
if (count > 0)
*buffer++ = value;
if (count > 1)
*buffer++ = value;
if (count > 2)
*buffer++ = value;
}
else
{
while (count--)
*buffer++ = value;
}
}
}
| 0 |
D://workCode//uploadProject\awtk\3rd\agge | D://workCode//uploadProject\awtk\3rd\agge\agge\path.h | #pragma once
#include "types.h"
namespace agge
{
enum path_commands {
path_command_stop = 0x00,
path_command_move_to = 0x01,
path_command_line_to = 0x02,
path_command_end_poly = 0x10,
path_vertex_mask = 0x07,
path_command_mask = 0x1F
};
enum path_flags {
path_flag_close = 0x20,
path_flags_mask = 0xE0
};
template <typename SourceT, typename GeneratorT>
class path_generator_adapter
{
public:
path_generator_adapter(const SourceT &source, GeneratorT &generator);
void rewind(int /*path_id*/) { /*not implemented*/ }
int vertex(real_t *x, real_t *y);
private:
enum state { initial = 0, accumulate = 1, generate = 2, stage_mask = 3, complete = 4 };
private:
const path_generator_adapter &operator =(const path_generator_adapter &rhs);
void set_stage(state stage, bool force_complete = false);
private:
SourceT _source;
GeneratorT &_generator;
real_t _start_x, _start_y;
int _state;
};
template <typename PathIterator1T, typename PathIterator2T>
class joined_path
{
public:
joined_path(const PathIterator1T &path1, const PathIterator2T &path2);
void rewind(unsigned id);
int vertex(real_t *x, real_t *y);
private:
enum state { first_initial, first, second };
private:
PathIterator1T _path1;
PathIterator2T _path2;
state _state;
};
template <typename SourceT, typename GeneratorT>
path_generator_adapter<SourceT, GeneratorT> assist(const SourceT &source, GeneratorT &generator)
{ return path_generator_adapter<SourceT, GeneratorT>(source, generator); }
template <typename PathIterator1T, typename PathIterator2T>
joined_path<PathIterator1T, PathIterator2T> join(const PathIterator1T &path1, const PathIterator2T &path2)
{ return joined_path<PathIterator1T, PathIterator2T>(path1, path2); }
inline bool is_vertex(int c)
{ return 0 != (path_vertex_mask & c); }
inline bool is_end_poly(int c)
{ return path_command_end_poly == (path_command_mask & c); }
inline bool is_close(int c)
{ return 0 != (path_flag_close & c); }
template <typename AcceptorT>
inline void add_polyline_vertex(AcceptorT &acceptor, real_t x, real_t y, int command)
{
if (path_command_move_to == (path_vertex_mask & command))
acceptor.move_to(x, y);
else if (path_command_line_to == (path_vertex_mask & command))
acceptor.line_to(x, y);
if (is_close(command))
acceptor.close_polygon();
}
template <typename SinkT, typename PathIteratorT>
inline void add_path(SinkT &sink, PathIteratorT path)
{
real_t x, y;
path.rewind(0);
for (int command; command = path.vertex(&x, &y), path_command_stop != command; )
add_polyline_vertex(sink, x, y, command);
}
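// Illustrative note (the names below are placeholders, not part of this
// header): both helpers are normally consumed through add_path(). For example,
// add_path(ras, assist(flattened, stroke_gen)) pipes the vertices of
// 'flattened' through the generator 'stroke_gen' and feeds the result into the
// sink 'ras', while add_path(ras, join(p1, p2)) emits p1 followed by p2,
// turning p2's leading move_to into a line_to so the two read as one path.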
template <typename SourceT, typename GeneratorT>
inline path_generator_adapter<SourceT, GeneratorT>::path_generator_adapter(const SourceT &source, GeneratorT &generator)
: _source(source), _generator(generator), _state(initial)
{ }
template <typename SourceT, typename GeneratorT>
inline int path_generator_adapter<SourceT, GeneratorT>::vertex(real_t *x, real_t *y)
{
int command;
for (;;)
switch (_state & stage_mask)
{
case initial:
command = _source.vertex(&_start_x, &_start_y);
set_stage(accumulate, path_command_stop == command);
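// no break: fall through and start accumulating immediately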
case accumulate:
if (_state & complete)
return path_command_stop;
_generator.remove_all();
_generator.add_vertex(_start_x, _start_y, path_command_move_to);
for (;;)
{
real_t xx, yy;
command = _source.vertex(&xx, &yy);
if (path_command_move_to == command)
{
_start_x = xx;
_start_y = yy;
}
else if (path_command_stop != command)
{
_generator.add_vertex(xx, yy, command);
continue;
}
break;
}
set_stage(generate, path_command_stop == command);
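// no break: fall through and start generating immediately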
case generate:
command = _generator.vertex(x, y);
if (path_command_stop != command)
return command;
set_stage(accumulate);
}
}
template <typename SourceT, typename GeneratorT>
inline void path_generator_adapter<SourceT, GeneratorT>::set_stage(state stage, bool force_complete)
{ _state = (stage & stage_mask) | (force_complete ? complete : (_state & complete)); }
template <typename PathIterator1T, typename PathIterator2T>
inline joined_path<PathIterator1T, PathIterator2T>::joined_path(const PathIterator1T &path1, const PathIterator2T &path2)
: _path1(path1), _path2(path2), _state(first_initial)
{ }
template <typename PathIterator1T, typename PathIterator2T>
inline void joined_path<PathIterator1T, PathIterator2T>::rewind(unsigned /*id*/)
{
_state = first_initial;
_path1.rewind(0);
_path2.rewind(0);
}
template <typename PathIterator1T, typename PathIterator2T>
inline int joined_path<PathIterator1T, PathIterator2T>::vertex(real_t *x, real_t *y)
{
int command;
switch (_state)
{
case first_initial:
command = _path1.vertex(x, y);
if (command == path_command_stop)
_state = second;
else
return _state = first, command;
case second:
return _path2.vertex(x, y);
case first:
command = _path1.vertex(x, y);
if (command != path_command_stop)
return command;
_state = second;
command = _path2.vertex(x, y);
return command == path_command_move_to ? path_command_line_to : command;
}
return path_command_stop;
}
}
| 0 |
D://workCode//uploadProject\awtk\3rd\agge | D://workCode//uploadProject\awtk\3rd\agge\agge\pixel.h | #pragma once
#include "types.h"
namespace agge {
enum bits_per_pixel { bpp32 = 32, bpp24 = 24, bpp16 = 16, bpp8 = 8 };
#pragma pack(push, 1)
struct pixel32_rgba {
uint8_t r;
uint8_t g;
uint8_t b;
uint8_t a;
pixel32_rgba() : r(0), g(0), b(0), a(0) {
}
pixel32_rgba(uint8_t rr, uint8_t gg, uint8_t bb, uint8_t aa) : r(rr), g(gg), b(bb), a(aa) {
}
};
struct pixel32_abgr {
uint8_t a;
uint8_t b;
uint8_t g;
uint8_t r;
pixel32_abgr() : a(0), b(0), g(0), r(0) {
}
pixel32_abgr(uint8_t rr, uint8_t gg, uint8_t bb, uint8_t aa) : a(aa), b(bb), g(gg), r(rr) {
}
};
struct pixel32_bgra {
uint8_t b;
uint8_t g;
uint8_t r;
uint8_t a;
pixel32_bgra() : b(0), g(0), r(0), a(0) {
}
pixel32_bgra(uint8_t rr, uint8_t gg, uint8_t bb, uint8_t aa) : b(bb), g(gg), r(rr), a(aa) {
}
};
struct pixel32_argb {
uint8_t a;
uint8_t r;
uint8_t g;
uint8_t b;
pixel32_argb() : a(0), r(0), g(0), b(0) {
}
pixel32_argb(uint8_t rr, uint8_t gg, uint8_t bb, uint8_t aa) : a(aa), r(rr), g(gg), b(bb) {
}
};
struct pixel24_rgb {
uint8_t r;
uint8_t g;
uint8_t b;
pixel24_rgb() : r(0), g(0), b(0) {
}
pixel24_rgb(uint8_t rr, uint8_t gg, uint8_t bb) : r(rr), g(gg), b(bb) {
}
};
struct pixel24_bgr {
uint8_t b;
uint8_t g;
uint8_t r;
pixel24_bgr() : b(0), g(0), r(0) {
}
pixel24_bgr(uint8_t rr, uint8_t gg, uint8_t bb) : b(bb), g(gg), r(rr) {
}
};
struct pixel16_bgr565 {
uint16_t b : 5;
uint16_t g : 6;
uint16_t r : 5;
pixel16_bgr565() : b(0), g(0), r(0) {
}
pixel16_bgr565(uint8_t rr, uint8_t gg, uint8_t bb) : b(bb), g(gg), r(rr) {
}
};
struct pixel16_rgb565 {
uint16_t r : 5;
uint16_t g : 6;
uint16_t b : 5;
pixel16_rgb565() : r(0), g(0), b(0) {
}
pixel16_rgb565(uint8_t rr, uint8_t gg, uint8_t bb) : r(rr), g(gg), b(bb) {
}
};
struct pixel8 {
uint8_t a;
pixel8() : a(0) {
}
pixel8(uint8_t aa) : a(aa) {
}
};
#pragma pack(pop)
#include "pixel_a.h"
#include "pixel_set_a.h"
#include "pixel_convert.h"
template <typename PixelTargetT, typename PixelSrcT>
inline void pixel_blend(PixelTargetT& t, const PixelSrcT& s, uint8_t a) {
if (a > 0xf4) {
if (sizeof(t) == sizeof(s) || sizeof(t) == 4 || sizeof(t) == 3) {
t.r = s.r;
t.g = s.g;
t.b = s.b;
} else {
t.r = s.r >> 3;
t.g = s.g >> 2;
t.b = s.b >> 3;
}
} else if (a > 0x01) {
uint8_t m_a = 0xff - a;
if (sizeof(t) == 2) {
if (sizeof(s) == 2) {
t.r = s.r;
t.g = s.g;
t.b = s.b;
} else {
t.r = (s.r * a + (t.r << 3) * m_a) >> 11;
t.g = (s.g * a + (t.g << 2) * m_a) >> 10;
t.b = (s.b * a + (t.b << 3) * m_a) >> 11;
}
} else if (sizeof(s) == 2) {
if (sizeof(t) == 2) {
t.r = s.r;
t.g = s.g;
t.b = s.b;
} else {
t.r = ((s.r << 3) * a + t.r * m_a) >> 11;
t.g = ((s.g << 2) * a + t.g * m_a) >> 10;
t.b = ((s.b << 3) * a + t.b * m_a) >> 11;
}
} else {
t.r = (s.r * a + t.r * m_a) >> 8;
t.g = (s.g * a + t.g * m_a) >> 8;
t.b = (s.b * a + t.b * m_a) >> 8;
}
}
}
template <>
inline void pixel_blend(pixel16_bgr565& t, const pixel32_rgba& s, uint8_t a) {
if (a > 0xf4) {
t.r = s.r >> 3;
t.g = s.g >> 2;
t.b = s.b >> 3;
} else if (a > 0x01) {
uint8_t m_a = 0xff - a;
t.r = (s.r * a + (t.r << 3) * m_a) >> 11;
t.g = (s.g * a + (t.g << 2) * m_a) >> 10;
t.b = (s.b * a + (t.b << 3) * m_a) >> 11;
}
}
template <>
inline void pixel_blend(pixel16_rgb565& t, const pixel32_rgba& s, uint8_t a) {
if (a > 0xf4) {
t.r = s.r >> 3;
t.g = s.g >> 2;
t.b = s.b >> 3;
} else if (a > 0x01) {
uint8_t m_a = 0xff - a;
t.r = (s.r * a + (t.r << 3) * m_a) >> 11;
t.g = (s.g * a + (t.g << 2) * m_a) >> 10;
t.b = (s.b * a + (t.b << 3) * m_a) >> 11;
}
}
template <>
inline void pixel_blend(pixel24_bgr& t, const pixel32_rgba& s, uint8_t a) {
if (a > 0xf4) {
t.r = s.r;
t.g = s.g;
t.b = s.b;
} else if (a > 0x01) {
uint8_t m_a = 0xff - a;
t.r = (s.r * a + t.r * m_a) >> 8;
t.g = (s.g * a + t.g * m_a) >> 8;
t.b = (s.b * a + t.b * m_a) >> 8;
}
}
template <>
inline void pixel_blend(pixel24_rgb& t, const pixel32_rgba& s, uint8_t a) {
if (a > 0xf4) {
t.r = s.r;
t.g = s.g;
t.b = s.b;
} else if (a > 0x01) {
uint8_t m_a = 0xff - a;
t.r = (s.r * a + t.r * m_a) >> 8;
t.g = (s.g * a + t.g * m_a) >> 8;
t.b = (s.b * a + t.b * m_a) >> 8;
}
}
static inline uint8_t pixel_limit_uint8(int tmp) {
if(tmp > 0xff) {
tmp = 0xff;
} else if(tmp < 0) {
tmp = 0;
}
return (uint8_t)tmp;
}
template <>
inline void pixel_blend(pixel32_rgba& t, const pixel32_rgba& s, uint8_t a) {
if (a > 0xf4) {
t.r = s.r;
t.g = s.g;
t.b = s.b;
t.a = a;
} else if (a > 0x01) {
uint8_t m_a = 0xff - a;
if(t.a > 0xf4) {
t.r = (s.r * a + t.r * m_a) >> 8;
t.g = (s.g * a + t.g * m_a) >> 8;
t.b = (s.b * a + t.b * m_a) >> 8;
} else {
uint8_t out_a = pixel_limit_uint8(a + t.a - ((a * t.a) >> 8));
if(out_a > 0) {
uint8_t d_a = (t.a * (0xff - a)) >> 8;
t.r = (s.r * a + t.r * d_a) / out_a;
t.g = (s.g * a + t.g * d_a) / out_a;
t.b = (s.b * a + t.b * d_a) / out_a;
}
t.a = out_a;
}
}
}
template <>
inline void pixel_blend(pixel32_bgra& t, const pixel32_rgba& s, uint8_t a) {
if (a > 0xf4) {
t.r = s.r;
t.g = s.g;
t.b = s.b;
t.a = a;
} else if (a > 0x01) {
if(t.a > 0xf4) {
uint8_t m_a = 0xff - a;
t.r = (s.r * a + t.r * m_a) >> 8;
t.g = (s.g * a + t.g * m_a) >> 8;
t.b = (s.b * a + t.b * m_a) >> 8;
} else {
uint8_t out_a = pixel_limit_uint8(a + t.a - ((a * t.a) >> 8));
if(out_a > 0) {
uint8_t d_a = (t.a * (0xff - a)) >> 8;
t.r = (s.r * a + t.r * d_a) / out_a;
t.g = (s.g * a + t.g * d_a) / out_a;
t.b = (s.b * a + t.b * d_a) / out_a;
}
t.a = out_a;
}
}
}
template <typename PixelTargetT, typename PixelSrcT>
inline void pixel_blend_premulti_alpha(PixelTargetT& t, const PixelSrcT& s, uint8_t a, uint8_t pa) {
if (a > 0xf4) {
if (sizeof(t) == sizeof(s) || sizeof(t) == 4 || sizeof(t) == 3) {
t.r = s.r;
t.g = s.g;
t.b = s.b;
} else {
t.r = s.r >> 3;
t.g = s.g >> 2;
t.b = s.b >> 3;
}
} else if (a > 0x01) {
uint8_t m_a = 0xff - a;
if (sizeof(t) == 2) {
if (sizeof(s) == 2) {
t.r = s.r;
t.g = s.g;
t.b = s.b;
} else {
t.r = (s.r * pa + (t.r << 3) * m_a) >> 11;
t.g = (s.g * pa + (t.g << 2) * m_a) >> 10;
t.b = (s.b * pa + (t.b << 3) * m_a) >> 11;
}
} else if (sizeof(s) == 2) {
if (sizeof(t) == 2) {
t.r = s.r;
t.g = s.g;
t.b = s.b;
} else {
t.r = ((s.r << 3) * pa + t.r * m_a) >> 11;
t.g = ((s.g << 2) * pa + t.g * m_a) >> 10;
t.b = ((s.b << 3) * pa + t.b * m_a) >> 11;
}
} else {
t.r = (s.r * pa + t.r * m_a) >> 8;
t.g = (s.g * pa + t.g * m_a) >> 8;
t.b = (s.b * pa + t.b * m_a) >> 8;
}
}
}
template <>
inline void pixel_blend_premulti_alpha(pixel16_bgr565& t, const pixel32_rgba& s, uint8_t a, uint8_t pa) {
if (a > 0xf4) {
t.r = s.r >> 3;
t.g = s.g >> 2;
t.b = s.b >> 3;
} else if (a > 0x01) {
uint8_t m_a = 0xff - a;
t.r = (s.r * pa + (t.r << 3) * m_a) >> 11;
t.g = (s.g * pa + (t.g << 2) * m_a) >> 10;
t.b = (s.b * pa + (t.b << 3) * m_a) >> 11;
}
}
template <>
inline void pixel_blend_premulti_alpha(pixel16_rgb565& t, const pixel32_rgba& s, uint8_t a, uint8_t pa) {
if (a > 0xf4) {
t.r = s.r >> 3;
t.g = s.g >> 2;
t.b = s.b >> 3;
} else if (a > 0x01) {
uint8_t m_a = 0xff - a;
t.r = (s.r * pa + (t.r << 3) * m_a) >> 11;
t.g = (s.g * pa + (t.g << 2) * m_a) >> 10;
t.b = (s.b * pa + (t.b << 3) * m_a) >> 11;
}
}
template <>
inline void pixel_blend_premulti_alpha(pixel24_bgr& t, const pixel32_rgba& s, uint8_t a, uint8_t pa) {
if (a > 0xf4) {
t.r = s.r;
t.g = s.g;
t.b = s.b;
} else if (a > 0x01) {
uint8_t m_a = 0xff - a;
t.r = (s.r * pa + t.r * m_a) >> 8;
t.g = (s.g * pa + t.g * m_a) >> 8;
t.b = (s.b * pa + t.b * m_a) >> 8;
}
}
template <>
inline void pixel_blend_premulti_alpha(pixel24_rgb& t, const pixel32_rgba& s, uint8_t a, uint8_t pa) {
if (a > 0xf4) {
t.r = s.r;
t.g = s.g;
t.b = s.b;
} else if (a > 0x01) {
uint8_t m_a = 0xff - a;
t.r = (s.r * pa + t.r * m_a) >> 8;
t.g = (s.g * pa + t.g * m_a) >> 8;
t.b = (s.b * pa + t.b * m_a) >> 8;
}
}
template <>
inline void pixel_blend_premulti_alpha(pixel32_rgba& t, const pixel32_rgba& s, uint8_t a, uint8_t pa) {
if (a > 0xf4) {
t.r = s.r;
t.g = s.g;
t.b = s.b;
t.a = a;
} else if (a > 0x01) {
uint8_t m_a = 0xff - a;
if(t.a > 0xf4) {
t.r = (s.r * pa + t.r * m_a) >> 8;
t.g = (s.g * pa + t.g * m_a) >> 8;
t.b = (s.b * pa + t.b * m_a) >> 8;
} else {
uint8_t out_a = pixel_limit_uint8(a + t.a - ((a * t.a) >> 8));
if(out_a > 0) {
uint8_t d_a = (t.a * (0xff - a)) >> 8;
t.r = (s.r * pa + t.r * d_a) / out_a;
t.g = (s.g * pa + t.g * d_a) / out_a;
t.b = (s.b * pa + t.b * d_a) / out_a;
}
t.a = out_a;
}
}
}
template <>
inline void pixel_blend_premulti_alpha(pixel32_bgra& t, const pixel32_rgba& s, uint8_t a, uint8_t pa) {
if (a > 0xf4) {
t.r = s.r;
t.g = s.g;
t.b = s.b;
t.a = a;
} else if (a > 0x01) {
if(t.a > 0xf4) {
uint8_t m_a = 0xff - a;
t.r = (s.r * pa + t.r * m_a) >> 8;
t.g = (s.g * pa + t.g * m_a) >> 8;
t.b = (s.b * pa + t.b * m_a) >> 8;
} else {
uint8_t out_a = pixel_limit_uint8(a + t.a - ((a * t.a) >> 8));
if(out_a > 0) {
uint8_t d_a = (t.a * (0xff - a)) >> 8;
t.r = (s.r * pa + t.r * d_a) / out_a;
t.g = (s.g * pa + t.g * d_a) / out_a;
t.b = (s.b * pa + t.b * d_a) / out_a;
}
t.a = out_a;
}
}
}
} // namespace agge
| 0 |
D://workCode//uploadProject\awtk\3rd\agge | D://workCode//uploadProject\awtk\3rd\agge\agge\pixel_a.h | #pragma once
template <typename PixelT>
inline uint8_t pixel_a(const PixelT& p, uint8_t a) {
return a;
}
template <>
inline uint8_t pixel_a(const pixel32_rgba& p, uint8_t a) {
return (a * p.a) >> 8;
}
template <>
inline uint8_t pixel_a(const pixel32_abgr& p, uint8_t a) {
return (a * p.a) >> 8;
}
template <>
inline uint8_t pixel_a(const pixel32_bgra& p, uint8_t a) {
return (a * p.a) >> 8;
}
template <>
inline uint8_t pixel_a(const pixel32_argb& p, uint8_t a) {
return (a * p.a) >> 8;
}
template <>
inline uint8_t pixel_a(const pixel8& p, uint8_t a) {
return (a * p.a) >> 8;
}
| 0 |
D://workCode//uploadProject\awtk\3rd\agge | D://workCode//uploadProject\awtk\3rd\agge\agge\pixel_convert.h | #pragma once
#define PIXEL_CONVERT_NO_A(t, s) \
t.r = s.r; \
t.g = s.g; \
t.b = s.b;
#define PIXEL_CONVERT_FROM_565(t, s) \
t.r = s.r << 3; \
t.g = s.g << 2; \
t.b = s.b << 3;
#define PIXEL_CONVERT_TO_565(t, s) \
t.r = s.r >> 3; \
t.g = s.g >> 2; \
t.b = s.b >> 3;
#define PIXEL_CONVERT_A(t, s) \
t.r = s.r; \
t.g = s.g; \
t.b = s.b; \
t.a = s.a;
template <typename PixelTargetT, typename PixelSrcT>
inline void pixel_convert(PixelTargetT& t, const PixelSrcT& s) {
PIXEL_CONVERT_NO_A(t, s);
}
template <>
inline void pixel_convert(pixel32_rgba& t, const pixel32_rgba& s) {
t = s;
}
template <>
inline void pixel_convert(pixel32_abgr& t, const pixel32_abgr& s) {
t = s;
}
template <>
inline void pixel_convert(pixel32_bgra& t, const pixel32_bgra& s) {
t = s;
}
template <>
inline void pixel_convert(pixel32_argb& t, const pixel32_argb& s) {
t = s;
}
template <>
inline void pixel_convert(pixel24_rgb& t, const pixel24_rgb& s) {
t = s;
}
template <>
inline void pixel_convert(pixel24_bgr& t, const pixel24_bgr& s) {
t = s;
}
template <>
inline void pixel_convert(pixel16_rgb565& t, const pixel16_rgb565& s) {
t = s;
}
template <>
inline void pixel_convert(pixel16_bgr565& t, const pixel16_bgr565& s) {
t = s;
}
// pixel32_rgba
template <>
inline void pixel_convert(pixel32_rgba& t, const pixel32_abgr& s) {
PIXEL_CONVERT_A(t, s);
}
template <>
inline void pixel_convert(pixel32_rgba& t, const pixel32_bgra& s) {
PIXEL_CONVERT_A(t, s);
}
template <>
inline void pixel_convert(pixel32_rgba& t, const pixel32_argb& s) {
PIXEL_CONVERT_A(t, s);
}
template <>
inline void pixel_convert(pixel32_rgba& t, const pixel16_bgr565& s) {
PIXEL_CONVERT_FROM_565(t, s);
}
// pixel32_abgr
template <>
inline void pixel_convert(pixel32_abgr& t, const pixel32_rgba& s) {
PIXEL_CONVERT_A(t, s);
}
template <>
inline void pixel_convert(pixel32_abgr& t, const pixel32_bgra& s) {
PIXEL_CONVERT_A(t, s);
}
template <>
inline void pixel_convert(pixel32_abgr& t, const pixel32_argb& s) {
PIXEL_CONVERT_A(t, s);
}
template <>
inline void pixel_convert(pixel32_abgr& t, const pixel16_bgr565& s) {
PIXEL_CONVERT_FROM_565(t, s);
}
// pixel32_bgra
template <>
inline void pixel_convert(pixel32_bgra& t, const pixel32_abgr& s) {
PIXEL_CONVERT_A(t, s);
}
template <>
inline void pixel_convert(pixel32_bgra& t, const pixel32_rgba& s) {
PIXEL_CONVERT_A(t, s);
}
template <>
inline void pixel_convert(pixel32_bgra& t, const pixel32_argb& s) {
PIXEL_CONVERT_A(t, s);
}
template <>
inline void pixel_convert(pixel32_bgra& t, const pixel16_bgr565& s) {
PIXEL_CONVERT_FROM_565(t, s);
}
// pixel32_argb
template <>
inline void pixel_convert(pixel32_argb& t, const pixel32_rgba& s) {
PIXEL_CONVERT_A(t, s);
}
template <>
inline void pixel_convert(pixel32_argb& t, const pixel32_abgr& s) {
PIXEL_CONVERT_A(t, s);
}
template <>
inline void pixel_convert(pixel32_argb& t, const pixel32_bgra& s) {
PIXEL_CONVERT_A(t, s);
}
template <>
inline void pixel_convert(pixel32_argb& t, const pixel16_bgr565& s) {
PIXEL_CONVERT_FROM_565(t, s);
}
| 0 |
D://workCode//uploadProject\awtk\3rd\agge | D://workCode//uploadProject\awtk\3rd\agge\agge\pixel_set_a.h | #pragma once
template <typename PixelT>
inline void pixel_set_a(PixelT& p, uint8_t a) {
}
template <>
inline void pixel_set_a(pixel32_rgba& p, uint8_t a) {
p.a = a;
}
template <>
inline void pixel_set_a(pixel32_bgra& p, uint8_t a) {
p.a = a;
}
template <>
inline void pixel_set_a(pixel32_abgr& p, uint8_t a) {
p.a = a;
}
template <>
inline void pixel_set_a(pixel32_argb& p, uint8_t a) {
p.a = a;
}
template <>
inline void pixel_set_a(pixel8& p, uint8_t a) {
p.a = a;
}
| 0 |
D://workCode//uploadProject\awtk\3rd\agge | D://workCode//uploadProject\awtk\3rd\agge\agge\pod_vector.h | #pragma once
#include "tools.h"
#include "types.h"
namespace agge
{
template <typename T>
class pod_vector
{
public:
typedef T *iterator;
typedef const T *const_iterator;
typedef T value_type;
public:
explicit pod_vector(count_t initial_size = 0);
pod_vector(const pod_vector &other);
~pod_vector();
iterator push_back(const T &element);
void pop_back();
void clear();
void clear_cache();
void resize(count_t size);
void set_end(iterator end);
void assign(count_t size, const T &value);
void swap(pod_vector &other);
const T *data() const;
bool empty() const;
count_t size() const;
count_t capacity() const;
iterator begin();
iterator end();
const_iterator begin() const;
const_iterator end() const;
T &operator [](count_t index);
const T &operator [](count_t index) const;
private:
union pod_constraint
{
T _unused1;
int _unused2;
};
private:
const pod_vector &operator =(const pod_vector &rhs);
void grow(count_t by = 0);
private:
T *_begin, *_end, *_limit;
};
template <typename T>
inline pod_vector<T>::pod_vector(count_t initial_size)
: _begin(0), _end(0), _limit(0)
{ resize(initial_size); }
template <typename T>
inline pod_vector<T>::pod_vector(const pod_vector &other)
: _begin(new T[other.capacity()]), _end(_begin + other.size()), _limit(_begin + other.capacity())
{
const_iterator i = other.begin();
iterator j = begin();
while (i != other.end())
*j++ = *i++;
}
template <typename T>
inline pod_vector<T>::~pod_vector()
{ clear_cache(); }
template <typename T>
inline void pod_vector<T>::clear_cache()
{
if (_begin != 0) {
delete []_begin;
}
_end = 0;
_begin = 0;
_limit = 0;
}
template <typename T>
inline typename pod_vector<T>::iterator pod_vector<T>::push_back(const T &element)
{
if (_end == _limit)
grow();
*_end = element;
return _end++;
}
template <typename T>
inline void pod_vector<T>::pop_back()
{ --_end; }
template <typename T>
inline void pod_vector<T>::clear()
{ _end = _begin; }
template <typename T>
inline void pod_vector<T>::resize(count_t size_)
{
if (size_ > capacity())
grow(size_ - capacity());
_end = _begin + size_;
}
template <typename T>
inline void pod_vector<T>::set_end(iterator end)
{ _end = end; }
template <typename T>
inline void pod_vector<T>::assign(count_t size_, const T &value)
{
resize(size_);
for (T *i = _begin; size_; --size_, ++i)
*i = value;
}
template <typename T>
inline void pod_vector<T>::swap(pod_vector &other)
{
iterator t;
t = _begin, _begin = other._begin, other._begin = t;
t = _end, _end = other._end, other._end = t;
t = _limit, _limit = other._limit, other._limit = t;
}
template <typename T>
inline const T *pod_vector<T>::data() const
{ return _begin; }
template <typename T>
inline bool pod_vector<T>::empty() const
{ return _begin == _end; }
template <typename T>
inline count_t pod_vector<T>::size() const
{ return static_cast<count_t>(_end - _begin); }
template <typename T>
inline count_t pod_vector<T>::capacity() const
{ return static_cast<count_t>(_limit - _begin); }
template <typename T>
inline typename pod_vector<T>::iterator pod_vector<T>::begin()
{ return _begin; }
template <typename T>
inline typename pod_vector<T>::iterator pod_vector<T>::end()
{ return _end; }
template <typename T>
inline typename pod_vector<T>::const_iterator pod_vector<T>::begin() const
{ return _begin; }
template <typename T>
inline typename pod_vector<T>::const_iterator pod_vector<T>::end() const
{ return _end; }
template <typename T>
inline T &pod_vector<T>::operator [](count_t index)
{ return _begin[index]; }
template <typename T>
inline const T &pod_vector<T>::operator [](count_t index) const
{ return _begin[index]; }
template <typename T>
inline void pod_vector<T>::grow(count_t by)
{
count_t size = this->size(), new_capacity = capacity();
new_capacity += agge_max(2 * by > new_capacity ? by : new_capacity / 2, 1u);
T *buffer = new T[new_capacity], *p = buffer;
for (iterator i = _begin; i != _end; )
*p++ = *i++;
if (_begin != 0) {
delete []_begin;
}
_begin = buffer;
_end = _begin + size;
_limit = _begin + new_capacity;
}
}
| 0 |
D://workCode//uploadProject\awtk\3rd\agge | D://workCode//uploadProject\awtk\3rd\agge\agge\precise_delta.h | #pragma once
#include "config.h"
namespace agge
{
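// precise_delta spreads the ratio numerator/denominator across a run of
// integer steps in fixed point: the constructor extracts the exponent and
// mantissa of the quotient directly from its float representation to build
// _quotient and the shift _exp, multiply(k) scales the per-step increment by
// k, and next() accumulates that increment and returns the whole part,
// carrying the fractional remainder forward to the next step.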
class precise_delta
{
public:
AGGE_INLINE precise_delta(int numerator, int denominator)
: _acc(0), _exp(0)
{
const float q = static_cast<float>(numerator) / denominator;
const int &iq = reinterpret_cast<const int &>(q);
const int exp = (((iq & 0x7F800000)) >> 23) - 127;
int m = (iq & 0x7FFFFF) | 0x800000;
m--; // Sacrifice precision to be agnostic to the rounding mode: we must not overflow on increments!
if (exp > 0x17)
m <<= exp - 0x17;
else if (exp >= 0x15)
m >>= 0x17 - exp;
else if (exp >= 0x15 - 0x1E)
_exp = 0x15 - exp, m >>= 2;
else
_exp = 0x1E, m >>= 2 + 0x15 - 0x1E - exp;
_quotient = (m ^ iq >> 31) + (static_cast<unsigned>(iq) >> 31);
}
void multiply(int k)
{
_delta_fraction = k * _quotient;
}
int next()
{
_acc += _delta_fraction;
int delta = _acc >> _exp;
_acc -= delta << _exp;
return delta;
}
private:
int _acc;
int _quotient;
int _delta_fraction;
int _exp;
};
}
| 0 |
D://workCode//uploadProject\awtk\3rd\agge | D://workCode//uploadProject\awtk\3rd\agge\agge\rasterizer.h | #pragma once
#include "math.h"
#include "vector_rasterizer.h"
namespace agge
{
template <typename T>
struct scaling;
template < typename ClipperT, typename ScalingT = scaling<typename ClipperT::coord_type> >
class rasterizer : private vector_rasterizer
{
public:
using vector_rasterizer::_1_shift;
using vector_rasterizer::scanline_cells;
rasterizer() : _start_x(), _start_y() {};
public:
using vector_rasterizer::reset;
void clear_cache();
void reset_clipping();
void set_clipping(const rect<real_t> &window);
void move_to(real_t x, real_t y);
void line_to(real_t x, real_t y);
void close_polygon();
void append(const rasterizer &other, int dx, int dy);
using vector_rasterizer::sort;
using vector_rasterizer::operator [];
using vector_rasterizer::width;
using vector_rasterizer::min_y;
using vector_rasterizer::height;
private:
typedef typename ClipperT::coord_type coord_type;
public:
void line(coord_type x1, coord_type y1, coord_type x2, coord_type y2);
private:
ClipperT _clipper;
coord_type _start_x, _start_y;
};
template <>
struct scaling<int>
{
static void scale1(real_t x, real_t y, int &cx, int &cy)
{ cx = iround(256.0f * x), cy = iround(256.0f * y); }
static void scale2(int x1, int y1, int x2, int y2, int &cx1, int &cy1, int &cx2, int &cy2)
{ cx1 = x1, cy1 = y1, cx2 = x2, cy2 = y2; }
};
template <>
struct scaling<real_t>
{
static void scale1(real_t x, real_t y, real_t &cx, real_t &cy)
{ cx = x, cy = y; }
static void scale2(real_t x1, real_t y1, real_t x2, real_t y2, int &cx1, int &cy1, int &cx2, int &cy2)
{
cx1 = iround(256.0f * x1);
cy1 = iround(256.0f * y1);
cx2 = iround(256.0f * x2);
cy2 = iround(256.0f * y2);
}
};
template <typename ClipperT, typename ScalingT>
inline void rasterizer<ClipperT, ScalingT>::reset_clipping()
{ _clipper.reset(); }
template <typename ClipperT, typename ScalingT>
inline void rasterizer<ClipperT, ScalingT>::clear_cache()
{ vector_rasterizer::clear_cache(); }
template <typename ClipperT, typename ScalingT>
inline void rasterizer<ClipperT, ScalingT>::set_clipping(const rect<real_t> &window)
{
rect<typename ClipperT::coord_type> translated;
ScalingT::scale1(window.x1, window.y1, translated.x1, translated.y1);
ScalingT::scale1(window.x2, window.y2, translated.x2, translated.y2);
_clipper.set(translated);
}
template <typename ClipperT, typename ScalingT>
inline void rasterizer<ClipperT, ScalingT>::move_to(real_t x, real_t y)
{
ScalingT::scale1(x, y, _start_x, _start_y);
_clipper.move_to(_start_x, _start_y);
}
template <typename ClipperT, typename ScalingT>
inline void rasterizer<ClipperT, ScalingT>::line_to(real_t x, real_t y)
{
coord_type cx, cy;
ScalingT::scale1(x, y, cx, cy);
_clipper.line_to(*this, cx, cy);
}
template <typename ClipperT, typename ScalingT>
inline void rasterizer<ClipperT, ScalingT>::close_polygon()
{ _clipper.line_to(*this, _start_x, _start_y); }
template <typename ClipperT, typename ScalingT>
inline void rasterizer<ClipperT, ScalingT>::append(const rasterizer &other, int dx, int dy)
{
int cx, cy, unused;
ScalingT::scale2(dx, dy, 0, 0, cx, cy, unused, unused);
vector_rasterizer::append(other, cx, cy);
}
template <typename ClipperT, typename ScalingT>
inline void rasterizer<ClipperT, ScalingT>::line(coord_type x1, coord_type y1, coord_type x2, coord_type y2)
{
int cx1, cy1, cx2, cy2;
ScalingT::scale2(x1, y1, x2, y2, cx1, cy1, cx2, cy2);
vector_rasterizer::line(cx1, cy1, cx2, cy2);
}
}
| 0 |
D://workCode//uploadProject\awtk\3rd\agge | D://workCode//uploadProject\awtk\3rd\agge\agge\renderer.h | #pragma once
#include "scanline.h"
#include "tools.h"
namespace agge
{
class renderer
{
public:
template <typename BitmapT, typename BlenderT>
class adapter;
public:
template <typename BitmapT, typename MaskT, typename BlenderT, typename AlphaFn>
void operator ()(BitmapT &bitmap_, const rect_i *window, const MaskT &mask, const BlenderT &blender, const AlphaFn &alpha);
private:
raw_memory_object _scanline_cache;
};
template <typename BitmapT, typename BlenderT>
class renderer::adapter
{
public:
typedef typename BlenderT::cover_type cover_type;
public:
adapter(BitmapT &bitmap_, const rect_i *window, const BlenderT &blender);
bool set_y(int y);
void operator ()(int x, int length, const cover_type *covers);
private:
const adapter &operator =(const adapter &rhs);
private:
const BlenderT &_blender;
const int _offset_x, _limit_x;
typename BitmapT::pixel *_row;
BitmapT &_bitmap;
int _y;
const int _offset_y, _limit_y;
};
template <typename BitmapT, typename BlenderT>
inline renderer::adapter<BitmapT, BlenderT>::adapter(BitmapT &bitmap_, const rect_i *window, const BlenderT &blender)
: _blender(blender),
_offset_x(window ? window->x1 : 0),
_limit_x((!window || static_cast<int>(bitmap_.width()) < width(*window) ? bitmap_.width() : width(*window)) + _offset_x),
_bitmap(bitmap_),
_offset_y(window ? window->y1 : 0),
_limit_y((!window || static_cast<int>(bitmap_.height()) < height(*window) ? bitmap_.height() : height(*window)) + _offset_y)
{ }
template <typename BitmapT, typename BlenderT>
inline bool renderer::adapter<BitmapT, BlenderT>::set_y(int y)
{
if (y < _offset_y || _limit_y <= y)
return false;
_y = y;
_row = _bitmap.row_ptr(y - _offset_y) - _offset_x;
return true;
}
template <typename BitmapT, typename BlenderT>
inline void renderer::adapter<BitmapT, BlenderT>::operator ()(int x, int length, const cover_type *covers)
{
if (x < _offset_x)
{
const int dx = x - _offset_x;
x = _offset_x;
length += dx;
covers -= dx;
}
if (x + length > _limit_x)
length = _limit_x - x;
if (length > 0)
_blender(_row + x, x, _y, length, covers);
}
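// sweep_scanline converts the sorted cell run of one scanline into spans. It
// keeps a running 'cover' accumulator (the winding sum so far); cells sharing
// an x coordinate are merged, and a merged cell with area emits a single pixel
// whose coverage is the scaled running cover minus that area. The gap up to
// the next cell is emitted as a full-cover span whenever the running cover is
// non-zero. The alpha functor maps raw coverage to the final cover value, i.e.
// it applies the filling rule.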
template <unsigned _1_shift, typename ScanlineT, typename CellsIteratorT, typename AlphaFn>
AGGE_INLINE void sweep_scanline(ScanlineT &scanline, CellsIteratorT begin, CellsIteratorT end, const AlphaFn &alpha)
{
int cover = 0;
if (begin == end)
return;
for (CellsIteratorT i = begin; ; )
{
int x = i->x, area = 0;
do
{
area += i->area;
cover += i->cover;
++i;
} while (i != end && i->x == x);
int cover_m = cover << (1 + _1_shift);
if (area)
{
scanline.add_cell(x, alpha(cover_m - area));
++x;
}
if (i == end)
break;
int len = i->x - x;
if (len && cover_m) {
if (len > 0) {
scanline.add_span(x, len, alpha(cover_m));
} else {
//printf("sweep_scanline i->x=%d x=%d len=%d area=%d cover=%d cover_m=%d \r\n", i->x, x, len, area, cover, cover_m);
}
}
}
}
template <typename ScanlineT, typename MaskT, typename AlphaFn>
AGGE_INLINE void render(ScanlineT &scanline, const MaskT &mask, const AlphaFn &alpha, int offset, int step)
{
for (int y = mask.min_y() + offset, limit_y = mask.min_y() + mask.height(); y < limit_y; y += step)
{
typename MaskT::scanline_cells cells = mask[y];
if (scanline.begin(y))
{
sweep_scanline<MaskT::_1_shift>(scanline, cells.first, cells.second, alpha);
scanline.commit();
}
}
}
template <typename BitmapT, typename BlenderT>
inline void fill(BitmapT &bitmap_, const rect_i &area, const BlenderT &blender)
{
const int x = agge_max(0, area.x1);
const int width = agge_min<int>(bitmap_.width(), area.x2) - x;
if (width > 0)
{
for (int y = agge_max(0, area.y1), limit_y = agge_min<int>(bitmap_.height(), area.y2); y < limit_y; ++y)
blender(bitmap_.row_ptr(y) + x, x, y, width);
}
}
template <typename BitmapT, typename MaskT, typename BlenderT, typename AlphaFn>
void renderer::operator ()(BitmapT &bitmap_, const rect_i *window, const MaskT &mask, const BlenderT &blender,
const AlphaFn &alpha)
{
typedef adapter<BitmapT, BlenderT> rendition_adapter;
rendition_adapter ra(bitmap_, window, blender);
scanline_adapter<rendition_adapter> scanline(ra, _scanline_cache, mask.width());
render(scanline, mask, alpha, 0, 1);
}
}
| 0 |
D://workCode//uploadProject\awtk\3rd\agge | D://workCode//uploadProject\awtk\3rd\agge\agge\scanline.h | #pragma once
#include "config.h"
#include "memory.h"
namespace agge
{
template <typename RendererT>
class scanline_adapter : noncopyable
{
public:
typedef typename RendererT::cover_type cover_type;
public:
scanline_adapter(RendererT &renderer_, raw_memory_object &covers_buffer, count_t max_length);
bool begin(int y);
void add_cell(int x, cover_type cover);
void add_span(int x, unsigned int length, cover_type cover);
void commit(int next_x = 0);
private:
RendererT &_renderer;
cover_type *_cover;
int _x, _start_x;
cover_type * const _start_cover;
};
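// Note on the cover buffer: it is over-allocated by 16 entries and the working
// range starts 4 entries in, leaving slack on both sides; commit() writes one
// terminating zero cover just past the committed range (a commented-out
// variant there stores a 4-byte int), and the padding keeps that write inside
// the allocation.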
template <typename RendererT>
inline scanline_adapter<RendererT>::scanline_adapter(RendererT &renderer_, raw_memory_object &covers, count_t max_length)
: _renderer(renderer_), _x(0), _start_x(0), _start_cover(covers.get<cover_type>(max_length + 16) + 4)
{ _cover = _start_cover; }
template <typename RendererT>
inline bool scanline_adapter<RendererT>::begin(int y)
{ return _renderer.set_y(y); }
template <typename RendererT>
AGGE_INLINE void scanline_adapter<RendererT>::add_cell(int x, cover_type cover)
{
if (x != _x)
commit(x);
++_x;
*_cover++ = cover;
}
template <typename RendererT>
AGGE_INLINE void scanline_adapter<RendererT>::add_span(int x, count_t length, cover_type cover)
{
if (x != _x)
commit(x);
cover_type *p = _cover;
_x += length;
_cover += length;
memset(p, cover, length);
}
template <typename RendererT>
AGGE_INLINE void scanline_adapter<RendererT>::commit(int next_x)
{
//*reinterpret_cast<int *>(_cover) = 0;
*_cover = 0;
_renderer(_start_x, _x - _start_x, _start_cover);
_start_x = _x = next_x;
_cover = _start_cover;
}
}
| 0 |
D://workCode//uploadProject\awtk\3rd\agge | D://workCode//uploadProject\awtk\3rd\agge\agge\stroke.h | #pragma once
#include "vertex_sequence.h"
namespace agge
{
typedef pod_vector<point_r> points;
class stroke : vertex_sequence, noncopyable
{
public:
struct cap;
struct join;
public:
stroke();
~stroke();
// Vertex population
void remove_all();
using vertex_sequence::move_to;
using vertex_sequence::line_to;
void close_polygon();
void add_vertex(real_t x, real_t y, int command);
// Vertex access
int vertex(real_t *x, real_t *y);
// Setup
void width(real_t w);
template <typename CapT>
void set_cap(const CapT &c);
template <typename JoinT>
void set_join(const JoinT &j);
private:
enum state {
// Stages
start_cap = 0x00,
outline_forward = 0x01,
outline_forward_closed = 0x02,
end_poly1 = 0x03,
end_cap = 0x04,
outline_backward = 0x05,
end_poly = 0x06,
stop = 0x07,
stage_mask = 0x07,
// Flags
closed = 0x10,
moveto = 0x20,
ready = 0x40
};
private:
bool prepare();
void set_state(int stage_and_flags);
private:
points _output;
vertex_sequence::const_iterator _i;
points::const_iterator _o;
const cap *_cap;
const join *_join;
real_t _width;
int _state;
};
struct stroke::cap
{
virtual ~cap() { }
virtual void calc(points &output, real_t w, const point_r &v0, real_t d, const point_r &v1) const = 0;
};
struct stroke::join
{
virtual ~join() { }
virtual void calc(points &output, real_t w, const point_r &v0, real_t d01, const point_r &v1, real_t d12, const point_r &v2) const = 0;
};
template <typename CapT>
inline void stroke::set_cap(const CapT &c)
{
const CapT *replacement = new CapT(c);
delete _cap;
_cap = replacement;
}
template <typename JoinT>
inline void stroke::set_join(const JoinT &j)
{
const JoinT *replacement = new JoinT(j);
delete _join;
_join = replacement;
}
}
| 0 |
D://workCode//uploadProject\awtk\3rd\agge | D://workCode//uploadProject\awtk\3rd\agge\agge\stroke_features.h | #pragma once
#include "stroke.h"
#include <math.h>
namespace agge
{
namespace caps
{
class butt : public stroke::cap
{
public:
virtual void calc(points &output, real_t w, const point_r &v0, real_t d, const point_r &v1) const;
};
class square : public stroke::cap
{
public:
virtual void calc(points &output, real_t w, const point_r &v0, real_t d, const point_r &v1) const;
};
class round : public stroke::cap
{
public:
virtual void calc(points &output, real_t w, const point_r &v0, real_t d, const point_r &v1) const;
};
class triangle : public stroke::cap
{
public:
triangle(real_t tip_extension = 1.0f);
virtual void calc(points &output, real_t w, const point_r &v0, real_t d, const point_r &v1) const;
private:
real_t _tip_extension;
};
}
namespace joins
{
class bevel : public stroke::join
{
public:
virtual void calc(points &output, real_t w, const point_r &v0, real_t d01, const point_r &v1, real_t d12, const point_r &v2) const;
};
class round : public stroke::join
{
public:
virtual void calc(points &output, real_t w, const point_r &v0, real_t d01, const point_r &v1, real_t d12, const point_r &v2) const;
};
class miter : public stroke::join
{
public:
virtual void calc(points &output, real_t w, const point_r &v0, real_t d01, const point_r &v1, real_t d12, const point_r &v2) const;
};
}
}
| 0 |
D://workCode//uploadProject\awtk\3rd\agge | D://workCode//uploadProject\awtk\3rd\agge\agge\tools.h | #pragma once
namespace agge
{
template <typename CoordT>
struct point;
template <typename CoordT>
struct agge_vector;
template <typename CoordT>
struct rect;
template <typename CoordT>
inline point<CoordT> create_point(CoordT x, CoordT y)
{
point<CoordT> p = { x, y };
return p;
}
template <typename CoordT>
inline agge_vector<CoordT> create_vector(CoordT dx, CoordT dy)
{
agge_vector<CoordT> v = { dx, dy };
return v;
}
template <typename CoordT>
inline rect<CoordT> create_rect(CoordT x1, CoordT y1, CoordT x2, CoordT y2)
{
rect<CoordT> r = { x1, y1, x2, y2 };
return r;
}
template <typename CoordT>
inline CoordT width(const rect<CoordT> &rc)
{ return rc.x2 - rc.x1; }
template <typename CoordT>
inline CoordT height(const rect<CoordT> &rc)
{ return rc.y2 - rc.y1; }
template <typename T>
inline T agge_min(const T &lhs, const T &rhs)
{ return lhs < rhs ? lhs : rhs; }
template <typename T>
inline T agge_max(const T &lhs, const T &rhs)
{ return lhs > rhs ? lhs : rhs; }
}
| 0 |
D://workCode//uploadProject\awtk\3rd\agge | D://workCode//uploadProject\awtk\3rd\agge\agge\types.h | #pragma once
namespace agge
{
typedef float real_t;
template <typename T>
struct point;
template <typename T>
struct agge_vector;
template <typename T>
struct rect;
template <typename T>
struct box;
typedef unsigned int count_t;
typedef unsigned char uint8_t;
typedef unsigned short uint16_t;
typedef point<real_t> point_r;
typedef agge_vector<real_t> vector_r;
typedef rect<int> rect_i;
typedef box<real_t> box_r;
template <typename T>
struct point
{
T x, y;
};
template <typename T>
struct agge_vector
{
T dx, dy;
};
template <typename T>
struct rect
{
T x1, y1, x2, y2;
};
template <typename T>
struct box
{
T w, h;
};
class noncopyable
{
public:
noncopyable() throw() { }
private:
noncopyable(const noncopyable &other);
const noncopyable &operator =(const noncopyable &rhs);
};
}
| 0 |
D://workCode//uploadProject\awtk\3rd\agge | D://workCode//uploadProject\awtk\3rd\agge\agge\vector_rasterizer.h | #pragma once
#include "pod_vector.h"
#include "types.h"
namespace agge
{
class precise_delta;
class vector_rasterizer
{
public:
enum
{
_1_shift = 8,
_1 = 1 << _1_shift,
_1_mask = _1 - 1,
};
#pragma pack(push, 1)
struct cell
{
short x, y;
int area;
short cover;
};
#pragma pack(pop)
struct scanline_cells;
typedef pod_vector<cell> cells_container;
typedef cells_container::const_iterator const_cells_iterator;
public:
vector_rasterizer();
void reset();
void clear_cache();
void line(int x1, int y1, int x2, int y2);
void append(const vector_rasterizer &source, int dx, int dy);
const cells_container &cells() const;
void sort(bool was_presorted = false);
bool empty() const;
bool sorted() const;
scanline_cells operator [](int y) const;
int width() const;
int min_y() const;
int height() const;
private:
typedef pod_vector<count_t> histogram;
private:
void hline(cells_container::iterator ¤t, precise_delta &tg_delta, int ey, int x1, int x2, int dy);
void extend_bounds(int x, int y);
private:
cells_container _cells;
histogram _histogram_y, _histogram_x;
cells_container _x_sorted_cells;
int _min_y, _min_x, _max_x, _max_y, _sorted;
};
struct vector_rasterizer::scanline_cells
{
vector_rasterizer::const_cells_iterator first;
vector_rasterizer::const_cells_iterator second;
};
inline const vector_rasterizer::cells_container &vector_rasterizer::cells() const
{ return _cells; }
inline bool vector_rasterizer::empty() const
{ return _min_y > _max_y; }
inline void vector_rasterizer::clear_cache()
{ _cells.clear_cache(); }
inline bool vector_rasterizer::sorted() const
{ return !!_sorted; }
inline vector_rasterizer::scanline_cells vector_rasterizer::operator [](int y) const
{
histogram::const_iterator offset = _histogram_y.begin() + y - _min_y;
const const_cells_iterator start = _cells.begin();
const scanline_cells sc = { start + *offset++, start + *offset };
return sc;
}
inline int vector_rasterizer::width() const
{ return empty() ? 0 : _max_x - _min_x + 1; }
inline int vector_rasterizer::min_y() const
{ return _min_y; }
inline int vector_rasterizer::height() const
{ return empty() ? 0 : _max_y - _min_y + 1; }
}
| 0 |
D://workCode//uploadProject\awtk\3rd\agge | D://workCode//uploadProject\awtk\3rd\agge\agge\vertex_sequence.h | #pragma once
#include "math.h"
#include "pod_vector.h"
namespace agge
{
struct vertex
{
point_r point;
real_t distance;
};
class vertex_sequence : pod_vector<vertex>
{
public:
using pod_vector<vertex>::const_iterator;
using pod_vector<vertex>::iterator;
public:
void move_to(real_t x, real_t y);
void line_to(real_t x, real_t y);
void close_polygon();
using pod_vector<vertex>::clear;
using pod_vector<vertex>::size;
using pod_vector<vertex>::begin;
using pod_vector<vertex>::end;
using pod_vector<vertex>::empty;
private:
static bool set_distance(vertex &v, const point_r &next);
};
inline void vertex_sequence::move_to(real_t x, real_t y)
{
vertex v = { { x, y } };
push_back(v);
}
inline void vertex_sequence::line_to(real_t x, real_t y)
{
vertex v = { { x, y } };
if (empty())
{
push_back(v);
}
else
{
vertex &last = *(end() - 1);
if (set_distance(last, v.point))
push_back(v);
}
}
inline void vertex_sequence::close_polygon()
{
if (!empty() && !set_distance(*(end() - 1), begin()->point))
pop_back();
}
inline bool vertex_sequence::set_distance(vertex &v, const point_r &next)
{
v.distance = distance(v.point, next);
return v.distance > distance_epsilon;
}
}
| 0 |
D://workCode//uploadProject\awtk\3rd | D://workCode//uploadProject\awtk\3rd\cairo\cairo-version.h | #ifndef CAIRO_VERSION_H
#define CAIRO_VERSION_H
#define CAIRO_VERSION_MAJOR 1
#define CAIRO_VERSION_MINOR 17
#define CAIRO_VERSION_MICRO 3
#endif
| 0 |
D://workCode//uploadProject\awtk\3rd\cairo | D://workCode//uploadProject\awtk\3rd\cairo\cairo\cairo-analysis-surface-private.h | /*
* Copyright © 2005 Keith Packard
*
* This library is free software; you can redistribute it and/or
* modify it either under the terms of the GNU Lesser General Public
* License version 2.1 as published by the Free Software Foundation
* (the "LGPL") or, at your option, under the terms of the Mozilla
* Public License Version 1.1 (the "MPL"). If you do not alter this
* notice, a recipient may use your version of this file under either
* the MPL or the LGPL.
*
* You should have received a copy of the LGPL along with this library
* in the file COPYING-LGPL-2.1; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA
* You should have received a copy of the MPL along with this library
* in the file COPYING-MPL-1.1
*
* The contents of this file are subject to the Mozilla Public License
* Version 1.1 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
* OF ANY KIND, either express or implied. See the LGPL or the MPL for
* the specific language governing rights and limitations.
*
* The Original Code is the cairo graphics library.
*
* The Initial Developer of the Original Code is Keith Packard
*
* Contributor(s):
* Keith Packard <keithp@keithp.com>
*/
#ifndef CAIRO_ANALYSIS_SURFACE_H
#define CAIRO_ANALYSIS_SURFACE_H
#include "cairoint.h"
cairo_private cairo_surface_t *
_cairo_analysis_surface_create (cairo_surface_t *target);
cairo_private void
_cairo_analysis_surface_set_ctm (cairo_surface_t *surface,
const cairo_matrix_t *ctm);
cairo_private void
_cairo_analysis_surface_get_ctm (cairo_surface_t *surface,
cairo_matrix_t *ctm);
cairo_private cairo_region_t *
_cairo_analysis_surface_get_supported (cairo_surface_t *surface);
cairo_private cairo_region_t *
_cairo_analysis_surface_get_unsupported (cairo_surface_t *surface);
cairo_private cairo_bool_t
_cairo_analysis_surface_has_supported (cairo_surface_t *surface);
cairo_private cairo_bool_t
_cairo_analysis_surface_has_unsupported (cairo_surface_t *surface);
cairo_private void
_cairo_analysis_surface_get_bounding_box (cairo_surface_t *surface,
cairo_box_t *bbox);
cairo_private cairo_int_status_t
_cairo_analysis_surface_merge_status (cairo_int_status_t status_a,
cairo_int_status_t status_b);
cairo_private cairo_surface_t *
_cairo_null_surface_create (cairo_content_t content);
#endif /* CAIRO_ANALYSIS_SURFACE_H */
| 0 |
D://workCode//uploadProject\awtk\3rd\cairo | D://workCode//uploadProject\awtk\3rd\cairo\cairo\cairo-analysis-surface.c | /*
* Copyright © 2006 Keith Packard
* Copyright © 2007 Adrian Johnson
*
* This library is free software; you can redistribute it and/or
* modify it either under the terms of the GNU Lesser General Public
* License version 2.1 as published by the Free Software Foundation
* (the "LGPL") or, at your option, under the terms of the Mozilla
* Public License Version 1.1 (the "MPL"). If you do not alter this
* notice, a recipient may use your version of this file under either
* the MPL or the LGPL.
*
* You should have received a copy of the LGPL along with this library
* in the file COPYING-LGPL-2.1; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA
* You should have received a copy of the MPL along with this library
* in the file COPYING-MPL-1.1
*
* The contents of this file are subject to the Mozilla Public License
* Version 1.1 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
* OF ANY KIND, either express or implied. See the LGPL or the MPL for
* the specific language governing rights and limitations.
*
* The Original Code is the cairo graphics library.
*
* The Initial Developer of the Original Code is Keith Packard
*
* Contributor(s):
* Keith Packard <keithp@keithp.com>
* Adrian Johnson <ajohnson@redneon.com>
*/
#include "cairoint.h"
#include "cairo-analysis-surface-private.h"
#include "cairo-box-inline.h"
#include "cairo-default-context-private.h"
#include "cairo-error-private.h"
#include "cairo-paginated-private.h"
#include "cairo-recording-surface-inline.h"
#include "cairo-surface-snapshot-inline.h"
#include "cairo-surface-subsurface-inline.h"
#include "cairo-region-private.h"
typedef struct {
cairo_surface_t base;
cairo_surface_t *target;
cairo_bool_t first_op;
cairo_bool_t has_supported;
cairo_bool_t has_unsupported;
cairo_region_t supported_region;
cairo_region_t fallback_region;
cairo_box_t page_bbox;
cairo_bool_t has_ctm;
cairo_matrix_t ctm;
} cairo_analysis_surface_t;
cairo_int_status_t
_cairo_analysis_surface_merge_status (cairo_int_status_t status_a,
cairo_int_status_t status_b)
{
/* fatal errors should be checked and propagated at source */
assert (! _cairo_int_status_is_error (status_a));
assert (! _cairo_int_status_is_error (status_b));
/* return the most important status */
if (status_a == CAIRO_INT_STATUS_UNSUPPORTED ||
status_b == CAIRO_INT_STATUS_UNSUPPORTED)
return CAIRO_INT_STATUS_UNSUPPORTED;
if (status_a == CAIRO_INT_STATUS_IMAGE_FALLBACK ||
status_b == CAIRO_INT_STATUS_IMAGE_FALLBACK)
return CAIRO_INT_STATUS_IMAGE_FALLBACK;
if (status_a == CAIRO_INT_STATUS_ANALYZE_RECORDING_SURFACE_PATTERN ||
status_b == CAIRO_INT_STATUS_ANALYZE_RECORDING_SURFACE_PATTERN)
return CAIRO_INT_STATUS_ANALYZE_RECORDING_SURFACE_PATTERN;
if (status_a == CAIRO_INT_STATUS_FLATTEN_TRANSPARENCY ||
status_b == CAIRO_INT_STATUS_FLATTEN_TRANSPARENCY)
return CAIRO_INT_STATUS_FLATTEN_TRANSPARENCY;
/* at this point we have checked all the valid internal codes, so... */
assert (status_a == CAIRO_INT_STATUS_SUCCESS &&
status_b == CAIRO_INT_STATUS_SUCCESS);
return CAIRO_INT_STATUS_SUCCESS;
}
struct proxy {
cairo_surface_t base;
cairo_surface_t *target;
};
static cairo_status_t
proxy_finish (void *abstract_surface)
{
return CAIRO_STATUS_SUCCESS;
}
static const cairo_surface_backend_t proxy_backend = {
CAIRO_INTERNAL_SURFACE_TYPE_NULL,
proxy_finish,
};
static cairo_surface_t *
attach_proxy (cairo_surface_t *source,
cairo_surface_t *target)
{
struct proxy *proxy;
proxy = _cairo_malloc (sizeof (*proxy));
if (unlikely (proxy == NULL))
return _cairo_surface_create_in_error (CAIRO_STATUS_NO_MEMORY);
_cairo_surface_init (&proxy->base, &proxy_backend, NULL, target->content, target->is_vector);
proxy->target = target;
_cairo_surface_attach_snapshot (source, &proxy->base, NULL);
return &proxy->base;
}
static void
detach_proxy (cairo_surface_t *proxy)
{
cairo_surface_finish (proxy);
cairo_surface_destroy (proxy);
}
static cairo_int_status_t
_add_operation (cairo_analysis_surface_t *surface,
cairo_rectangle_int_t *rect,
cairo_int_status_t backend_status)
{
cairo_int_status_t status;
cairo_box_t bbox;
if (rect->width == 0 || rect->height == 0) {
/* Even though the operation is not visible we must be careful
* to not allow unsupported operations to be replayed to the
* backend during CAIRO_PAGINATED_MODE_RENDER */
if (backend_status == CAIRO_INT_STATUS_SUCCESS ||
backend_status == CAIRO_INT_STATUS_FLATTEN_TRANSPARENCY ||
backend_status == CAIRO_INT_STATUS_NOTHING_TO_DO)
{
return CAIRO_INT_STATUS_SUCCESS;
}
else
{
return CAIRO_INT_STATUS_IMAGE_FALLBACK;
}
}
_cairo_box_from_rectangle (&bbox, rect);
if (surface->has_ctm) {
int tx, ty;
if (_cairo_matrix_is_integer_translation (&surface->ctm, &tx, &ty)) {
rect->x += tx;
rect->y += ty;
tx = _cairo_fixed_from_int (tx);
bbox.p1.x += tx;
bbox.p2.x += tx;
ty = _cairo_fixed_from_int (ty);
bbox.p1.y += ty;
bbox.p2.y += ty;
} else {
_cairo_matrix_transform_bounding_box_fixed (&surface->ctm,
&bbox, NULL);
if (bbox.p1.x == bbox.p2.x || bbox.p1.y == bbox.p2.y) {
/* Even though the operation is not visible we must be
* careful to not allow unsupported operations to be
* replayed to the backend during
* CAIRO_PAGINATED_MODE_RENDER */
if (backend_status == CAIRO_INT_STATUS_SUCCESS ||
backend_status == CAIRO_INT_STATUS_FLATTEN_TRANSPARENCY ||
backend_status == CAIRO_INT_STATUS_NOTHING_TO_DO)
{
return CAIRO_INT_STATUS_SUCCESS;
}
else
{
return CAIRO_INT_STATUS_IMAGE_FALLBACK;
}
}
_cairo_box_round_to_rectangle (&bbox, rect);
}
}
if (surface->first_op) {
surface->first_op = FALSE;
surface->page_bbox = bbox;
} else
_cairo_box_add_box(&surface->page_bbox, &bbox);
/* If the operation is completely enclosed within the fallback
* region there is no benefit in emitting a native operation as
* the fallback image will be painted on top.
*/
if (cairo_region_contains_rectangle (&surface->fallback_region, rect) == CAIRO_REGION_OVERLAP_IN)
return CAIRO_INT_STATUS_IMAGE_FALLBACK;
if (backend_status == CAIRO_INT_STATUS_FLATTEN_TRANSPARENCY) {
/* A status of CAIRO_INT_STATUS_FLATTEN_TRANSPARENCY indicates
* that the backend only supports this operation if the
* transparency is removed. If the extents of this operation do
* not intersect any other native operation, the operation is
* natively supported and the backend will blend the
* transparency into the white background.
*/
if (cairo_region_contains_rectangle (&surface->supported_region, rect) == CAIRO_REGION_OVERLAP_OUT)
backend_status = CAIRO_INT_STATUS_SUCCESS;
}
if (backend_status == CAIRO_INT_STATUS_SUCCESS) {
/* Add the operation to the supported region. Operations in
* this region will be emitted as native operations.
*/
surface->has_supported = TRUE;
return cairo_region_union_rectangle (&surface->supported_region, rect);
}
/* Add the operation to the unsupported region. This region will
* be painted as an image after all native operations have been
* emitted.
*/
surface->has_unsupported = TRUE;
status = cairo_region_union_rectangle (&surface->fallback_region, rect);
/* The status CAIRO_INT_STATUS_IMAGE_FALLBACK is used to indicate
* unsupported operations to the recording surface as using
* CAIRO_INT_STATUS_UNSUPPORTED would cause cairo-surface to
* invoke the cairo-surface-fallback path then return
* CAIRO_STATUS_SUCCESS.
*/
if (status == CAIRO_INT_STATUS_SUCCESS)
return CAIRO_INT_STATUS_IMAGE_FALLBACK;
else
return status;
}
static cairo_int_status_t
_analyze_recording_surface_pattern (cairo_analysis_surface_t *surface,
const cairo_pattern_t *pattern,
cairo_rectangle_int_t *extents)
{
const cairo_surface_pattern_t *surface_pattern;
cairo_analysis_surface_t *tmp;
cairo_surface_t *source, *proxy;
cairo_matrix_t p2d;
cairo_int_status_t status;
cairo_int_status_t analysis_status = CAIRO_INT_STATUS_SUCCESS;
cairo_bool_t surface_is_unbounded;
cairo_bool_t unused;
assert (pattern->type == CAIRO_PATTERN_TYPE_SURFACE);
surface_pattern = (const cairo_surface_pattern_t *) pattern;
assert (surface_pattern->surface->type == CAIRO_SURFACE_TYPE_RECORDING);
source = surface_pattern->surface;
proxy = _cairo_surface_has_snapshot (source, &proxy_backend);
if (proxy != NULL) {
/* nothing untoward found so far */
return CAIRO_STATUS_SUCCESS;
}
tmp = (cairo_analysis_surface_t *)
_cairo_analysis_surface_create (surface->target);
if (unlikely (tmp->base.status)) {
status = tmp->base.status;
goto cleanup1;
}
proxy = attach_proxy (source, &tmp->base);
p2d = pattern->matrix;
status = cairo_matrix_invert (&p2d);
assert (status == CAIRO_INT_STATUS_SUCCESS);
_cairo_analysis_surface_set_ctm (&tmp->base, &p2d);
source = _cairo_surface_get_source (source, NULL);
surface_is_unbounded = (pattern->extend == CAIRO_EXTEND_REPEAT
|| pattern->extend == CAIRO_EXTEND_REFLECT);
status = _cairo_recording_surface_replay_and_create_regions (source,
&pattern->matrix,
&tmp->base,
surface_is_unbounded);
if (unlikely (status))
goto cleanup2;
/* black background or mime data fills entire extents */
if (!(source->content & CAIRO_CONTENT_ALPHA) || _cairo_surface_has_mime_image (source)) {
cairo_rectangle_int_t rect;
if (_cairo_surface_get_extents (source, &rect)) {
cairo_box_t bbox;
_cairo_box_from_rectangle (&bbox, &rect);
_cairo_matrix_transform_bounding_box_fixed (&p2d, &bbox, NULL);
_cairo_box_round_to_rectangle (&bbox, &rect);
status = _add_operation (tmp, &rect, CAIRO_INT_STATUS_SUCCESS);
if (status == CAIRO_INT_STATUS_IMAGE_FALLBACK)
status = CAIRO_INT_STATUS_SUCCESS;
if (unlikely (status))
goto cleanup2;
}
}
if (tmp->has_supported) {
surface->has_supported = TRUE;
unused = cairo_region_union (&surface->supported_region, &tmp->supported_region);
}
if (tmp->has_unsupported) {
surface->has_unsupported = TRUE;
unused = cairo_region_union (&surface->fallback_region, &tmp->fallback_region);
}
analysis_status = tmp->has_unsupported ? CAIRO_INT_STATUS_IMAGE_FALLBACK : CAIRO_INT_STATUS_SUCCESS;
if (pattern->extend != CAIRO_EXTEND_NONE) {
_cairo_unbounded_rectangle_init (extents);
} else {
status = cairo_matrix_invert (&tmp->ctm);
_cairo_matrix_transform_bounding_box_fixed (&tmp->ctm,
&tmp->page_bbox, NULL);
_cairo_box_round_to_rectangle (&tmp->page_bbox, extents);
}
cleanup2:
detach_proxy (proxy);
cleanup1:
cairo_surface_destroy (&tmp->base);
if (unlikely (status))
return status;
else
return analysis_status;
}
static cairo_status_t
_cairo_analysis_surface_finish (void *abstract_surface)
{
cairo_analysis_surface_t *surface = (cairo_analysis_surface_t *) abstract_surface;
_cairo_region_fini (&surface->supported_region);
_cairo_region_fini (&surface->fallback_region);
cairo_surface_destroy (surface->target);
return CAIRO_STATUS_SUCCESS;
}
static cairo_bool_t
_cairo_analysis_surface_get_extents (void *abstract_surface,
cairo_rectangle_int_t *rectangle)
{
cairo_analysis_surface_t *surface = abstract_surface;
return _cairo_surface_get_extents (surface->target, rectangle);
}
static void
_rectangle_intersect_clip (cairo_rectangle_int_t *extents, const cairo_clip_t *clip)
{
if (clip != NULL)
_cairo_rectangle_intersect (extents, _cairo_clip_get_extents (clip));
}
static void
_cairo_analysis_surface_operation_extents (cairo_analysis_surface_t *surface,
cairo_operator_t op,
const cairo_pattern_t *source,
const cairo_clip_t *clip,
cairo_rectangle_int_t *extents)
{
cairo_bool_t is_empty;
is_empty = _cairo_surface_get_extents (&surface->base, extents);
if (_cairo_operator_bounded_by_source (op)) {
cairo_rectangle_int_t source_extents;
_cairo_pattern_get_extents (source, &source_extents, surface->target->is_vector);
_cairo_rectangle_intersect (extents, &source_extents);
}
_rectangle_intersect_clip (extents, clip);
}
static cairo_int_status_t
_cairo_analysis_surface_paint (void *abstract_surface,
cairo_operator_t op,
const cairo_pattern_t *source,
const cairo_clip_t *clip)
{
cairo_analysis_surface_t *surface = abstract_surface;
cairo_int_status_t backend_status;
cairo_rectangle_int_t extents;
if (surface->target->backend->paint == NULL) {
backend_status = CAIRO_INT_STATUS_UNSUPPORTED;
} else {
backend_status =
surface->target->backend->paint (surface->target,
op, source, clip);
if (_cairo_int_status_is_error (backend_status))
return backend_status;
}
_cairo_analysis_surface_operation_extents (surface,
op, source, clip,
&extents);
if (backend_status == CAIRO_INT_STATUS_ANALYZE_RECORDING_SURFACE_PATTERN) {
cairo_rectangle_int_t rec_extents;
backend_status = _analyze_recording_surface_pattern (surface, source, &rec_extents);
_cairo_rectangle_intersect (&extents, &rec_extents);
}
return _add_operation (surface, &extents, backend_status);
}
static cairo_int_status_t
_cairo_analysis_surface_mask (void *abstract_surface,
cairo_operator_t op,
const cairo_pattern_t *source,
const cairo_pattern_t *mask,
const cairo_clip_t *clip)
{
cairo_analysis_surface_t *surface = abstract_surface;
cairo_int_status_t backend_status;
cairo_rectangle_int_t extents;
if (surface->target->backend->mask == NULL) {
backend_status = CAIRO_INT_STATUS_UNSUPPORTED;
} else {
backend_status =
surface->target->backend->mask (surface->target,
op, source, mask, clip);
if (_cairo_int_status_is_error (backend_status))
return backend_status;
}
_cairo_analysis_surface_operation_extents (surface,
op, source, clip,
&extents);
if (backend_status == CAIRO_INT_STATUS_ANALYZE_RECORDING_SURFACE_PATTERN) {
cairo_int_status_t backend_source_status = CAIRO_STATUS_SUCCESS;
cairo_int_status_t backend_mask_status = CAIRO_STATUS_SUCCESS;
cairo_rectangle_int_t rec_extents;
if (source->type == CAIRO_PATTERN_TYPE_SURFACE) {
cairo_surface_t *src_surface = ((cairo_surface_pattern_t *)source)->surface;
src_surface = _cairo_surface_get_source (src_surface, NULL);
if (_cairo_surface_is_recording (src_surface)) {
backend_source_status =
_analyze_recording_surface_pattern (surface, source, &rec_extents);
if (_cairo_int_status_is_error (backend_source_status))
return backend_source_status;
_cairo_rectangle_intersect (&extents, &rec_extents);
}
}
if (mask->type == CAIRO_PATTERN_TYPE_SURFACE) {
cairo_surface_t *mask_surface = ((cairo_surface_pattern_t *)mask)->surface;
mask_surface = _cairo_surface_get_source (mask_surface, NULL);
if (_cairo_surface_is_recording (mask_surface)) {
backend_mask_status =
_analyze_recording_surface_pattern (surface, mask, &rec_extents);
if (_cairo_int_status_is_error (backend_mask_status))
return backend_mask_status;
_cairo_rectangle_intersect (&extents, &rec_extents);
}
}
backend_status =
_cairo_analysis_surface_merge_status (backend_source_status,
backend_mask_status);
}
if (_cairo_operator_bounded_by_mask (op)) {
cairo_rectangle_int_t mask_extents;
_cairo_pattern_get_extents (mask, &mask_extents, surface->target->is_vector);
_cairo_rectangle_intersect (&extents, &mask_extents);
}
return _add_operation (surface, &extents, backend_status);
}
static cairo_int_status_t
_cairo_analysis_surface_stroke (void *abstract_surface,
cairo_operator_t op,
const cairo_pattern_t *source,
const cairo_path_fixed_t *path,
const cairo_stroke_style_t *style,
const cairo_matrix_t *ctm,
const cairo_matrix_t *ctm_inverse,
double tolerance,
cairo_antialias_t antialias,
const cairo_clip_t *clip)
{
cairo_analysis_surface_t *surface = abstract_surface;
cairo_int_status_t backend_status;
cairo_rectangle_int_t extents;
if (surface->target->backend->stroke == NULL) {
backend_status = CAIRO_INT_STATUS_UNSUPPORTED;
} else {
backend_status =
surface->target->backend->stroke (surface->target, op,
source, path, style,
ctm, ctm_inverse,
tolerance, antialias,
clip);
if (_cairo_int_status_is_error (backend_status))
return backend_status;
}
_cairo_analysis_surface_operation_extents (surface,
op, source, clip,
&extents);
if (backend_status == CAIRO_INT_STATUS_ANALYZE_RECORDING_SURFACE_PATTERN) {
cairo_rectangle_int_t rec_extents;
backend_status = _analyze_recording_surface_pattern (surface, source, &rec_extents);
_cairo_rectangle_intersect (&extents, &rec_extents);
}
if (_cairo_operator_bounded_by_mask (op)) {
cairo_rectangle_int_t mask_extents;
cairo_int_status_t status;
status = _cairo_path_fixed_stroke_extents (path, style,
ctm, ctm_inverse,
tolerance,
&mask_extents);
if (unlikely (status))
return status;
_cairo_rectangle_intersect (&extents, &mask_extents);
}
return _add_operation (surface, &extents, backend_status);
}
static cairo_int_status_t
_cairo_analysis_surface_fill (void *abstract_surface,
cairo_operator_t op,
const cairo_pattern_t *source,
const cairo_path_fixed_t *path,
cairo_fill_rule_t fill_rule,
double tolerance,
cairo_antialias_t antialias,
const cairo_clip_t *clip)
{
cairo_analysis_surface_t *surface = abstract_surface;
cairo_int_status_t backend_status;
cairo_rectangle_int_t extents;
if (surface->target->backend->fill == NULL) {
backend_status = CAIRO_INT_STATUS_UNSUPPORTED;
} else {
backend_status =
surface->target->backend->fill (surface->target, op,
source, path, fill_rule,
tolerance, antialias,
clip);
if (_cairo_int_status_is_error (backend_status))
return backend_status;
}
_cairo_analysis_surface_operation_extents (surface,
op, source, clip,
&extents);
if (backend_status == CAIRO_INT_STATUS_ANALYZE_RECORDING_SURFACE_PATTERN) {
cairo_rectangle_int_t rec_extents;
backend_status = _analyze_recording_surface_pattern (surface, source, &rec_extents);
_cairo_rectangle_intersect (&extents, &rec_extents);
}
if (_cairo_operator_bounded_by_mask (op)) {
cairo_rectangle_int_t mask_extents;
_cairo_path_fixed_fill_extents (path, fill_rule, tolerance,
&mask_extents);
_cairo_rectangle_intersect (&extents, &mask_extents);
}
return _add_operation (surface, &extents, backend_status);
}
static cairo_int_status_t
_cairo_analysis_surface_show_glyphs (void *abstract_surface,
cairo_operator_t op,
const cairo_pattern_t *source,
cairo_glyph_t *glyphs,
int num_glyphs,
cairo_scaled_font_t *scaled_font,
const cairo_clip_t *clip)
{
cairo_analysis_surface_t *surface = abstract_surface;
cairo_int_status_t status, backend_status;
cairo_rectangle_int_t extents, glyph_extents;
/* Adapted from _cairo_surface_show_glyphs */
if (surface->target->backend->show_glyphs != NULL) {
backend_status =
surface->target->backend->show_glyphs (surface->target, op,
source,
glyphs, num_glyphs,
scaled_font,
clip);
if (_cairo_int_status_is_error (backend_status))
return backend_status;
}
else if (surface->target->backend->show_text_glyphs != NULL)
{
backend_status =
surface->target->backend->show_text_glyphs (surface->target, op,
source,
NULL, 0,
glyphs, num_glyphs,
NULL, 0,
FALSE,
scaled_font,
clip);
if (_cairo_int_status_is_error (backend_status))
return backend_status;
}
else
{
backend_status = CAIRO_INT_STATUS_UNSUPPORTED;
}
_cairo_analysis_surface_operation_extents (surface,
op, source, clip,
&extents);
if (backend_status == CAIRO_INT_STATUS_ANALYZE_RECORDING_SURFACE_PATTERN) {
cairo_rectangle_int_t rec_extents;
backend_status = _analyze_recording_surface_pattern (surface, source, &rec_extents);
_cairo_rectangle_intersect (&extents, &rec_extents);
}
if (_cairo_operator_bounded_by_mask (op)) {
status = _cairo_scaled_font_glyph_device_extents (scaled_font,
glyphs,
num_glyphs,
&glyph_extents,
NULL);
if (unlikely (status))
return status;
_cairo_rectangle_intersect (&extents, &glyph_extents);
}
return _add_operation (surface, &extents, backend_status);
}
static cairo_bool_t
_cairo_analysis_surface_has_show_text_glyphs (void *abstract_surface)
{
cairo_analysis_surface_t *surface = abstract_surface;
return cairo_surface_has_show_text_glyphs (surface->target);
}
static cairo_int_status_t
_cairo_analysis_surface_show_text_glyphs (void *abstract_surface,
cairo_operator_t op,
const cairo_pattern_t *source,
const char *utf8,
int utf8_len,
cairo_glyph_t *glyphs,
int num_glyphs,
const cairo_text_cluster_t *clusters,
int num_clusters,
cairo_text_cluster_flags_t cluster_flags,
cairo_scaled_font_t *scaled_font,
const cairo_clip_t *clip)
{
cairo_analysis_surface_t *surface = abstract_surface;
cairo_int_status_t status, backend_status;
cairo_rectangle_int_t extents, glyph_extents;
/* Adapted from _cairo_surface_show_glyphs */
backend_status = CAIRO_INT_STATUS_UNSUPPORTED;
if (surface->target->backend->show_text_glyphs != NULL) {
backend_status =
surface->target->backend->show_text_glyphs (surface->target, op,
source,
utf8, utf8_len,
glyphs, num_glyphs,
clusters, num_clusters,
cluster_flags,
scaled_font,
clip);
if (_cairo_int_status_is_error (backend_status))
return backend_status;
}
if (backend_status == CAIRO_INT_STATUS_UNSUPPORTED &&
surface->target->backend->show_glyphs != NULL)
{
backend_status =
surface->target->backend->show_glyphs (surface->target, op,
source,
glyphs, num_glyphs,
scaled_font,
clip);
if (_cairo_int_status_is_error (backend_status))
return backend_status;
}
_cairo_analysis_surface_operation_extents (surface,
op, source, clip,
&extents);
if (backend_status == CAIRO_INT_STATUS_ANALYZE_RECORDING_SURFACE_PATTERN) {
cairo_rectangle_int_t rec_extents;
backend_status = _analyze_recording_surface_pattern (surface, source, &rec_extents);
_cairo_rectangle_intersect (&extents, &rec_extents);
}
if (_cairo_operator_bounded_by_mask (op)) {
status = _cairo_scaled_font_glyph_device_extents (scaled_font,
glyphs,
num_glyphs,
&glyph_extents,
NULL);
if (unlikely (status))
return status;
_cairo_rectangle_intersect (&extents, &glyph_extents);
}
return _add_operation (surface, &extents, backend_status);
}
static cairo_int_status_t
_cairo_analysis_surface_tag (void *abstract_surface,
cairo_bool_t begin,
const char *tag_name,
const char *attributes,
const cairo_pattern_t *source,
const cairo_stroke_style_t *stroke_style,
const cairo_matrix_t *ctm,
const cairo_matrix_t *ctm_inverse,
const cairo_clip_t *clip)
{
cairo_analysis_surface_t *surface = abstract_surface;
cairo_int_status_t backend_status;
backend_status = CAIRO_INT_STATUS_SUCCESS;
if (surface->target->backend->tag != NULL) {
backend_status =
surface->target->backend->tag (surface->target,
begin,
tag_name,
attributes,
source,
stroke_style,
ctm,
ctm_inverse,
clip);
if (backend_status == CAIRO_INT_STATUS_SUCCESS)
surface->has_supported = TRUE;
}
return backend_status;
}
static const cairo_surface_backend_t cairo_analysis_surface_backend = {
CAIRO_INTERNAL_SURFACE_TYPE_ANALYSIS,
_cairo_analysis_surface_finish,
NULL,
NULL, /* create_similar */
NULL, /* create_similar_image */
NULL, /* map_to_image */
NULL, /* unmap */
NULL, /* source */
NULL, /* acquire_source_image */
NULL, /* release_source_image */
NULL, /* snapshot */
NULL, /* copy_page */
NULL, /* show_page */
_cairo_analysis_surface_get_extents,
NULL, /* get_font_options */
NULL, /* flush */
NULL, /* mark_dirty_rectangle */
_cairo_analysis_surface_paint,
_cairo_analysis_surface_mask,
_cairo_analysis_surface_stroke,
_cairo_analysis_surface_fill,
NULL, /* fill_stroke */
_cairo_analysis_surface_show_glyphs,
_cairo_analysis_surface_has_show_text_glyphs,
_cairo_analysis_surface_show_text_glyphs,
NULL, /* get_supported_mime_types */
_cairo_analysis_surface_tag
};
cairo_surface_t *
_cairo_analysis_surface_create (cairo_surface_t *target)
{
cairo_analysis_surface_t *surface;
cairo_status_t status;
status = target->status;
if (unlikely (status))
return _cairo_surface_create_in_error (status);
surface = _cairo_malloc (sizeof (cairo_analysis_surface_t));
if (unlikely (surface == NULL))
return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_NO_MEMORY));
/* I believe the content type here is truly arbitrary. I'm quite
* sure nothing will ever use this value. */
_cairo_surface_init (&surface->base,
&cairo_analysis_surface_backend,
NULL, /* device */
CAIRO_CONTENT_COLOR_ALPHA,
target->is_vector);
cairo_matrix_init_identity (&surface->ctm);
surface->has_ctm = FALSE;
surface->target = cairo_surface_reference (target);
surface->first_op = TRUE;
surface->has_supported = FALSE;
surface->has_unsupported = FALSE;
_cairo_region_init (&surface->supported_region);
_cairo_region_init (&surface->fallback_region);
surface->page_bbox.p1.x = 0;
surface->page_bbox.p1.y = 0;
surface->page_bbox.p2.x = 0;
surface->page_bbox.p2.y = 0;
return &surface->base;
}
void
_cairo_analysis_surface_set_ctm (cairo_surface_t *abstract_surface,
const cairo_matrix_t *ctm)
{
cairo_analysis_surface_t *surface;
if (abstract_surface->status)
return;
surface = (cairo_analysis_surface_t *) abstract_surface;
surface->ctm = *ctm;
surface->has_ctm = ! _cairo_matrix_is_identity (&surface->ctm);
}
void
_cairo_analysis_surface_get_ctm (cairo_surface_t *abstract_surface,
cairo_matrix_t *ctm)
{
cairo_analysis_surface_t *surface = (cairo_analysis_surface_t *) abstract_surface;
*ctm = surface->ctm;
}
cairo_region_t *
_cairo_analysis_surface_get_supported (cairo_surface_t *abstract_surface)
{
cairo_analysis_surface_t *surface = (cairo_analysis_surface_t *) abstract_surface;
return &surface->supported_region;
}
cairo_region_t *
_cairo_analysis_surface_get_unsupported (cairo_surface_t *abstract_surface)
{
cairo_analysis_surface_t *surface = (cairo_analysis_surface_t *) abstract_surface;
return &surface->fallback_region;
}
cairo_bool_t
_cairo_analysis_surface_has_supported (cairo_surface_t *abstract_surface)
{
cairo_analysis_surface_t *surface = (cairo_analysis_surface_t *) abstract_surface;
return surface->has_supported;
}
cairo_bool_t
_cairo_analysis_surface_has_unsupported (cairo_surface_t *abstract_surface)
{
cairo_analysis_surface_t *surface = (cairo_analysis_surface_t *) abstract_surface;
return surface->has_unsupported;
}
void
_cairo_analysis_surface_get_bounding_box (cairo_surface_t *abstract_surface,
cairo_box_t *bbox)
{
cairo_analysis_surface_t *surface = (cairo_analysis_surface_t *) abstract_surface;
*bbox = surface->page_bbox;
}
/* null surface type: a surface that does nothing (has no side effects, yay!) */
static cairo_int_status_t
_return_success (void)
{
return CAIRO_STATUS_SUCCESS;
}
/* These typedefs are just to silence the compiler... */
typedef cairo_int_status_t
(*_paint_func) (void *surface,
cairo_operator_t op,
const cairo_pattern_t *source,
const cairo_clip_t *clip);
typedef cairo_int_status_t
(*_mask_func) (void *surface,
cairo_operator_t op,
const cairo_pattern_t *source,
const cairo_pattern_t *mask,
const cairo_clip_t *clip);
typedef cairo_int_status_t
(*_stroke_func) (void *surface,
cairo_operator_t op,
const cairo_pattern_t *source,
const cairo_path_fixed_t *path,
const cairo_stroke_style_t *style,
const cairo_matrix_t *ctm,
const cairo_matrix_t *ctm_inverse,
double tolerance,
cairo_antialias_t antialias,
const cairo_clip_t *clip);
typedef cairo_int_status_t
(*_fill_func) (void *surface,
cairo_operator_t op,
const cairo_pattern_t *source,
const cairo_path_fixed_t *path,
cairo_fill_rule_t fill_rule,
double tolerance,
cairo_antialias_t antialias,
const cairo_clip_t *clip);
typedef cairo_int_status_t
(*_show_glyphs_func) (void *surface,
cairo_operator_t op,
const cairo_pattern_t *source,
cairo_glyph_t *glyphs,
int num_glyphs,
cairo_scaled_font_t *scaled_font,
const cairo_clip_t *clip);
static const cairo_surface_backend_t cairo_null_surface_backend = {
CAIRO_INTERNAL_SURFACE_TYPE_NULL,
NULL, /* finish */
NULL, /* only accessed through the surface functions */
NULL, /* create_similar */
NULL, /* create similar image */
NULL, /* map to image */
NULL, /* unmap image*/
NULL, /* source */
NULL, /* acquire_source_image */
NULL, /* release_source_image */
NULL, /* snapshot */
NULL, /* copy_page */
NULL, /* show_page */
NULL, /* get_extents */
NULL, /* get_font_options */
NULL, /* flush */
NULL, /* mark_dirty_rectangle */
(_paint_func) _return_success, /* paint */
(_mask_func) _return_success, /* mask */
(_stroke_func) _return_success, /* stroke */
(_fill_func) _return_success, /* fill */
NULL, /* fill_stroke */
(_show_glyphs_func) _return_success, /* show_glyphs */
NULL, /* has_show_text_glyphs */
NULL /* show_text_glyphs */
};
cairo_surface_t *
_cairo_null_surface_create (cairo_content_t content)
{
cairo_surface_t *surface;
surface = _cairo_malloc (sizeof (cairo_surface_t));
if (unlikely (surface == NULL)) {
return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_NO_MEMORY));
}
_cairo_surface_init (surface,
&cairo_null_surface_backend,
NULL, /* device */
content,
TRUE); /* is_vector */
return surface;
}
| 0 |
D://workCode//uploadProject\awtk\3rd\cairo | D://workCode//uploadProject\awtk\3rd\cairo\cairo\cairo-arc-private.h | /* cairo - a vector graphics library with display and print output
*
* Copyright © 2005 Red Hat, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it either under the terms of the GNU Lesser General Public
* License version 2.1 as published by the Free Software Foundation
* (the "LGPL") or, at your option, under the terms of the Mozilla
* Public License Version 1.1 (the "MPL"). If you do not alter this
* notice, a recipient may use your version of this file under either
* the MPL or the LGPL.
*
* You should have received a copy of the LGPL along with this library
* in the file COPYING-LGPL-2.1; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA
* You should have received a copy of the MPL along with this library
* in the file COPYING-MPL-1.1
*
* The contents of this file are subject to the Mozilla Public License
* Version 1.1 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
* OF ANY KIND, either express or implied. See the LGPL or the MPL for
* the specific language governing rights and limitations.
*
* The Original Code is the cairo graphics library.
*
* The Initial Developer of the Original Code is Red Hat, Inc.
*
* Contributor(s):
* Carl D. Worth <cworth@redhat.com>
*/
#ifndef CAIRO_ARC_PRIVATE_H
#define CAIRO_ARC_PRIVATE_H
#include "cairoint.h"
CAIRO_BEGIN_DECLS
cairo_private void
_cairo_arc_path (cairo_t *cr,
double xc,
double yc,
double radius,
double angle1,
double angle2);
cairo_private void
_cairo_arc_path_negative (cairo_t *cr,
double xc,
double yc,
double radius,
double angle1,
double angle2);
CAIRO_END_DECLS
#endif /* CAIRO_ARC_PRIVATE_H */
| 0 |
D://workCode//uploadProject\awtk\3rd\cairo | D://workCode//uploadProject\awtk\3rd\cairo\cairo\cairo-arc.c | /* cairo - a vector graphics library with display and print output
*
* Copyright © 2002 University of Southern California
*
* This library is free software; you can redistribute it and/or
* modify it either under the terms of the GNU Lesser General Public
* License version 2.1 as published by the Free Software Foundation
* (the "LGPL") or, at your option, under the terms of the Mozilla
* Public License Version 1.1 (the "MPL"). If you do not alter this
* notice, a recipient may use your version of this file under either
* the MPL or the LGPL.
*
* You should have received a copy of the LGPL along with this library
* in the file COPYING-LGPL-2.1; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA
* You should have received a copy of the MPL along with this library
* in the file COPYING-MPL-1.1
*
* The contents of this file are subject to the Mozilla Public License
* Version 1.1 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
* OF ANY KIND, either express or implied. See the LGPL or the MPL for
* the specific language governing rights and limitations.
*
* The Original Code is the cairo graphics library.
*
* The Initial Developer of the Original Code is University of Southern
* California.
*
* Contributor(s):
* Carl D. Worth <cworth@cworth.org>
*/
#include "cairoint.h"
#include "cairo-arc-private.h"
#define MAX_FULL_CIRCLES 65536
/* Spline deviation from the circle in radius would be given by:
error = sqrt (x**2 + y**2) - 1
A simpler error function to work with is:
e = x**2 + y**2 - 1
From "Good approximation of circles by curvature-continuous Bezier
curves", Tor Dokken and Morten Daehlen, Computer Aided Geometric
Design 8 (1990) 22-41, we learn:
abs (max(e)) = 4/27 * sin**6(angle/4) / cos**2(angle/4)
and
abs (error) =~ 1/2 * e
Of course, this error value applies only for the particular spline
approximation that is used in _cairo_gstate_arc_segment.
*/
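/* As a quick sanity check of the error bound above: for a quarter circle
(angle = pi/2) the normalized error evaluates to
2/27 * sin**6(pi/8) / cos**2(pi/8)
=~ 2/27 * 0.0031407 / 0.8535534
=~ 0.00027257
which matches the M_PI / 2.0 entry of the lookup table in
_arc_max_angle_for_tolerance_normalized() below.
*/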
static double
_arc_error_normalized (double angle)
{
return 2.0/27.0 * pow (sin (angle / 4), 6) / pow (cos (angle / 4), 2);
}
static double
_arc_max_angle_for_tolerance_normalized (double tolerance)
{
double angle, error;
int i;
/* Use table lookup to reduce search time in most cases. */
struct {
double angle;
double error;
} table[] = {
{ M_PI / 1.0, 0.0185185185185185036127 },
{ M_PI / 2.0, 0.000272567143730179811158 },
{ M_PI / 3.0, 2.38647043651461047433e-05 },
{ M_PI / 4.0, 4.2455377443222443279e-06 },
{ M_PI / 5.0, 1.11281001494389081528e-06 },
{ M_PI / 6.0, 3.72662000942734705475e-07 },
{ M_PI / 7.0, 1.47783685574284411325e-07 },
{ M_PI / 8.0, 6.63240432022601149057e-08 },
{ M_PI / 9.0, 3.2715520137536980553e-08 },
{ M_PI / 10.0, 1.73863223499021216974e-08 },
{ M_PI / 11.0, 9.81410988043554039085e-09 },
};
int table_size = ARRAY_LENGTH (table);
for (i = 0; i < table_size; i++)
if (table[i].error < tolerance)
return table[i].angle;
++i;
do {
angle = M_PI / i++;
error = _arc_error_normalized (angle);
} while (error > tolerance);
return angle;
}
static int
_arc_segments_needed (double angle,
double radius,
cairo_matrix_t *ctm,
double tolerance)
{
double major_axis, max_angle;
/* the error is amplified by at most the length of the
* major axis of the circle; see cairo-pen.c for a more detailed analysis
* of this. */
major_axis = _cairo_matrix_transformed_circle_major_axis (ctm, radius);
max_angle = _arc_max_angle_for_tolerance_normalized (tolerance / major_axis);
return ceil (fabs (angle) / max_angle);
}
/* We want to draw a single spline approximating a circular arc radius
R from angle A to angle B. Since we want a symmetric spline that
matches the endpoints of the arc in position and slope, we know
that the spline control points must be:
(R * cos(A), R * sin(A))
(R * cos(A) - h * sin(A), R * sin(A) + h * cos (A))
(R * cos(B) + h * sin(B), R * sin(B) - h * cos (B))
(R * cos(B), R * sin(B))
for some value of h.
"Approximation of circular arcs by cubic polynomials", Michael
Goldapp, Computer Aided Geometric Design 8 (1991) 227-238, provides
various values of h along with error analysis for each.
From that paper, a very practical value of h is:
h = 4/3 * R * tan(angle/4)
This value does not give the spline with minimal error, but it does
provide a very good approximation, (6th-order convergence), and the
error expression is quite simple, (see the comment for
_arc_error_normalized).
*/
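/* For instance, a unit-radius quarter circle (A = 0, B = pi/2) gives
h = 4/3 * tan(pi/8) =~ 0.5523, so the spline runs from (1, 0) through
the control points (1, 0.5523) and (0.5523, 1) to (0, 1), which is the
familiar cubic Bezier approximation of a quarter circle.
*/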
static void
_cairo_arc_segment (cairo_t *cr,
double xc,
double yc,
double radius,
double angle_A,
double angle_B)
{
double r_sin_A, r_cos_A;
double r_sin_B, r_cos_B;
double h;
r_sin_A = radius * sin (angle_A);
r_cos_A = radius * cos (angle_A);
r_sin_B = radius * sin (angle_B);
r_cos_B = radius * cos (angle_B);
h = 4.0/3.0 * tan ((angle_B - angle_A) / 4.0);
cairo_curve_to (cr,
xc + r_cos_A - h * r_sin_A,
yc + r_sin_A + h * r_cos_A,
xc + r_cos_B + h * r_sin_B,
yc + r_sin_B - h * r_cos_B,
xc + r_cos_B,
yc + r_sin_B);
}
static void
_cairo_arc_in_direction (cairo_t *cr,
double xc,
double yc,
double radius,
double angle_min,
double angle_max,
cairo_direction_t dir)
{
if (cairo_status (cr))
return;
assert (angle_max >= angle_min);
if (angle_max - angle_min > 2 * M_PI * MAX_FULL_CIRCLES) {
angle_max = fmod (angle_max - angle_min, 2 * M_PI);
angle_min = fmod (angle_min, 2 * M_PI);
angle_max += angle_min + 2 * M_PI * MAX_FULL_CIRCLES;
}
/* Recurse if drawing arc larger than pi */
if (angle_max - angle_min > M_PI) {
double angle_mid = angle_min + (angle_max - angle_min) / 2.0;
if (dir == CAIRO_DIRECTION_FORWARD) {
_cairo_arc_in_direction (cr, xc, yc, radius,
angle_min, angle_mid,
dir);
_cairo_arc_in_direction (cr, xc, yc, radius,
angle_mid, angle_max,
dir);
} else {
_cairo_arc_in_direction (cr, xc, yc, radius,
angle_mid, angle_max,
dir);
_cairo_arc_in_direction (cr, xc, yc, radius,
angle_min, angle_mid,
dir);
}
} else if (angle_max != angle_min) {
cairo_matrix_t ctm;
int i, segments;
double step;
cairo_get_matrix (cr, &ctm);
segments = _arc_segments_needed (angle_max - angle_min,
radius, &ctm,
cairo_get_tolerance (cr));
step = (angle_max - angle_min) / segments;
segments -= 1;
if (dir == CAIRO_DIRECTION_REVERSE) {
double t;
t = angle_min;
angle_min = angle_max;
angle_max = t;
step = -step;
}
cairo_line_to (cr,
xc + radius * cos (angle_min),
yc + radius * sin (angle_min));
for (i = 0; i < segments; i++, angle_min += step) {
_cairo_arc_segment (cr, xc, yc, radius,
angle_min, angle_min + step);
}
_cairo_arc_segment (cr, xc, yc, radius,
angle_min, angle_max);
} else {
cairo_line_to (cr,
xc + radius * cos (angle_min),
yc + radius * sin (angle_min));
}
}
/**
* _cairo_arc_path:
* @cr: a cairo context
* @xc: X position of the center of the arc
* @yc: Y position of the center of the arc
* @radius: the radius of the arc
* @angle1: the start angle, in radians
* @angle2: the end angle, in radians
*
* Compute a path for the given arc and append it onto the current
* path within @cr. The arc will be accurate within the current
* tolerance and given the current transformation.
**/
void
_cairo_arc_path (cairo_t *cr,
double xc,
double yc,
double radius,
double angle1,
double angle2)
{
_cairo_arc_in_direction (cr, xc, yc,
radius,
angle1, angle2,
CAIRO_DIRECTION_FORWARD);
}
/**
* _cairo_arc_path_negative:
* @cr: a cairo context
* @xc: X position of the center of the arc
* @yc: Y position of the center of the arc
* @radius: the radius of the arc
* @angle1: the start angle, in radians
* @angle2: the end angle, in radians
*
* Compute a path for the given arc (defined in the negative
* direction) and append it onto the current path within @cr. The arc
* will be accurate within the current tolerance and given the current
* transformation.
**/
void
_cairo_arc_path_negative (cairo_t *cr,
double xc,
double yc,
double radius,
double angle1,
double angle2)
{
_cairo_arc_in_direction (cr, xc, yc,
radius,
angle2, angle1,
CAIRO_DIRECTION_REVERSE);
}
| 0 |
D://workCode//uploadProject\awtk\3rd\cairo | D://workCode//uploadProject\awtk\3rd\cairo\cairo\cairo-array-private.h | /* cairo - a vector graphics library with display and print output
*
* Copyright © 2002 University of Southern California
* Copyright © 2005 Red Hat, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it either under the terms of the GNU Lesser General Public
* License version 2.1 as published by the Free Software Foundation
* (the "LGPL") or, at your option, under the terms of the Mozilla
* Public License Version 1.1 (the "MPL"). If you do not alter this
* notice, a recipient may use your version of this file under either
* the MPL or the LGPL.
*
* You should have received a copy of the LGPL along with this library
* in the file COPYING-LGPL-2.1; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA
* You should have received a copy of the MPL along with this library
* in the file COPYING-MPL-1.1
*
* The contents of this file are subject to the Mozilla Public License
* Version 1.1 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
* OF ANY KIND, either express or implied. See the LGPL or the MPL for
* the specific language governing rights and limitations.
*
* The Original Code is the cairo graphics library.
*
* The Initial Developer of the Original Code is University of Southern
* California.
*
* Contributor(s):
* Carl D. Worth <cworth@cworth.org>
*/
#ifndef CAIRO_ARRAY_PRIVATE_H
#define CAIRO_ARRAY_PRIVATE_H
#include "cairo-compiler-private.h"
#include "cairo-types-private.h"
CAIRO_BEGIN_DECLS
/* cairo-array.c structures and functions */
cairo_private void
_cairo_array_init (cairo_array_t *array, unsigned int element_size);
cairo_private void
_cairo_array_fini (cairo_array_t *array);
cairo_private cairo_status_t
_cairo_array_grow_by (cairo_array_t *array, unsigned int additional);
cairo_private void
_cairo_array_truncate (cairo_array_t *array, unsigned int num_elements);
cairo_private cairo_status_t
_cairo_array_append (cairo_array_t *array, const void *element);
cairo_private cairo_status_t
_cairo_array_append_multiple (cairo_array_t *array,
const void *elements,
unsigned int num_elements);
cairo_private cairo_status_t
_cairo_array_allocate (cairo_array_t *array,
unsigned int num_elements,
void **elements);
cairo_private void *
_cairo_array_index (cairo_array_t *array, unsigned int index);
cairo_private const void *
_cairo_array_index_const (const cairo_array_t *array, unsigned int index);
cairo_private void
_cairo_array_copy_element (const cairo_array_t *array, unsigned int index, void *dst);
cairo_private unsigned int
_cairo_array_num_elements (const cairo_array_t *array);
cairo_private unsigned int
_cairo_array_size (const cairo_array_t *array);
CAIRO_END_DECLS
#endif /* CAIRO_ARRAY_PRIVATE_H */
| 0 |
D://workCode//uploadProject\awtk\3rd\cairo | D://workCode//uploadProject\awtk\3rd\cairo\cairo\cairo-array.c | /* -*- Mode: c; c-basic-offset: 4; indent-tabs-mode: t; tab-width: 8; -*- */
/* cairo - a vector graphics library with display and print output
*
* Copyright © 2004 Red Hat, Inc
*
* This library is free software; you can redistribute it and/or
* modify it either under the terms of the GNU Lesser General Public
* License version 2.1 as published by the Free Software Foundation
* (the "LGPL") or, at your option, under the terms of the Mozilla
* Public License Version 1.1 (the "MPL"). If you do not alter this
* notice, a recipient may use your version of this file under either
* the MPL or the LGPL.
*
* You should have received a copy of the LGPL along with this library
* in the file COPYING-LGPL-2.1; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA
* You should have received a copy of the MPL along with this library
* in the file COPYING-MPL-1.1
*
* The contents of this file are subject to the Mozilla Public License
* Version 1.1 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
* OF ANY KIND, either express or implied. See the LGPL or the MPL for
* the specific language governing rights and limitations.
*
* The Original Code is the cairo graphics library.
*
* The Initial Developer of the Original Code is University of Southern
* California.
*
* Contributor(s):
* Kristian Høgsberg <krh@redhat.com>
* Carl Worth <cworth@cworth.org>
*/
#include "cairoint.h"
#include "cairo-array-private.h"
#include "cairo-error-private.h"
/**
* _cairo_array_init:
*
* Initialize a new #cairo_array_t object to store objects each of size
* @element_size.
*
* The #cairo_array_t object provides grow-by-doubling storage. It
* never interprets the data passed to it, nor does it provide any
* sort of callback mechanism for freeing resources held onto by
* stored objects.
*
* When finished using the array, _cairo_array_fini() should be
* called to free resources allocated during use of the array.
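*
* A minimal usage sketch (error checking of _cairo_array_append()
* omitted for brevity):
*
* <informalexample><programlisting>
* cairo_array_t array;
* double value = 1.0;
* double *values;
*
* _cairo_array_init (&array, sizeof (double));
* _cairo_array_append (&array, &value);
* values = _cairo_array_index (&array, 0);
* ... use values[0] here ...
* _cairo_array_fini (&array);
* </programlisting></informalexample>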
**/
void
_cairo_array_init (cairo_array_t *array, unsigned int element_size)
{
array->size = 0;
array->num_elements = 0;
array->element_size = element_size;
array->elements = NULL;
}
/**
* _cairo_array_fini:
* @array: A #cairo_array_t
*
* Free all resources associated with @array. After this call, @array
* should not be used again without first calling
* _cairo_array_init() on it again.
**/
void
_cairo_array_fini (cairo_array_t *array)
{
free (array->elements);
}
/**
* _cairo_array_grow_by:
* @array: a #cairo_array_t
*
* Increase the size of @array (if needed) so that there are at least
* @additional free spaces in the array. The actual size of the array
* is always increased by doubling as many times as necessary.
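* For example, an array of size 4 holding 4 elements that is grown by 5
* additional elements doubles to 8 and then to 16 slots.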
**/
cairo_status_t
_cairo_array_grow_by (cairo_array_t *array, unsigned int additional)
{
char *new_elements;
unsigned int old_size = array->size;
unsigned int required_size = array->num_elements + additional;
unsigned int new_size;
/* check for integer overflow */
if (required_size > INT_MAX || required_size < array->num_elements)
return _cairo_error (CAIRO_STATUS_NO_MEMORY);
if (CAIRO_INJECT_FAULT ())
return _cairo_error (CAIRO_STATUS_NO_MEMORY);
if (required_size <= old_size)
return CAIRO_STATUS_SUCCESS;
if (old_size == 0)
new_size = 1;
else
new_size = old_size * 2;
while (new_size < required_size)
new_size = new_size * 2;
array->size = new_size;
new_elements = _cairo_realloc_ab (array->elements,
array->size, array->element_size);
if (unlikely (new_elements == NULL)) {
array->size = old_size;
return _cairo_error (CAIRO_STATUS_NO_MEMORY);
}
array->elements = new_elements;
return CAIRO_STATUS_SUCCESS;
}
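/* Worked example of the doubling policy above (illustrative comment,
 * not upstream documentation): with num_elements == 5, size == 8 and a
 * request for 6 additional slots, required_size is 11 and the capacity
 * doubles once, 8 -> 16. Starting from an empty array the capacity
 * progresses 0 -> 1 -> 2 -> 4 -> 8 -> ..., which keeps the amortized
 * reallocation cost per appended element constant. */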
/**
* _cairo_array_truncate:
* @array: a #cairo_array_t
 * @num_elements: the new number of elements
 *
 * Truncate the size of @array to @num_elements if that is less than the
* current size. No memory is actually freed. The stored objects
* beyond @num_elements are simply "forgotten".
**/
void
_cairo_array_truncate (cairo_array_t *array, unsigned int num_elements)
{
if (num_elements < array->num_elements)
array->num_elements = num_elements;
}
/**
* _cairo_array_index:
 * @array: a #cairo_array_t
 * @index: index of the element to return a pointer to
* Returns: A pointer to the object stored at @index.
*
* If the resulting value is assigned to a pointer to an object of the same
* element_size as initially passed to _cairo_array_init() then that
* pointer may be used for further direct indexing with []. For
* example:
*
* <informalexample><programlisting>
* cairo_array_t array;
* double *values;
*
* _cairo_array_init (&array, sizeof(double));
* ... calls to _cairo_array_append() here ...
*
* values = _cairo_array_index (&array, 0);
* for (i = 0; i < _cairo_array_num_elements (&array); i++)
* ... use values[i] here ...
* </programlisting></informalexample>
**/
void *
_cairo_array_index (cairo_array_t *array, unsigned int index)
{
/* We allow an index of 0 for the no-elements case.
* This makes for cleaner calling code which will often look like:
*
* elements = _cairo_array_index (array, 0);
* for (i=0; i < num_elements; i++) {
* ... use elements[i] here ...
* }
*
* which in the num_elements==0 case gets the NULL pointer here,
* but never dereferences it.
*/
if (index == 0 && array->num_elements == 0)
return NULL;
assert (index < array->num_elements);
return array->elements + index * array->element_size;
}
/**
* _cairo_array_index_const:
 * @array: a #cairo_array_t
 * @index: index of the element to return a pointer to
* Returns: A pointer to the object stored at @index.
*
* If the resulting value is assigned to a pointer to an object of the same
* element_size as initially passed to _cairo_array_init() then that
* pointer may be used for further direct indexing with []. For
* example:
*
* <informalexample><programlisting>
* cairo_array_t array;
* const double *values;
*
* _cairo_array_init (&array, sizeof(double));
* ... calls to _cairo_array_append() here ...
*
* values = _cairo_array_index_const (&array, 0);
* for (i = 0; i < _cairo_array_num_elements (&array); i++)
* ... read values[i] here ...
* </programlisting></informalexample>
**/
const void *
_cairo_array_index_const (const cairo_array_t *array, unsigned int index)
{
/* We allow an index of 0 for the no-elements case.
* This makes for cleaner calling code which will often look like:
*
* elements = _cairo_array_index_const (array, 0);
* for (i=0; i < num_elements; i++) {
* ... read elements[i] here ...
* }
*
* which in the num_elements==0 case gets the NULL pointer here,
* but never dereferences it.
*/
if (index == 0 && array->num_elements == 0)
return NULL;
assert (index < array->num_elements);
return array->elements + index * array->element_size;
}
/**
* _cairo_array_copy_element:
 * @array: a #cairo_array_t
 * @index: index of the element to copy
 * @dst: location to copy the element to
*
* Copy a single element out of the array from index @index into the
* location pointed to by @dst.
**/
void
_cairo_array_copy_element (const cairo_array_t *array,
unsigned int index,
void *dst)
{
memcpy (dst, _cairo_array_index_const (array, index), array->element_size);
}
/**
* _cairo_array_append:
 * @array: a #cairo_array_t
 * @element: pointer to the element to append
*
* Append a single item onto the array by growing the array by at
* least one element, then copying element_size bytes from @element
* into the array. The address of the resulting object within the
* array can be determined with:
*
* _cairo_array_index (array, _cairo_array_num_elements (array) - 1);
*
* Return value: %CAIRO_STATUS_SUCCESS if successful or
* %CAIRO_STATUS_NO_MEMORY if insufficient memory is available for the
* operation.
**/
cairo_status_t
_cairo_array_append (cairo_array_t *array,
const void *element)
{
return _cairo_array_append_multiple (array, element, 1);
}
/**
* _cairo_array_append_multiple:
 * @array: a #cairo_array_t
 * @elements: pointer to the elements to append
 * @num_elements: number of elements to append
*
* Append one or more items onto the array by growing the array by
* @num_elements, then copying @num_elements * element_size bytes from
* @elements into the array.
*
* Return value: %CAIRO_STATUS_SUCCESS if successful or
* %CAIRO_STATUS_NO_MEMORY if insufficient memory is available for the
* operation.
**/
cairo_status_t
_cairo_array_append_multiple (cairo_array_t *array,
const void *elements,
unsigned int num_elements)
{
cairo_status_t status;
void *dest;
status = _cairo_array_allocate (array, num_elements, &dest);
if (unlikely (status))
return status;
memcpy (dest, elements, num_elements * array->element_size);
return CAIRO_STATUS_SUCCESS;
}
/**
* _cairo_array_allocate:
 * @array: a #cairo_array_t
 * @num_elements: number of additional elements to allocate space for
 * @elements: location to store a pointer to the newly allocated space
*
* Allocate space at the end of the array for @num_elements additional
* elements, providing the address of the new memory chunk in
* @elements. This memory will be uninitialized, but will be accounted
* for in the return value of _cairo_array_num_elements().
*
* Return value: %CAIRO_STATUS_SUCCESS if successful or
* %CAIRO_STATUS_NO_MEMORY if insufficient memory is available for the
* operation.
**/
cairo_status_t
_cairo_array_allocate (cairo_array_t *array,
unsigned int num_elements,
void **elements)
{
cairo_status_t status;
status = _cairo_array_grow_by (array, num_elements);
if (unlikely (status))
return status;
assert (array->num_elements + num_elements <= array->size);
*elements = array->elements + array->num_elements * array->element_size;
array->num_elements += num_elements;
return CAIRO_STATUS_SUCCESS;
}
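/* A minimal sketch of using _cairo_array_allocate() directly
 * (illustrative only, assuming @array was initialized with
 * element_size == sizeof (double)): reserve room for a block of
 * elements in one call and fill it afterwards, which is the same
 * pattern _cairo_array_append_multiple() uses above.
 *
 * <informalexample><programlisting>
 * double samples[16];
 * void *dest;
 * cairo_status_t status;
 *
 * status = _cairo_array_allocate (&array, ARRAY_LENGTH (samples), &dest);
 * if (unlikely (status))
 *     return status;
 * memcpy (dest, samples, sizeof (samples));
 * </programlisting></informalexample>
 */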
/**
* _cairo_array_num_elements:
* @array: a #cairo_array_t
* Returns: The number of elements stored in @array.
*
* This space was left intentionally blank, but gtk-doc filled it.
**/
unsigned int
_cairo_array_num_elements (const cairo_array_t *array)
{
return array->num_elements;
}
/**
* _cairo_array_size:
* @array: a #cairo_array_t
* Returns: The number of elements for which there is currently space
* allocated in @array.
*
* This space was left intentionally blank, but gtk-doc filled it.
**/
unsigned int
_cairo_array_size (const cairo_array_t *array)
{
return array->size;
}
/**
* _cairo_user_data_array_init:
* @array: a #cairo_user_data_array_t
*
* Initializes a #cairo_user_data_array_t structure for future
* use. After initialization, the array has no keys. Call
* _cairo_user_data_array_fini() to free any allocated memory
* when done using the array.
**/
void
_cairo_user_data_array_init (cairo_user_data_array_t *array)
{
_cairo_array_init (array, sizeof (cairo_user_data_slot_t));
}
/**
* _cairo_user_data_array_fini:
* @array: a #cairo_user_data_array_t
*
* Destroys all current keys in the user data array and deallocates
* any memory allocated for the array itself.
**/
void
_cairo_user_data_array_fini (cairo_user_data_array_t *array)
{
unsigned int num_slots;
num_slots = array->num_elements;
if (num_slots) {
cairo_user_data_slot_t *slots;
slots = _cairo_array_index (array, 0);
while (num_slots--) {
cairo_user_data_slot_t *s = &slots[num_slots];
if (s->user_data != NULL && s->destroy != NULL)
s->destroy (s->user_data);
}
}
_cairo_array_fini (array);
}
/**
* _cairo_user_data_array_get_data:
* @array: a #cairo_user_data_array_t
* @key: the address of the #cairo_user_data_key_t the user data was
* attached to
*
* Returns user data previously attached using the specified
* key. If no user data has been attached with the given key this
* function returns %NULL.
*
* Return value: the user data previously attached or %NULL.
**/
void *
_cairo_user_data_array_get_data (cairo_user_data_array_t *array,
const cairo_user_data_key_t *key)
{
int i, num_slots;
cairo_user_data_slot_t *slots;
/* We allow this to support degenerate objects such as cairo_surface_nil. */
if (array == NULL)
return NULL;
num_slots = array->num_elements;
slots = _cairo_array_index (array, 0);
for (i = 0; i < num_slots; i++) {
if (slots[i].key == key)
return slots[i].user_data;
}
return NULL;
}
/**
* _cairo_user_data_array_set_data:
* @array: a #cairo_user_data_array_t
* @key: the address of a #cairo_user_data_key_t to attach the user data to
* @user_data: the user data to attach
* @destroy: a #cairo_destroy_func_t which will be called when the
* user data array is destroyed or when new user data is attached using the
* same key.
*
* Attaches user data to a user data array. To remove user data,
* call this function with the key that was used to set it and %NULL
 * for @user_data.
*
* Return value: %CAIRO_STATUS_SUCCESS or %CAIRO_STATUS_NO_MEMORY if a
* slot could not be allocated for the user data.
**/
cairo_status_t
_cairo_user_data_array_set_data (cairo_user_data_array_t *array,
const cairo_user_data_key_t *key,
void *user_data,
cairo_destroy_func_t destroy)
{
cairo_status_t status;
int i, num_slots;
cairo_user_data_slot_t *slots, *slot, new_slot;
if (user_data) {
new_slot.key = key;
new_slot.user_data = user_data;
new_slot.destroy = destroy;
} else {
new_slot.key = NULL;
new_slot.user_data = NULL;
new_slot.destroy = NULL;
}
slot = NULL;
num_slots = array->num_elements;
slots = _cairo_array_index (array, 0);
for (i = 0; i < num_slots; i++) {
if (slots[i].key == key) {
slot = &slots[i];
if (slot->destroy && slot->user_data)
slot->destroy (slot->user_data);
break;
}
if (user_data && slots[i].user_data == NULL) {
slot = &slots[i]; /* Have to keep searching for an exact match */
}
}
if (slot) {
*slot = new_slot;
return CAIRO_STATUS_SUCCESS;
}
if (user_data == NULL)
return CAIRO_STATUS_SUCCESS;
status = _cairo_array_append (array, &new_slot);
if (unlikely (status))
return status;
return CAIRO_STATUS_SUCCESS;
}
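/* A minimal sketch of attaching and retrieving user data with the API
 * above (illustrative only). The key is identified purely by its
 * address, so a #cairo_user_data_key_t with static storage duration is
 * the usual choice; passing %NULL as the data removes the entry again.
 *
 * <informalexample><programlisting>
 * static const cairo_user_data_key_t my_key;
 *
 * status = _cairo_user_data_array_set_data (&array, &my_key,
 *                                           my_data, free);
 * ...
 * my_data = _cairo_user_data_array_get_data (&array, &my_key);
 * ...
 * _cairo_user_data_array_set_data (&array, &my_key, NULL, NULL);
 * </programlisting></informalexample>
 */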
cairo_status_t
_cairo_user_data_array_copy (cairo_user_data_array_t *dst,
const cairo_user_data_array_t *src)
{
/* discard any existing user-data */
if (dst->num_elements != 0) {
_cairo_user_data_array_fini (dst);
_cairo_user_data_array_init (dst);
}
return _cairo_array_append_multiple (dst,
_cairo_array_index_const (src, 0),
src->num_elements);
}
void
_cairo_user_data_array_foreach (cairo_user_data_array_t *array,
void (*func) (const void *key,
void *elt,
void *closure),
void *closure)
{
cairo_user_data_slot_t *slots;
int i, num_slots;
num_slots = array->num_elements;
slots = _cairo_array_index (array, 0);
for (i = 0; i < num_slots; i++) {
if (slots[i].user_data != NULL)
func (slots[i].key, slots[i].user_data, closure);
}
}
| 0 |
D://workCode//uploadProject\awtk\3rd\cairo | D://workCode//uploadProject\awtk\3rd\cairo\cairo\cairo-atomic-private.h | /* cairo - a vector graphics library with display and print output
*
* Copyright © 2007 Chris Wilson
* Copyright © 2010 Andrea Canciani
*
* This library is free software; you can redistribute it and/or
* modify it either under the terms of the GNU Lesser General Public
* License version 2.1 as published by the Free Software Foundation
* (the "LGPL") or, at your option, under the terms of the Mozilla
* Public License Version 1.1 (the "MPL"). If you do not alter this
* notice, a recipient may use your version of this file under either
* the MPL or the LGPL.
*
* You should have received a copy of the LGPL along with this library
* in the file COPYING-LGPL-2.1; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA
* You should have received a copy of the MPL along with this library
* in the file COPYING-MPL-1.1
*
* The contents of this file are subject to the Mozilla Public License
* Version 1.1 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
* OF ANY KIND, either express or implied. See the LGPL or the MPL for
* the specific language governing rights and limitations.
*
* The Original Code is the cairo graphics library.
*
* The Initial Developer of the Original Code is University of Southern
* California.
*
* Contributor(s):
* Chris Wilson <chris@chris-wilson.co.uk>
* Andrea Canciani <ranma42@gmail.com>
*/
#ifndef CAIRO_ATOMIC_PRIVATE_H
#define CAIRO_ATOMIC_PRIVATE_H
# include "cairo-compiler-private.h"
#if HAVE_CONFIG_H
#include "config.h"
#endif
#include <assert.h>
/* The autoconf on OpenBSD 4.5 produces the malformed constant name
* SIZEOF_VOID__ rather than SIZEOF_VOID_P. Work around that here. */
#if !defined(SIZEOF_VOID_P) && defined(SIZEOF_VOID__)
# define SIZEOF_VOID_P SIZEOF_VOID__
#endif
CAIRO_BEGIN_DECLS
/* C++11 atomic primitives were designed to be more flexible than the
* __sync_* family of primitives. Despite the name, they are available
* in C as well as C++. The motivating reason for using them is that
* for _cairo_atomic_{int,ptr}_get, the compiler is able to see that
* the load is intended to be atomic, as opposed to the __sync_*
* version, below, where the load looks like a plain load. Having
* the load appear atomic to the compiler is particular important for
* tools like ThreadSanitizer so they don't report false positives on
* memory operations that we intend to be atomic.
*/
#if HAVE_CXX11_ATOMIC_PRIMITIVES
#define HAS_ATOMIC_OPS 1
typedef int cairo_atomic_int_t;
static cairo_always_inline cairo_atomic_int_t
_cairo_atomic_int_get (cairo_atomic_int_t *x)
{
return __atomic_load_n(x, __ATOMIC_SEQ_CST);
}
static cairo_always_inline cairo_atomic_int_t
_cairo_atomic_int_get_relaxed (cairo_atomic_int_t *x)
{
return __atomic_load_n(x, __ATOMIC_RELAXED);
}
static cairo_always_inline void
_cairo_atomic_int_set_relaxed (cairo_atomic_int_t *x, cairo_atomic_int_t val)
{
__atomic_store_n(x, val, __ATOMIC_RELAXED);
}
static cairo_always_inline void *
_cairo_atomic_ptr_get (void **x)
{
return __atomic_load_n(x, __ATOMIC_SEQ_CST);
}
# define _cairo_atomic_int_inc(x) ((void) __atomic_fetch_add(x, 1, __ATOMIC_SEQ_CST))
# define _cairo_atomic_int_dec(x) ((void) __atomic_fetch_sub(x, 1, __ATOMIC_SEQ_CST))
# define _cairo_atomic_int_dec_and_test(x) (__atomic_fetch_sub(x, 1, __ATOMIC_SEQ_CST) == 1)
#if SIZEOF_VOID_P==SIZEOF_INT
typedef int cairo_atomic_intptr_t;
#elif SIZEOF_VOID_P==SIZEOF_LONG
typedef long cairo_atomic_intptr_t;
#elif SIZEOF_VOID_P==SIZEOF_LONG_LONG
typedef long long cairo_atomic_intptr_t;
#else
#error No matching integer pointer type
#endif
static cairo_always_inline cairo_bool_t
_cairo_atomic_int_cmpxchg_impl(cairo_atomic_int_t *x,
cairo_atomic_int_t oldv,
cairo_atomic_int_t newv)
{
cairo_atomic_int_t expected = oldv;
return __atomic_compare_exchange_n(x, &expected, newv, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}
#define _cairo_atomic_int_cmpxchg(x, oldv, newv) \
_cairo_atomic_int_cmpxchg_impl(x, oldv, newv)
static cairo_always_inline cairo_atomic_int_t
_cairo_atomic_int_cmpxchg_return_old_impl(cairo_atomic_int_t *x,
cairo_atomic_int_t oldv,
cairo_atomic_int_t newv)
{
cairo_atomic_int_t expected = oldv;
(void) __atomic_compare_exchange_n(x, &expected, newv, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
return expected;
}
#define _cairo_atomic_int_cmpxchg_return_old(x, oldv, newv) \
_cairo_atomic_int_cmpxchg_return_old_impl(x, oldv, newv)
static cairo_always_inline cairo_bool_t
_cairo_atomic_ptr_cmpxchg_impl(void **x, void *oldv, void *newv)
{
void *expected = oldv;
return __atomic_compare_exchange_n(x, &expected, newv, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}
#define _cairo_atomic_ptr_cmpxchg(x, oldv, newv) \
_cairo_atomic_ptr_cmpxchg_impl(x, oldv, newv)
static cairo_always_inline void *
_cairo_atomic_ptr_cmpxchg_return_old_impl(void **x, void *oldv, void *newv)
{
void *expected = oldv;
(void) __atomic_compare_exchange_n(x, &expected, newv, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
return expected;
}
#define _cairo_atomic_ptr_cmpxchg_return_old(x, oldv, newv) \
_cairo_atomic_ptr_cmpxchg_return_old_impl(x, oldv, newv)
#endif
#if HAVE_GCC_LEGACY_ATOMICS
#define HAS_ATOMIC_OPS 1
typedef int cairo_atomic_int_t;
static cairo_always_inline cairo_atomic_int_t
_cairo_atomic_int_get (cairo_atomic_int_t *x)
{
__sync_synchronize ();
return *x;
}
static cairo_always_inline cairo_atomic_int_t
_cairo_atomic_int_get_relaxed (cairo_atomic_int_t *x)
{
return *x;
}
static cairo_always_inline void
_cairo_atomic_int_set_relaxed (cairo_atomic_int_t *x, cairo_atomic_int_t val)
{
*x = val;
}
static cairo_always_inline void *
_cairo_atomic_ptr_get (void **x)
{
__sync_synchronize ();
return *x;
}
# define _cairo_atomic_int_inc(x) ((void) __sync_fetch_and_add(x, 1))
# define _cairo_atomic_int_dec(x) ((void) __sync_fetch_and_add(x, -1))
# define _cairo_atomic_int_dec_and_test(x) (__sync_fetch_and_add(x, -1) == 1)
# define _cairo_atomic_int_cmpxchg(x, oldv, newv) __sync_bool_compare_and_swap (x, oldv, newv)
# define _cairo_atomic_int_cmpxchg_return_old(x, oldv, newv) __sync_val_compare_and_swap (x, oldv, newv)
#if SIZEOF_VOID_P==SIZEOF_INT
typedef int cairo_atomic_intptr_t;
#elif SIZEOF_VOID_P==SIZEOF_LONG
typedef long cairo_atomic_intptr_t;
#elif SIZEOF_VOID_P==SIZEOF_LONG_LONG
typedef long long cairo_atomic_intptr_t;
#else
#error No matching integer pointer type
#endif
# define _cairo_atomic_ptr_cmpxchg(x, oldv, newv) \
__sync_bool_compare_and_swap ((cairo_atomic_intptr_t*)x, (cairo_atomic_intptr_t)oldv, (cairo_atomic_intptr_t)newv)
# define _cairo_atomic_ptr_cmpxchg_return_old(x, oldv, newv) \
_cairo_atomic_intptr_to_voidptr (__sync_val_compare_and_swap ((cairo_atomic_intptr_t*)x, (cairo_atomic_intptr_t)oldv, (cairo_atomic_intptr_t)newv))
#endif
#if HAVE_LIB_ATOMIC_OPS
#include <atomic_ops.h>
#define HAS_ATOMIC_OPS 1
typedef AO_t cairo_atomic_int_t;
# define _cairo_atomic_int_get(x) (AO_load_full (x))
# define _cairo_atomic_int_get_relaxed(x) (AO_load_full (x))
# define _cairo_atomic_int_set_relaxed(x, val) (AO_store_full ((x), (val)))
# define _cairo_atomic_int_inc(x) ((void) AO_fetch_and_add1_full(x))
# define _cairo_atomic_int_dec(x) ((void) AO_fetch_and_sub1_full(x))
# define _cairo_atomic_int_dec_and_test(x) (AO_fetch_and_sub1_full(x) == 1)
# define _cairo_atomic_int_cmpxchg(x, oldv, newv) AO_compare_and_swap_full(x, oldv, newv)
#if SIZEOF_VOID_P==SIZEOF_INT
typedef unsigned int cairo_atomic_intptr_t;
#elif SIZEOF_VOID_P==SIZEOF_LONG
typedef unsigned long cairo_atomic_intptr_t;
#elif SIZEOF_VOID_P==SIZEOF_LONG_LONG
typedef unsigned long long cairo_atomic_intptr_t;
#else
#error No matching integer pointer type
#endif
# define _cairo_atomic_ptr_get(x) _cairo_atomic_intptr_to_voidptr (AO_load_full (x))
# define _cairo_atomic_ptr_cmpxchg(x, oldv, newv) \
_cairo_atomic_int_cmpxchg ((cairo_atomic_intptr_t*)(x), (cairo_atomic_intptr_t)oldv, (cairo_atomic_intptr_t)newv)
#endif
#if HAVE_OS_ATOMIC_OPS
#include <libkern/OSAtomic.h>
#define HAS_ATOMIC_OPS 1
typedef int32_t cairo_atomic_int_t;
# define _cairo_atomic_int_get(x) (OSMemoryBarrier(), *(x))
# define _cairo_atomic_int_get_relaxed(x) *(x)
# define _cairo_atomic_int_set_relaxed(x, val) *(x) = (val)
# define _cairo_atomic_int_inc(x) ((void) OSAtomicIncrement32Barrier (x))
# define _cairo_atomic_int_dec(x) ((void) OSAtomicDecrement32Barrier (x))
# define _cairo_atomic_int_dec_and_test(x) (OSAtomicDecrement32Barrier (x) == 0)
# define _cairo_atomic_int_cmpxchg(x, oldv, newv) OSAtomicCompareAndSwap32Barrier(oldv, newv, x)
#if SIZEOF_VOID_P==4
typedef int32_t cairo_atomic_intptr_t;
# define _cairo_atomic_ptr_cmpxchg(x, oldv, newv) \
OSAtomicCompareAndSwap32Barrier((cairo_atomic_intptr_t)oldv, (cairo_atomic_intptr_t)newv, (cairo_atomic_intptr_t *)x)
#elif SIZEOF_VOID_P==8
typedef int64_t cairo_atomic_intptr_t;
# define _cairo_atomic_ptr_cmpxchg(x, oldv, newv) \
OSAtomicCompareAndSwap64Barrier((cairo_atomic_intptr_t)oldv, (cairo_atomic_intptr_t)newv, (cairo_atomic_intptr_t *)x)
#else
#error No matching integer pointer type
#endif
# define _cairo_atomic_ptr_get(x) (OSMemoryBarrier(), *(x))
#endif
#ifndef HAS_ATOMIC_OPS
#if SIZEOF_VOID_P==SIZEOF_INT
typedef unsigned int cairo_atomic_intptr_t;
#elif SIZEOF_VOID_P==SIZEOF_LONG
typedef unsigned long cairo_atomic_intptr_t;
#elif SIZEOF_VOID_P==SIZEOF_LONG_LONG
typedef unsigned long long cairo_atomic_intptr_t;
#else
#error No matching integer pointer type
#endif
typedef cairo_atomic_intptr_t cairo_atomic_int_t;
cairo_private void
_cairo_atomic_int_inc (cairo_atomic_int_t *x);
#define _cairo_atomic_int_dec(x) _cairo_atomic_int_dec_and_test(x)
cairo_private cairo_bool_t
_cairo_atomic_int_dec_and_test (cairo_atomic_int_t *x);
cairo_private cairo_atomic_int_t
_cairo_atomic_int_cmpxchg_return_old_impl (cairo_atomic_int_t *x, cairo_atomic_int_t oldv, cairo_atomic_int_t newv);
cairo_private void *
_cairo_atomic_ptr_cmpxchg_return_old_impl (void **x, void *oldv, void *newv);
#define _cairo_atomic_int_cmpxchg_return_old(x, oldv, newv) _cairo_atomic_int_cmpxchg_return_old_impl (x, oldv, newv)
#define _cairo_atomic_ptr_cmpxchg_return_old(x, oldv, newv) _cairo_atomic_ptr_cmpxchg_return_old_impl (x, oldv, newv)
#ifdef ATOMIC_OP_NEEDS_MEMORY_BARRIER
cairo_private cairo_atomic_int_t
_cairo_atomic_int_get (cairo_atomic_int_t *x);
cairo_private cairo_atomic_int_t
_cairo_atomic_int_get_relaxed (cairo_atomic_int_t *x);
void
_cairo_atomic_int_set_relaxed (cairo_atomic_int_t *x, cairo_atomic_int_t val);
# define _cairo_atomic_ptr_get(x) (void *) _cairo_atomic_int_get((cairo_atomic_int_t *) x)
#else
# define _cairo_atomic_int_get(x) (*x)
# define _cairo_atomic_int_get_relaxed(x) (*x)
# define _cairo_atomic_int_set_relaxed(x, val) (*x) = (val)
# define _cairo_atomic_ptr_get(x) (*x)
#endif
#else
/* Workaround GCC complaining about casts */
static cairo_always_inline void *
_cairo_atomic_intptr_to_voidptr (cairo_atomic_intptr_t x)
{
return (void *) x;
}
static cairo_always_inline cairo_atomic_int_t
_cairo_atomic_int_cmpxchg_return_old_fallback(cairo_atomic_int_t *x, cairo_atomic_int_t oldv, cairo_atomic_int_t newv)
{
cairo_atomic_int_t curr;
do {
curr = _cairo_atomic_int_get (x);
} while (curr == oldv && !_cairo_atomic_int_cmpxchg (x, oldv, newv));
return curr;
}
static cairo_always_inline void *
_cairo_atomic_ptr_cmpxchg_return_old_fallback(void **x, void *oldv, void *newv)
{
void *curr;
do {
curr = _cairo_atomic_ptr_get (x);
} while (curr == oldv && !_cairo_atomic_ptr_cmpxchg (x, oldv, newv));
return curr;
}
#endif
#ifndef _cairo_atomic_int_cmpxchg_return_old
#define _cairo_atomic_int_cmpxchg_return_old(x, oldv, newv) _cairo_atomic_int_cmpxchg_return_old_fallback (x, oldv, newv)
#endif
#ifndef _cairo_atomic_ptr_cmpxchg_return_old
#define _cairo_atomic_ptr_cmpxchg_return_old(x, oldv, newv) _cairo_atomic_ptr_cmpxchg_return_old_fallback (x, oldv, newv)
#endif
#ifndef _cairo_atomic_int_cmpxchg
#define _cairo_atomic_int_cmpxchg(x, oldv, newv) (_cairo_atomic_int_cmpxchg_return_old (x, oldv, newv) == oldv)
#endif
#ifndef _cairo_atomic_ptr_cmpxchg
#define _cairo_atomic_ptr_cmpxchg(x, oldv, newv) (_cairo_atomic_ptr_cmpxchg_return_old (x, oldv, newv) == oldv)
#endif
#define _cairo_atomic_uint_get(x) _cairo_atomic_int_get(x)
#define _cairo_atomic_uint_cmpxchg(x, oldv, newv) \
_cairo_atomic_int_cmpxchg((cairo_atomic_int_t *)x, oldv, newv)
#define _cairo_status_set_error(status, err) do { \
int ret__; \
assert (err < CAIRO_STATUS_LAST_STATUS); \
    /* hide compiler warnings about cairo_status_t != int (gcc treats it as \
     * an unsigned integer instead), and about ignoring the return value. */ \
ret__ = _cairo_atomic_int_cmpxchg ((cairo_atomic_int_t *) status, CAIRO_STATUS_SUCCESS, err); \
(void) ret__; \
} while (0)
typedef cairo_atomic_int_t cairo_atomic_once_t;
#define CAIRO_ATOMIC_ONCE_UNINITIALIZED (0)
#define CAIRO_ATOMIC_ONCE_INITIALIZING (1)
#define CAIRO_ATOMIC_ONCE_INITIALIZED (2)
#define CAIRO_ATOMIC_ONCE_INIT CAIRO_ATOMIC_ONCE_UNINITIALIZED
static cairo_always_inline cairo_bool_t
_cairo_atomic_init_once_enter(cairo_atomic_once_t *once)
{
if (likely(_cairo_atomic_int_get(once) == CAIRO_ATOMIC_ONCE_INITIALIZED))
return 0;
if (_cairo_atomic_int_cmpxchg(once,
CAIRO_ATOMIC_ONCE_UNINITIALIZED,
CAIRO_ATOMIC_ONCE_INITIALIZING))
return 1;
while (_cairo_atomic_int_get(once) != CAIRO_ATOMIC_ONCE_INITIALIZED) {}
return 0;
}
static cairo_always_inline void
_cairo_atomic_init_once_leave(cairo_atomic_once_t *once)
{
if (unlikely(!_cairo_atomic_int_cmpxchg(once,
CAIRO_ATOMIC_ONCE_INITIALIZING,
CAIRO_ATOMIC_ONCE_INITIALIZED)))
assert (0 && "incorrect use of _cairo_atomic_init_once API (once != CAIRO_ATOMIC_ONCE_INITIALIZING)");
}
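/* A minimal usage sketch of the init-once helpers above (illustrative
 * only; do_one_time_setup() stands in for the caller's own one-time
 * initialization routine):
 *
 *   static cairo_atomic_once_t once = CAIRO_ATOMIC_ONCE_INIT;
 *
 *   if (_cairo_atomic_init_once_enter (&once)) {
 *       do_one_time_setup ();
 *       _cairo_atomic_init_once_leave (&once);
 *   }
 *
 * Exactly one caller sees _cairo_atomic_init_once_enter() return true
 * and performs the setup; every other caller either takes the fast
 * path once the state is CAIRO_ATOMIC_ONCE_INITIALIZED or spins until
 * _cairo_atomic_init_once_leave() publishes that state.
 */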
CAIRO_END_DECLS
#endif
| 0 |
D://workCode//uploadProject\awtk\3rd\cairo | D://workCode//uploadProject\awtk\3rd\cairo\cairo\cairo-atomic.c | /* cairo - a vector graphics library with display and print output
*
* Copyright © 2007 Chris Wilson
*
* This library is free software; you can redistribute it and/or
* modify it either under the terms of the GNU Lesser General Public
* License version 2.1 as published by the Free Software Foundation
* (the "LGPL") or, at your option, under the terms of the Mozilla
* Public License Version 1.1 (the "MPL"). If you do not alter this
* notice, a recipient may use your version of this file under either
* the MPL or the LGPL.
*
* You should have received a copy of the LGPL along with this library
* in the file COPYING-LGPL-2.1; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA
* You should have received a copy of the MPL along with this library
* in the file COPYING-MPL-1.1
*
* The contents of this file are subject to the Mozilla Public License
* Version 1.1 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
* OF ANY KIND, either express or implied. See the LGPL or the MPL for
* the specific language governing rights and limitations.
*
* The Original Code is the cairo graphics library.
*
* Contributor(s):
* Chris Wilson <chris@chris-wilson.co.uk>
*/
#include "cairoint.h"
#include "cairo-atomic-private.h"
#include "cairo-mutex-private.h"
#ifdef HAS_ATOMIC_OPS
COMPILE_TIME_ASSERT(sizeof(void*) == sizeof(int) ||
sizeof(void*) == sizeof(long) ||
sizeof(void*) == sizeof(long long));
#else
void
_cairo_atomic_int_inc (cairo_atomic_intptr_t *x)
{
CAIRO_MUTEX_LOCK (_cairo_atomic_mutex);
*x += 1;
CAIRO_MUTEX_UNLOCK (_cairo_atomic_mutex);
}
cairo_bool_t
_cairo_atomic_int_dec_and_test (cairo_atomic_intptr_t *x)
{
cairo_bool_t ret;
CAIRO_MUTEX_LOCK (_cairo_atomic_mutex);
ret = --*x == 0;
CAIRO_MUTEX_UNLOCK (_cairo_atomic_mutex);
return ret;
}
cairo_atomic_intptr_t
_cairo_atomic_int_cmpxchg_return_old_impl (cairo_atomic_intptr_t *x, cairo_atomic_intptr_t oldv, cairo_atomic_intptr_t newv)
{
cairo_atomic_intptr_t ret;
CAIRO_MUTEX_LOCK (_cairo_atomic_mutex);
ret = *x;
if (ret == oldv)
*x = newv;
CAIRO_MUTEX_UNLOCK (_cairo_atomic_mutex);
return ret;
}
void *
_cairo_atomic_ptr_cmpxchg_return_old_impl (void **x, void *oldv, void *newv)
{
void *ret;
CAIRO_MUTEX_LOCK (_cairo_atomic_mutex);
ret = *x;
if (ret == oldv)
*x = newv;
CAIRO_MUTEX_UNLOCK (_cairo_atomic_mutex);
return ret;
}
#ifdef ATOMIC_OP_NEEDS_MEMORY_BARRIER
cairo_atomic_intptr_t
_cairo_atomic_int_get (cairo_atomic_intptr_t *x)
{
cairo_atomic_intptr_t ret;
CAIRO_MUTEX_LOCK (_cairo_atomic_mutex);
ret = *x;
CAIRO_MUTEX_UNLOCK (_cairo_atomic_mutex);
return ret;
}
cairo_atomic_intptr_t
_cairo_atomic_int_get_relaxed (cairo_atomic_intptr_t *x)
{
return _cairo_atomic_int_get (x);
}
void
_cairo_atomic_int_set_relaxed (cairo_atomic_intptr_t *x, cairo_atomic_intptr_t val)
{
CAIRO_MUTEX_LOCK (_cairo_atomic_mutex);
*x = val;
CAIRO_MUTEX_UNLOCK (_cairo_atomic_mutex);
}
#endif
#endif
| 0 |
D://workCode//uploadProject\awtk\3rd\cairo | D://workCode//uploadProject\awtk\3rd\cairo\cairo\cairo-backend-private.h | /* cairo - a vector graphics library with display and print output
*
* Copyright © 2010 Intel Corporation
*
* This library is free software; you can redistribute it and/or
* modify it either under the terms of the GNU Lesser General Public
* License version 2.1 as published by the Free Software Foundation
* (the "LGPL") or, at your option, under the terms of the Mozilla
* Public License Version 1.1 (the "MPL"). If you do not alter this
* notice, a recipient may use your version of this file under either
* the MPL or the LGPL.
*
* You should have received a copy of the LGPL along with this library
* in the file COPYING-LGPL-2.1; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA
* You should have received a copy of the MPL along with this library
* in the file COPYING-MPL-1.1
*
* The contents of this file are subject to the Mozilla Public License
* Version 1.1 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
* OF ANY KIND, either express or implied. See the LGPL or the MPL for
* the specific language governing rights and limitations.
*
* The Original Code is the cairo graphics library.
*
* The Initial Developer of the Original Code is Intel Corporation
*
* Contributor(s):
* Chris Wilson <chris@chris-wilson.co.uk>
*/
#ifndef CAIRO_BACKEND_PRIVATE_H
#define CAIRO_BACKEND_PRIVATE_H
#include "cairo-types-private.h"
#include "cairo-private.h"
typedef enum _cairo_backend_type {
CAIRO_TYPE_DEFAULT,
CAIRO_TYPE_SKIA,
} cairo_backend_type_t;
struct _cairo_backend {
cairo_backend_type_t type;
void (*destroy) (void *cr);
cairo_surface_t *(*get_original_target) (void *cr);
cairo_surface_t *(*get_current_target) (void *cr);
cairo_status_t (*save) (void *cr);
cairo_status_t (*restore) (void *cr);
cairo_status_t (*push_group) (void *cr, cairo_content_t content);
cairo_pattern_t *(*pop_group) (void *cr);
cairo_status_t (*set_source_rgba) (void *cr, double red, double green, double blue, double alpha);
cairo_status_t (*set_source_surface) (void *cr, cairo_surface_t *surface, double x, double y);
cairo_status_t (*set_source) (void *cr, cairo_pattern_t *source);
cairo_pattern_t *(*get_source) (void *cr);
cairo_status_t (*set_antialias) (void *cr, cairo_antialias_t antialias);
cairo_status_t (*set_dash) (void *cr, const double *dashes, int num_dashes, double offset);
cairo_status_t (*set_fill_rule) (void *cr, cairo_fill_rule_t fill_rule);
cairo_status_t (*set_line_cap) (void *cr, cairo_line_cap_t line_cap);
cairo_status_t (*set_line_join) (void *cr, cairo_line_join_t line_join);
cairo_status_t (*set_line_width) (void *cr, double line_width);
cairo_status_t (*set_miter_limit) (void *cr, double limit);
cairo_status_t (*set_opacity) (void *cr, double opacity);
cairo_status_t (*set_operator) (void *cr, cairo_operator_t op);
cairo_status_t (*set_tolerance) (void *cr, double tolerance);
cairo_antialias_t (*get_antialias) (void *cr);
void (*get_dash) (void *cr, double *dashes, int *num_dashes, double *offset);
cairo_fill_rule_t (*get_fill_rule) (void *cr);
cairo_line_cap_t (*get_line_cap) (void *cr);
cairo_line_join_t (*get_line_join) (void *cr);
double (*get_line_width) (void *cr);
double (*get_miter_limit) (void *cr);
double (*get_opacity) (void *cr);
cairo_operator_t (*get_operator) (void *cr);
double (*get_tolerance) (void *cr);
cairo_status_t (*translate) (void *cr, double tx, double ty);
cairo_status_t (*scale) (void *cr, double sx, double sy);
cairo_status_t (*rotate) (void *cr, double theta);
cairo_status_t (*transform) (void *cr, const cairo_matrix_t *matrix);
cairo_status_t (*set_matrix) (void *cr, const cairo_matrix_t *matrix);
cairo_status_t (*set_identity_matrix) (void *cr);
void (*get_matrix) (void *cr, cairo_matrix_t *matrix);
void (*user_to_device) (void *cr, double *x, double *y);
void (*user_to_device_distance) (void *cr, double *x, double *y);
void (*device_to_user) (void *cr, double *x, double *y);
void (*device_to_user_distance) (void *cr, double *x, double *y);
void (*user_to_backend) (void *cr, double *x, double *y);
void (*user_to_backend_distance) (void *cr, double *x, double *y);
void (*backend_to_user) (void *cr, double *x, double *y);
void (*backend_to_user_distance) (void *cr, double *x, double *y);
cairo_status_t (*new_path) (void *cr);
cairo_status_t (*new_sub_path) (void *cr);
cairo_status_t (*move_to) (void *cr, double x, double y);
cairo_status_t (*rel_move_to) (void *cr, double dx, double dy);
cairo_status_t (*line_to) (void *cr, double x, double y);
cairo_status_t (*rel_line_to) (void *cr, double dx, double dy);
cairo_status_t (*curve_to) (void *cr, double x1, double y1, double x2, double y2, double x3, double y3);
cairo_status_t (*rel_curve_to) (void *cr, double dx1, double dy1, double dx2, double dy2, double dx3, double dy3);
cairo_status_t (*arc_to) (void *cr, double x1, double y1, double x2, double y2, double radius);
cairo_status_t (*rel_arc_to) (void *cr, double dx1, double dy1, double dx2, double dy2, double radius);
cairo_status_t (*close_path) (void *cr);
cairo_status_t (*arc) (void *cr, double xc, double yc, double radius, double angle1, double angle2, cairo_bool_t forward);
cairo_status_t (*rectangle) (void *cr, double x, double y, double width, double height);
void (*path_extents) (void *cr, double *x1, double *y1, double *x2, double *y2);
cairo_bool_t (*has_current_point) (void *cr);
cairo_bool_t (*get_current_point) (void *cr, double *x, double *y);
cairo_path_t *(*copy_path) (void *cr);
cairo_path_t *(*copy_path_flat) (void *cr);
cairo_status_t (*append_path) (void *cr, const cairo_path_t *path);
cairo_status_t (*stroke_to_path) (void *cr);
cairo_status_t (*clip) (void *cr);
cairo_status_t (*clip_preserve) (void *cr);
cairo_status_t (*in_clip) (void *cr, double x, double y, cairo_bool_t *inside);
cairo_status_t (*clip_extents) (void *cr, double *x1, double *y1, double *x2, double *y2);
cairo_status_t (*reset_clip) (void *cr);
cairo_rectangle_list_t *(*clip_copy_rectangle_list) (void *cr);
cairo_status_t (*paint) (void *cr);
cairo_status_t (*paint_with_alpha) (void *cr, double opacity);
cairo_status_t (*mask) (void *cr, cairo_pattern_t *pattern);
cairo_status_t (*stroke) (void *cr);
cairo_status_t (*stroke_preserve) (void *cr);
cairo_status_t (*in_stroke) (void *cr, double x, double y, cairo_bool_t *inside);
cairo_status_t (*stroke_extents) (void *cr, double *x1, double *y1, double *x2, double *y2);
cairo_status_t (*fill) (void *cr);
cairo_status_t (*fill_preserve) (void *cr);
cairo_status_t (*in_fill) (void *cr, double x, double y, cairo_bool_t *inside);
cairo_status_t (*fill_extents) (void *cr, double *x1, double *y1, double *x2, double *y2);
cairo_status_t (*set_font_face) (void *cr, cairo_font_face_t *font_face);
cairo_font_face_t *(*get_font_face) (void *cr);
cairo_status_t (*set_font_size) (void *cr, double size);
cairo_status_t (*set_font_matrix) (void *cr, const cairo_matrix_t *matrix);
void (*get_font_matrix) (void *cr, cairo_matrix_t *matrix);
cairo_status_t (*set_font_options) (void *cr, const cairo_font_options_t *options);
void (*get_font_options) (void *cr, cairo_font_options_t *options);
cairo_status_t (*set_scaled_font) (void *cr, cairo_scaled_font_t *scaled_font);
cairo_scaled_font_t *(*get_scaled_font) (void *cr);
cairo_status_t (*font_extents) (void *cr, cairo_font_extents_t *extents);
cairo_status_t (*glyphs) (void *cr,
const cairo_glyph_t *glyphs, int num_glyphs,
cairo_glyph_text_info_t *info);
cairo_status_t (*glyph_path) (void *cr,
const cairo_glyph_t *glyphs, int num_glyphs);
cairo_status_t (*glyph_extents) (void *cr,
const cairo_glyph_t *glyphs,
int num_glyphs,
cairo_text_extents_t *extents);
cairo_status_t (*copy_page) (void *cr);
cairo_status_t (*show_page) (void *cr);
cairo_status_t (*tag_begin) (void *cr, const char *tag_name, const char *attributes);
cairo_status_t (*tag_end) (void *cr, const char *tag_name);
};
static inline void
_cairo_backend_to_user (cairo_t *cr, double *x, double *y)
{
cr->backend->backend_to_user (cr, x, y);
}
static inline void
_cairo_backend_to_user_distance (cairo_t *cr, double *x, double *y)
{
cr->backend->backend_to_user_distance (cr, x, y);
}
static inline void
_cairo_user_to_backend (cairo_t *cr, double *x, double *y)
{
cr->backend->user_to_backend (cr, x, y);
}
static inline void
_cairo_user_to_backend_distance (cairo_t *cr, double *x, double *y)
{
cr->backend->user_to_backend_distance (cr, x, y);
}
#endif /* CAIRO_BACKEND_PRIVATE_H */
| 0 |
D://workCode//uploadProject\awtk\3rd\cairo | D://workCode//uploadProject\awtk\3rd\cairo\cairo\cairo-base64-stream.c | /* -*- Mode: c; c-basic-offset: 4; indent-tabs-mode: t; tab-width: 8; -*- */
/* cairo - a vector graphics library with display and print output
*
* Copyright © 2005-2007 Emmanuel Pacaud <emmanuel.pacaud@free.fr>
* Copyright © 2009 Chris Wilson
*
* This library is free software; you can redistribute it and/or
* modify it either under the terms of the GNU Lesser General Public
* License version 2.1 as published by the Free Software Foundation
* (the "LGPL") or, at your option, under the terms of the Mozilla
* Public License Version 1.1 (the "MPL"). If you do not alter this
* notice, a recipient may use your version of this file under either
* the MPL or the LGPL.
*
* You should have received a copy of the LGPL along with this library
* in the file COPYING-LGPL-2.1; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA
* You should have received a copy of the MPL along with this library
* in the file COPYING-MPL-1.1
*
* The contents of this file are subject to the Mozilla Public License
* Version 1.1 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
* OF ANY KIND, either express or implied. See the LGPL or the MPL for
* the specific language governing rights and limitations.
*
* The Original Code is the cairo graphics library.
*
* The Initial Developer of the Original Code is Red Hat, Inc.
*
* Author(s):
* Kristian Høgsberg <krh@redhat.com>
* Chris Wilson <chris@chris-wilson.co.uk>
*/
#include "cairoint.h"
#include "cairo-error-private.h"
#include "cairo-output-stream-private.h"
typedef struct _cairo_base64_stream {
cairo_output_stream_t base;
cairo_output_stream_t *output;
unsigned int in_mem;
unsigned int trailing;
unsigned char src[3];
} cairo_base64_stream_t;
static char const base64_table[64] =
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
static cairo_status_t
_cairo_base64_stream_write (cairo_output_stream_t *base,
const unsigned char *data,
unsigned int length)
{
cairo_base64_stream_t * stream = (cairo_base64_stream_t *) base;
unsigned char *src = stream->src;
unsigned int i;
if (stream->in_mem + length < 3) {
for (i = 0; i < length; i++) {
src[i + stream->in_mem] = *data++;
}
stream->in_mem += length;
return CAIRO_STATUS_SUCCESS;
}
do {
unsigned char dst[4];
for (i = stream->in_mem; i < 3; i++) {
src[i] = *data++;
length--;
}
stream->in_mem = 0;
dst[0] = base64_table[src[0] >> 2];
dst[1] = base64_table[(src[0] & 0x03) << 4 | src[1] >> 4];
dst[2] = base64_table[(src[1] & 0x0f) << 2 | src[2] >> 6];
	dst[3] = base64_table[src[2] & 0x3f]; /* low six bits of the last byte */
/* Special case for the last missing bits */
switch (stream->trailing) {
	case 2:
	    dst[2] = '=';
	    /* fall through */
	case 1:
	    dst[3] = '=';
default:
break;
}
_cairo_output_stream_write (stream->output, dst, 4);
} while (length >= 3);
for (i = 0; i < length; i++) {
src[i] = *data++;
}
stream->in_mem = length;
return _cairo_output_stream_get_status (stream->output);
}
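/* Worked example of the packing above (illustrative comment): the
 * three input bytes "Man" (0x4d 0x61 0x6e) are split into the four
 * 6-bit values 19, 22, 5 and 46, which index base64_table as "TWFu".
 * When fewer than three bytes remain at close time, the missing bytes
 * are zero-filled and the corresponding output characters are
 * overwritten with '=' padding via stream->trailing. */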
static cairo_status_t
_cairo_base64_stream_close (cairo_output_stream_t *base)
{
cairo_base64_stream_t *stream = (cairo_base64_stream_t *) base;
cairo_status_t status = CAIRO_STATUS_SUCCESS;
if (stream->in_mem > 0) {
memset (stream->src + stream->in_mem, 0, 3 - stream->in_mem);
stream->trailing = 3 - stream->in_mem;
stream->in_mem = 3;
status = _cairo_base64_stream_write (base, NULL, 0);
}
return status;
}
cairo_output_stream_t *
_cairo_base64_stream_create (cairo_output_stream_t *output)
{
cairo_base64_stream_t *stream;
if (output->status)
return _cairo_output_stream_create_in_error (output->status);
stream = _cairo_malloc (sizeof (cairo_base64_stream_t));
if (unlikely (stream == NULL)) {
_cairo_error_throw (CAIRO_STATUS_NO_MEMORY);
return (cairo_output_stream_t *) &_cairo_output_stream_nil;
}
_cairo_output_stream_init (&stream->base,
_cairo_base64_stream_write,
NULL,
_cairo_base64_stream_close);
stream->output = output;
stream->in_mem = 0;
stream->trailing = 0;
return &stream->base;
}
| 0 |
D://workCode//uploadProject\awtk\3rd\cairo | D://workCode//uploadProject\awtk\3rd\cairo\cairo\cairo-base85-stream.c | /* -*- Mode: c; c-basic-offset: 4; indent-tabs-mode: t; tab-width: 8; -*- */
/* cairo - a vector graphics library with display and print output
*
* Copyright © 2005 Red Hat, Inc
*
* This library is free software; you can redistribute it and/or
* modify it either under the terms of the GNU Lesser General Public
* License version 2.1 as published by the Free Software Foundation
* (the "LGPL") or, at your option, under the terms of the Mozilla
* Public License Version 1.1 (the "MPL"). If you do not alter this
* notice, a recipient may use your version of this file under either
* the MPL or the LGPL.
*
* You should have received a copy of the LGPL along with this library
* in the file COPYING-LGPL-2.1; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA
* You should have received a copy of the MPL along with this library
* in the file COPYING-MPL-1.1
*
* The contents of this file are subject to the Mozilla Public License
* Version 1.1 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
* OF ANY KIND, either express or implied. See the LGPL or the MPL for
* the specific language governing rights and limitations.
*
* The Original Code is the cairo graphics library.
*
* The Initial Developer of the Original Code is Red Hat, Inc.
*
* Author(s):
* Kristian Høgsberg <krh@redhat.com>
*/
#include "cairoint.h"
#include "cairo-error-private.h"
#include "cairo-output-stream-private.h"
typedef struct _cairo_base85_stream {
cairo_output_stream_t base;
cairo_output_stream_t *output;
unsigned char four_tuple[4];
int pending;
} cairo_base85_stream_t;
static void
_expand_four_tuple_to_five (unsigned char four_tuple[4],
unsigned char five_tuple[5],
cairo_bool_t *all_zero)
{
uint32_t value;
int digit, i;
value = four_tuple[0] << 24 | four_tuple[1] << 16 | four_tuple[2] << 8 | four_tuple[3];
if (all_zero)
*all_zero = TRUE;
for (i = 0; i < 5; i++) {
digit = value % 85;
if (digit != 0 && all_zero)
*all_zero = FALSE;
five_tuple[4-i] = digit + 33;
value = value / 85;
}
}
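/* Worked example of the expansion above (illustrative comment): the
 * four bytes "Hell" (0x48 0x65 0x6c 0x6c) form the 32-bit value
 * 1214606444, whose base-85 digits are 23, 22, 66, 52 and 49; adding
 * 33 to each gives the five ASCII characters "87cUR". A tuple of four
 * zero bytes sets *all_zero so the caller can emit the single 'z'
 * shorthand instead. */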
static cairo_status_t
_cairo_base85_stream_write (cairo_output_stream_t *base,
const unsigned char *data,
unsigned int length)
{
cairo_base85_stream_t *stream = (cairo_base85_stream_t *) base;
const unsigned char *ptr = data;
unsigned char five_tuple[5];
cairo_bool_t is_zero;
while (length) {
stream->four_tuple[stream->pending++] = *ptr++;
length--;
if (stream->pending == 4) {
_expand_four_tuple_to_five (stream->four_tuple, five_tuple, &is_zero);
if (is_zero)
_cairo_output_stream_write (stream->output, "z", 1);
else
_cairo_output_stream_write (stream->output, five_tuple, 5);
stream->pending = 0;
}
}
return _cairo_output_stream_get_status (stream->output);
}
static cairo_status_t
_cairo_base85_stream_close (cairo_output_stream_t *base)
{
cairo_base85_stream_t *stream = (cairo_base85_stream_t *) base;
unsigned char five_tuple[5];
if (stream->pending) {
memset (stream->four_tuple + stream->pending, 0, 4 - stream->pending);
_expand_four_tuple_to_five (stream->four_tuple, five_tuple, NULL);
_cairo_output_stream_write (stream->output, five_tuple, stream->pending + 1);
}
return _cairo_output_stream_get_status (stream->output);
}
cairo_output_stream_t *
_cairo_base85_stream_create (cairo_output_stream_t *output)
{
cairo_base85_stream_t *stream;
if (output->status)
return _cairo_output_stream_create_in_error (output->status);
stream = _cairo_malloc (sizeof (cairo_base85_stream_t));
if (unlikely (stream == NULL)) {
_cairo_error_throw (CAIRO_STATUS_NO_MEMORY);
return (cairo_output_stream_t *) &_cairo_output_stream_nil;
}
_cairo_output_stream_init (&stream->base,
_cairo_base85_stream_write,
NULL,
_cairo_base85_stream_close);
stream->output = output;
stream->pending = 0;
return &stream->base;
}
| 0 |
D://workCode//uploadProject\awtk\3rd\cairo | D://workCode//uploadProject\awtk\3rd\cairo\cairo\cairo-bentley-ottmann-rectangular.c | /*
* Copyright © 2004 Carl Worth
* Copyright © 2006 Red Hat, Inc.
* Copyright © 2009 Chris Wilson
*
* This library is free software; you can redistribute it and/or
* modify it either under the terms of the GNU Lesser General Public
* License version 2.1 as published by the Free Software Foundation
* (the "LGPL") or, at your option, under the terms of the Mozilla
* Public License Version 1.1 (the "MPL"). If you do not alter this
* notice, a recipient may use your version of this file under either
* the MPL or the LGPL.
*
* You should have received a copy of the LGPL along with this library
* in the file COPYING-LGPL-2.1; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA
* You should have received a copy of the MPL along with this library
* in the file COPYING-MPL-1.1
*
* The contents of this file are subject to the Mozilla Public License
* Version 1.1 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
* OF ANY KIND, either express or implied. See the LGPL or the MPL for
* the specific language governing rights and limitations.
*
* The Original Code is the cairo graphics library.
*
* The Initial Developer of the Original Code is Carl Worth
*
* Contributor(s):
* Carl D. Worth <cworth@cworth.org>
* Chris Wilson <chris@chris-wilson.co.uk>
*/
/* Provide definitions for standalone compilation */
#include "cairoint.h"
#include "cairo-boxes-private.h"
#include "cairo-error-private.h"
#include "cairo-combsort-inline.h"
#include "cairo-list-private.h"
#include "cairo-traps-private.h"
#include <setjmp.h>
typedef struct _rectangle rectangle_t;
typedef struct _edge edge_t;
struct _edge {
edge_t *next, *prev;
edge_t *right;
cairo_fixed_t x, top;
int dir;
};
struct _rectangle {
edge_t left, right;
int32_t top, bottom;
};
#define UNROLL3(x) x x x
/* the parent is always given by index/2 */
#define PQ_PARENT_INDEX(i) ((i) >> 1)
#define PQ_FIRST_ENTRY 1
/* left and right children are index * 2 and (index * 2) +1 respectively */
#define PQ_LEFT_CHILD_INDEX(i) ((i) << 1)
typedef struct _sweep_line {
rectangle_t **rectangles;
rectangle_t **stop;
edge_t head, tail, *insert, *cursor;
int32_t current_y;
int32_t last_y;
int stop_size;
int32_t insert_x;
cairo_fill_rule_t fill_rule;
cairo_bool_t do_traps;
void *container;
jmp_buf unwind;
} sweep_line_t;
#define DEBUG_TRAPS 0
#if DEBUG_TRAPS
static void
dump_traps (cairo_traps_t *traps, const char *filename)
{
FILE *file;
int n;
if (getenv ("CAIRO_DEBUG_TRAPS") == NULL)
return;
file = fopen (filename, "a");
if (file != NULL) {
for (n = 0; n < traps->num_traps; n++) {
fprintf (file, "%d %d L:(%d, %d), (%d, %d) R:(%d, %d), (%d, %d)\n",
traps->traps[n].top,
traps->traps[n].bottom,
traps->traps[n].left.p1.x,
traps->traps[n].left.p1.y,
traps->traps[n].left.p2.x,
traps->traps[n].left.p2.y,
traps->traps[n].right.p1.x,
traps->traps[n].right.p1.y,
traps->traps[n].right.p2.x,
traps->traps[n].right.p2.y);
}
fprintf (file, "\n");
fclose (file);
}
}
#else
#define dump_traps(traps, filename)
#endif
static inline int
rectangle_compare_start (const rectangle_t *a,
const rectangle_t *b)
{
return a->top - b->top;
}
static inline int
rectangle_compare_stop (const rectangle_t *a,
const rectangle_t *b)
{
return a->bottom - b->bottom;
}
static inline void
pqueue_push (sweep_line_t *sweep, rectangle_t *rectangle)
{
rectangle_t **elements;
int i, parent;
elements = sweep->stop;
for (i = ++sweep->stop_size;
i != PQ_FIRST_ENTRY &&
rectangle_compare_stop (rectangle,
elements[parent = PQ_PARENT_INDEX (i)]) < 0;
i = parent)
{
elements[i] = elements[parent];
}
elements[i] = rectangle;
}
static inline void
rectangle_pop_stop (sweep_line_t *sweep)
{
rectangle_t **elements = sweep->stop;
rectangle_t *tail;
int child, i;
tail = elements[sweep->stop_size--];
if (sweep->stop_size == 0) {
elements[PQ_FIRST_ENTRY] = NULL;
return;
}
for (i = PQ_FIRST_ENTRY;
(child = PQ_LEFT_CHILD_INDEX (i)) <= sweep->stop_size;
i = child)
{
if (child != sweep->stop_size &&
rectangle_compare_stop (elements[child+1],
elements[child]) < 0)
{
child++;
}
if (rectangle_compare_stop (elements[child], tail) >= 0)
break;
elements[i] = elements[child];
}
elements[i] = tail;
}
static inline rectangle_t *
rectangle_pop_start (sweep_line_t *sweep_line)
{
return *sweep_line->rectangles++;
}
static inline rectangle_t *
rectangle_peek_stop (sweep_line_t *sweep_line)
{
return sweep_line->stop[PQ_FIRST_ENTRY];
}
CAIRO_COMBSORT_DECLARE (_rectangle_sort,
rectangle_t *,
rectangle_compare_start)
static void
sweep_line_init (sweep_line_t *sweep_line,
rectangle_t **rectangles,
int num_rectangles,
cairo_fill_rule_t fill_rule,
cairo_bool_t do_traps,
void *container)
{
rectangles[-2] = NULL;
rectangles[-1] = NULL;
rectangles[num_rectangles] = NULL;
sweep_line->rectangles = rectangles;
sweep_line->stop = rectangles - 2;
sweep_line->stop_size = 0;
sweep_line->insert = NULL;
sweep_line->insert_x = INT_MAX;
sweep_line->cursor = &sweep_line->tail;
sweep_line->head.dir = 0;
sweep_line->head.x = INT32_MIN;
sweep_line->head.right = NULL;
sweep_line->head.prev = NULL;
sweep_line->head.next = &sweep_line->tail;
sweep_line->tail.prev = &sweep_line->head;
sweep_line->tail.next = NULL;
sweep_line->tail.right = NULL;
sweep_line->tail.x = INT32_MAX;
sweep_line->tail.dir = 0;
sweep_line->current_y = INT32_MIN;
sweep_line->last_y = INT32_MIN;
sweep_line->fill_rule = fill_rule;
sweep_line->container = container;
sweep_line->do_traps = do_traps;
}
static void
edge_end_box (sweep_line_t *sweep_line, edge_t *left, int32_t bot)
{
cairo_status_t status = CAIRO_STATUS_SUCCESS;
/* Only emit (trivial) non-degenerate trapezoids with positive height. */
if (likely (left->top < bot)) {
if (sweep_line->do_traps) {
cairo_line_t _left = {
{ left->x, left->top },
{ left->x, bot },
}, _right = {
{ left->right->x, left->top },
{ left->right->x, bot },
};
_cairo_traps_add_trap (sweep_line->container, left->top, bot, &_left, &_right);
status = _cairo_traps_status ((cairo_traps_t *) sweep_line->container);
} else {
cairo_box_t box;
box.p1.x = left->x;
box.p1.y = left->top;
box.p2.x = left->right->x;
box.p2.y = bot;
status = _cairo_boxes_add (sweep_line->container,
CAIRO_ANTIALIAS_DEFAULT,
&box);
}
}
if (unlikely (status))
longjmp (sweep_line->unwind, status);
left->right = NULL;
}
/* Start a new trapezoid at the given top y coordinate, whose edges
* are `edge' and `edge->next'. If `edge' already has a trapezoid,
* then either add it to the traps in `traps', if the trapezoid's
* right edge differs from `edge->next', or do nothing if the new
* trapezoid would be a continuation of the existing one. */
static inline void
edge_start_or_continue_box (sweep_line_t *sweep_line,
edge_t *left,
edge_t *right,
int top)
{
if (left->right == right)
return;
if (left->right != NULL) {
if (left->right->x == right->x) {
/* continuation on right, so just swap edges */
left->right = right;
return;
}
edge_end_box (sweep_line, left, top);
}
if (left->x != right->x) {
left->top = top;
left->right = right;
}
}
/*
* Merge two sorted edge lists.
* Input:
* - head_a: The head of the first list.
* - head_b: The head of the second list; head_b cannot be NULL.
* Output:
* Returns the head of the merged list.
*
* Implementation notes:
* To make it fast (in particular, to reduce to an insertion sort whenever
* one of the two input lists only has a single element) we iterate through
* a list until its head becomes greater than the head of the other list,
* then we switch their roles. As soon as one of the two lists is empty, we
* just attach the other one to the current list and exit.
* Writes to memory are only needed to "switch" lists (as it also requires
* attaching to the output list the list which we will be iterating next) and
* to attach the last non-empty list.
*/
static edge_t *
merge_sorted_edges (edge_t *head_a, edge_t *head_b)
{
edge_t *head, *prev;
int32_t x;
prev = head_a->prev;
if (head_a->x <= head_b->x) {
head = head_a;
} else {
head_b->prev = prev;
head = head_b;
goto start_with_b;
}
do {
x = head_b->x;
while (head_a != NULL && head_a->x <= x) {
prev = head_a;
head_a = head_a->next;
}
head_b->prev = prev;
prev->next = head_b;
if (head_a == NULL)
return head;
start_with_b:
x = head_a->x;
while (head_b != NULL && head_b->x <= x) {
prev = head_b;
head_b = head_b->next;
}
head_a->prev = prev;
prev->next = head_a;
if (head_b == NULL)
return head;
} while (1);
}
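/* Illustration of the merge above (comment only): with list A holding
 * edges at x = 1, 4, 7 and list B holding edges at x = 2, 3, 9, the
 * roles switch whenever the current head exceeds the other list's
 * head, producing 1, 2, 3, 4, 7, 9 while writing prev/next pointers
 * only at the switch points and for the final tail. */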
/*
* Sort (part of) a list.
* Input:
* - list: The list to be sorted; list cannot be NULL.
 * - level: Recursion limit.
* Output:
 * - head_out: The head of the sorted list containing the first 2^(level+1) elements of the
 *   input list; if the input list has fewer elements, head_out will be a sorted list
 *   containing all the elements of the input list.
* Returns the head of the list of unprocessed elements (NULL if the sorted list contains
* all the elements of the input list).
*
* Implementation notes:
* Special case single element list, unroll/inline the sorting of the first two elements.
* Some tail recursion is used since we iterate on the bottom-up solution of the problem
* (we start with a small sorted list and keep merging other lists of the same size to it).
*/
static edge_t *
sort_edges (edge_t *list,
unsigned int level,
edge_t **head_out)
{
edge_t *head_other, *remaining;
unsigned int i;
head_other = list->next;
if (head_other == NULL) {
*head_out = list;
return NULL;
}
remaining = head_other->next;
if (list->x <= head_other->x) {
*head_out = list;
head_other->next = NULL;
} else {
*head_out = head_other;
head_other->prev = list->prev;
head_other->next = list;
list->prev = head_other;
list->next = NULL;
}
for (i = 0; i < level && remaining; i++) {
remaining = sort_edges (remaining, i, &head_other);
*head_out = merge_sorted_edges (*head_out, head_other);
}
return remaining;
}
static edge_t *
merge_unsorted_edges (edge_t *head, edge_t *unsorted)
{
sort_edges (unsorted, UINT_MAX, &unsorted);
return merge_sorted_edges (head, unsorted);
}
static void
active_edges_insert (sweep_line_t *sweep)
{
edge_t *prev;
int x;
x = sweep->insert_x;
prev = sweep->cursor;
if (prev->x > x) {
do {
prev = prev->prev;
} while (prev->x > x);
} else {
while (prev->next->x < x)
prev = prev->next;
}
prev->next = merge_unsorted_edges (prev->next, sweep->insert);
sweep->cursor = sweep->insert;
sweep->insert = NULL;
sweep->insert_x = INT_MAX;
}
static inline void
active_edges_to_traps (sweep_line_t *sweep)
{
int top = sweep->current_y;
edge_t *pos;
if (sweep->last_y == sweep->current_y)
return;
if (sweep->insert)
active_edges_insert (sweep);
pos = sweep->head.next;
if (pos == &sweep->tail)
return;
if (sweep->fill_rule == CAIRO_FILL_RULE_WINDING) {
do {
edge_t *left, *right;
int winding;
left = pos;
winding = left->dir;
right = left->next;
/* Check if there is a co-linear edge with an existing trap */
while (right->x == left->x) {
if (right->right != NULL) {
assert (left->right == NULL);
/* continuation on left */
left->top = right->top;
left->right = right->right;
right->right = NULL;
}
winding += right->dir;
right = right->next;
}
if (winding == 0) {
if (left->right != NULL)
edge_end_box (sweep, left, top);
pos = right;
continue;
}
do {
/* End all subsumed traps */
if (unlikely (right->right != NULL))
edge_end_box (sweep, right, top);
	    /* Greedily search for the closing edge, so that we generate
	     * the maximal span width with the minimal number of
	     * boxes.
	     */
winding += right->dir;
if (winding == 0 && right->x != right->next->x)
break;
right = right->next;
} while (TRUE);
edge_start_or_continue_box (sweep, left, right, top);
pos = right->next;
} while (pos != &sweep->tail);
} else {
do {
edge_t *right = pos->next;
int count = 0;
do {
/* End all subsumed traps */
if (unlikely (right->right != NULL))
edge_end_box (sweep, right, top);
/* skip co-linear edges */
if (++count & 1 && right->x != right->next->x)
break;
right = right->next;
} while (TRUE);
edge_start_or_continue_box (sweep, pos, right, top);
pos = right->next;
} while (pos != &sweep->tail);
}
sweep->last_y = sweep->current_y;
}
static inline void
sweep_line_delete_edge (sweep_line_t *sweep, edge_t *edge)
{
if (edge->right != NULL) {
edge_t *next = edge->next;
if (next->x == edge->x) {
next->top = edge->top;
next->right = edge->right;
} else
edge_end_box (sweep, edge, sweep->current_y);
}
if (sweep->cursor == edge)
sweep->cursor = edge->prev;
edge->prev->next = edge->next;
edge->next->prev = edge->prev;
}
static inline cairo_bool_t
sweep_line_delete (sweep_line_t *sweep, rectangle_t *rectangle)
{
cairo_bool_t update;
update = TRUE;
if (sweep->fill_rule == CAIRO_FILL_RULE_WINDING &&
rectangle->left.prev->dir == rectangle->left.dir)
{
update = rectangle->left.next != &rectangle->right;
}
sweep_line_delete_edge (sweep, &rectangle->left);
sweep_line_delete_edge (sweep, &rectangle->right);
rectangle_pop_stop (sweep);
return update;
}
static inline void
sweep_line_insert (sweep_line_t *sweep, rectangle_t *rectangle)
{
if (sweep->insert)
sweep->insert->prev = &rectangle->right;
rectangle->right.next = sweep->insert;
rectangle->right.prev = &rectangle->left;
rectangle->left.next = &rectangle->right;
rectangle->left.prev = NULL;
sweep->insert = &rectangle->left;
if (rectangle->left.x < sweep->insert_x)
sweep->insert_x = rectangle->left.x;
pqueue_push (sweep, rectangle);
}
static cairo_status_t
_cairo_bentley_ottmann_tessellate_rectangular (rectangle_t **rectangles,
int num_rectangles,
cairo_fill_rule_t fill_rule,
cairo_bool_t do_traps,
void *container)
{
sweep_line_t sweep_line;
rectangle_t *rectangle;
cairo_status_t status;
cairo_bool_t update;
sweep_line_init (&sweep_line,
rectangles, num_rectangles,
fill_rule,
do_traps, container);
if ((status = setjmp (sweep_line.unwind)))
return status;
update = FALSE;
rectangle = rectangle_pop_start (&sweep_line);
do {
if (rectangle->top != sweep_line.current_y) {
rectangle_t *stop;
stop = rectangle_peek_stop (&sweep_line);
while (stop != NULL && stop->bottom < rectangle->top) {
if (stop->bottom != sweep_line.current_y) {
if (update) {
active_edges_to_traps (&sweep_line);
update = FALSE;
}
sweep_line.current_y = stop->bottom;
}
update |= sweep_line_delete (&sweep_line, stop);
stop = rectangle_peek_stop (&sweep_line);
}
if (update) {
active_edges_to_traps (&sweep_line);
update = FALSE;
}
sweep_line.current_y = rectangle->top;
}
do {
sweep_line_insert (&sweep_line, rectangle);
} while ((rectangle = rectangle_pop_start (&sweep_line)) != NULL &&
sweep_line.current_y == rectangle->top);
update = TRUE;
} while (rectangle);
while ((rectangle = rectangle_peek_stop (&sweep_line)) != NULL) {
if (rectangle->bottom != sweep_line.current_y) {
if (update) {
active_edges_to_traps (&sweep_line);
update = FALSE;
}
sweep_line.current_y = rectangle->bottom;
}
update |= sweep_line_delete (&sweep_line, rectangle);
}
return CAIRO_STATUS_SUCCESS;
}
cairo_status_t
_cairo_bentley_ottmann_tessellate_rectangular_traps (cairo_traps_t *traps,
cairo_fill_rule_t fill_rule)
{
rectangle_t stack_rectangles[CAIRO_STACK_ARRAY_LENGTH (rectangle_t)];
rectangle_t *stack_rectangles_ptrs[ARRAY_LENGTH (stack_rectangles) + 3];
rectangle_t *rectangles, **rectangles_ptrs;
cairo_status_t status;
int i;
assert (traps->is_rectangular);
if (unlikely (traps->num_traps <= 1)) {
if (traps->num_traps == 1) {
cairo_trapezoid_t *trap = traps->traps;
if (trap->left.p1.x > trap->right.p1.x) {
cairo_line_t tmp = trap->left;
trap->left = trap->right;
trap->right = tmp;
}
}
return CAIRO_STATUS_SUCCESS;
}
dump_traps (traps, "bo-rects-traps-in.txt");
rectangles = stack_rectangles;
rectangles_ptrs = stack_rectangles_ptrs;
if (traps->num_traps > ARRAY_LENGTH (stack_rectangles)) {
rectangles = _cairo_malloc_ab_plus_c (traps->num_traps,
sizeof (rectangle_t) +
sizeof (rectangle_t *),
3*sizeof (rectangle_t *));
if (unlikely (rectangles == NULL))
return _cairo_error (CAIRO_STATUS_NO_MEMORY);
rectangles_ptrs = (rectangle_t **) (rectangles + traps->num_traps);
}
for (i = 0; i < traps->num_traps; i++) {
if (traps->traps[i].left.p1.x < traps->traps[i].right.p1.x) {
rectangles[i].left.x = traps->traps[i].left.p1.x;
rectangles[i].left.dir = 1;
rectangles[i].right.x = traps->traps[i].right.p1.x;
rectangles[i].right.dir = -1;
} else {
rectangles[i].right.x = traps->traps[i].left.p1.x;
rectangles[i].right.dir = 1;
rectangles[i].left.x = traps->traps[i].right.p1.x;
rectangles[i].left.dir = -1;
}
rectangles[i].left.right = NULL;
rectangles[i].right.right = NULL;
rectangles[i].top = traps->traps[i].top;
rectangles[i].bottom = traps->traps[i].bottom;
rectangles_ptrs[i+2] = &rectangles[i];
}
/* XXX incremental sort */
_rectangle_sort (rectangles_ptrs+2, i);
_cairo_traps_clear (traps);
status = _cairo_bentley_ottmann_tessellate_rectangular (rectangles_ptrs+2, i,
fill_rule,
TRUE, traps);
traps->is_rectilinear = TRUE;
traps->is_rectangular = TRUE;
if (rectangles != stack_rectangles)
free (rectangles);
dump_traps (traps, "bo-rects-traps-out.txt");
return status;
}
cairo_status_t
_cairo_bentley_ottmann_tessellate_boxes (const cairo_boxes_t *in,
cairo_fill_rule_t fill_rule,
cairo_boxes_t *out)
{
rectangle_t stack_rectangles[CAIRO_STACK_ARRAY_LENGTH (rectangle_t)];
rectangle_t *stack_rectangles_ptrs[ARRAY_LENGTH (stack_rectangles) + 3];
rectangle_t *rectangles, **rectangles_ptrs;
rectangle_t *stack_rectangles_chain[CAIRO_STACK_ARRAY_LENGTH (rectangle_t *) ];
rectangle_t **rectangles_chain = NULL;
const struct _cairo_boxes_chunk *chunk;
cairo_status_t status;
int i, j, y_min, y_max;
if (unlikely (in->num_boxes == 0)) {
_cairo_boxes_clear (out);
return CAIRO_STATUS_SUCCESS;
}
if (in->num_boxes == 1) {
if (in == out) {
cairo_box_t *box = &in->chunks.base[0];
if (box->p1.x > box->p2.x) {
cairo_fixed_t tmp = box->p1.x;
box->p1.x = box->p2.x;
box->p2.x = tmp;
}
} else {
cairo_box_t box = in->chunks.base[0];
if (box.p1.x > box.p2.x) {
cairo_fixed_t tmp = box.p1.x;
box.p1.x = box.p2.x;
box.p2.x = tmp;
}
_cairo_boxes_clear (out);
status = _cairo_boxes_add (out, CAIRO_ANTIALIAS_DEFAULT, &box);
assert (status == CAIRO_STATUS_SUCCESS);
}
return CAIRO_STATUS_SUCCESS;
}
y_min = INT_MAX; y_max = INT_MIN;
for (chunk = &in->chunks; chunk != NULL; chunk = chunk->next) {
const cairo_box_t *box = chunk->base;
for (i = 0; i < chunk->count; i++) {
if (box[i].p1.y < y_min)
y_min = box[i].p1.y;
if (box[i].p1.y > y_max)
y_max = box[i].p1.y;
}
}
y_min = _cairo_fixed_integer_floor (y_min);
y_max = _cairo_fixed_integer_floor (y_max) + 1;
y_max -= y_min;
if (y_max < in->num_boxes) {
rectangles_chain = stack_rectangles_chain;
if (y_max > ARRAY_LENGTH (stack_rectangles_chain)) {
rectangles_chain = _cairo_malloc_ab (y_max, sizeof (rectangle_t *));
if (unlikely (rectangles_chain == NULL))
return _cairo_error (CAIRO_STATUS_NO_MEMORY);
}
memset (rectangles_chain, 0, y_max * sizeof (rectangle_t*));
}
rectangles = stack_rectangles;
rectangles_ptrs = stack_rectangles_ptrs;
if (in->num_boxes > ARRAY_LENGTH (stack_rectangles)) {
rectangles = _cairo_malloc_ab_plus_c (in->num_boxes,
sizeof (rectangle_t) +
sizeof (rectangle_t *),
3*sizeof (rectangle_t *));
if (unlikely (rectangles == NULL)) {
if (rectangles_chain != stack_rectangles_chain)
free (rectangles_chain);
return _cairo_error (CAIRO_STATUS_NO_MEMORY);
}
rectangles_ptrs = (rectangle_t **) (rectangles + in->num_boxes);
}
j = 0;
for (chunk = &in->chunks; chunk != NULL; chunk = chunk->next) {
const cairo_box_t *box = chunk->base;
for (i = 0; i < chunk->count; i++) {
int h;
if (box[i].p1.x < box[i].p2.x) {
rectangles[j].left.x = box[i].p1.x;
rectangles[j].left.dir = 1;
rectangles[j].right.x = box[i].p2.x;
rectangles[j].right.dir = -1;
} else {
rectangles[j].right.x = box[i].p1.x;
rectangles[j].right.dir = 1;
rectangles[j].left.x = box[i].p2.x;
rectangles[j].left.dir = -1;
}
rectangles[j].left.right = NULL;
rectangles[j].right.right = NULL;
rectangles[j].top = box[i].p1.y;
rectangles[j].bottom = box[i].p2.y;
if (rectangles_chain) {
h = _cairo_fixed_integer_floor (box[i].p1.y) - y_min;
rectangles[j].left.next = (edge_t *)rectangles_chain[h];
rectangles_chain[h] = &rectangles[j];
} else {
rectangles_ptrs[j+2] = &rectangles[j];
}
j++;
}
}
if (rectangles_chain) {
j = 2;
for (y_min = 0; y_min < y_max; y_min++) {
rectangle_t *r;
int start = j;
for (r = rectangles_chain[y_min]; r; r = (rectangle_t *)r->left.next)
rectangles_ptrs[j++] = r;
if (j > start + 1)
_rectangle_sort (rectangles_ptrs + start, j - start);
}
if (rectangles_chain != stack_rectangles_chain)
free (rectangles_chain);
j -= 2;
} else {
_rectangle_sort (rectangles_ptrs + 2, j);
}
_cairo_boxes_clear (out);
status = _cairo_bentley_ottmann_tessellate_rectangular (rectangles_ptrs+2, j,
fill_rule,
FALSE, out);
if (rectangles != stack_rectangles)
free (rectangles);
return status;
}
| 0 |
D://workCode//uploadProject\awtk\3rd\cairo | D://workCode//uploadProject\awtk\3rd\cairo\cairo\cairo-bentley-ottmann-rectilinear.c | /*
* Copyright © 2004 Carl Worth
* Copyright © 2006 Red Hat, Inc.
* Copyright © 2008 Chris Wilson
*
* This library is free software; you can redistribute it and/or
* modify it either under the terms of the GNU Lesser General Public
* License version 2.1 as published by the Free Software Foundation
* (the "LGPL") or, at your option, under the terms of the Mozilla
* Public License Version 1.1 (the "MPL"). If you do not alter this
* notice, a recipient may use your version of this file under either
* the MPL or the LGPL.
*
* You should have received a copy of the LGPL along with this library
* in the file COPYING-LGPL-2.1; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA
* You should have received a copy of the MPL along with this library
* in the file COPYING-MPL-1.1
*
* The contents of this file are subject to the Mozilla Public License
* Version 1.1 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
* OF ANY KIND, either express or implied. See the LGPL or the MPL for
* the specific language governing rights and limitations.
*
* The Original Code is the cairo graphics library.
*
* The Initial Developer of the Original Code is Carl Worth
*
* Contributor(s):
* Carl D. Worth <cworth@cworth.org>
* Chris Wilson <chris@chris-wilson.co.uk>
*/
/* Provide definitions for standalone compilation */
#include "cairoint.h"
#include "cairo-boxes-private.h"
#include "cairo-combsort-inline.h"
#include "cairo-error-private.h"
#include "cairo-traps-private.h"
typedef struct _cairo_bo_edge cairo_bo_edge_t;
typedef struct _cairo_bo_trap cairo_bo_trap_t;
/* A deferred trapezoid of an edge */
struct _cairo_bo_trap {
cairo_bo_edge_t *right;
int32_t top;
};
struct _cairo_bo_edge {
cairo_edge_t edge;
cairo_bo_edge_t *prev;
cairo_bo_edge_t *next;
cairo_bo_trap_t deferred_trap;
};
typedef enum {
CAIRO_BO_EVENT_TYPE_START,
CAIRO_BO_EVENT_TYPE_STOP
} cairo_bo_event_type_t;
typedef struct _cairo_bo_event {
cairo_bo_event_type_t type;
cairo_point_t point;
cairo_bo_edge_t *edge;
} cairo_bo_event_t;
typedef struct _cairo_bo_sweep_line {
cairo_bo_event_t **events;
cairo_bo_edge_t *head;
cairo_bo_edge_t *stopped;
int32_t current_y;
cairo_bo_edge_t *current_edge;
} cairo_bo_sweep_line_t;
static inline int
_cairo_point_compare (const cairo_point_t *a,
const cairo_point_t *b)
{
int cmp;
cmp = a->y - b->y;
if (likely (cmp))
return cmp;
return a->x - b->x;
}
static inline int
_cairo_bo_edge_compare (const cairo_bo_edge_t *a,
const cairo_bo_edge_t *b)
{
int cmp;
cmp = a->edge.line.p1.x - b->edge.line.p1.x;
if (likely (cmp))
return cmp;
return b->edge.bottom - a->edge.bottom;
}
static inline int
cairo_bo_event_compare (const cairo_bo_event_t *a,
const cairo_bo_event_t *b)
{
int cmp;
cmp = _cairo_point_compare (&a->point, &b->point);
if (likely (cmp))
return cmp;
cmp = a->type - b->type;
if (cmp)
return cmp;
return a - b;
}
static inline cairo_bo_event_t *
_cairo_bo_event_dequeue (cairo_bo_sweep_line_t *sweep_line)
{
return *sweep_line->events++;
}
CAIRO_COMBSORT_DECLARE (_cairo_bo_event_queue_sort,
cairo_bo_event_t *,
cairo_bo_event_compare)
static void
_cairo_bo_sweep_line_init (cairo_bo_sweep_line_t *sweep_line,
cairo_bo_event_t **events,
int num_events)
{
_cairo_bo_event_queue_sort (events, num_events);
events[num_events] = NULL;
sweep_line->events = events;
sweep_line->head = NULL;
sweep_line->current_y = INT32_MIN;
sweep_line->current_edge = NULL;
}
static void
_cairo_bo_sweep_line_insert (cairo_bo_sweep_line_t *sweep_line,
cairo_bo_edge_t *edge)
{
if (sweep_line->current_edge != NULL) {
cairo_bo_edge_t *prev, *next;
int cmp;
cmp = _cairo_bo_edge_compare (sweep_line->current_edge, edge);
if (cmp < 0) {
prev = sweep_line->current_edge;
next = prev->next;
while (next != NULL && _cairo_bo_edge_compare (next, edge) < 0)
prev = next, next = prev->next;
prev->next = edge;
edge->prev = prev;
edge->next = next;
if (next != NULL)
next->prev = edge;
} else if (cmp > 0) {
next = sweep_line->current_edge;
prev = next->prev;
while (prev != NULL && _cairo_bo_edge_compare (prev, edge) > 0)
next = prev, prev = next->prev;
next->prev = edge;
edge->next = next;
edge->prev = prev;
if (prev != NULL)
prev->next = edge;
else
sweep_line->head = edge;
} else {
prev = sweep_line->current_edge;
edge->prev = prev;
edge->next = prev->next;
if (prev->next != NULL)
prev->next->prev = edge;
prev->next = edge;
}
} else {
sweep_line->head = edge;
}
sweep_line->current_edge = edge;
}
static void
_cairo_bo_sweep_line_delete (cairo_bo_sweep_line_t *sweep_line,
cairo_bo_edge_t *edge)
{
if (edge->prev != NULL)
edge->prev->next = edge->next;
else
sweep_line->head = edge->next;
if (edge->next != NULL)
edge->next->prev = edge->prev;
if (sweep_line->current_edge == edge)
sweep_line->current_edge = edge->prev ? edge->prev : edge->next;
}
static inline cairo_bool_t
edges_collinear (const cairo_bo_edge_t *a, const cairo_bo_edge_t *b)
{
return a->edge.line.p1.x == b->edge.line.p1.x;
}
static cairo_status_t
_cairo_bo_edge_end_trap (cairo_bo_edge_t *left,
int32_t bot,
cairo_bool_t do_traps,
void *container)
{
cairo_bo_trap_t *trap = &left->deferred_trap;
cairo_status_t status = CAIRO_STATUS_SUCCESS;
/* Only emit (trivial) non-degenerate trapezoids with positive height. */
if (likely (trap->top < bot)) {
if (do_traps) {
_cairo_traps_add_trap (container,
trap->top, bot,
&left->edge.line, &trap->right->edge.line);
status = _cairo_traps_status ((cairo_traps_t *) container);
} else {
cairo_box_t box;
box.p1.x = left->edge.line.p1.x;
box.p1.y = trap->top;
box.p2.x = trap->right->edge.line.p1.x;
box.p2.y = bot;
status = _cairo_boxes_add (container, CAIRO_ANTIALIAS_DEFAULT, &box);
}
}
trap->right = NULL;
return status;
}
/* Start a new trapezoid at the given top y coordinate, whose edges
* are `edge' and `edge->next'. If `edge' already has a trapezoid,
* then either add it to the traps in `traps', if the trapezoid's
* right edge differs from `edge->next', or do nothing if the new
* trapezoid would be a continuation of the existing one. */
static inline cairo_status_t
_cairo_bo_edge_start_or_continue_trap (cairo_bo_edge_t *left,
cairo_bo_edge_t *right,
int top,
cairo_bool_t do_traps,
void *container)
{
cairo_status_t status;
if (left->deferred_trap.right == right)
return CAIRO_STATUS_SUCCESS;
if (left->deferred_trap.right != NULL) {
if (right != NULL && edges_collinear (left->deferred_trap.right, right))
{
/* continuation on right, so just swap edges */
left->deferred_trap.right = right;
return CAIRO_STATUS_SUCCESS;
}
status = _cairo_bo_edge_end_trap (left, top, do_traps, container);
if (unlikely (status))
return status;
}
if (right != NULL && ! edges_collinear (left, right)) {
left->deferred_trap.top = top;
left->deferred_trap.right = right;
}
return CAIRO_STATUS_SUCCESS;
}
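/*
 * Illustrative example (assumed coordinates): suppose `left' already
 * carries a deferred trap whose right edge lies at x == 100, and the
 * sweep now pairs `left' with a new closing edge that also lies at
 * x == 100.  Since the old and new right edges are collinear, the code
 * above merely swaps in the new right edge and keeps accumulating
 * height; only when the closing edge moves to a different x is the
 * pending trapezoid flushed via _cairo_bo_edge_end_trap() and a new
 * one started.
 */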
static inline cairo_status_t
_active_edges_to_traps (cairo_bo_edge_t *left,
int32_t top,
cairo_fill_rule_t fill_rule,
cairo_bool_t do_traps,
void *container)
{
cairo_bo_edge_t *right;
cairo_status_t status;
if (fill_rule == CAIRO_FILL_RULE_WINDING) {
while (left != NULL) {
int in_out;
/* Greedily search for the closing edge, so that we generate the
* maximal span width with the minimal number of trapezoids.
*/
in_out = left->edge.dir;
/* Check if there is a co-linear edge with an existing trap */
right = left->next;
if (left->deferred_trap.right == NULL) {
while (right != NULL && right->deferred_trap.right == NULL)
right = right->next;
if (right != NULL && edges_collinear (left, right)) {
/* continuation on left */
left->deferred_trap = right->deferred_trap;
right->deferred_trap.right = NULL;
}
}
/* End all subsumed traps */
right = left->next;
while (right != NULL) {
if (right->deferred_trap.right != NULL) {
status = _cairo_bo_edge_end_trap (right, top, do_traps, container);
if (unlikely (status))
return status;
}
in_out += right->edge.dir;
if (in_out == 0) {
/* skip co-linear edges */
if (right->next == NULL ||
! edges_collinear (right, right->next))
{
break;
}
}
right = right->next;
}
status = _cairo_bo_edge_start_or_continue_trap (left, right, top,
do_traps, container);
if (unlikely (status))
return status;
left = right;
if (left != NULL)
left = left->next;
}
} else {
while (left != NULL) {
int in_out = 0;
right = left->next;
while (right != NULL) {
if (right->deferred_trap.right != NULL) {
status = _cairo_bo_edge_end_trap (right, top, do_traps, container);
if (unlikely (status))
return status;
}
if ((in_out++ & 1) == 0) {
cairo_bo_edge_t *next;
cairo_bool_t skip = FALSE;
/* skip co-linear edges */
next = right->next;
if (next != NULL)
skip = edges_collinear (right, next);
if (! skip)
break;
}
right = right->next;
}
status = _cairo_bo_edge_start_or_continue_trap (left, right, top,
do_traps, container);
if (unlikely (status))
return status;
left = right;
if (left != NULL)
left = left->next;
}
}
return CAIRO_STATUS_SUCCESS;
}
static cairo_status_t
_cairo_bentley_ottmann_tessellate_rectilinear (cairo_bo_event_t **start_events,
int num_events,
cairo_fill_rule_t fill_rule,
cairo_bool_t do_traps,
void *container)
{
cairo_bo_sweep_line_t sweep_line;
cairo_bo_event_t *event;
cairo_status_t status;
_cairo_bo_sweep_line_init (&sweep_line, start_events, num_events);
while ((event = _cairo_bo_event_dequeue (&sweep_line))) {
if (event->point.y != sweep_line.current_y) {
status = _active_edges_to_traps (sweep_line.head,
sweep_line.current_y,
fill_rule, do_traps, container);
if (unlikely (status))
return status;
sweep_line.current_y = event->point.y;
}
switch (event->type) {
case CAIRO_BO_EVENT_TYPE_START:
_cairo_bo_sweep_line_insert (&sweep_line, event->edge);
break;
case CAIRO_BO_EVENT_TYPE_STOP:
_cairo_bo_sweep_line_delete (&sweep_line, event->edge);
if (event->edge->deferred_trap.right != NULL) {
status = _cairo_bo_edge_end_trap (event->edge,
sweep_line.current_y,
do_traps, container);
if (unlikely (status))
return status;
}
break;
}
}
return CAIRO_STATUS_SUCCESS;
}
cairo_status_t
_cairo_bentley_ottmann_tessellate_rectilinear_polygon_to_boxes (const cairo_polygon_t *polygon,
cairo_fill_rule_t fill_rule,
cairo_boxes_t *boxes)
{
cairo_status_t status;
cairo_bo_event_t stack_events[CAIRO_STACK_ARRAY_LENGTH (cairo_bo_event_t)];
cairo_bo_event_t *events;
cairo_bo_event_t *stack_event_ptrs[ARRAY_LENGTH (stack_events) + 1];
cairo_bo_event_t **event_ptrs;
cairo_bo_edge_t stack_edges[ARRAY_LENGTH (stack_events)];
cairo_bo_edge_t *edges;
int num_events;
int i, j;
if (unlikely (polygon->num_edges == 0))
return CAIRO_STATUS_SUCCESS;
num_events = 2 * polygon->num_edges;
events = stack_events;
event_ptrs = stack_event_ptrs;
edges = stack_edges;
if (num_events > ARRAY_LENGTH (stack_events)) {
events = _cairo_malloc_ab_plus_c (num_events,
sizeof (cairo_bo_event_t) +
sizeof (cairo_bo_edge_t) +
sizeof (cairo_bo_event_t *),
sizeof (cairo_bo_event_t *));
if (unlikely (events == NULL))
return _cairo_error (CAIRO_STATUS_NO_MEMORY);
event_ptrs = (cairo_bo_event_t **) (events + num_events);
edges = (cairo_bo_edge_t *) (event_ptrs + num_events + 1);
}
for (i = j = 0; i < polygon->num_edges; i++) {
edges[i].edge = polygon->edges[i];
edges[i].deferred_trap.right = NULL;
edges[i].prev = NULL;
edges[i].next = NULL;
event_ptrs[j] = &events[j];
events[j].type = CAIRO_BO_EVENT_TYPE_START;
events[j].point.y = polygon->edges[i].top;
events[j].point.x = polygon->edges[i].line.p1.x;
events[j].edge = &edges[i];
j++;
event_ptrs[j] = &events[j];
events[j].type = CAIRO_BO_EVENT_TYPE_STOP;
events[j].point.y = polygon->edges[i].bottom;
events[j].point.x = polygon->edges[i].line.p1.x;
events[j].edge = &edges[i];
j++;
}
status = _cairo_bentley_ottmann_tessellate_rectilinear (event_ptrs, j,
fill_rule,
FALSE, boxes);
if (events != stack_events)
free (events);
return status;
}
cairo_status_t
_cairo_bentley_ottmann_tessellate_rectilinear_traps (cairo_traps_t *traps,
cairo_fill_rule_t fill_rule)
{
cairo_bo_event_t stack_events[CAIRO_STACK_ARRAY_LENGTH (cairo_bo_event_t)];
cairo_bo_event_t *events;
cairo_bo_event_t *stack_event_ptrs[ARRAY_LENGTH (stack_events) + 1];
cairo_bo_event_t **event_ptrs;
cairo_bo_edge_t stack_edges[ARRAY_LENGTH (stack_events)];
cairo_bo_edge_t *edges;
cairo_status_t status;
int i, j, k;
if (unlikely (traps->num_traps == 0))
return CAIRO_STATUS_SUCCESS;
assert (traps->is_rectilinear);
i = 4 * traps->num_traps;
events = stack_events;
event_ptrs = stack_event_ptrs;
edges = stack_edges;
if (i > ARRAY_LENGTH (stack_events)) {
events = _cairo_malloc_ab_plus_c (i,
sizeof (cairo_bo_event_t) +
sizeof (cairo_bo_edge_t) +
sizeof (cairo_bo_event_t *),
sizeof (cairo_bo_event_t *));
if (unlikely (events == NULL))
return _cairo_error (CAIRO_STATUS_NO_MEMORY);
event_ptrs = (cairo_bo_event_t **) (events + i);
edges = (cairo_bo_edge_t *) (event_ptrs + i + 1);
}
for (i = j = k = 0; i < traps->num_traps; i++) {
edges[k].edge.top = traps->traps[i].top;
edges[k].edge.bottom = traps->traps[i].bottom;
edges[k].edge.line = traps->traps[i].left;
edges[k].edge.dir = 1;
edges[k].deferred_trap.right = NULL;
edges[k].prev = NULL;
edges[k].next = NULL;
event_ptrs[j] = &events[j];
events[j].type = CAIRO_BO_EVENT_TYPE_START;
events[j].point.y = traps->traps[i].top;
events[j].point.x = traps->traps[i].left.p1.x;
events[j].edge = &edges[k];
j++;
event_ptrs[j] = &events[j];
events[j].type = CAIRO_BO_EVENT_TYPE_STOP;
events[j].point.y = traps->traps[i].bottom;
events[j].point.x = traps->traps[i].left.p1.x;
events[j].edge = &edges[k];
j++;
k++;
edges[k].edge.top = traps->traps[i].top;
edges[k].edge.bottom = traps->traps[i].bottom;
edges[k].edge.line = traps->traps[i].right;
edges[k].edge.dir = -1;
edges[k].deferred_trap.right = NULL;
edges[k].prev = NULL;
edges[k].next = NULL;
event_ptrs[j] = &events[j];
events[j].type = CAIRO_BO_EVENT_TYPE_START;
events[j].point.y = traps->traps[i].top;
events[j].point.x = traps->traps[i].right.p1.x;
events[j].edge = &edges[k];
j++;
event_ptrs[j] = &events[j];
events[j].type = CAIRO_BO_EVENT_TYPE_STOP;
events[j].point.y = traps->traps[i].bottom;
events[j].point.x = traps->traps[i].right.p1.x;
events[j].edge = &edges[k];
j++;
k++;
}
_cairo_traps_clear (traps);
status = _cairo_bentley_ottmann_tessellate_rectilinear (event_ptrs, j,
fill_rule,
TRUE, traps);
traps->is_rectilinear = TRUE;
if (events != stack_events)
free (events);
return status;
}
| 0 |
D://workCode//uploadProject\awtk\3rd\cairo | D://workCode//uploadProject\awtk\3rd\cairo\cairo\cairo-bentley-ottmann.c | /*
* Copyright © 2004 Carl Worth
* Copyright © 2006 Red Hat, Inc.
* Copyright © 2008 Chris Wilson
*
* This library is free software; you can redistribute it and/or
* modify it either under the terms of the GNU Lesser General Public
* License version 2.1 as published by the Free Software Foundation
* (the "LGPL") or, at your option, under the terms of the Mozilla
* Public License Version 1.1 (the "MPL"). If you do not alter this
* notice, a recipient may use your version of this file under either
* the MPL or the LGPL.
*
* You should have received a copy of the LGPL along with this library
* in the file COPYING-LGPL-2.1; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA
* You should have received a copy of the MPL along with this library
* in the file COPYING-MPL-1.1
*
* The contents of this file are subject to the Mozilla Public License
* Version 1.1 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
* OF ANY KIND, either express or implied. See the LGPL or the MPL for
* the specific language governing rights and limitations.
*
* The Original Code is the cairo graphics library.
*
* The Initial Developer of the Original Code is Carl Worth
*
* Contributor(s):
* Carl D. Worth <cworth@cworth.org>
* Chris Wilson <chris@chris-wilson.co.uk>
*/
/* Provide definitions for standalone compilation */
#include "cairoint.h"
#include "cairo-combsort-inline.h"
#include "cairo-error-private.h"
#include "cairo-freelist-private.h"
#include "cairo-line-inline.h"
#include "cairo-traps-private.h"
#define DEBUG_PRINT_STATE 0
#define DEBUG_EVENTS 0
#define DEBUG_TRAPS 0
typedef cairo_point_t cairo_bo_point32_t;
typedef struct _cairo_bo_intersect_ordinate {
int32_t ordinate;
enum { EXACT, INEXACT } exactness;
} cairo_bo_intersect_ordinate_t;
typedef struct _cairo_bo_intersect_point {
cairo_bo_intersect_ordinate_t x;
cairo_bo_intersect_ordinate_t y;
} cairo_bo_intersect_point_t;
typedef struct _cairo_bo_edge cairo_bo_edge_t;
typedef struct _cairo_bo_trap cairo_bo_trap_t;
/* A deferred trapezoid of an edge */
struct _cairo_bo_trap {
cairo_bo_edge_t *right;
int32_t top;
};
struct _cairo_bo_edge {
cairo_edge_t edge;
cairo_bo_edge_t *prev;
cairo_bo_edge_t *next;
cairo_bo_edge_t *colinear;
cairo_bo_trap_t deferred_trap;
};
/* the parent is always given by index/2 */
#define PQ_PARENT_INDEX(i) ((i) >> 1)
#define PQ_FIRST_ENTRY 1
/* left and right children are index * 2 and (index * 2) +1 respectively */
#define PQ_LEFT_CHILD_INDEX(i) ((i) << 1)
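/* A quick worked example of the 1-based heap arithmetic (illustrative
 * only): the entry at index 5 has its parent at PQ_PARENT_INDEX(5) == 2
 * and its children at PQ_LEFT_CHILD_INDEX(5) == 10 and 11; keeping the
 * root at PQ_FIRST_ENTRY == 1 makes both relations a single shift. */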
typedef enum {
CAIRO_BO_EVENT_TYPE_STOP,
CAIRO_BO_EVENT_TYPE_INTERSECTION,
CAIRO_BO_EVENT_TYPE_START
} cairo_bo_event_type_t;
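/* The ordering of the enum above is significant: cairo_bo_event_compare()
 * breaks ties between events at the same point by type, so STOP events
 * are processed before INTERSECTION events, which in turn are processed
 * before START events.  For example, an edge stopping at (x, y) is
 * removed from the sweep line before a new edge starting at (x, y) is
 * inserted. */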
typedef struct _cairo_bo_event {
cairo_bo_event_type_t type;
cairo_point_t point;
} cairo_bo_event_t;
typedef struct _cairo_bo_start_event {
cairo_bo_event_type_t type;
cairo_point_t point;
cairo_bo_edge_t edge;
} cairo_bo_start_event_t;
typedef struct _cairo_bo_queue_event {
cairo_bo_event_type_t type;
cairo_point_t point;
cairo_bo_edge_t *e1;
cairo_bo_edge_t *e2;
} cairo_bo_queue_event_t;
typedef struct _pqueue {
int size, max_size;
cairo_bo_event_t **elements;
cairo_bo_event_t *elements_embedded[1024];
} pqueue_t;
typedef struct _cairo_bo_event_queue {
cairo_freepool_t pool;
pqueue_t pqueue;
cairo_bo_event_t **start_events;
} cairo_bo_event_queue_t;
typedef struct _cairo_bo_sweep_line {
cairo_bo_edge_t *head;
cairo_bo_edge_t *stopped;
int32_t current_y;
cairo_bo_edge_t *current_edge;
} cairo_bo_sweep_line_t;
#if DEBUG_TRAPS
static void
dump_traps (cairo_traps_t *traps, const char *filename)
{
FILE *file;
cairo_box_t extents;
int n;
if (getenv ("CAIRO_DEBUG_TRAPS") == NULL)
return;
#if 0
if (traps->has_limits) {
printf ("%s: limits=(%d, %d, %d, %d)\n",
filename,
traps->limits.p1.x, traps->limits.p1.y,
traps->limits.p2.x, traps->limits.p2.y);
}
#endif
_cairo_traps_extents (traps, &extents);
printf ("%s: extents=(%d, %d, %d, %d)\n",
filename,
extents.p1.x, extents.p1.y,
extents.p2.x, extents.p2.y);
file = fopen (filename, "a");
if (file != NULL) {
for (n = 0; n < traps->num_traps; n++) {
fprintf (file, "%d %d L:(%d, %d), (%d, %d) R:(%d, %d), (%d, %d)\n",
traps->traps[n].top,
traps->traps[n].bottom,
traps->traps[n].left.p1.x,
traps->traps[n].left.p1.y,
traps->traps[n].left.p2.x,
traps->traps[n].left.p2.y,
traps->traps[n].right.p1.x,
traps->traps[n].right.p1.y,
traps->traps[n].right.p2.x,
traps->traps[n].right.p2.y);
}
fprintf (file, "\n");
fclose (file);
}
}
static void
dump_edges (cairo_bo_start_event_t *events,
int num_edges,
const char *filename)
{
FILE *file;
int n;
if (getenv ("CAIRO_DEBUG_TRAPS") == NULL)
return;
file = fopen (filename, "a");
if (file != NULL) {
for (n = 0; n < num_edges; n++) {
fprintf (file, "(%d, %d), (%d, %d) %d %d %d\n",
events[n].edge.edge.line.p1.x,
events[n].edge.edge.line.p1.y,
events[n].edge.edge.line.p2.x,
events[n].edge.edge.line.p2.y,
events[n].edge.edge.top,
events[n].edge.edge.bottom,
events[n].edge.edge.dir);
}
fprintf (file, "\n");
fclose (file);
}
}
#endif
static cairo_fixed_t
_line_compute_intersection_x_for_y (const cairo_line_t *line,
cairo_fixed_t y)
{
cairo_fixed_t x, dy;
if (y == line->p1.y)
return line->p1.x;
if (y == line->p2.y)
return line->p2.x;
x = line->p1.x;
dy = line->p2.y - line->p1.y;
if (dy != 0) {
x += _cairo_fixed_mul_div_floor (y - line->p1.y,
line->p2.x - line->p1.x,
dy);
}
return x;
}
static inline int
_cairo_bo_point32_compare (cairo_bo_point32_t const *a,
cairo_bo_point32_t const *b)
{
int cmp;
cmp = a->y - b->y;
if (cmp)
return cmp;
return a->x - b->x;
}
/* Compare the slope of a to the slope of b, returning 1, 0, -1 if the
* slope a is respectively greater than, equal to, or less than the
* slope of b.
*
* For each edge, consider the direction vector formed from:
*
* top -> bottom
*
* which is:
*
* (dx, dy) = (line.p2.x - line.p1.x, line.p2.y - line.p1.y)
*
* We then define the slope of each edge as dx/dy, (which is the
* inverse of the slope typically used in math instruction). We never
* compute a slope directly as the value approaches infinity, but we
* can derive a slope comparison without division as follows, (where
* the ? represents our compare operator).
*
* 1. slope(a) ? slope(b)
* 2. adx/ady ? bdx/bdy
* 3. (adx * bdy) ? (bdx * ady)
*
* Note that from step 2 to step 3 there is no change needed in the
* sign of the result since both ady and bdy are guaranteed to be
* greater than or equal to 0.
*
* When using this slope comparison to sort edges, some care is needed
* when interpreting the results. Since the slope compare operates on
* distance vectors from top to bottom it gives a correct left to
* right sort for edges that have a common top point, (such as two
* edges with start events at the same location). On the other hand,
* the sense of the result will be exactly reversed for two edges that
* have a common stop point.
*/
static inline int
_slope_compare (const cairo_bo_edge_t *a,
const cairo_bo_edge_t *b)
{
/* XXX: We're assuming here that dx and dy will still fit in 32
* bits. That's not true in general as there could be overflow. We
* should prevent that before the tessellation algorithm
* begins.
*/
int32_t adx = a->edge.line.p2.x - a->edge.line.p1.x;
int32_t bdx = b->edge.line.p2.x - b->edge.line.p1.x;
/* Since the dy's are all positive by construction we can fast
* path several common cases.
*/
/* First check for vertical lines. */
if (adx == 0)
return -bdx;
if (bdx == 0)
return adx;
/* Then where the two edges point in different directions wrt x. */
if ((adx ^ bdx) < 0)
return adx;
/* Finally we actually need to do the general comparison. */
{
int32_t ady = a->edge.line.p2.y - a->edge.line.p1.y;
int32_t bdy = b->edge.line.p2.y - b->edge.line.p1.y;
cairo_int64_t adx_bdy = _cairo_int32x32_64_mul (adx, bdy);
cairo_int64_t bdx_ady = _cairo_int32x32_64_mul (bdx, ady);
return _cairo_int64_cmp (adx_bdy, bdx_ady);
}
}
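#if 0
/* Minimal illustrative sketch of the division-free comparison derived
 * above, written with plain integer arithmetic; the real _slope_compare()
 * additionally fast-paths vertical edges and edges whose dx differ in
 * sign, and uses the cairo_int64_t helpers.  Example: a = (0,0)->(1,4)
 * and b = (0,0)->(3,4) give 1*4 < 3*4, so a is reported as having the
 * smaller dx/dy slope. */
static int
slope_compare_sketch (int32_t adx, int32_t ady, int32_t bdx, int32_t bdy)
{
    /* sign of adx/ady - bdx/bdy; valid since ady and bdy are >= 0 */
    int64_t adx_bdy = (int64_t) adx * bdy;
    int64_t bdx_ady = (int64_t) bdx * ady;
    return (adx_bdy > bdx_ady) - (adx_bdy < bdx_ady);
}
#endif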
/*
* We need to compare the x-coordinate of a line for a particular y wrt to a
* given x, without loss of precision.
*
* The x-coordinate along an edge for a given y is:
* X = A_x + (Y - A_y) * A_dx / A_dy
*
* So the inequality we wish to test is:
* A_x + (Y - A_y) * A_dx / A_dy ∘ X
* where ∘ is our inequality operator.
*
* By construction, we know that A_dy (and (Y - A_y)) are
* all positive, so we can rearrange it thus without causing a sign change:
* (Y - A_y) * A_dx ∘ (X - A_x) * A_dy
*
* Given the assumption that all the deltas fit within 32 bits, we can compute
* this comparison directly using 64 bit arithmetic.
*
* See the similar discussion for _slope_compare() and
* edges_compare_x_for_y_general().
*/
static int
edge_compare_for_y_against_x (const cairo_bo_edge_t *a,
int32_t y,
int32_t x)
{
int32_t adx, ady;
int32_t dx, dy;
cairo_int64_t L, R;
if (x < a->edge.line.p1.x && x < a->edge.line.p2.x)
return 1;
if (x > a->edge.line.p1.x && x > a->edge.line.p2.x)
return -1;
adx = a->edge.line.p2.x - a->edge.line.p1.x;
dx = x - a->edge.line.p1.x;
if (adx == 0)
return -dx;
if (dx == 0 || (adx ^ dx) < 0)
return adx;
dy = y - a->edge.line.p1.y;
ady = a->edge.line.p2.y - a->edge.line.p1.y;
L = _cairo_int32x32_64_mul (dy, adx);
R = _cairo_int32x32_64_mul (dx, ady);
return _cairo_int64_cmp (L, R);
}
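/*
 * Worked example for the comparison above (illustrative values,
 * ignoring the fixed-point scale): for the edge (0,0)->(4,8) queried
 * at (x, y) = (1, 4), the true crossing at y = 4 is X = 2, so the edge
 * lies to the right of the query point.  The integer form compares
 * (Y - A_y) * A_dx = 4*4 = 16 against (X - A_x) * A_dy = 1*8 = 8, and
 * 16 > 8 yields the expected positive result.
 */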
static inline int
_cairo_bo_sweep_line_compare_edges (const cairo_bo_sweep_line_t *sweep_line,
const cairo_bo_edge_t *a,
const cairo_bo_edge_t *b)
{
int cmp;
cmp = cairo_lines_compare_at_y (&a->edge.line,
&b->edge.line,
sweep_line->current_y);
if (cmp)
return cmp;
/* We've got two collinear edges now. */
return b->edge.bottom - a->edge.bottom;
}
static inline cairo_int64_t
det32_64 (int32_t a, int32_t b,
int32_t c, int32_t d)
{
/* det = a * d - b * c */
return _cairo_int64_sub (_cairo_int32x32_64_mul (a, d),
_cairo_int32x32_64_mul (b, c));
}
static inline cairo_int128_t
det64x32_128 (cairo_int64_t a, int32_t b,
cairo_int64_t c, int32_t d)
{
/* det = a * d - b * c */
return _cairo_int128_sub (_cairo_int64x32_128_mul (a, d),
_cairo_int64x32_128_mul (c, b));
}
/* Compute the intersection of two lines as defined by two edges. The
* result is provided as a coordinate pair of 128-bit integers.
*
* Returns %CAIRO_BO_STATUS_INTERSECTION if there is an intersection or
* %CAIRO_BO_STATUS_PARALLEL if the two lines are exactly parallel.
*/
static cairo_bool_t
intersect_lines (cairo_bo_edge_t *a,
cairo_bo_edge_t *b,
cairo_bo_intersect_point_t *intersection)
{
cairo_int64_t a_det, b_det;
/* XXX: We're assuming here that dx and dy will still fit in 32
* bits. That's not true in general as there could be overflow. We
* should prevent that before the tessellation algorithm begins.
* What we're doing to mitigate this is to perform clamping in
* cairo_bo_tessellate_polygon().
*/
int32_t dx1 = a->edge.line.p1.x - a->edge.line.p2.x;
int32_t dy1 = a->edge.line.p1.y - a->edge.line.p2.y;
int32_t dx2 = b->edge.line.p1.x - b->edge.line.p2.x;
int32_t dy2 = b->edge.line.p1.y - b->edge.line.p2.y;
cairo_int64_t den_det;
cairo_int64_t R;
cairo_quorem64_t qr;
den_det = det32_64 (dx1, dy1, dx2, dy2);
/* Q: Can we determine that the lines do not intersect (within range)
* much more cheaply than computing the intersection point i.e. by
* avoiding the division?
*
* X = ax + t * adx = bx + s * bdx;
* Y = ay + t * ady = by + s * bdy;
* ∴ t * (ady*bdx - bdy*adx) = bdx * (by - ay) + bdy * (ax - bx)
* => t * L = R
*
* Therefore we can reject any intersection (under the criteria for
* valid intersection events) if:
* L^R < 0 => t < 0, or
* L<R => t > 1
*
* (where top/bottom must at least extend to the line endpoints).
*
* A similar substitution can be performed for s, yielding:
* s * (ady*bdx - bdy*adx) = ady * (ax - bx) - adx * (ay - by)
*/
R = det32_64 (dx2, dy2,
b->edge.line.p1.x - a->edge.line.p1.x,
b->edge.line.p1.y - a->edge.line.p1.y);
if (_cairo_int64_negative (den_det)) {
if (_cairo_int64_ge (den_det, R))
return FALSE;
} else {
if (_cairo_int64_le (den_det, R))
return FALSE;
}
R = det32_64 (dy1, dx1,
a->edge.line.p1.y - b->edge.line.p1.y,
a->edge.line.p1.x - b->edge.line.p1.x);
if (_cairo_int64_negative (den_det)) {
if (_cairo_int64_ge (den_det, R))
return FALSE;
} else {
if (_cairo_int64_le (den_det, R))
return FALSE;
}
/* We now know that the two lines should intersect within range. */
a_det = det32_64 (a->edge.line.p1.x, a->edge.line.p1.y,
a->edge.line.p2.x, a->edge.line.p2.y);
b_det = det32_64 (b->edge.line.p1.x, b->edge.line.p1.y,
b->edge.line.p2.x, b->edge.line.p2.y);
/* x = det (a_det, dx1, b_det, dx2) / den_det */
qr = _cairo_int_96by64_32x64_divrem (det64x32_128 (a_det, dx1,
b_det, dx2),
den_det);
if (_cairo_int64_eq (qr.rem, den_det))
return FALSE;
#if 0
intersection->x.exactness = _cairo_int64_is_zero (qr.rem) ? EXACT : INEXACT;
#else
intersection->x.exactness = EXACT;
if (! _cairo_int64_is_zero (qr.rem)) {
if (_cairo_int64_negative (den_det) ^ _cairo_int64_negative (qr.rem))
qr.rem = _cairo_int64_negate (qr.rem);
qr.rem = _cairo_int64_mul (qr.rem, _cairo_int32_to_int64 (2));
if (_cairo_int64_ge (qr.rem, den_det)) {
qr.quo = _cairo_int64_add (qr.quo,
_cairo_int32_to_int64 (_cairo_int64_negative (qr.quo) ? -1 : 1));
} else
intersection->x.exactness = INEXACT;
}
#endif
intersection->x.ordinate = _cairo_int64_to_int32 (qr.quo);
/* y = det (a_det, dy1, b_det, dy2) / den_det */
qr = _cairo_int_96by64_32x64_divrem (det64x32_128 (a_det, dy1,
b_det, dy2),
den_det);
if (_cairo_int64_eq (qr.rem, den_det))
return FALSE;
#if 0
intersection->y.exactness = _cairo_int64_is_zero (qr.rem) ? EXACT : INEXACT;
#else
intersection->y.exactness = EXACT;
if (! _cairo_int64_is_zero (qr.rem)) {
if (_cairo_int64_negative (den_det) ^ _cairo_int64_negative (qr.rem))
qr.rem = _cairo_int64_negate (qr.rem);
qr.rem = _cairo_int64_mul (qr.rem, _cairo_int32_to_int64 (2));
if (_cairo_int64_ge (qr.rem, den_det)) {
qr.quo = _cairo_int64_add (qr.quo,
_cairo_int32_to_int64 (_cairo_int64_negative (qr.quo) ? -1 : 1));
} else
intersection->y.exactness = INEXACT;
}
#endif
intersection->y.ordinate = _cairo_int64_to_int32 (qr.quo);
return TRUE;
}
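/*
 * Worked example for intersect_lines() (illustrative values, ignoring
 * the fixed-point scale): for a = (0,0)->(10,10) and b = (10,0)->(0,10)
 * the code computes den_det = 200, and neither rejection test fires
 * since R = 100 < den_det in both cases; with a_det = 0 and b_det = 100
 * both numerators come to 1000, so x = y = 1000 / 200 = 5 with zero
 * remainder, i.e. the exact crossing point (5, 5).
 */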
static int
_cairo_bo_intersect_ordinate_32_compare (cairo_bo_intersect_ordinate_t a,
int32_t b)
{
/* First compare the quotient */
if (a.ordinate > b)
return +1;
if (a.ordinate < b)
return -1;
/* With quotient identical, if remainder is 0 then compare equal */
/* Otherwise, the non-zero remainder makes a > b */
return INEXACT == a.exactness;
}
/* Does the given edge contain the given point. The point must already
* be known to be contained within the line determined by the edge,
* (most likely the point results from an intersection of this edge
* with another).
*
* If we had exact arithmetic, then this function would simply be a
* matter of examining whether the y value of the point lies within
* the range of y values of the edge. But since intersection points
* are not exact due to being rounded to the nearest integer within
* the available precision, we must also examine the x value of the
* point.
*
* The definition of "contains" here is that the given intersection
* point will be seen by the sweep line after the start event for the
* given edge and before the stop event for the edge. See the comments
* in the implementation for more details.
*/
static cairo_bool_t
_cairo_bo_edge_contains_intersect_point (cairo_bo_edge_t *edge,
cairo_bo_intersect_point_t *point)
{
int cmp_top, cmp_bottom;
/* XXX: When running the actual algorithm, we don't actually need to
* compare against edge->top at all here, since any intersection above
* top is eliminated early via a slope comparison. We're leaving these
* here for now only for the sake of the quadratic-time intersection
* finder which needs them.
*/
cmp_top = _cairo_bo_intersect_ordinate_32_compare (point->y,
edge->edge.top);
cmp_bottom = _cairo_bo_intersect_ordinate_32_compare (point->y,
edge->edge.bottom);
if (cmp_top < 0 || cmp_bottom > 0)
{
return FALSE;
}
if (cmp_top > 0 && cmp_bottom < 0)
{
return TRUE;
}
/* At this stage, the point lies on the same y value as either
* edge->top or edge->bottom, so we have to examine the x value in
* order to properly determine containment. */
/* If the y value of the point is the same as the y value of the
* top of the edge, then the x value of the point must be greater
* to be considered as inside the edge. Similarly, if the y value
* of the point is the same as the y value of the bottom of the
* edge, then the x value of the point must be less to be
* considered as inside. */
if (cmp_top == 0) {
cairo_fixed_t top_x;
top_x = _line_compute_intersection_x_for_y (&edge->edge.line,
edge->edge.top);
return _cairo_bo_intersect_ordinate_32_compare (point->x, top_x) > 0;
} else { /* cmp_bottom == 0 */
cairo_fixed_t bot_x;
bot_x = _line_compute_intersection_x_for_y (&edge->edge.line,
edge->edge.bottom);
return _cairo_bo_intersect_ordinate_32_compare (point->x, bot_x) < 0;
}
}
/* Compute the intersection of two edges. The result is provided as a
* coordinate pair of 128-bit integers.
*
* Returns %CAIRO_BO_STATUS_INTERSECTION if there is an intersection
* that is within both edges, %CAIRO_BO_STATUS_NO_INTERSECTION if the
* intersection of the lines defined by the edges occurs outside of
* one or both edges, and %CAIRO_BO_STATUS_PARALLEL if the two edges
* are exactly parallel.
*
* Note that when determining if a candidate intersection is "inside"
* an edge, we consider both the infinitesimal shortening and the
* infinitesimal tilt rules described by John Hobby. Specifically, if
* the intersection is exactly the same as an edge point, it is
* effectively outside (no intersection is returned). Also, if the
* intersection point has the same
*/
static cairo_bool_t
_cairo_bo_edge_intersect (cairo_bo_edge_t *a,
cairo_bo_edge_t *b,
cairo_bo_point32_t *intersection)
{
cairo_bo_intersect_point_t quorem;
if (! intersect_lines (a, b, &quorem))
return FALSE;
if (! _cairo_bo_edge_contains_intersect_point (a, &quorem))
return FALSE;
if (! _cairo_bo_edge_contains_intersect_point (b, &quorem))
return FALSE;
/* Now that we've correctly compared the intersection point and
* determined that it lies within the edge, then we know that we
* no longer need any more bits of storage for the intersection
* than we do for our edge coordinates. We also no longer need the
* remainder from the division. */
intersection->x = quorem.x.ordinate;
intersection->y = quorem.y.ordinate;
return TRUE;
}
static inline int
cairo_bo_event_compare (const cairo_bo_event_t *a,
const cairo_bo_event_t *b)
{
int cmp;
cmp = _cairo_bo_point32_compare (&a->point, &b->point);
if (cmp)
return cmp;
cmp = a->type - b->type;
if (cmp)
return cmp;
return a - b;
}
static inline void
_pqueue_init (pqueue_t *pq)
{
pq->max_size = ARRAY_LENGTH (pq->elements_embedded);
pq->size = 0;
pq->elements = pq->elements_embedded;
}
static inline void
_pqueue_fini (pqueue_t *pq)
{
if (pq->elements != pq->elements_embedded)
free (pq->elements);
}
static cairo_status_t
_pqueue_grow (pqueue_t *pq)
{
cairo_bo_event_t **new_elements;
pq->max_size *= 2;
if (pq->elements == pq->elements_embedded) {
new_elements = _cairo_malloc_ab (pq->max_size,
sizeof (cairo_bo_event_t *));
if (unlikely (new_elements == NULL))
return _cairo_error (CAIRO_STATUS_NO_MEMORY);
memcpy (new_elements, pq->elements_embedded,
sizeof (pq->elements_embedded));
} else {
new_elements = _cairo_realloc_ab (pq->elements,
pq->max_size,
sizeof (cairo_bo_event_t *));
if (unlikely (new_elements == NULL))
return _cairo_error (CAIRO_STATUS_NO_MEMORY);
}
pq->elements = new_elements;
return CAIRO_STATUS_SUCCESS;
}
static inline cairo_status_t
_pqueue_push (pqueue_t *pq, cairo_bo_event_t *event)
{
cairo_bo_event_t **elements;
int i, parent;
if (unlikely (pq->size + 1 == pq->max_size)) {
cairo_status_t status;
status = _pqueue_grow (pq);
if (unlikely (status))
return status;
}
elements = pq->elements;
for (i = ++pq->size;
i != PQ_FIRST_ENTRY &&
cairo_bo_event_compare (event,
elements[parent = PQ_PARENT_INDEX (i)]) < 0;
i = parent)
{
elements[i] = elements[parent];
}
elements[i] = event;
return CAIRO_STATUS_SUCCESS;
}
static inline void
_pqueue_pop (pqueue_t *pq)
{
cairo_bo_event_t **elements = pq->elements;
cairo_bo_event_t *tail;
int child, i;
tail = elements[pq->size--];
if (pq->size == 0) {
elements[PQ_FIRST_ENTRY] = NULL;
return;
}
for (i = PQ_FIRST_ENTRY;
(child = PQ_LEFT_CHILD_INDEX (i)) <= pq->size;
i = child)
{
if (child != pq->size &&
cairo_bo_event_compare (elements[child+1],
elements[child]) < 0)
{
child++;
}
if (cairo_bo_event_compare (elements[child], tail) >= 0)
break;
elements[i] = elements[child];
}
elements[i] = tail;
}
static inline cairo_status_t
_cairo_bo_event_queue_insert (cairo_bo_event_queue_t *queue,
cairo_bo_event_type_t type,
cairo_bo_edge_t *e1,
cairo_bo_edge_t *e2,
const cairo_point_t *point)
{
cairo_bo_queue_event_t *event;
event = _cairo_freepool_alloc (&queue->pool);
if (unlikely (event == NULL))
return _cairo_error (CAIRO_STATUS_NO_MEMORY);
event->type = type;
event->e1 = e1;
event->e2 = e2;
event->point = *point;
return _pqueue_push (&queue->pqueue, (cairo_bo_event_t *) event);
}
static void
_cairo_bo_event_queue_delete (cairo_bo_event_queue_t *queue,
cairo_bo_event_t *event)
{
_cairo_freepool_free (&queue->pool, event);
}
static cairo_bo_event_t *
_cairo_bo_event_dequeue (cairo_bo_event_queue_t *event_queue)
{
cairo_bo_event_t *event, *cmp;
event = event_queue->pqueue.elements[PQ_FIRST_ENTRY];
cmp = *event_queue->start_events;
if (event == NULL ||
(cmp != NULL && cairo_bo_event_compare (cmp, event) < 0))
{
event = cmp;
event_queue->start_events++;
}
else
{
_pqueue_pop (&event_queue->pqueue);
}
return event;
}
CAIRO_COMBSORT_DECLARE (_cairo_bo_event_queue_sort,
cairo_bo_event_t *,
cairo_bo_event_compare)
static void
_cairo_bo_event_queue_init (cairo_bo_event_queue_t *event_queue,
cairo_bo_event_t **start_events,
int num_events)
{
event_queue->start_events = start_events;
_cairo_freepool_init (&event_queue->pool,
sizeof (cairo_bo_queue_event_t));
_pqueue_init (&event_queue->pqueue);
event_queue->pqueue.elements[PQ_FIRST_ENTRY] = NULL;
}
static cairo_status_t
_cairo_bo_event_queue_insert_stop (cairo_bo_event_queue_t *event_queue,
cairo_bo_edge_t *edge)
{
cairo_bo_point32_t point;
point.y = edge->edge.bottom;
point.x = _line_compute_intersection_x_for_y (&edge->edge.line,
point.y);
return _cairo_bo_event_queue_insert (event_queue,
CAIRO_BO_EVENT_TYPE_STOP,
edge, NULL,
&point);
}
static void
_cairo_bo_event_queue_fini (cairo_bo_event_queue_t *event_queue)
{
_pqueue_fini (&event_queue->pqueue);
_cairo_freepool_fini (&event_queue->pool);
}
static inline cairo_status_t
_cairo_bo_event_queue_insert_if_intersect_below_current_y (cairo_bo_event_queue_t *event_queue,
cairo_bo_edge_t *left,
cairo_bo_edge_t *right)
{
cairo_bo_point32_t intersection;
if (MAX (left->edge.line.p1.x, left->edge.line.p2.x) <=
MIN (right->edge.line.p1.x, right->edge.line.p2.x))
return CAIRO_STATUS_SUCCESS;
if (cairo_lines_equal (&left->edge.line, &right->edge.line))
return CAIRO_STATUS_SUCCESS;
/* The names "left" and "right" here are correct descriptions of
* the order of the two edges within the active edge list. So if a
* slope comparison also puts left less than right, then we know
* that the intersection of these two segments has already
* occurred before the current sweep line position. */
if (_slope_compare (left, right) <= 0)
return CAIRO_STATUS_SUCCESS;
if (! _cairo_bo_edge_intersect (left, right, &intersection))
return CAIRO_STATUS_SUCCESS;
return _cairo_bo_event_queue_insert (event_queue,
CAIRO_BO_EVENT_TYPE_INTERSECTION,
left, right,
&intersection);
}
static void
_cairo_bo_sweep_line_init (cairo_bo_sweep_line_t *sweep_line)
{
sweep_line->head = NULL;
sweep_line->stopped = NULL;
sweep_line->current_y = INT32_MIN;
sweep_line->current_edge = NULL;
}
static void
_cairo_bo_sweep_line_insert (cairo_bo_sweep_line_t *sweep_line,
cairo_bo_edge_t *edge)
{
if (sweep_line->current_edge != NULL) {
cairo_bo_edge_t *prev, *next;
int cmp;
cmp = _cairo_bo_sweep_line_compare_edges (sweep_line,
sweep_line->current_edge,
edge);
if (cmp < 0) {
prev = sweep_line->current_edge;
next = prev->next;
while (next != NULL &&
_cairo_bo_sweep_line_compare_edges (sweep_line,
next, edge) < 0)
{
prev = next, next = prev->next;
}
prev->next = edge;
edge->prev = prev;
edge->next = next;
if (next != NULL)
next->prev = edge;
} else if (cmp > 0) {
next = sweep_line->current_edge;
prev = next->prev;
while (prev != NULL &&
_cairo_bo_sweep_line_compare_edges (sweep_line,
prev, edge) > 0)
{
next = prev, prev = next->prev;
}
next->prev = edge;
edge->next = next;
edge->prev = prev;
if (prev != NULL)
prev->next = edge;
else
sweep_line->head = edge;
} else {
prev = sweep_line->current_edge;
edge->prev = prev;
edge->next = prev->next;
if (prev->next != NULL)
prev->next->prev = edge;
prev->next = edge;
}
} else {
sweep_line->head = edge;
edge->next = NULL;
}
sweep_line->current_edge = edge;
}
static void
_cairo_bo_sweep_line_delete (cairo_bo_sweep_line_t *sweep_line,
cairo_bo_edge_t *edge)
{
if (edge->prev != NULL)
edge->prev->next = edge->next;
else
sweep_line->head = edge->next;
if (edge->next != NULL)
edge->next->prev = edge->prev;
if (sweep_line->current_edge == edge)
sweep_line->current_edge = edge->prev ? edge->prev : edge->next;
}
static void
_cairo_bo_sweep_line_swap (cairo_bo_sweep_line_t *sweep_line,
cairo_bo_edge_t *left,
cairo_bo_edge_t *right)
{
if (left->prev != NULL)
left->prev->next = right;
else
sweep_line->head = right;
if (right->next != NULL)
right->next->prev = left;
left->next = right->next;
right->next = left;
right->prev = left->prev;
left->prev = right;
}
#if DEBUG_PRINT_STATE
static void
_cairo_bo_edge_print (cairo_bo_edge_t *edge)
{
printf ("(0x%x, 0x%x)-(0x%x, 0x%x)",
edge->edge.line.p1.x, edge->edge.line.p1.y,
edge->edge.line.p2.x, edge->edge.line.p2.y);
}
static void
_cairo_bo_event_print (cairo_bo_event_t *event)
{
switch (event->type) {
case CAIRO_BO_EVENT_TYPE_START:
printf ("Start: ");
break;
case CAIRO_BO_EVENT_TYPE_STOP:
printf ("Stop: ");
break;
case CAIRO_BO_EVENT_TYPE_INTERSECTION:
printf ("Intersection: ");
break;
}
printf ("(%d, %d)\t", event->point.x, event->point.y);
_cairo_bo_edge_print (event->e1);
if (event->type == CAIRO_BO_EVENT_TYPE_INTERSECTION) {
printf (" X ");
_cairo_bo_edge_print (event->e2);
}
printf ("\n");
}
static void
_cairo_bo_event_queue_print (cairo_bo_event_queue_t *event_queue)
{
/* XXX: fixme to print the start/stop array too. */
printf ("Event queue:\n");
}
static void
_cairo_bo_sweep_line_print (cairo_bo_sweep_line_t *sweep_line)
{
cairo_bool_t first = TRUE;
cairo_bo_edge_t *edge;
printf ("Sweep line from edge list: ");
first = TRUE;
for (edge = sweep_line->head;
edge;
edge = edge->next)
{
if (!first)
printf (", ");
_cairo_bo_edge_print (edge);
first = FALSE;
}
printf ("\n");
}
static void
print_state (const char *msg,
cairo_bo_event_t *event,
cairo_bo_event_queue_t *event_queue,
cairo_bo_sweep_line_t *sweep_line)
{
printf ("%s ", msg);
_cairo_bo_event_print (event);
_cairo_bo_event_queue_print (event_queue);
_cairo_bo_sweep_line_print (sweep_line);
printf ("\n");
}
#endif
#if DEBUG_EVENTS
static void CAIRO_PRINTF_FORMAT (1, 2)
event_log (const char *fmt, ...)
{
FILE *file;
if (getenv ("CAIRO_DEBUG_EVENTS") == NULL)
return;
file = fopen ("bo-events.txt", "a");
if (file != NULL) {
va_list ap;
va_start (ap, fmt);
vfprintf (file, fmt, ap);
va_end (ap);
fclose (file);
}
}
#endif
#define HAS_COLINEAR(a, b) ((cairo_bo_edge_t *)(((uintptr_t)(a))&~1) == (b))
#define IS_COLINEAR(e) (((uintptr_t)(e))&1)
#define MARK_COLINEAR(e, v) ((cairo_bo_edge_t *)(((uintptr_t)(e))|(v)))
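#if 0
/* Illustrative sketch of the low-bit pointer tagging performed by the
 * macros above (not part of the algorithm): edges are at least 2-byte
 * aligned, so bit 0 of the cached `colinear' pointer is free to hold
 * the boolean result of the last collinearity test. */
static void
colinear_tag_sketch (cairo_bo_edge_t *a, cairo_bo_edge_t *b)
{
    /* Cache "b is collinear with a" in a single pointer-sized field. */
    a->colinear = MARK_COLINEAR (b, 1);

    /* Later queries recover both pieces of information cheaply. */
    assert (HAS_COLINEAR (a->colinear, b));
    assert (IS_COLINEAR (a->colinear));
}
#endif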
static inline cairo_bool_t
edges_colinear (cairo_bo_edge_t *a, const cairo_bo_edge_t *b)
{
unsigned p;
if (HAS_COLINEAR(a->colinear, b))
return IS_COLINEAR(a->colinear);
if (HAS_COLINEAR(b->colinear, a)) {
p = IS_COLINEAR(b->colinear);
a->colinear = MARK_COLINEAR(b, p);
return p;
}
p = 0;
p |= (a->edge.line.p1.x == b->edge.line.p1.x) << 0;
p |= (a->edge.line.p1.y == b->edge.line.p1.y) << 1;
p |= (a->edge.line.p2.x == b->edge.line.p2.x) << 3;
p |= (a->edge.line.p2.y == b->edge.line.p2.y) << 4;
if (p == ((1 << 0) | (1 << 1) | (1 << 3) | (1 << 4))) {
a->colinear = MARK_COLINEAR(b, 1);
return TRUE;
}
if (_slope_compare (a, b)) {
a->colinear = MARK_COLINEAR(b, 0);
return FALSE;
}
/* The choice of y is not truly arbitrary since we must guarantee that it
* is greater than the start of either line.
*/
if (p != 0) {
/* colinear if either end-point are coincident */
p = (((p >> 1) & p) & 5) != 0;
} else if (a->edge.line.p1.y < b->edge.line.p1.y) {
p = edge_compare_for_y_against_x (b,
a->edge.line.p1.y,
a->edge.line.p1.x) == 0;
} else {
p = edge_compare_for_y_against_x (a,
b->edge.line.p1.y,
b->edge.line.p1.x) == 0;
}
a->colinear = MARK_COLINEAR(b, p);
return p;
}
/* Adds the trapezoid, if any, of the left edge to the #cairo_traps_t */
static void
_cairo_bo_edge_end_trap (cairo_bo_edge_t *left,
int32_t bot,
cairo_traps_t *traps)
{
cairo_bo_trap_t *trap = &left->deferred_trap;
/* Only emit (trivial) non-degenerate trapezoids with positive height. */
if (likely (trap->top < bot)) {
_cairo_traps_add_trap (traps,
trap->top, bot,
&left->edge.line, &trap->right->edge.line);
#if DEBUG_PRINT_STATE
printf ("Deferred trap: left=(%x, %x)-(%x,%x) "
"right=(%x,%x)-(%x,%x) top=%x, bot=%x\n",
left->edge.line.p1.x, left->edge.line.p1.y,
left->edge.line.p2.x, left->edge.line.p2.y,
trap->right->edge.line.p1.x, trap->right->edge.line.p1.y,
trap->right->edge.line.p2.x, trap->right->edge.line.p2.y,
trap->top, bot);
#endif
#if DEBUG_EVENTS
event_log ("end trap: %lu %lu %d %d\n",
(long) left,
(long) trap->right,
trap->top,
bot);
#endif
}
trap->right = NULL;
}
/* Start a new trapezoid at the given top y coordinate, whose edges
* are `edge' and `edge->next'. If `edge' already has a trapezoid,
* then either add it to the traps in `traps', if the trapezoid's
* right edge differs from `edge->next', or do nothing if the new
* trapezoid would be a continuation of the existing one. */
static inline void
_cairo_bo_edge_start_or_continue_trap (cairo_bo_edge_t *left,
cairo_bo_edge_t *right,
int top,
cairo_traps_t *traps)
{
if (left->deferred_trap.right == right)
return;
assert (right);
if (left->deferred_trap.right != NULL) {
if (edges_colinear (left->deferred_trap.right, right))
{
/* continuation on right, so just swap edges */
left->deferred_trap.right = right;
return;
}
_cairo_bo_edge_end_trap (left, top, traps);
}
if (! edges_colinear (left, right)) {
left->deferred_trap.top = top;
left->deferred_trap.right = right;
#if DEBUG_EVENTS
event_log ("begin trap: %lu %lu %d\n",
(long) left,
(long) right,
top);
#endif
}
}
static inline void
_active_edges_to_traps (cairo_bo_edge_t *pos,
int32_t top,
unsigned mask,
cairo_traps_t *traps)
{
cairo_bo_edge_t *left;
int in_out;
#if DEBUG_PRINT_STATE
printf ("Processing active edges for %x\n", top);
#endif
in_out = 0;
left = pos;
while (pos != NULL) {
if (pos != left && pos->deferred_trap.right) {
/* XXX It shouldn't be possible to get here with 2 deferred traps
* on colinear edges... See bug-bo-rictoz.
*/
if (left->deferred_trap.right == NULL &&
edges_colinear (left, pos))
{
/* continuation on left */
left->deferred_trap = pos->deferred_trap;
pos->deferred_trap.right = NULL;
}
else
{
_cairo_bo_edge_end_trap (pos, top, traps);
}
}
in_out += pos->edge.dir;
if ((in_out & mask) == 0) {
/* skip co-linear edges */
if (pos->next == NULL || ! edges_colinear (pos, pos->next)) {
_cairo_bo_edge_start_or_continue_trap (left, pos, top, traps);
left = pos->next;
}
}
pos = pos->next;
}
}
/* Execute a single pass of the Bentley-Ottmann algorithm on edges,
* generating trapezoids according to the fill_rule and appending them
* to traps. */
static cairo_status_t
_cairo_bentley_ottmann_tessellate_bo_edges (cairo_bo_event_t **start_events,
int num_events,
unsigned fill_rule,
cairo_traps_t *traps,
int *num_intersections)
{
cairo_status_t status;
int intersection_count = 0;
cairo_bo_event_queue_t event_queue;
cairo_bo_sweep_line_t sweep_line;
cairo_bo_event_t *event;
cairo_bo_edge_t *left, *right;
cairo_bo_edge_t *e1, *e2;
/* convert the fill_rule into a winding mask */
if (fill_rule == CAIRO_FILL_RULE_WINDING)
fill_rule = (unsigned) -1;
else
fill_rule = 1;
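/* With a mask of (unsigned) -1 the test (in_out & mask) == 0 in
 * _active_edges_to_traps() fires only when the winding number is
 * exactly zero (the non-zero winding rule); with a mask of 1 it fires
 * whenever the count is even (the even-odd rule). */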
#if DEBUG_EVENTS
{
int i;
for (i = 0; i < num_events; i++) {
cairo_bo_start_event_t *event =
((cairo_bo_start_event_t **) start_events)[i];
event_log ("edge: %lu (%d, %d) (%d, %d) (%d, %d) %d\n",
(long) &events[i].edge,
event->edge.edge.line.p1.x,
event->edge.edge.line.p1.y,
event->edge.edge.line.p2.x,
event->edge.edge.line.p2.y,
event->edge.top,
event->edge.bottom,
event->edge.edge.dir);
}
}
#endif
_cairo_bo_event_queue_init (&event_queue, start_events, num_events);
_cairo_bo_sweep_line_init (&sweep_line);
while ((event = _cairo_bo_event_dequeue (&event_queue))) {
if (event->point.y != sweep_line.current_y) {
for (e1 = sweep_line.stopped; e1; e1 = e1->next) {
if (e1->deferred_trap.right != NULL) {
_cairo_bo_edge_end_trap (e1,
e1->edge.bottom,
traps);
}
}
sweep_line.stopped = NULL;
_active_edges_to_traps (sweep_line.head,
sweep_line.current_y,
fill_rule, traps);
sweep_line.current_y = event->point.y;
}
#if DEBUG_EVENTS
event_log ("event: %d (%ld, %ld) %lu, %lu\n",
event->type,
(long) event->point.x,
(long) event->point.y,
(long) event->e1,
(long) event->e2);
#endif
switch (event->type) {
case CAIRO_BO_EVENT_TYPE_START:
e1 = &((cairo_bo_start_event_t *) event)->edge;
_cairo_bo_sweep_line_insert (&sweep_line, e1);
status = _cairo_bo_event_queue_insert_stop (&event_queue, e1);
if (unlikely (status))
goto unwind;
/* check to see if this is a continuation of a stopped edge */
/* XXX change to an infinitesimal lengthening rule */
for (left = sweep_line.stopped; left; left = left->next) {
if (e1->edge.top <= left->edge.bottom &&
edges_colinear (e1, left))
{
e1->deferred_trap = left->deferred_trap;
if (left->prev != NULL)
                        left->prev->next = left->next;
else
sweep_line.stopped = left->next;
if (left->next != NULL)
left->next->prev = left->prev;
break;
}
}
left = e1->prev;
right = e1->next;
if (left != NULL) {
status = _cairo_bo_event_queue_insert_if_intersect_below_current_y (&event_queue, left, e1);
if (unlikely (status))
goto unwind;
}
if (right != NULL) {
status = _cairo_bo_event_queue_insert_if_intersect_below_current_y (&event_queue, e1, right);
if (unlikely (status))
goto unwind;
}
break;
case CAIRO_BO_EVENT_TYPE_STOP:
e1 = ((cairo_bo_queue_event_t *) event)->e1;
_cairo_bo_event_queue_delete (&event_queue, event);
left = e1->prev;
right = e1->next;
_cairo_bo_sweep_line_delete (&sweep_line, e1);
/* first, check to see if we have a continuation via a fresh edge */
if (e1->deferred_trap.right != NULL) {
e1->next = sweep_line.stopped;
if (sweep_line.stopped != NULL)
sweep_line.stopped->prev = e1;
sweep_line.stopped = e1;
e1->prev = NULL;
}
if (left != NULL && right != NULL) {
status = _cairo_bo_event_queue_insert_if_intersect_below_current_y (&event_queue, left, right);
if (unlikely (status))
goto unwind;
}
break;
case CAIRO_BO_EVENT_TYPE_INTERSECTION:
e1 = ((cairo_bo_queue_event_t *) event)->e1;
e2 = ((cairo_bo_queue_event_t *) event)->e2;
_cairo_bo_event_queue_delete (&event_queue, event);
/* skip this intersection if its edges are not adjacent */
if (e2 != e1->next)
break;
intersection_count++;
left = e1->prev;
right = e2->next;
_cairo_bo_sweep_line_swap (&sweep_line, e1, e2);
/* after the swap e2 is left of e1 */
if (left != NULL) {
status = _cairo_bo_event_queue_insert_if_intersect_below_current_y (&event_queue, left, e2);
if (unlikely (status))
goto unwind;
}
if (right != NULL) {
status = _cairo_bo_event_queue_insert_if_intersect_below_current_y (&event_queue, e1, right);
if (unlikely (status))
goto unwind;
}
break;
}
}
*num_intersections = intersection_count;
for (e1 = sweep_line.stopped; e1; e1 = e1->next) {
if (e1->deferred_trap.right != NULL) {
_cairo_bo_edge_end_trap (e1, e1->edge.bottom, traps);
}
}
status = traps->status;
unwind:
_cairo_bo_event_queue_fini (&event_queue);
#if DEBUG_EVENTS
event_log ("\n");
#endif
return status;
}
cairo_status_t
_cairo_bentley_ottmann_tessellate_polygon (cairo_traps_t *traps,
const cairo_polygon_t *polygon,
cairo_fill_rule_t fill_rule)
{
int intersections;
cairo_bo_start_event_t stack_events[CAIRO_STACK_ARRAY_LENGTH (cairo_bo_start_event_t)];
cairo_bo_start_event_t *events;
cairo_bo_event_t *stack_event_ptrs[ARRAY_LENGTH (stack_events) + 1];
cairo_bo_event_t **event_ptrs;
cairo_bo_start_event_t *stack_event_y[64];
cairo_bo_start_event_t **event_y = NULL;
int i, num_events, y, ymin, ymax;
cairo_status_t status;
num_events = polygon->num_edges;
if (unlikely (0 == num_events))
return CAIRO_STATUS_SUCCESS;
if (polygon->num_limits) {
ymin = _cairo_fixed_integer_floor (polygon->limit.p1.y);
ymax = _cairo_fixed_integer_ceil (polygon->limit.p2.y) - ymin;
if (ymax > 64) {
event_y = _cairo_malloc_ab(sizeof (cairo_bo_event_t*), ymax);
if (unlikely (event_y == NULL))
return _cairo_error (CAIRO_STATUS_NO_MEMORY);
} else {
event_y = stack_event_y;
}
memset (event_y, 0, ymax * sizeof(cairo_bo_event_t *));
}
events = stack_events;
event_ptrs = stack_event_ptrs;
if (num_events > ARRAY_LENGTH (stack_events)) {
events = _cairo_malloc_ab_plus_c (num_events,
sizeof (cairo_bo_start_event_t) +
sizeof (cairo_bo_event_t *),
sizeof (cairo_bo_event_t *));
if (unlikely (events == NULL)) {
if (event_y != stack_event_y)
free (event_y);
return _cairo_error (CAIRO_STATUS_NO_MEMORY);
}
event_ptrs = (cairo_bo_event_t **) (events + num_events);
}
for (i = 0; i < num_events; i++) {
events[i].type = CAIRO_BO_EVENT_TYPE_START;
events[i].point.y = polygon->edges[i].top;
events[i].point.x =
_line_compute_intersection_x_for_y (&polygon->edges[i].line,
events[i].point.y);
events[i].edge.edge = polygon->edges[i];
events[i].edge.deferred_trap.right = NULL;
events[i].edge.prev = NULL;
events[i].edge.next = NULL;
events[i].edge.colinear = NULL;
if (event_y) {
y = _cairo_fixed_integer_floor (events[i].point.y) - ymin;
events[i].edge.next = (cairo_bo_edge_t *) event_y[y];
event_y[y] = (cairo_bo_start_event_t *) &events[i];
} else
event_ptrs[i] = (cairo_bo_event_t *) &events[i];
}
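    /* If the polygon extents were known, the start events were bucketed by
     * integer y above (a counting sort); emit them bucket by bucket, sorting
     * only within each bucket, which is cheaper than one global sort.
     * Otherwise fall back to sorting the whole event array. */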
if (event_y) {
for (y = i = 0; y < ymax && i < num_events; y++) {
cairo_bo_start_event_t *e;
int j = i;
for (e = event_y[y]; e; e = (cairo_bo_start_event_t *)e->edge.next)
event_ptrs[i++] = (cairo_bo_event_t *) e;
if (i > j + 1)
_cairo_bo_event_queue_sort (event_ptrs+j, i-j);
}
if (event_y != stack_event_y)
free (event_y);
} else
_cairo_bo_event_queue_sort (event_ptrs, i);
event_ptrs[i] = NULL;
#if DEBUG_TRAPS
dump_edges (events, num_events, "bo-polygon-edges.txt");
#endif
/* XXX: This would be the convenient place to throw in multiple
* passes of the Bentley-Ottmann algorithm. It would merely
* require storing the results of each pass into a temporary
* cairo_traps_t. */
status = _cairo_bentley_ottmann_tessellate_bo_edges (event_ptrs, num_events,
fill_rule, traps,
&intersections);
#if DEBUG_TRAPS
dump_traps (traps, "bo-polygon-out.txt");
#endif
if (events != stack_events)
free (events);
return status;
}
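/* Re-tessellate an existing set of trapezoids under the given fill rule:
 * each trapezoid contributes its left edge with direction +1 and its right
 * edge with direction -1 to a polygon, which is then passed back through
 * _cairo_bentley_ottmann_tessellate_polygon(). */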
cairo_status_t
_cairo_bentley_ottmann_tessellate_traps (cairo_traps_t *traps,
cairo_fill_rule_t fill_rule)
{
cairo_status_t status;
cairo_polygon_t polygon;
int i;
if (unlikely (0 == traps->num_traps))
return CAIRO_STATUS_SUCCESS;
#if DEBUG_TRAPS
dump_traps (traps, "bo-traps-in.txt");
#endif
_cairo_polygon_init (&polygon, traps->limits, traps->num_limits);
for (i = 0; i < traps->num_traps; i++) {
status = _cairo_polygon_add_line (&polygon,
&traps->traps[i].left,
traps->traps[i].top,
traps->traps[i].bottom,
1);
if (unlikely (status))
goto CLEANUP;
status = _cairo_polygon_add_line (&polygon,
&traps->traps[i].right,
traps->traps[i].top,
traps->traps[i].bottom,
-1);
if (unlikely (status))
goto CLEANUP;
}
_cairo_traps_clear (traps);
status = _cairo_bentley_ottmann_tessellate_polygon (traps,
&polygon,
fill_rule);
#if DEBUG_TRAPS
dump_traps (traps, "bo-traps-out.txt");
#endif
CLEANUP:
_cairo_polygon_fini (&polygon);
return status;
}
#if 0
static cairo_bool_t
edges_have_an_intersection_quadratic (cairo_bo_edge_t *edges,
int num_edges)
{
int i, j;
cairo_bo_edge_t *a, *b;
cairo_bo_point32_t intersection;
/* We must not be given any upside-down edges. */
for (i = 0; i < num_edges; i++) {
assert (_cairo_bo_point32_compare (&edges[i].top, &edges[i].bottom) < 0);
edges[i].line.p1.x <<= CAIRO_BO_GUARD_BITS;
edges[i].line.p1.y <<= CAIRO_BO_GUARD_BITS;
edges[i].line.p2.x <<= CAIRO_BO_GUARD_BITS;
edges[i].line.p2.y <<= CAIRO_BO_GUARD_BITS;
}
for (i = 0; i < num_edges; i++) {
for (j = 0; j < num_edges; j++) {
if (i == j)
continue;
a = &edges[i];
b = &edges[j];
if (! _cairo_bo_edge_intersect (a, b, &intersection))
continue;
printf ("Found intersection (%d,%d) between (%d,%d)-(%d,%d) and (%d,%d)-(%d,%d)\n",
intersection.x,
intersection.y,
a->line.p1.x, a->line.p1.y,
a->line.p2.x, a->line.p2.y,
b->line.p1.x, b->line.p1.y,
b->line.p2.x, b->line.p2.y);
return TRUE;
}
}
return FALSE;
}
#define TEST_MAX_EDGES 10
typedef struct test {
const char *name;
const char *description;
int num_edges;
cairo_bo_edge_t edges[TEST_MAX_EDGES];
} test_t;
static test_t
tests[] = {
{
"3 near misses",
"3 edges all intersecting very close to each other",
3,
{
{ { 4, 2}, {0, 0}, { 9, 9}, NULL, NULL },
{ { 7, 2}, {0, 0}, { 2, 3}, NULL, NULL },
{ { 5, 2}, {0, 0}, { 1, 7}, NULL, NULL }
}
},
{
"inconsistent data",
"Derived from random testing---was leading to skip list and edge list disagreeing.",
2,
{
{ { 2, 3}, {0, 0}, { 8, 9}, NULL, NULL },
{ { 2, 3}, {0, 0}, { 6, 7}, NULL, NULL }
}
},
{
"failed sort",
"A test derived from random testing that leads to an inconsistent sort --- looks like we just can't attempt to validate the sweep line with edge_compare?",
3,
{
{ { 6, 2}, {0, 0}, { 6, 5}, NULL, NULL },
{ { 3, 5}, {0, 0}, { 5, 6}, NULL, NULL },
{ { 9, 2}, {0, 0}, { 5, 6}, NULL, NULL },
}
},
{
"minimal-intersection",
"Intersection of a two from among the smallest possible edges.",
2,
{
{ { 0, 0}, {0, 0}, { 1, 1}, NULL, NULL },
{ { 1, 0}, {0, 0}, { 0, 1}, NULL, NULL }
}
},
{
"simple",
"A simple intersection of two edges at an integer (2,2).",
2,
{
{ { 1, 1}, {0, 0}, { 3, 3}, NULL, NULL },
{ { 2, 1}, {0, 0}, { 2, 3}, NULL, NULL }
}
},
{
"bend-to-horizontal",
"With intersection truncation one edge bends to horizontal",
2,
{
{ { 9, 1}, {0, 0}, {3, 7}, NULL, NULL },
{ { 3, 5}, {0, 0}, {9, 9}, NULL, NULL }
}
}
};
/*
{
"endpoint",
"An intersection that occurs at the endpoint of a segment.",
{
{ { 4, 6}, { 5, 6}, NULL, { { NULL }} },
{ { 4, 5}, { 5, 7}, NULL, { { NULL }} },
{ { 0, 0}, { 0, 0}, NULL, { { NULL }} },
}
}
{
name = "overlapping",
desc = "Parallel segments that share an endpoint, with different slopes.",
edges = {
{ top = { x = 2, y = 0}, bottom = { x = 1, y = 1}},
{ top = { x = 2, y = 0}, bottom = { x = 0, y = 2}},
{ top = { x = 0, y = 3}, bottom = { x = 1, y = 3}},
{ top = { x = 0, y = 3}, bottom = { x = 2, y = 3}},
{ top = { x = 0, y = 4}, bottom = { x = 0, y = 6}},
{ top = { x = 0, y = 5}, bottom = { x = 0, y = 6}}
}
},
{
name = "hobby_stage_3",
desc = "A particularly tricky part of the 3rd stage of the 'hobby' test below.",
edges = {
{ top = { x = -1, y = -2}, bottom = { x = 4, y = 2}},
{ top = { x = 5, y = 3}, bottom = { x = 9, y = 5}},
{ top = { x = 5, y = 3}, bottom = { x = 6, y = 3}},
}
},
{
name = "hobby",
desc = "Example from John Hobby's paper. Requires 3 passes of the iterative algorithm.",
edges = {
{ top = { x = 0, y = 0}, bottom = { x = 9, y = 5}},
{ top = { x = 0, y = 0}, bottom = { x = 13, y = 6}},
{ top = { x = -1, y = -2}, bottom = { x = 9, y = 5}}
}
},
{
name = "slope",
desc = "Edges with same start/stop points but different slopes",
edges = {
{ top = { x = 4, y = 1}, bottom = { x = 6, y = 3}},
{ top = { x = 4, y = 1}, bottom = { x = 2, y = 3}},
{ top = { x = 2, y = 4}, bottom = { x = 4, y = 6}},
{ top = { x = 6, y = 4}, bottom = { x = 4, y = 6}}
}
},
{
name = "horizontal",
desc = "Test of a horizontal edge",
edges = {
{ top = { x = 1, y = 1}, bottom = { x = 6, y = 6}},
{ top = { x = 2, y = 3}, bottom = { x = 5, y = 3}}
}
},
{
name = "vertical",
desc = "Test of a vertical edge",
edges = {
{ top = { x = 5, y = 1}, bottom = { x = 5, y = 7}},
{ top = { x = 2, y = 4}, bottom = { x = 8, y = 5}}
}
},
{
name = "congruent",
desc = "Two overlapping edges with the same slope",
edges = {
{ top = { x = 5, y = 1}, bottom = { x = 5, y = 7}},
{ top = { x = 5, y = 2}, bottom = { x = 5, y = 6}},
{ top = { x = 2, y = 4}, bottom = { x = 8, y = 5}}
}
},
{
name = "multi",
desc = "Several segments with a common intersection point",
edges = {
{ top = { x = 1, y = 2}, bottom = { x = 5, y = 4} },
{ top = { x = 1, y = 1}, bottom = { x = 5, y = 5} },
{ top = { x = 2, y = 1}, bottom = { x = 4, y = 5} },
{ top = { x = 4, y = 1}, bottom = { x = 2, y = 5} },
{ top = { x = 5, y = 1}, bottom = { x = 1, y = 5} },
{ top = { x = 5, y = 2}, bottom = { x = 1, y = 4} }
}
}
};
*/
static int
run_test (const char *test_name,
cairo_bo_edge_t *test_edges,
int num_edges)
{
int i, intersections, passes;
cairo_bo_edge_t *edges;
cairo_array_t intersected_edges;
printf ("Testing: %s\n", test_name);
_cairo_array_init (&intersected_edges, sizeof (cairo_bo_edge_t));
intersections = _cairo_bentley_ottmann_intersect_edges (test_edges, num_edges, &intersected_edges);
if (intersections)
printf ("Pass 1 found %d intersections:\n", intersections);
    /* XXX: Multi-pass Bentley-Ottmann. Preferable would be to add a
* pass of Hobby's tolerance-square algorithm instead. */
passes = 1;
while (intersections) {
int num_edges = _cairo_array_num_elements (&intersected_edges);
passes++;
edges = _cairo_malloc_ab (num_edges, sizeof (cairo_bo_edge_t));
assert (edges != NULL);
memcpy (edges, _cairo_array_index (&intersected_edges, 0), num_edges * sizeof (cairo_bo_edge_t));
_cairo_array_fini (&intersected_edges);
_cairo_array_init (&intersected_edges, sizeof (cairo_bo_edge_t));
intersections = _cairo_bentley_ottmann_intersect_edges (edges, num_edges, &intersected_edges);
free (edges);
if (intersections){
printf ("Pass %d found %d remaining intersections:\n", passes, intersections);
} else {
if (passes > 3)
for (i = 0; i < passes; i++)
printf ("*");
printf ("No remainining intersections found after pass %d\n", passes);
}
}
if (edges_have_an_intersection_quadratic (_cairo_array_index (&intersected_edges, 0),
_cairo_array_num_elements (&intersected_edges)))
printf ("*** FAIL ***\n");
else
printf ("PASS\n");
_cairo_array_fini (&intersected_edges);
return 0;
}
#define MAX_RANDOM 300
int
main (void)
{
char random_name[] = "random-XX";
cairo_bo_edge_t random_edges[MAX_RANDOM], *edge;
unsigned int i, num_random;
test_t *test;
for (i = 0; i < ARRAY_LENGTH (tests); i++) {
test = &tests[i];
run_test (test->name, test->edges, test->num_edges);
}
for (num_random = 0; num_random < MAX_RANDOM; num_random++) {
srand (0);
for (i = 0; i < num_random; i++) {
do {
edge = &random_edges[i];
edge->line.p1.x = (int32_t) (10.0 * (rand() / (RAND_MAX + 1.0)));
edge->line.p1.y = (int32_t) (10.0 * (rand() / (RAND_MAX + 1.0)));
edge->line.p2.x = (int32_t) (10.0 * (rand() / (RAND_MAX + 1.0)));
edge->line.p2.y = (int32_t) (10.0 * (rand() / (RAND_MAX + 1.0)));
if (edge->line.p1.y > edge->line.p2.y) {
int32_t tmp = edge->line.p1.y;
edge->line.p1.y = edge->line.p2.y;
edge->line.p2.y = tmp;
}
} while (edge->line.p1.y == edge->line.p2.y);
}
sprintf (random_name, "random-%02d", num_random);
run_test (random_name, random_edges, num_random);
}
return 0;
}
#endif
| 0 |
D://workCode//uploadProject\awtk\3rd\cairo | D://workCode//uploadProject\awtk\3rd\cairo\cairo\cairo-botor-scan-converter.c | /*
* Copyright © 2004 Carl Worth
* Copyright © 2006 Red Hat, Inc.
* Copyright © 2007 David Turner
* Copyright © 2008 M Joonas Pihlaja
* Copyright © 2008 Chris Wilson
* Copyright © 2009 Intel Corporation
*
* This library is free software; you can redistribute it and/or
* modify it either under the terms of the GNU Lesser General Public
* License version 2.1 as published by the Free Software Foundation
* (the "LGPL") or, at your option, under the terms of the Mozilla
* Public License Version 1.1 (the "MPL"). If you do not alter this
* notice, a recipient may use your version of this file under either
* the MPL or the LGPL.
*
* You should have received a copy of the LGPL along with this library
* in the file COPYING-LGPL-2.1; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA
* You should have received a copy of the MPL along with this library
* in the file COPYING-MPL-1.1
*
* The contents of this file are subject to the Mozilla Public License
* Version 1.1 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
* OF ANY KIND, either express or implied. See the LGPL or the MPL for
* the specific language governing rights and limitations.
*
* The Original Code is the cairo graphics library.
*
* The Initial Developer of the Original Code is Carl Worth
*
* Contributor(s):
* Carl D. Worth <cworth@cworth.org>
* M Joonas Pihlaja <jpihlaja@cc.helsinki.fi>
* Chris Wilson <chris@chris-wilson.co.uk>
*/
/* Provide definitions for standalone compilation */
#include "cairoint.h"
#include "cairo-error-private.h"
#include "cairo-list-inline.h"
#include "cairo-freelist-private.h"
#include "cairo-combsort-inline.h"
#include <setjmp.h>
#define STEP_X CAIRO_FIXED_ONE
#define STEP_Y CAIRO_FIXED_ONE
#define UNROLL3(x) x x x
#define STEP_XY (2*STEP_X*STEP_Y) /* Unit area in the step. */
#define AREA_TO_ALPHA(c) (((c)*255 + STEP_XY/2) / STEP_XY)
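/* STEP_XY is twice the area of a full pixel in fixed-point units; coverage
 * areas are accumulated at twice their true value so that trapezoidal
 * contributions need no halving. AREA_TO_ALPHA() rescales such an area to
 * an 8-bit alpha with rounding, e.g. a fully covered pixel (STEP_XY) maps
 * to 255. */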
typedef struct _cairo_bo_intersect_ordinate {
int32_t ordinate;
enum { EXACT, INEXACT } exactness;
} cairo_bo_intersect_ordinate_t;
typedef struct _cairo_bo_intersect_point {
cairo_bo_intersect_ordinate_t x;
cairo_bo_intersect_ordinate_t y;
} cairo_bo_intersect_point_t;
struct quorem {
cairo_fixed_t quo;
cairo_fixed_t rem;
};
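/* A run records the coverage sign that takes effect for an edge at a given
 * sub-row offset (fractional y within the current pixel row). Runs are
 * collected while stepping at sub-pixel resolution and later converted to
 * cell coverage by coverage_render_runs()/coverage_render_vertical_runs(). */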
struct run {
struct run *next;
int sign;
cairo_fixed_t y;
};
typedef struct edge {
cairo_list_t link;
cairo_edge_t edge;
/* Current x coordinate and advancement.
* Initialised to the x coordinate of the top of the
* edge. The quotient is in cairo_fixed_t units and the
* remainder is mod dy in cairo_fixed_t units.
*/
cairo_fixed_t dy;
struct quorem x;
struct quorem dxdy;
struct quorem dxdy_full;
cairo_bool_t vertical;
unsigned int flags;
int current_sign;
struct run *runs;
} edge_t;
enum {
START = 0x1,
STOP = 0x2,
};
/* the parent is always given by index/2 */
#define PQ_PARENT_INDEX(i) ((i) >> 1)
#define PQ_FIRST_ENTRY 1
/* left and right children are index * 2 and (index * 2) +1 respectively */
#define PQ_LEFT_CHILD_INDEX(i) ((i) << 1)
typedef enum {
EVENT_TYPE_STOP,
EVENT_TYPE_INTERSECTION,
EVENT_TYPE_START
} event_type_t;
typedef struct _event {
cairo_fixed_t y;
event_type_t type;
} event_t;
typedef struct _start_event {
cairo_fixed_t y;
event_type_t type;
edge_t *edge;
} start_event_t;
typedef struct _queue_event {
cairo_fixed_t y;
event_type_t type;
edge_t *e1;
edge_t *e2;
} queue_event_t;
typedef struct _pqueue {
int size, max_size;
event_t **elements;
event_t *elements_embedded[1024];
} pqueue_t;
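/* Per-pixel-column accumulator for the current row: 'covered_height' is the
 * signed height of the edge crossings in this column (its full-width
 * contribution carries over to every column to its right), while
 * 'uncovered_area' is the (doubled) portion of that contribution which is
 * not actually covered within this cell. Cells are kept in an x-sorted
 * doubly-linked list between INT_MIN/INT_MAX sentinels (struct coverage). */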
struct cell {
struct cell *prev;
struct cell *next;
int x;
int uncovered_area;
int covered_height;
};
typedef struct _sweep_line {
cairo_list_t active;
cairo_list_t stopped;
cairo_list_t *insert_cursor;
cairo_bool_t is_vertical;
cairo_fixed_t current_row;
cairo_fixed_t current_subrow;
struct coverage {
struct cell head;
struct cell tail;
struct cell *cursor;
int count;
cairo_freepool_t pool;
} coverage;
struct event_queue {
pqueue_t pq;
event_t **start_events;
cairo_freepool_t pool;
} queue;
cairo_freepool_t runs;
jmp_buf unwind;
} sweep_line_t;
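/* Floored division helpers: unlike C's truncating '/', the quotient is
 * rounded towards negative infinity so the remainder always takes the sign
 * of the divisor, which keeps the DDA stepping below monotonic for negative
 * slopes. For example floored_divrem (-7, 3) yields quo = -3, rem = 2
 * (-3*3 + 2 == -7), whereas the C operators give -2 and -1. */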
cairo_always_inline static struct quorem
floored_divrem (int a, int b)
{
struct quorem qr;
qr.quo = a/b;
qr.rem = a%b;
if ((a^b)<0 && qr.rem) {
qr.quo--;
qr.rem += b;
}
return qr;
}
static struct quorem
floored_muldivrem(int x, int a, int b)
{
struct quorem qr;
long long xa = (long long)x*a;
qr.quo = xa/b;
qr.rem = xa%b;
if ((xa>=0) != (b>=0) && qr.rem) {
qr.quo--;
qr.rem += b;
}
return qr;
}
static cairo_fixed_t
line_compute_intersection_x_for_y (const cairo_line_t *line,
cairo_fixed_t y)
{
cairo_fixed_t x, dy;
if (y == line->p1.y)
return line->p1.x;
if (y == line->p2.y)
return line->p2.x;
x = line->p1.x;
dy = line->p2.y - line->p1.y;
if (dy != 0) {
x += _cairo_fixed_mul_div_floor (y - line->p1.y,
line->p2.x - line->p1.x,
dy);
}
return x;
}
/*
* We need to compare the x-coordinates of a pair of lines for a particular y,
* without loss of precision.
*
* The x-coordinate along an edge for a given y is:
* X = A_x + (Y - A_y) * A_dx / A_dy
*
* So the inequality we wish to test is:
* A_x + (Y - A_y) * A_dx / A_dy ∘ B_x + (Y - B_y) * B_dx / B_dy,
* where ∘ is our inequality operator.
*
* By construction, we know that A_dy and B_dy (and (Y - A_y), (Y - B_y)) are
* all positive, so we can rearrange it thus without causing a sign change:
* A_dy * B_dy * (A_x - B_x) ∘ (Y - B_y) * B_dx * A_dy
* - (Y - A_y) * A_dx * B_dy
*
* Given the assumption that all the deltas fit within 32 bits, we can compute
* this comparison directly using 128 bit arithmetic. For certain, but common,
* input we can reduce this down to a single 32 bit compare by inspecting the
* deltas.
*
* (And put the burden of the work on developing fast 128 bit ops, which are
* required throughout the tessellator.)
*
* See the similar discussion for _slope_compare().
*/
static int
edges_compare_x_for_y_general (const cairo_edge_t *a,
const cairo_edge_t *b,
int32_t y)
{
/* XXX: We're assuming here that dx and dy will still fit in 32
* bits. That's not true in general as there could be overflow. We
* should prevent that before the tessellation algorithm
* begins.
*/
int32_t dx;
int32_t adx, ady;
int32_t bdx, bdy;
enum {
HAVE_NONE = 0x0,
HAVE_DX = 0x1,
HAVE_ADX = 0x2,
HAVE_DX_ADX = HAVE_DX | HAVE_ADX,
HAVE_BDX = 0x4,
HAVE_DX_BDX = HAVE_DX | HAVE_BDX,
HAVE_ADX_BDX = HAVE_ADX | HAVE_BDX,
HAVE_ALL = HAVE_DX | HAVE_ADX | HAVE_BDX
} have_dx_adx_bdx = HAVE_ALL;
/* don't bother solving for abscissa if the edges' bounding boxes
* can be used to order them. */
{
int32_t amin, amax;
int32_t bmin, bmax;
if (a->line.p1.x < a->line.p2.x) {
amin = a->line.p1.x;
amax = a->line.p2.x;
} else {
amin = a->line.p2.x;
amax = a->line.p1.x;
}
if (b->line.p1.x < b->line.p2.x) {
bmin = b->line.p1.x;
bmax = b->line.p2.x;
} else {
bmin = b->line.p2.x;
bmax = b->line.p1.x;
}
if (amax < bmin) return -1;
if (amin > bmax) return +1;
}
ady = a->line.p2.y - a->line.p1.y;
adx = a->line.p2.x - a->line.p1.x;
if (adx == 0)
have_dx_adx_bdx &= ~HAVE_ADX;
bdy = b->line.p2.y - b->line.p1.y;
bdx = b->line.p2.x - b->line.p1.x;
if (bdx == 0)
have_dx_adx_bdx &= ~HAVE_BDX;
dx = a->line.p1.x - b->line.p1.x;
if (dx == 0)
have_dx_adx_bdx &= ~HAVE_DX;
#define L _cairo_int64x32_128_mul (_cairo_int32x32_64_mul (ady, bdy), dx)
#define A _cairo_int64x32_128_mul (_cairo_int32x32_64_mul (adx, bdy), y - a->line.p1.y)
#define B _cairo_int64x32_128_mul (_cairo_int32x32_64_mul (bdx, ady), y - b->line.p1.y)
switch (have_dx_adx_bdx) {
default:
case HAVE_NONE:
return 0;
case HAVE_DX:
/* A_dy * B_dy * (A_x - B_x) ∘ 0 */
return dx; /* ady * bdy is positive definite */
case HAVE_ADX:
/* 0 ∘ - (Y - A_y) * A_dx * B_dy */
return adx; /* bdy * (y - a->top.y) is positive definite */
case HAVE_BDX:
/* 0 ∘ (Y - B_y) * B_dx * A_dy */
return -bdx; /* ady * (y - b->top.y) is positive definite */
case HAVE_ADX_BDX:
/* 0 ∘ (Y - B_y) * B_dx * A_dy - (Y - A_y) * A_dx * B_dy */
if ((adx ^ bdx) < 0) {
return adx;
} else if (a->line.p1.y == b->line.p1.y) { /* common origin */
cairo_int64_t adx_bdy, bdx_ady;
/* ∴ A_dx * B_dy ∘ B_dx * A_dy */
adx_bdy = _cairo_int32x32_64_mul (adx, bdy);
bdx_ady = _cairo_int32x32_64_mul (bdx, ady);
return _cairo_int64_cmp (adx_bdy, bdx_ady);
} else
return _cairo_int128_cmp (A, B);
case HAVE_DX_ADX:
/* A_dy * (A_x - B_x) ∘ - (Y - A_y) * A_dx */
if ((-adx ^ dx) < 0) {
return dx;
} else {
cairo_int64_t ady_dx, dy_adx;
ady_dx = _cairo_int32x32_64_mul (ady, dx);
dy_adx = _cairo_int32x32_64_mul (a->line.p1.y - y, adx);
return _cairo_int64_cmp (ady_dx, dy_adx);
}
case HAVE_DX_BDX:
/* B_dy * (A_x - B_x) ∘ (Y - B_y) * B_dx */
if ((bdx ^ dx) < 0) {
return dx;
} else {
cairo_int64_t bdy_dx, dy_bdx;
bdy_dx = _cairo_int32x32_64_mul (bdy, dx);
dy_bdx = _cairo_int32x32_64_mul (y - b->line.p1.y, bdx);
return _cairo_int64_cmp (bdy_dx, dy_bdx);
}
case HAVE_ALL:
/* XXX try comparing (a->line.p2.x - b->line.p2.x) et al */
return _cairo_int128_cmp (L, _cairo_int128_sub (B, A));
}
#undef B
#undef A
#undef L
}
/*
* We need to compare the x-coordinate of a line for a particular y wrt to a
* given x, without loss of precision.
*
* The x-coordinate along an edge for a given y is:
* X = A_x + (Y - A_y) * A_dx / A_dy
*
* So the inequality we wish to test is:
* A_x + (Y - A_y) * A_dx / A_dy ∘ X
* where ∘ is our inequality operator.
*
* By construction, we know that A_dy (and (Y - A_y)) are
* all positive, so we can rearrange it thus without causing a sign change:
* (Y - A_y) * A_dx ∘ (X - A_x) * A_dy
*
* Given the assumption that all the deltas fit within 32 bits, we can compute
* this comparison directly using 64 bit arithmetic.
*
* See the similar discussion for _slope_compare() and
* edges_compare_x_for_y_general().
*/
static int
edge_compare_for_y_against_x (const cairo_edge_t *a,
int32_t y,
int32_t x)
{
int32_t adx, ady;
int32_t dx, dy;
cairo_int64_t L, R;
if (a->line.p1.x <= a->line.p2.x) {
if (x < a->line.p1.x)
return 1;
if (x > a->line.p2.x)
return -1;
} else {
if (x < a->line.p2.x)
return 1;
if (x > a->line.p1.x)
return -1;
}
adx = a->line.p2.x - a->line.p1.x;
dx = x - a->line.p1.x;
if (adx == 0)
return -dx;
if (dx == 0 || (adx ^ dx) < 0)
return adx;
dy = y - a->line.p1.y;
ady = a->line.p2.y - a->line.p1.y;
L = _cairo_int32x32_64_mul (dy, adx);
R = _cairo_int32x32_64_mul (dx, ady);
return _cairo_int64_cmp (L, R);
}
static int
edges_compare_x_for_y (const cairo_edge_t *a,
const cairo_edge_t *b,
int32_t y)
{
/* If the sweep-line is currently on an end-point of a line,
* then we know its precise x value (and considering that we often need to
* compare events at end-points, this happens frequently enough to warrant
* special casing).
*/
enum {
HAVE_NEITHER = 0x0,
HAVE_AX = 0x1,
HAVE_BX = 0x2,
HAVE_BOTH = HAVE_AX | HAVE_BX
} have_ax_bx = HAVE_BOTH;
int32_t ax = 0, bx = 0;
/* XXX given we have x and dx? */
if (y == a->line.p1.y)
ax = a->line.p1.x;
else if (y == a->line.p2.y)
ax = a->line.p2.x;
else
have_ax_bx &= ~HAVE_AX;
if (y == b->line.p1.y)
bx = b->line.p1.x;
else if (y == b->line.p2.y)
bx = b->line.p2.x;
else
have_ax_bx &= ~HAVE_BX;
switch (have_ax_bx) {
default:
case HAVE_NEITHER:
return edges_compare_x_for_y_general (a, b, y);
case HAVE_AX:
return -edge_compare_for_y_against_x (b, y, ax);
case HAVE_BX:
return edge_compare_for_y_against_x (a, y, bx);
case HAVE_BOTH:
return ax - bx;
}
}
static inline int
slope_compare (const edge_t *a,
const edge_t *b)
{
cairo_int64_t L, R;
int cmp;
cmp = a->dxdy.quo - b->dxdy.quo;
if (cmp)
return cmp;
if (a->dxdy.rem == 0)
return -b->dxdy.rem;
if (b->dxdy.rem == 0)
return a->dxdy.rem;
L = _cairo_int32x32_64_mul (b->dy, a->dxdy.rem);
R = _cairo_int32x32_64_mul (a->dy, b->dxdy.rem);
return _cairo_int64_cmp (L, R);
}
static inline int
line_equal (const cairo_line_t *a, const cairo_line_t *b)
{
return a->p1.x == b->p1.x && a->p1.y == b->p1.y &&
a->p2.x == b->p2.x && a->p2.y == b->p2.y;
}
static inline int
sweep_line_compare_edges (const edge_t *a,
const edge_t *b,
cairo_fixed_t y)
{
int cmp;
if (line_equal (&a->edge.line, &b->edge.line))
return 0;
cmp = edges_compare_x_for_y (&a->edge, &b->edge, y);
if (cmp)
return cmp;
return slope_compare (a, b);
}
static inline cairo_int64_t
det32_64 (int32_t a, int32_t b,
int32_t c, int32_t d)
{
/* det = a * d - b * c */
return _cairo_int64_sub (_cairo_int32x32_64_mul (a, d),
_cairo_int32x32_64_mul (b, c));
}
static inline cairo_int128_t
det64x32_128 (cairo_int64_t a, int32_t b,
cairo_int64_t c, int32_t d)
{
/* det = a * d - b * c */
return _cairo_int128_sub (_cairo_int64x32_128_mul (a, d),
_cairo_int64x32_128_mul (c, b));
}
/* Compute the intersection of two lines as defined by two edges. The
 * result is returned through *intersection as a pair of rounded 32-bit
 * ordinates, each tagged with whether the rounding was exact.
 *
 * Returns TRUE if the lines intersect within the extent of both segments,
 * or FALSE if they are parallel or the intersection falls out of range.
 */
static cairo_bool_t
intersect_lines (const edge_t *a, const edge_t *b,
cairo_bo_intersect_point_t *intersection)
{
cairo_int64_t a_det, b_det;
/* XXX: We're assuming here that dx and dy will still fit in 32
* bits. That's not true in general as there could be overflow. We
* should prevent that before the tessellation algorithm begins.
* What we're doing to mitigate this is to perform clamping in
* cairo_bo_tessellate_polygon().
*/
int32_t dx1 = a->edge.line.p1.x - a->edge.line.p2.x;
int32_t dy1 = a->edge.line.p1.y - a->edge.line.p2.y;
int32_t dx2 = b->edge.line.p1.x - b->edge.line.p2.x;
int32_t dy2 = b->edge.line.p1.y - b->edge.line.p2.y;
cairo_int64_t den_det;
cairo_int64_t R;
cairo_quorem64_t qr;
den_det = det32_64 (dx1, dy1, dx2, dy2);
/* Q: Can we determine that the lines do not intersect (within range)
* much more cheaply than computing the intersection point i.e. by
* avoiding the division?
*
* X = ax + t * adx = bx + s * bdx;
* Y = ay + t * ady = by + s * bdy;
* ∴ t * (ady*bdx - bdy*adx) = bdx * (by - ay) + bdy * (ax - bx)
* => t * L = R
*
* Therefore we can reject any intersection (under the criteria for
* valid intersection events) if:
* L^R < 0 => t < 0, or
* L<R => t > 1
*
* (where top/bottom must at least extend to the line endpoints).
*
* A similar substitution can be performed for s, yielding:
* s * (ady*bdx - bdy*adx) = ady * (ax - bx) - adx * (ay - by)
*/
R = det32_64 (dx2, dy2,
b->edge.line.p1.x - a->edge.line.p1.x,
b->edge.line.p1.y - a->edge.line.p1.y);
if (_cairo_int64_negative (den_det)) {
if (_cairo_int64_ge (den_det, R))
return FALSE;
} else {
if (_cairo_int64_le (den_det, R))
return FALSE;
}
R = det32_64 (dy1, dx1,
a->edge.line.p1.y - b->edge.line.p1.y,
a->edge.line.p1.x - b->edge.line.p1.x);
if (_cairo_int64_negative (den_det)) {
if (_cairo_int64_ge (den_det, R))
return FALSE;
} else {
if (_cairo_int64_le (den_det, R))
return FALSE;
}
/* We now know that the two lines should intersect within range. */
a_det = det32_64 (a->edge.line.p1.x, a->edge.line.p1.y,
a->edge.line.p2.x, a->edge.line.p2.y);
b_det = det32_64 (b->edge.line.p1.x, b->edge.line.p1.y,
b->edge.line.p2.x, b->edge.line.p2.y);
/* x = det (a_det, dx1, b_det, dx2) / den_det */
qr = _cairo_int_96by64_32x64_divrem (det64x32_128 (a_det, dx1,
b_det, dx2),
den_det);
if (_cairo_int64_eq (qr.rem, den_det))
return FALSE;
#if 0
intersection->x.exactness = _cairo_int64_is_zero (qr.rem) ? EXACT : INEXACT;
#else
intersection->x.exactness = EXACT;
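    /* Round to the nearest representable ordinate: double the remainder and,
     * if it reaches the divisor, bump the quotient away from zero (the
     * snapped result is still treated as EXACT); otherwise keep the
     * truncated quotient and flag it INEXACT so that the containment tests
     * can allow for the rounding error. */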
if (! _cairo_int64_is_zero (qr.rem)) {
if (_cairo_int64_negative (den_det) ^ _cairo_int64_negative (qr.rem))
qr.rem = _cairo_int64_negate (qr.rem);
qr.rem = _cairo_int64_mul (qr.rem, _cairo_int32_to_int64 (2));
if (_cairo_int64_ge (qr.rem, den_det)) {
qr.quo = _cairo_int64_add (qr.quo,
_cairo_int32_to_int64 (_cairo_int64_negative (qr.quo) ? -1 : 1));
} else
intersection->x.exactness = INEXACT;
}
#endif
intersection->x.ordinate = _cairo_int64_to_int32 (qr.quo);
/* y = det (a_det, dy1, b_det, dy2) / den_det */
qr = _cairo_int_96by64_32x64_divrem (det64x32_128 (a_det, dy1,
b_det, dy2),
den_det);
if (_cairo_int64_eq (qr.rem, den_det))
return FALSE;
#if 0
intersection->y.exactness = _cairo_int64_is_zero (qr.rem) ? EXACT : INEXACT;
#else
intersection->y.exactness = EXACT;
if (! _cairo_int64_is_zero (qr.rem)) {
/* compute ceiling away from zero */
qr.quo = _cairo_int64_add (qr.quo,
_cairo_int32_to_int64 (_cairo_int64_negative (qr.quo) ? -1 : 1));
intersection->y.exactness = INEXACT;
}
#endif
intersection->y.ordinate = _cairo_int64_to_int32 (qr.quo);
return TRUE;
}
static int
bo_intersect_ordinate_32_compare (int32_t a, int32_t b, int exactness)
{
int cmp;
/* First compare the quotient */
cmp = a - b;
if (cmp)
return cmp;
/* With quotient identical, if remainder is 0 then compare equal */
    /* Otherwise, the non-zero remainder orders the inexact ordinate before b */
return -(INEXACT == exactness);
}
/* Does the given edge contain the given point. The point must already
* be known to be contained within the line determined by the edge,
* (most likely the point results from an intersection of this edge
* with another).
*
* If we had exact arithmetic, then this function would simply be a
* matter of examining whether the y value of the point lies within
* the range of y values of the edge. But since intersection points
* are not exact due to being rounded to the nearest integer within
* the available precision, we must also examine the x value of the
* point.
*
* The definition of "contains" here is that the given intersection
* point will be seen by the sweep line after the start event for the
* given edge and before the stop event for the edge. See the comments
* in the implementation for more details.
*/
static cairo_bool_t
bo_edge_contains_intersect_point (const edge_t *edge,
cairo_bo_intersect_point_t *point)
{
int cmp_top, cmp_bottom;
/* XXX: When running the actual algorithm, we don't actually need to
* compare against edge->top at all here, since any intersection above
* top is eliminated early via a slope comparison. We're leaving these
* here for now only for the sake of the quadratic-time intersection
* finder which needs them.
*/
cmp_top = bo_intersect_ordinate_32_compare (point->y.ordinate,
edge->edge.top,
point->y.exactness);
if (cmp_top < 0)
return FALSE;
cmp_bottom = bo_intersect_ordinate_32_compare (point->y.ordinate,
edge->edge.bottom,
point->y.exactness);
if (cmp_bottom > 0)
return FALSE;
if (cmp_top > 0 && cmp_bottom < 0)
return TRUE;
/* At this stage, the point lies on the same y value as either
* edge->top or edge->bottom, so we have to examine the x value in
* order to properly determine containment. */
/* If the y value of the point is the same as the y value of the
* top of the edge, then the x value of the point must be greater
* to be considered as inside the edge. Similarly, if the y value
* of the point is the same as the y value of the bottom of the
* edge, then the x value of the point must be less to be
* considered as inside. */
if (cmp_top == 0) {
cairo_fixed_t top_x;
top_x = line_compute_intersection_x_for_y (&edge->edge.line,
edge->edge.top);
return bo_intersect_ordinate_32_compare (top_x, point->x.ordinate, point->x.exactness) < 0;
} else { /* cmp_bottom == 0 */
cairo_fixed_t bot_x;
bot_x = line_compute_intersection_x_for_y (&edge->edge.line,
edge->edge.bottom);
return bo_intersect_ordinate_32_compare (point->x.ordinate, bot_x, point->x.exactness) < 0;
}
}
static cairo_bool_t
edge_intersect (const edge_t *a,
const edge_t *b,
cairo_point_t *intersection)
{
cairo_bo_intersect_point_t quorem;
if (! intersect_lines (a, b, &quorem))
return FALSE;
if (a->edge.top != a->edge.line.p1.y || a->edge.bottom != a->edge.line.p2.y) {
if (! bo_edge_contains_intersect_point (a, &quorem))
return FALSE;
}
if (b->edge.top != b->edge.line.p1.y || b->edge.bottom != b->edge.line.p2.y) {
if (! bo_edge_contains_intersect_point (b, &quorem))
return FALSE;
}
/* Now that we've correctly compared the intersection point and
* determined that it lies within the edge, then we know that we
* no longer need any more bits of storage for the intersection
* than we do for our edge coordinates. We also no longer need the
* remainder from the division. */
intersection->x = quorem.x.ordinate;
intersection->y = quorem.y.ordinate;
return TRUE;
}
static inline int
event_compare (const event_t *a, const event_t *b)
{
return a->y - b->y;
}
static void
pqueue_init (pqueue_t *pq)
{
pq->max_size = ARRAY_LENGTH (pq->elements_embedded);
pq->size = 0;
pq->elements = pq->elements_embedded;
}
static void
pqueue_fini (pqueue_t *pq)
{
if (pq->elements != pq->elements_embedded)
free (pq->elements);
}
static cairo_bool_t
pqueue_grow (pqueue_t *pq)
{
event_t **new_elements;
pq->max_size *= 2;
if (pq->elements == pq->elements_embedded) {
new_elements = _cairo_malloc_ab (pq->max_size,
sizeof (event_t *));
if (unlikely (new_elements == NULL))
return FALSE;
memcpy (new_elements, pq->elements_embedded,
sizeof (pq->elements_embedded));
} else {
new_elements = _cairo_realloc_ab (pq->elements,
pq->max_size,
sizeof (event_t *));
if (unlikely (new_elements == NULL))
return FALSE;
}
pq->elements = new_elements;
return TRUE;
}
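/* pqueue_push()/pqueue_pop() maintain a 1-based implicit binary min-heap
 * keyed on event y: push sifts the new event up from the tail (growing the
 * array, or longjmp'ing out of the sweep on OOM), pop moves the old tail
 * into the root's place and sifts it down. */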
static inline void
pqueue_push (sweep_line_t *sweep_line, event_t *event)
{
event_t **elements;
int i, parent;
if (unlikely (sweep_line->queue.pq.size + 1 == sweep_line->queue.pq.max_size)) {
if (unlikely (! pqueue_grow (&sweep_line->queue.pq))) {
longjmp (sweep_line->unwind,
_cairo_error (CAIRO_STATUS_NO_MEMORY));
}
}
elements = sweep_line->queue.pq.elements;
for (i = ++sweep_line->queue.pq.size;
i != PQ_FIRST_ENTRY &&
event_compare (event,
elements[parent = PQ_PARENT_INDEX (i)]) < 0;
i = parent)
{
elements[i] = elements[parent];
}
elements[i] = event;
}
static inline void
pqueue_pop (pqueue_t *pq)
{
event_t **elements = pq->elements;
event_t *tail;
int child, i;
tail = elements[pq->size--];
if (pq->size == 0) {
elements[PQ_FIRST_ENTRY] = NULL;
return;
}
for (i = PQ_FIRST_ENTRY;
(child = PQ_LEFT_CHILD_INDEX (i)) <= pq->size;
i = child)
{
if (child != pq->size &&
event_compare (elements[child+1],
elements[child]) < 0)
{
child++;
}
if (event_compare (elements[child], tail) >= 0)
break;
elements[i] = elements[child];
}
elements[i] = tail;
}
static inline void
event_insert (sweep_line_t *sweep_line,
event_type_t type,
edge_t *e1,
edge_t *e2,
cairo_fixed_t y)
{
queue_event_t *event;
event = _cairo_freepool_alloc (&sweep_line->queue.pool);
if (unlikely (event == NULL)) {
longjmp (sweep_line->unwind,
_cairo_error (CAIRO_STATUS_NO_MEMORY));
}
event->y = y;
event->type = type;
event->e1 = e1;
event->e2 = e2;
pqueue_push (sweep_line, (event_t *) event);
}
static void
event_delete (sweep_line_t *sweep_line,
event_t *event)
{
_cairo_freepool_free (&sweep_line->queue.pool, event);
}
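/* Merge the two event sources: peek at the head of the priority queue
 * (stop/intersection events) and at the next entry of the pre-sorted
 * start-event array, and consume whichever has the smaller y. */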
static inline event_t *
event_next (sweep_line_t *sweep_line)
{
event_t *event, *cmp;
event = sweep_line->queue.pq.elements[PQ_FIRST_ENTRY];
cmp = *sweep_line->queue.start_events;
if (event == NULL ||
(cmp != NULL && event_compare (cmp, event) < 0))
{
event = cmp;
sweep_line->queue.start_events++;
}
else
{
pqueue_pop (&sweep_line->queue.pq);
}
return event;
}
CAIRO_COMBSORT_DECLARE (start_event_sort, event_t *, event_compare)
static inline void
event_insert_stop (sweep_line_t *sweep_line,
edge_t *edge)
{
event_insert (sweep_line,
EVENT_TYPE_STOP,
edge, NULL,
edge->edge.bottom);
}
static inline void
event_insert_if_intersect_below_current_y (sweep_line_t *sweep_line,
edge_t *left,
edge_t *right)
{
cairo_point_t intersection;
/* start points intersect */
if (left->edge.line.p1.x == right->edge.line.p1.x &&
left->edge.line.p1.y == right->edge.line.p1.y)
{
return;
}
/* end points intersect, process DELETE events first */
if (left->edge.line.p2.x == right->edge.line.p2.x &&
left->edge.line.p2.y == right->edge.line.p2.y)
{
return;
}
if (slope_compare (left, right) <= 0)
return;
if (! edge_intersect (left, right, &intersection))
return;
event_insert (sweep_line,
EVENT_TYPE_INTERSECTION,
left, right,
intersection.y);
}
static inline edge_t *
link_to_edge (cairo_list_t *link)
{
return (edge_t *) link;
}
static void
sweep_line_insert (sweep_line_t *sweep_line,
edge_t *edge)
{
cairo_list_t *pos;
cairo_fixed_t y = sweep_line->current_subrow;
pos = sweep_line->insert_cursor;
if (pos == &sweep_line->active)
pos = sweep_line->active.next;
if (pos != &sweep_line->active) {
int cmp;
cmp = sweep_line_compare_edges (link_to_edge (pos),
edge,
y);
if (cmp < 0) {
while (pos->next != &sweep_line->active &&
sweep_line_compare_edges (link_to_edge (pos->next),
edge,
y) < 0)
{
pos = pos->next;
}
} else if (cmp > 0) {
do {
pos = pos->prev;
} while (pos != &sweep_line->active &&
sweep_line_compare_edges (link_to_edge (pos),
edge,
y) > 0);
}
}
cairo_list_add (&edge->link, pos);
sweep_line->insert_cursor = &edge->link;
}
inline static void
coverage_rewind (struct coverage *cells)
{
cells->cursor = &cells->head;
}
static void
coverage_init (struct coverage *cells)
{
_cairo_freepool_init (&cells->pool,
sizeof (struct cell));
cells->head.prev = NULL;
cells->head.next = &cells->tail;
cells->head.x = INT_MIN;
cells->tail.prev = &cells->head;
cells->tail.next = NULL;
cells->tail.x = INT_MAX;
cells->count = 0;
coverage_rewind (cells);
}
static void
coverage_fini (struct coverage *cells)
{
_cairo_freepool_fini (&cells->pool);
}
inline static void
coverage_reset (struct coverage *cells)
{
cells->head.next = &cells->tail;
cells->tail.prev = &cells->head;
cells->count = 0;
_cairo_freepool_reset (&cells->pool);
coverage_rewind (cells);
}
static struct cell *
coverage_alloc (sweep_line_t *sweep_line,
struct cell *tail,
int x)
{
struct cell *cell;
cell = _cairo_freepool_alloc (&sweep_line->coverage.pool);
if (unlikely (NULL == cell)) {
longjmp (sweep_line->unwind,
_cairo_error (CAIRO_STATUS_NO_MEMORY));
}
tail->prev->next = cell;
cell->prev = tail->prev;
cell->next = tail;
tail->prev = cell;
cell->x = x;
cell->uncovered_area = 0;
cell->covered_height = 0;
sweep_line->coverage.count++;
return cell;
}
inline static struct cell *
coverage_find (sweep_line_t *sweep_line, int x)
{
struct cell *cell;
cell = sweep_line->coverage.cursor;
if (unlikely (cell->x > x)) {
do {
if (cell->prev->x < x)
break;
cell = cell->prev;
} while (TRUE);
} else {
if (cell->x == x)
return cell;
do {
UNROLL3({
cell = cell->next;
if (cell->x >= x)
break;
});
} while (TRUE);
}
if (cell->x != x)
cell = coverage_alloc (sweep_line, cell, x);
return sweep_line->coverage.cursor = cell;
}
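/* Rasterise one edge segment running from x 'left' to x 'right' between the
 * sub-rows y1 and y2: orient it left-to-right, then walk the pixel columns
 * it crosses with a quotient/remainder DDA, adding signed covered height and
 * (doubled) uncovered area to each cell along the way. */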
static void
coverage_render_cells (sweep_line_t *sweep_line,
cairo_fixed_t left, cairo_fixed_t right,
cairo_fixed_t y1, cairo_fixed_t y2,
int sign)
{
int fx1, fx2;
int ix1, ix2;
int dx, dy;
/* Orient the edge left-to-right. */
dx = right - left;
if (dx >= 0) {
ix1 = _cairo_fixed_integer_part (left);
fx1 = _cairo_fixed_fractional_part (left);
ix2 = _cairo_fixed_integer_part (right);
fx2 = _cairo_fixed_fractional_part (right);
dy = y2 - y1;
} else {
ix1 = _cairo_fixed_integer_part (right);
fx1 = _cairo_fixed_fractional_part (right);
ix2 = _cairo_fixed_integer_part (left);
fx2 = _cairo_fixed_fractional_part (left);
dx = -dx;
sign = -sign;
dy = y1 - y2;
y1 = y2 - dy;
y2 = y1 + dy;
}
/* Add coverage for all pixels [ix1,ix2] on this row crossed
* by the edge. */
{
struct quorem y = floored_divrem ((STEP_X - fx1)*dy, dx);
struct cell *cell;
cell = sweep_line->coverage.cursor;
if (cell->x != ix1) {
if (unlikely (cell->x > ix1)) {
do {
if (cell->prev->x < ix1)
break;
cell = cell->prev;
} while (TRUE);
} else do {
UNROLL3({
if (cell->x >= ix1)
break;
cell = cell->next;
});
} while (TRUE);
if (cell->x != ix1)
cell = coverage_alloc (sweep_line, cell, ix1);
}
cell->uncovered_area += sign * y.quo * (STEP_X + fx1);
cell->covered_height += sign * y.quo;
y.quo += y1;
cell = cell->next;
if (cell->x != ++ix1)
cell = coverage_alloc (sweep_line, cell, ix1);
if (ix1 < ix2) {
struct quorem dydx_full = floored_divrem (STEP_X*dy, dx);
do {
cairo_fixed_t y_skip = dydx_full.quo;
y.rem += dydx_full.rem;
if (y.rem >= dx) {
++y_skip;
y.rem -= dx;
}
y.quo += y_skip;
y_skip *= sign;
cell->covered_height += y_skip;
cell->uncovered_area += y_skip*STEP_X;
cell = cell->next;
if (cell->x != ++ix1)
cell = coverage_alloc (sweep_line, cell, ix1);
} while (ix1 != ix2);
}
cell->uncovered_area += sign*(y2 - y.quo)*fx2;
cell->covered_height += sign*(y2 - y.quo);
sweep_line->coverage.cursor = cell;
}
}
inline static void
full_inc_edge (edge_t *edge)
{
edge->x.quo += edge->dxdy_full.quo;
edge->x.rem += edge->dxdy_full.rem;
if (edge->x.rem >= 0) {
++edge->x.quo;
edge->x.rem -= edge->dy;
}
}
static void
full_add_edge (sweep_line_t *sweep_line, edge_t *edge, int sign)
{
struct cell *cell;
cairo_fixed_t x1, x2;
int ix1, ix2;
int frac;
edge->current_sign = sign;
ix1 = _cairo_fixed_integer_part (edge->x.quo);
if (edge->vertical) {
frac = _cairo_fixed_fractional_part (edge->x.quo);
cell = coverage_find (sweep_line, ix1);
cell->covered_height += sign * STEP_Y;
cell->uncovered_area += sign * 2 * frac * STEP_Y;
return;
}
x1 = edge->x.quo;
full_inc_edge (edge);
x2 = edge->x.quo;
ix2 = _cairo_fixed_integer_part (edge->x.quo);
/* Edge is entirely within a column? */
if (likely (ix1 == ix2)) {
frac = _cairo_fixed_fractional_part (x1) +
_cairo_fixed_fractional_part (x2);
cell = coverage_find (sweep_line, ix1);
cell->covered_height += sign * STEP_Y;
cell->uncovered_area += sign * frac * STEP_Y;
return;
}
coverage_render_cells (sweep_line, x1, x2, 0, STEP_Y, sign);
}
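/* Pair up the active edges for one full pixel row under the non-zero
 * winding rule: starting from the leftmost edge, accumulate directions until
 * the winding returns to zero (continuing past a zero crossing when the next
 * edge starts at the same x), then emit the pair via full_add_edge() with
 * signs +1 and -1. full_evenodd() below is the parity-based counterpart. */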
static void
full_nonzero (sweep_line_t *sweep_line)
{
cairo_list_t *pos;
sweep_line->is_vertical = TRUE;
pos = sweep_line->active.next;
do {
edge_t *left = link_to_edge (pos), *right;
int winding = left->edge.dir;
sweep_line->is_vertical &= left->vertical;
pos = left->link.next;
do {
if (unlikely (pos == &sweep_line->active)) {
full_add_edge (sweep_line, left, +1);
return;
}
right = link_to_edge (pos);
pos = pos->next;
sweep_line->is_vertical &= right->vertical;
winding += right->edge.dir;
if (0 == winding) {
if (pos == &sweep_line->active ||
link_to_edge (pos)->x.quo != right->x.quo)
{
break;
}
}
if (! right->vertical)
full_inc_edge (right);
} while (TRUE);
full_add_edge (sweep_line, left, +1);
full_add_edge (sweep_line, right, -1);
} while (pos != &sweep_line->active);
}
static void
full_evenodd (sweep_line_t *sweep_line)
{
cairo_list_t *pos;
sweep_line->is_vertical = TRUE;
pos = sweep_line->active.next;
do {
edge_t *left = link_to_edge (pos), *right;
int winding = 0;
sweep_line->is_vertical &= left->vertical;
pos = left->link.next;
do {
if (pos == &sweep_line->active) {
full_add_edge (sweep_line, left, +1);
return;
}
right = link_to_edge (pos);
pos = pos->next;
sweep_line->is_vertical &= right->vertical;
if (++winding & 1) {
if (pos == &sweep_line->active ||
link_to_edge (pos)->x.quo != right->x.quo)
{
break;
}
}
if (! right->vertical)
full_inc_edge (right);
} while (TRUE);
full_add_edge (sweep_line, left, +1);
full_add_edge (sweep_line, right, -1);
} while (pos != &sweep_line->active);
}
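/* Convert the row's coverage cells into half-open spans for the renderer:
 * 'cover' carries the running (doubled) covered area between cells, and each
 * cell corrects it by its own uncovered area before a span is emitted at
 * that x. */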
static void
render_rows (cairo_botor_scan_converter_t *self,
sweep_line_t *sweep_line,
int y, int height,
cairo_span_renderer_t *renderer)
{
cairo_half_open_span_t spans_stack[CAIRO_STACK_ARRAY_LENGTH (cairo_half_open_span_t)];
cairo_half_open_span_t *spans = spans_stack;
struct cell *cell;
int prev_x, cover;
int num_spans;
cairo_status_t status;
if (unlikely (sweep_line->coverage.count == 0)) {
status = renderer->render_rows (renderer, y, height, NULL, 0);
if (unlikely (status))
longjmp (sweep_line->unwind, status);
return;
}
/* Allocate enough spans for the row. */
num_spans = 2*sweep_line->coverage.count+2;
if (unlikely (num_spans > ARRAY_LENGTH (spans_stack))) {
spans = _cairo_malloc_ab (num_spans, sizeof (cairo_half_open_span_t));
if (unlikely (spans == NULL)) {
longjmp (sweep_line->unwind,
_cairo_error (CAIRO_STATUS_NO_MEMORY));
}
}
/* Form the spans from the coverage and areas. */
num_spans = 0;
prev_x = self->xmin;
cover = 0;
cell = sweep_line->coverage.head.next;
do {
int x = cell->x;
int area;
if (x > prev_x) {
spans[num_spans].x = prev_x;
spans[num_spans].inverse = 0;
spans[num_spans].coverage = AREA_TO_ALPHA (cover);
++num_spans;
}
cover += cell->covered_height*STEP_X*2;
area = cover - cell->uncovered_area;
spans[num_spans].x = x;
spans[num_spans].coverage = AREA_TO_ALPHA (area);
++num_spans;
prev_x = x + 1;
} while ((cell = cell->next) != &sweep_line->coverage.tail);
if (prev_x <= self->xmax) {
spans[num_spans].x = prev_x;
spans[num_spans].inverse = 0;
spans[num_spans].coverage = AREA_TO_ALPHA (cover);
++num_spans;
}
if (cover && prev_x < self->xmax) {
spans[num_spans].x = self->xmax;
spans[num_spans].inverse = 1;
spans[num_spans].coverage = 0;
++num_spans;
}
status = renderer->render_rows (renderer, y, height, spans, num_spans);
if (unlikely (spans != spans_stack))
free (spans);
coverage_reset (&sweep_line->coverage);
if (unlikely (status))
longjmp (sweep_line->unwind, status);
}
static void
full_repeat (sweep_line_t *sweep)
{
edge_t *edge;
cairo_list_foreach_entry (edge, edge_t, &sweep->active, link) {
if (edge->current_sign)
full_add_edge (sweep, edge, edge->current_sign);
else if (! edge->vertical)
full_inc_edge (edge);
}
}
static void
full_reset (sweep_line_t *sweep)
{
edge_t *edge;
cairo_list_foreach_entry (edge, edge_t, &sweep->active, link)
edge->current_sign = 0;
}
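/* Advance the sweep by whole pixel rows from current_row down to 'row':
 * compute the coverage once and, if the active edges are all vertical (or
 * only one row is spanned), render it for the whole range in a single call;
 * otherwise re-step the edges row by row with full_repeat(). */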
static void
full_step (cairo_botor_scan_converter_t *self,
sweep_line_t *sweep_line,
cairo_fixed_t row,
cairo_span_renderer_t *renderer)
{
int top, bottom;
top = _cairo_fixed_integer_part (sweep_line->current_row);
bottom = _cairo_fixed_integer_part (row);
if (cairo_list_is_empty (&sweep_line->active)) {
cairo_status_t status;
status = renderer->render_rows (renderer, top, bottom - top, NULL, 0);
if (unlikely (status))
longjmp (sweep_line->unwind, status);
return;
}
if (self->fill_rule == CAIRO_FILL_RULE_WINDING)
full_nonzero (sweep_line);
else
full_evenodd (sweep_line);
if (sweep_line->is_vertical || bottom == top + 1) {
render_rows (self, sweep_line, top, bottom - top, renderer);
full_reset (sweep_line);
return;
}
render_rows (self, sweep_line, top++, 1, renderer);
do {
full_repeat (sweep_line);
render_rows (self, sweep_line, top, 1, renderer);
} while (++top != bottom);
full_reset (sweep_line);
}
cairo_always_inline static void
sub_inc_edge (edge_t *edge,
cairo_fixed_t height)
{
if (height == 1) {
edge->x.quo += edge->dxdy.quo;
edge->x.rem += edge->dxdy.rem;
if (edge->x.rem >= 0) {
++edge->x.quo;
edge->x.rem -= edge->dy;
}
} else {
edge->x.quo += height * edge->dxdy.quo;
edge->x.rem += height * edge->dxdy.rem;
if (edge->x.rem >= 0) {
int carry = edge->x.rem / edge->dy + 1;
edge->x.quo += carry;
edge->x.rem -= carry * edge->dy;
}
}
}
static void
sub_add_run (sweep_line_t *sweep_line, edge_t *edge, int y, int sign)
{
struct run *run;
run = _cairo_freepool_alloc (&sweep_line->runs);
if (unlikely (run == NULL))
longjmp (sweep_line->unwind, _cairo_error (CAIRO_STATUS_NO_MEMORY));
run->y = y;
run->sign = sign;
run->next = edge->runs;
edge->runs = run;
edge->current_sign = sign;
}
inline static cairo_bool_t
edges_coincident (edge_t *left, edge_t *right, cairo_fixed_t y)
{
/* XXX is compare_x_for_y() worth executing during sub steps? */
return line_equal (&left->edge.line, &right->edge.line);
//edges_compare_x_for_y (&left->edge, &right->edge, y) >= 0;
}
static void
sub_nonzero (sweep_line_t *sweep_line)
{
cairo_fixed_t y = sweep_line->current_subrow;
cairo_fixed_t fy = _cairo_fixed_fractional_part (y);
cairo_list_t *pos;
pos = sweep_line->active.next;
do {
edge_t *left = link_to_edge (pos), *right;
int winding = left->edge.dir;
pos = left->link.next;
do {
if (unlikely (pos == &sweep_line->active)) {
if (left->current_sign != +1)
sub_add_run (sweep_line, left, fy, +1);
return;
}
right = link_to_edge (pos);
pos = pos->next;
winding += right->edge.dir;
if (0 == winding) {
if (pos == &sweep_line->active ||
! edges_coincident (right, link_to_edge (pos), y))
{
break;
}
}
if (right->current_sign)
sub_add_run (sweep_line, right, fy, 0);
} while (TRUE);
if (left->current_sign != +1)
sub_add_run (sweep_line, left, fy, +1);
if (right->current_sign != -1)
sub_add_run (sweep_line, right, fy, -1);
} while (pos != &sweep_line->active);
}
static void
sub_evenodd (sweep_line_t *sweep_line)
{
cairo_fixed_t y = sweep_line->current_subrow;
cairo_fixed_t fy = _cairo_fixed_fractional_part (y);
cairo_list_t *pos;
pos = sweep_line->active.next;
do {
edge_t *left = link_to_edge (pos), *right;
int winding = 0;
pos = left->link.next;
do {
if (unlikely (pos == &sweep_line->active)) {
if (left->current_sign != +1)
sub_add_run (sweep_line, left, fy, +1);
return;
}
right = link_to_edge (pos);
pos = pos->next;
if (++winding & 1) {
if (pos == &sweep_line->active ||
! edges_coincident (right, link_to_edge (pos), y))
{
break;
}
}
if (right->current_sign)
sub_add_run (sweep_line, right, fy, 0);
} while (TRUE);
if (left->current_sign != +1)
sub_add_run (sweep_line, left, fy, +1);
if (right->current_sign != -1)
sub_add_run (sweep_line, right, fy, -1);
} while (pos != &sweep_line->active);
}
cairo_always_inline static void
sub_step (cairo_botor_scan_converter_t *self,
sweep_line_t *sweep_line)
{
if (cairo_list_is_empty (&sweep_line->active))
return;
if (self->fill_rule == CAIRO_FILL_RULE_WINDING)
sub_nonzero (sweep_line);
else
sub_evenodd (sweep_line);
}
static void
coverage_render_runs (sweep_line_t *sweep, edge_t *edge,
cairo_fixed_t y1, cairo_fixed_t y2)
{
struct run tail;
struct run *run = &tail;
tail.next = NULL;
tail.y = y2;
/* Order the runs top->bottom */
while (edge->runs) {
struct run *r;
r = edge->runs;
edge->runs = r->next;
r->next = run;
run = r;
}
if (run->y > y1)
sub_inc_edge (edge, run->y - y1);
do {
cairo_fixed_t x1, x2;
y1 = run->y;
y2 = run->next->y;
x1 = edge->x.quo;
if (y2 - y1 == STEP_Y)
full_inc_edge (edge);
else
sub_inc_edge (edge, y2 - y1);
x2 = edge->x.quo;
if (run->sign) {
int ix1, ix2;
ix1 = _cairo_fixed_integer_part (x1);
ix2 = _cairo_fixed_integer_part (x2);
/* Edge is entirely within a column? */
if (likely (ix1 == ix2)) {
struct cell *cell;
int frac;
frac = _cairo_fixed_fractional_part (x1) +
_cairo_fixed_fractional_part (x2);
cell = coverage_find (sweep, ix1);
cell->covered_height += run->sign * (y2 - y1);
cell->uncovered_area += run->sign * (y2 - y1) * frac;
} else {
coverage_render_cells (sweep, x1, x2, y1, y2, run->sign);
}
}
run = run->next;
} while (run->next != NULL);
}
static void
coverage_render_vertical_runs (sweep_line_t *sweep, edge_t *edge, cairo_fixed_t y2)
{
struct cell *cell;
struct run *run;
int height = 0;
for (run = edge->runs; run != NULL; run = run->next) {
if (run->sign)
height += run->sign * (y2 - run->y);
y2 = run->y;
}
cell = coverage_find (sweep, _cairo_fixed_integer_part (edge->x.quo));
cell->covered_height += height;
cell->uncovered_area += 2 * _cairo_fixed_fractional_part (edge->x.quo) * height;
}
cairo_always_inline static void
sub_emit (cairo_botor_scan_converter_t *self,
sweep_line_t *sweep,
cairo_span_renderer_t *renderer)
{
edge_t *edge;
sub_step (self, sweep);
/* convert the runs into coverages */
cairo_list_foreach_entry (edge, edge_t, &sweep->active, link) {
if (edge->runs == NULL) {
if (! edge->vertical) {
if (edge->flags & START) {
sub_inc_edge (edge,
STEP_Y - _cairo_fixed_fractional_part (edge->edge.top));
edge->flags &= ~START;
} else
full_inc_edge (edge);
}
} else {
if (edge->vertical) {
coverage_render_vertical_runs (sweep, edge, STEP_Y);
} else {
int y1 = 0;
if (edge->flags & START) {
y1 = _cairo_fixed_fractional_part (edge->edge.top);
edge->flags &= ~START;
}
coverage_render_runs (sweep, edge, y1, STEP_Y);
}
}
edge->current_sign = 0;
edge->runs = NULL;
}
cairo_list_foreach_entry (edge, edge_t, &sweep->stopped, link) {
int y2 = _cairo_fixed_fractional_part (edge->edge.bottom);
if (edge->vertical) {
coverage_render_vertical_runs (sweep, edge, y2);
} else {
int y1 = 0;
if (edge->flags & START)
y1 = _cairo_fixed_fractional_part (edge->edge.top);
coverage_render_runs (sweep, edge, y1, y2);
}
}
cairo_list_init (&sweep->stopped);
_cairo_freepool_reset (&sweep->runs);
render_rows (self, sweep,
_cairo_fixed_integer_part (sweep->current_row), 1,
renderer);
}
static void
sweep_line_init (sweep_line_t *sweep_line,
event_t **start_events,
int num_events)
{
cairo_list_init (&sweep_line->active);
cairo_list_init (&sweep_line->stopped);
sweep_line->insert_cursor = &sweep_line->active;
sweep_line->current_row = INT32_MIN;
sweep_line->current_subrow = INT32_MIN;
coverage_init (&sweep_line->coverage);
_cairo_freepool_init (&sweep_line->runs, sizeof (struct run));
start_event_sort (start_events, num_events);
start_events[num_events] = NULL;
sweep_line->queue.start_events = start_events;
_cairo_freepool_init (&sweep_line->queue.pool,
sizeof (queue_event_t));
pqueue_init (&sweep_line->queue.pq);
sweep_line->queue.pq.elements[PQ_FIRST_ENTRY] = NULL;
}
static void
sweep_line_delete (sweep_line_t *sweep_line,
edge_t *edge)
{
if (sweep_line->insert_cursor == &edge->link)
sweep_line->insert_cursor = edge->link.prev;
cairo_list_del (&edge->link);
if (edge->runs)
cairo_list_add_tail (&edge->link, &sweep_line->stopped);
edge->flags |= STOP;
}
static void
sweep_line_swap (sweep_line_t *sweep_line,
edge_t *left,
edge_t *right)
{
right->link.prev = left->link.prev;
left->link.next = right->link.next;
right->link.next = &left->link;
left->link.prev = &right->link;
left->link.next->prev = &left->link;
right->link.prev->next = &right->link;
}
static void
sweep_line_fini (sweep_line_t *sweep_line)
{
pqueue_fini (&sweep_line->queue.pq);
_cairo_freepool_fini (&sweep_line->queue.pool);
coverage_fini (&sweep_line->coverage);
_cairo_freepool_fini (&sweep_line->runs);
}
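/* Main sweep loop: consume events in y order, taking full-pixel steps via
 * full_step() whenever no event falls inside the current row and otherwise
 * accumulating sub-pixel runs with sub_step()/sub_emit(); the active edge
 * list is kept sorted Bentley-Ottmann style via stop and intersection
 * events. */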
static cairo_status_t
botor_generate (cairo_botor_scan_converter_t *self,
event_t **start_events,
cairo_span_renderer_t *renderer)
{
cairo_status_t status;
sweep_line_t sweep_line;
cairo_fixed_t ybot;
event_t *event;
cairo_list_t *left, *right;
edge_t *e1, *e2;
int bottom;
sweep_line_init (&sweep_line, start_events, self->num_edges);
if ((status = setjmp (sweep_line.unwind)))
goto unwind;
ybot = self->extents.p2.y;
sweep_line.current_subrow = self->extents.p1.y;
sweep_line.current_row = _cairo_fixed_floor (self->extents.p1.y);
event = *sweep_line.queue.start_events++;
do {
/* Can we process a full step in one go? */
if (event->y >= sweep_line.current_row + STEP_Y) {
bottom = _cairo_fixed_floor (event->y);
full_step (self, &sweep_line, bottom, renderer);
sweep_line.current_row = bottom;
sweep_line.current_subrow = bottom;
}
do {
if (event->y > sweep_line.current_subrow) {
sub_step (self, &sweep_line);
sweep_line.current_subrow = event->y;
}
do {
/* Update the active list using Bentley-Ottmann */
switch (event->type) {
case EVENT_TYPE_START:
e1 = ((start_event_t *) event)->edge;
sweep_line_insert (&sweep_line, e1);
event_insert_stop (&sweep_line, e1);
left = e1->link.prev;
right = e1->link.next;
if (left != &sweep_line.active) {
event_insert_if_intersect_below_current_y (&sweep_line,
link_to_edge (left), e1);
}
if (right != &sweep_line.active) {
event_insert_if_intersect_below_current_y (&sweep_line,
e1, link_to_edge (right));
}
break;
case EVENT_TYPE_STOP:
e1 = ((queue_event_t *) event)->e1;
event_delete (&sweep_line, event);
left = e1->link.prev;
right = e1->link.next;
sweep_line_delete (&sweep_line, e1);
if (left != &sweep_line.active &&
right != &sweep_line.active)
{
event_insert_if_intersect_below_current_y (&sweep_line,
link_to_edge (left),
link_to_edge (right));
}
break;
case EVENT_TYPE_INTERSECTION:
e1 = ((queue_event_t *) event)->e1;
e2 = ((queue_event_t *) event)->e2;
event_delete (&sweep_line, event);
if (e1->flags & STOP)
break;
if (e2->flags & STOP)
break;
/* skip this intersection if its edges are not adjacent */
if (&e2->link != e1->link.next)
break;
left = e1->link.prev;
right = e2->link.next;
sweep_line_swap (&sweep_line, e1, e2);
/* after the swap e2 is left of e1 */
if (left != &sweep_line.active) {
event_insert_if_intersect_below_current_y (&sweep_line,
link_to_edge (left), e2);
}
if (right != &sweep_line.active) {
event_insert_if_intersect_below_current_y (&sweep_line,
e1, link_to_edge (right));
}
break;
}
event = event_next (&sweep_line);
if (event == NULL)
goto end;
} while (event->y == sweep_line.current_subrow);
} while (event->y < sweep_line.current_row + STEP_Y);
bottom = sweep_line.current_row + STEP_Y;
sub_emit (self, &sweep_line, renderer);
sweep_line.current_subrow = bottom;
sweep_line.current_row = sweep_line.current_subrow;
} while (TRUE);
end:
/* flush any partial spans */
if (sweep_line.current_subrow != sweep_line.current_row) {
sub_emit (self, &sweep_line, renderer);
sweep_line.current_row += STEP_Y;
sweep_line.current_subrow = sweep_line.current_row;
}
/* clear the rest */
if (sweep_line.current_subrow < ybot) {
bottom = _cairo_fixed_integer_part (sweep_line.current_row);
status = renderer->render_rows (renderer,
bottom, _cairo_fixed_integer_ceil (ybot) - bottom,
NULL, 0);
}
unwind:
sweep_line_fini (&sweep_line);
return status;
}
static cairo_status_t
_cairo_botor_scan_converter_generate (void *converter,
cairo_span_renderer_t *renderer)
{
cairo_botor_scan_converter_t *self = converter;
start_event_t stack_events[CAIRO_STACK_ARRAY_LENGTH (start_event_t)];
start_event_t *events;
event_t *stack_event_ptrs[ARRAY_LENGTH (stack_events) + 1];
event_t **event_ptrs;
struct _cairo_botor_scan_converter_chunk *chunk;
cairo_status_t status;
int num_events;
int i, j;
num_events = self->num_edges;
if (unlikely (0 == num_events)) {
return renderer->render_rows (renderer,
_cairo_fixed_integer_floor (self->extents.p1.y),
_cairo_fixed_integer_ceil (self->extents.p2.y) -
_cairo_fixed_integer_floor (self->extents.p1.y),
NULL, 0);
}
events = stack_events;
event_ptrs = stack_event_ptrs;
if (unlikely (num_events >= ARRAY_LENGTH (stack_events))) {
events = _cairo_malloc_ab_plus_c (num_events,
sizeof (start_event_t) + sizeof (event_t *),
sizeof (event_t *));
if (unlikely (events == NULL))
return _cairo_error (CAIRO_STATUS_NO_MEMORY);
event_ptrs = (event_t **) (events + num_events);
}
j = 0;
for (chunk = &self->chunks; chunk != NULL; chunk = chunk->next) {
edge_t *edge;
edge = chunk->base;
for (i = 0; i < chunk->count; i++) {
event_ptrs[j] = (event_t *) &events[j];
events[j].y = edge->edge.top;
events[j].type = EVENT_TYPE_START;
events[j].edge = edge;
edge++, j++;
}
}
status = botor_generate (self, event_ptrs, renderer);
if (events != stack_events)
free (events);
return status;
}
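/* Return the next free edge slot from the tail chunk, doubling the chunk
 * size and appending a new chunk whenever the tail is full; returns NULL
 * on allocation failure. */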
static edge_t *
botor_allocate_edge (cairo_botor_scan_converter_t *self)
{
struct _cairo_botor_scan_converter_chunk *chunk;
chunk = self->tail;
if (chunk->count == chunk->size) {
int size;
size = chunk->size * 2;
chunk->next = _cairo_malloc_ab_plus_c (size,
sizeof (edge_t),
sizeof (struct _cairo_botor_scan_converter_chunk));
if (unlikely (chunk->next == NULL))
return NULL;
chunk = chunk->next;
chunk->next = NULL;
chunk->count = 0;
chunk->size = size;
chunk->base = chunk + 1;
self->tail = chunk;
}
return (edge_t *) chunk->base + chunk->count++;
}
static cairo_status_t
botor_add_edge (cairo_botor_scan_converter_t *self,
const cairo_edge_t *edge)
{
edge_t *e;
cairo_fixed_t dx, dy;
e = botor_allocate_edge (self);
if (unlikely (e == NULL))
return _cairo_error (CAIRO_STATUS_NO_MEMORY);
cairo_list_init (&e->link);
e->edge = *edge;
dx = edge->line.p2.x - edge->line.p1.x;
dy = edge->line.p2.y - edge->line.p1.y;
e->dy = dy;
if (dx == 0) {
e->vertical = TRUE;
e->x.quo = edge->line.p1.x;
e->x.rem = 0;
e->dxdy.quo = 0;
e->dxdy.rem = 0;
e->dxdy_full.quo = 0;
e->dxdy_full.rem = 0;
} else {
e->vertical = FALSE;
e->dxdy = floored_divrem (dx, dy);
if (edge->top == edge->line.p1.y) {
e->x.quo = edge->line.p1.x;
e->x.rem = 0;
} else {
e->x = floored_muldivrem (edge->top - edge->line.p1.y,
dx, dy);
e->x.quo += edge->line.p1.x;
}
if (_cairo_fixed_integer_part (edge->bottom) - _cairo_fixed_integer_part (edge->top) > 1) {
e->dxdy_full = floored_muldivrem (STEP_Y, dx, dy);
} else {
e->dxdy_full.quo = 0;
e->dxdy_full.rem = 0;
}
}
e->x.rem = -e->dy;
e->current_sign = 0;
e->runs = NULL;
e->flags = START;
self->num_edges++;
return CAIRO_STATUS_SUCCESS;
}
#if 0
static cairo_status_t
_cairo_botor_scan_converter_add_edge (void *converter,
const cairo_point_t *p1,
const cairo_point_t *p2,
int top, int bottom,
int dir)
{
cairo_botor_scan_converter_t *self = converter;
cairo_edge_t edge;
edge.line.p1 = *p1;
edge.line.p2 = *p2;
edge.top = top;
edge.bottom = bottom;
edge.dir = dir;
return botor_add_edge (self, &edge);
}
#endif
cairo_status_t
_cairo_botor_scan_converter_add_polygon (cairo_botor_scan_converter_t *converter,
const cairo_polygon_t *polygon)
{
cairo_botor_scan_converter_t *self = converter;
cairo_status_t status;
int i;
for (i = 0; i < polygon->num_edges; i++) {
status = botor_add_edge (self, &polygon->edges[i]);
if (unlikely (status))
return status;
}
return CAIRO_STATUS_SUCCESS;
}
static void
_cairo_botor_scan_converter_destroy (void *converter)
{
cairo_botor_scan_converter_t *self = converter;
struct _cairo_botor_scan_converter_chunk *chunk, *next;
for (chunk = self->chunks.next; chunk != NULL; chunk = next) {
next = chunk->next;
free (chunk);
}
}
void
_cairo_botor_scan_converter_init (cairo_botor_scan_converter_t *self,
const cairo_box_t *extents,
cairo_fill_rule_t fill_rule)
{
self->base.destroy = _cairo_botor_scan_converter_destroy;
self->base.generate = _cairo_botor_scan_converter_generate;
self->extents = *extents;
self->fill_rule = fill_rule;
self->xmin = _cairo_fixed_integer_floor (extents->p1.x);
self->xmax = _cairo_fixed_integer_ceil (extents->p2.x);
self->chunks.base = self->buf;
self->chunks.next = NULL;
self->chunks.count = 0;
self->chunks.size = sizeof (self->buf) / sizeof (edge_t);
self->tail = &self->chunks;
self->num_edges = 0;
}
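/* Illustrative sketch only (not compiled): how a caller might drive this
 * scan converter. `my_extents', `my_polygon' and `my_renderer' are
 * hypothetical names. */
#if 0
static cairo_status_t
example_botor_usage (const cairo_box_t *my_extents,
		     const cairo_polygon_t *my_polygon,
		     cairo_span_renderer_t *my_renderer)
{
    cairo_botor_scan_converter_t converter;
    cairo_status_t status;

    _cairo_botor_scan_converter_init (&converter, my_extents,
				      CAIRO_FILL_RULE_WINDING);
    status = _cairo_botor_scan_converter_add_polygon (&converter, my_polygon);
    if (status == CAIRO_STATUS_SUCCESS)
	status = converter.base.generate (&converter, my_renderer);
    converter.base.destroy (&converter);
    return status;
}
#endif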
| 0 |
D://workCode//uploadProject\awtk\3rd\cairo | D://workCode//uploadProject\awtk\3rd\cairo\cairo\cairo-box-inline.h | /* -*- Mode: c; tab-width: 8; c-basic-offset: 4; indent-tabs-mode: t; -*- */
/* cairo - a vector graphics library with display and print output
*
* Copyright © 2010 Andrea Canciani
*
* This library is free software; you can redistribute it and/or
* modify it either under the terms of the GNU Lesser General Public
* License version 2.1 as published by the Free Software Foundation
* (the "LGPL") or, at your option, under the terms of the Mozilla
* Public License Version 1.1 (the "MPL"). If you do not alter this
* notice, a recipient may use your version of this file under either
* the MPL or the LGPL.
*
* You should have received a copy of the LGPL along with this library
* in the file COPYING-LGPL-2.1; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA
* You should have received a copy of the MPL along with this library
* in the file COPYING-MPL-1.1
*
* The contents of this file are subject to the Mozilla Public License
* Version 1.1 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
* OF ANY KIND, either express or implied. See the LGPL or the MPL for
* the specific language governing rights and limitations.
*
* The Original Code is the cairo graphics library.
*
* Contributor(s):
* Andrea Canciani <ranma42@gmail.com>
*/
#ifndef CAIRO_BOX_H
#define CAIRO_BOX_H
#include "cairo-types-private.h"
#include "cairo-compiler-private.h"
#include "cairo-fixed-private.h"
static inline void
_cairo_box_set (cairo_box_t *box,
const cairo_point_t *p1,
const cairo_point_t *p2)
{
box->p1 = *p1;
box->p2 = *p2;
}
static inline void
_cairo_box_from_integers (cairo_box_t *box, int x, int y, int w, int h)
{
box->p1.x = _cairo_fixed_from_int (x);
box->p1.y = _cairo_fixed_from_int (y);
box->p2.x = _cairo_fixed_from_int (x + w);
box->p2.y = _cairo_fixed_from_int (y + h);
}
static inline void
_cairo_box_from_rectangle_int (cairo_box_t *box,
const cairo_rectangle_int_t *rect)
{
box->p1.x = _cairo_fixed_from_int (rect->x);
box->p1.y = _cairo_fixed_from_int (rect->y);
box->p2.x = _cairo_fixed_from_int (rect->x + rect->width);
box->p2.y = _cairo_fixed_from_int (rect->y + rect->height);
}
/* assumes box->p1 is top-left, p2 bottom-right */
static inline void
_cairo_box_add_point (cairo_box_t *box,
const cairo_point_t *point)
{
if (point->x < box->p1.x)
box->p1.x = point->x;
else if (point->x > box->p2.x)
box->p2.x = point->x;
if (point->y < box->p1.y)
box->p1.y = point->y;
else if (point->y > box->p2.y)
box->p2.y = point->y;
}
static inline void
_cairo_box_add_box (cairo_box_t *box,
const cairo_box_t *add)
{
if (add->p1.x < box->p1.x)
box->p1.x = add->p1.x;
if (add->p2.x > box->p2.x)
box->p2.x = add->p2.x;
if (add->p1.y < box->p1.y)
box->p1.y = add->p1.y;
if (add->p2.y > box->p2.y)
box->p2.y = add->p2.y;
}
/* assumes box->p1 is top-left, p2 bottom-right */
static inline cairo_bool_t
_cairo_box_contains_point (const cairo_box_t *box,
const cairo_point_t *point)
{
return box->p1.x <= point->x && point->x <= box->p2.x &&
box->p1.y <= point->y && point->y <= box->p2.y;
}
static inline cairo_bool_t
_cairo_box_is_pixel_aligned (const cairo_box_t *box)
{
#if CAIRO_FIXED_FRAC_BITS <= 8 && 0
return ((box->p1.x & CAIRO_FIXED_FRAC_MASK) << 24 |
(box->p1.y & CAIRO_FIXED_FRAC_MASK) << 16 |
(box->p2.x & CAIRO_FIXED_FRAC_MASK) << 8 |
(box->p2.y & CAIRO_FIXED_FRAC_MASK) << 0) == 0;
#else /* GCC on i7 prefers this variant (bizarrely according to the profiler) */
cairo_fixed_t f;
f = 0;
f |= box->p1.x & CAIRO_FIXED_FRAC_MASK;
f |= box->p1.y & CAIRO_FIXED_FRAC_MASK;
f |= box->p2.x & CAIRO_FIXED_FRAC_MASK;
f |= box->p2.y & CAIRO_FIXED_FRAC_MASK;
return f == 0;
#endif
}
#endif /* CAIRO_BOX_H */
| 0 |
D://workCode//uploadProject\awtk\3rd\cairo | D://workCode//uploadProject\awtk\3rd\cairo\cairo\cairo-boxes-intersect.c | /*
* Copyright © 2004 Carl Worth
* Copyright © 2006 Red Hat, Inc.
* Copyright © 2009 Chris Wilson
* Copyright © 2011 Intel Corporation
*
* This library is free software; you can redistribute it and/or
* modify it either under the terms of the GNU Lesser General Public
* License version 2.1 as published by the Free Software Foundation
* (the "LGPL") or, at your option, under the terms of the Mozilla
* Public License Version 1.1 (the "MPL"). If you do not alter this
* notice, a recipient may use your version of this file under either
* the MPL or the LGPL.
*
* You should have received a copy of the LGPL along with this library
* in the file COPYING-LGPL-2.1; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA
* You should have received a copy of the MPL along with this library
* in the file COPYING-MPL-1.1
*
* The contents of this file are subject to the Mozilla Public License
* Version 1.1 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
* OF ANY KIND, either express or implied. See the LGPL or the MPL for
* the specific language governing rights and limitations.
*
* The Original Code is the cairo graphics library.
*
* The Initial Developer of the Original Code is Carl Worth
*
* Contributor(s):
* Carl D. Worth <cworth@cworth.org>
* Chris Wilson <chris@chris-wilson.co.uk>
*/
/* Provide definitions for standalone compilation */
#include "cairoint.h"
#include "cairo-boxes-private.h"
#include "cairo-error-private.h"
#include "cairo-combsort-inline.h"
#include "cairo-list-private.h"
#include <setjmp.h>
typedef struct _rectangle rectangle_t;
typedef struct _edge edge_t;
struct _edge {
edge_t *next, *prev;
edge_t *right;
cairo_fixed_t x, top;
int a_or_b;
int dir;
};
struct _rectangle {
edge_t left, right;
int32_t top, bottom;
};
#define UNROLL3(x) x x x
/* the parent is always given by index/2 */
#define PQ_PARENT_INDEX(i) ((i) >> 1)
#define PQ_FIRST_ENTRY 1
/* left and right children are index * 2 and (index * 2) +1 respectively */
#define PQ_LEFT_CHILD_INDEX(i) ((i) << 1)
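/* The rectangle queue below is a classic implicit binary heap kept in a
 * 1-based array (slot 0 is never used): e.g. for i == 5 the parent is at
 * index 2 and the children at indices 10 and 11. UNROLL3() merely
 * repeats its body three times to cut loop overhead in the hot
 * insertion scans. */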
typedef struct _pqueue {
int size, max_size;
rectangle_t **elements;
rectangle_t *elements_embedded[1024];
} pqueue_t;
typedef struct _sweep_line {
rectangle_t **rectangles;
pqueue_t pq;
edge_t head, tail;
edge_t *insert_left, *insert_right;
int32_t current_y;
int32_t last_y;
jmp_buf unwind;
} sweep_line_t;
#define DEBUG_TRAPS 0
#if DEBUG_TRAPS
static void
dump_traps (cairo_traps_t *traps, const char *filename)
{
FILE *file;
int n;
if (getenv ("CAIRO_DEBUG_TRAPS") == NULL)
return;
file = fopen (filename, "a");
if (file != NULL) {
for (n = 0; n < traps->num_traps; n++) {
fprintf (file, "%d %d L:(%d, %d), (%d, %d) R:(%d, %d), (%d, %d)\n",
traps->traps[n].top,
traps->traps[n].bottom,
traps->traps[n].left.p1.x,
traps->traps[n].left.p1.y,
traps->traps[n].left.p2.x,
traps->traps[n].left.p2.y,
traps->traps[n].right.p1.x,
traps->traps[n].right.p1.y,
traps->traps[n].right.p2.x,
traps->traps[n].right.p2.y);
}
fprintf (file, "\n");
fclose (file);
}
}
#else
#define dump_traps(traps, filename)
#endif
static inline int
rectangle_compare_start (const rectangle_t *a,
const rectangle_t *b)
{
return a->top - b->top;
}
static inline int
rectangle_compare_stop (const rectangle_t *a,
const rectangle_t *b)
{
return a->bottom - b->bottom;
}
static inline void
pqueue_init (pqueue_t *pq)
{
pq->max_size = ARRAY_LENGTH (pq->elements_embedded);
pq->size = 0;
pq->elements = pq->elements_embedded;
pq->elements[PQ_FIRST_ENTRY] = NULL;
}
static inline void
pqueue_fini (pqueue_t *pq)
{
if (pq->elements != pq->elements_embedded)
free (pq->elements);
}
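/* Double the queue capacity; the first growth copies the embedded array
 * out to a heap allocation. Returns FALSE if the allocation fails, in
 * which case the caller unwinds via longjmp. */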
static cairo_bool_t
pqueue_grow (pqueue_t *pq)
{
rectangle_t **new_elements;
pq->max_size *= 2;
if (pq->elements == pq->elements_embedded) {
new_elements = _cairo_malloc_ab (pq->max_size,
sizeof (rectangle_t *));
if (unlikely (new_elements == NULL))
return FALSE;
memcpy (new_elements, pq->elements_embedded,
sizeof (pq->elements_embedded));
} else {
new_elements = _cairo_realloc_ab (pq->elements,
pq->max_size,
sizeof (rectangle_t *));
if (unlikely (new_elements == NULL))
return FALSE;
}
pq->elements = new_elements;
return TRUE;
}
static inline void
pqueue_push (sweep_line_t *sweep, rectangle_t *rectangle)
{
rectangle_t **elements;
int i, parent;
if (unlikely (sweep->pq.size + 1 == sweep->pq.max_size)) {
if (unlikely (! pqueue_grow (&sweep->pq))) {
longjmp (sweep->unwind,
_cairo_error (CAIRO_STATUS_NO_MEMORY));
}
}
elements = sweep->pq.elements;
for (i = ++sweep->pq.size;
i != PQ_FIRST_ENTRY &&
rectangle_compare_stop (rectangle,
elements[parent = PQ_PARENT_INDEX (i)]) < 0;
i = parent)
{
elements[i] = elements[parent];
}
elements[i] = rectangle;
}
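/* Remove the root (the rectangle with the smallest bottom) and restore
 * the heap invariant by sifting the former last element down. */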
static inline void
pqueue_pop (pqueue_t *pq)
{
rectangle_t **elements = pq->elements;
rectangle_t *tail;
int child, i;
tail = elements[pq->size--];
if (pq->size == 0) {
elements[PQ_FIRST_ENTRY] = NULL;
return;
}
for (i = PQ_FIRST_ENTRY;
(child = PQ_LEFT_CHILD_INDEX (i)) <= pq->size;
i = child)
{
if (child != pq->size &&
rectangle_compare_stop (elements[child+1],
elements[child]) < 0)
{
child++;
}
if (rectangle_compare_stop (elements[child], tail) >= 0)
break;
elements[i] = elements[child];
}
elements[i] = tail;
}
static inline rectangle_t *
rectangle_pop_start (sweep_line_t *sweep_line)
{
return *sweep_line->rectangles++;
}
static inline rectangle_t *
rectangle_peek_stop (sweep_line_t *sweep_line)
{
return sweep_line->pq.elements[PQ_FIRST_ENTRY];
}
CAIRO_COMBSORT_DECLARE (_rectangle_sort,
rectangle_t *,
rectangle_compare_start)
static void
sweep_line_init (sweep_line_t *sweep_line,
rectangle_t **rectangles,
int num_rectangles)
{
_rectangle_sort (rectangles, num_rectangles);
rectangles[num_rectangles] = NULL;
sweep_line->rectangles = rectangles;
sweep_line->head.x = INT32_MIN;
sweep_line->head.right = NULL;
sweep_line->head.dir = 0;
sweep_line->head.next = &sweep_line->tail;
sweep_line->tail.x = INT32_MAX;
sweep_line->tail.right = NULL;
sweep_line->tail.dir = 0;
sweep_line->tail.prev = &sweep_line->head;
sweep_line->insert_left = &sweep_line->tail;
sweep_line->insert_right = &sweep_line->tail;
sweep_line->current_y = INT32_MIN;
sweep_line->last_y = INT32_MIN;
pqueue_init (&sweep_line->pq);
}
static void
sweep_line_fini (sweep_line_t *sweep_line)
{
pqueue_fini (&sweep_line->pq);
}
static void
end_box (sweep_line_t *sweep_line, edge_t *left, int32_t bot, cairo_boxes_t *out)
{
if (likely (left->top < bot)) {
cairo_status_t status;
cairo_box_t box;
box.p1.x = left->x;
box.p1.y = left->top;
box.p2.x = left->right->x;
box.p2.y = bot;
status = _cairo_boxes_add (out, CAIRO_ANTIALIAS_DEFAULT, &box);
if (unlikely (status))
longjmp (sweep_line->unwind, status);
}
left->right = NULL;
}
/* Start a new box at the given top y coordinate, delimited by the edges
 * `left' and `right'. If `left' already has an open box, then either emit
 * it to `out' if its right edge differs from `right', or do nothing if
 * the new box would merely continue the existing one. */
static inline void
start_or_continue_box (sweep_line_t *sweep_line,
edge_t *left,
edge_t *right,
int top,
cairo_boxes_t *out)
{
if (left->right == right)
return;
if (left->right != NULL) {
if (right != NULL && left->right->x == right->x) {
/* continuation on right, so just swap edges */
left->right = right;
return;
}
end_box (sweep_line, left, top, out);
}
if (right != NULL && left->x != right->x) {
left->top = top;
left->right = right;
}
}
static inline int is_zero(const int *winding)
{
return winding[0] == 0 || winding[1] == 0;
}
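/* Emit the spans of the current row: walk the x-sorted active edges
 * keeping one winding count per source polygon (selected by a_or_b); a
 * span is inside the intersection only while both counts are non-zero,
 * and each such span is started or continued as a box. */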
static inline void
active_edges (sweep_line_t *sweep, cairo_boxes_t *out)
{
int top = sweep->current_y;
int winding[2] = { 0 };
edge_t *pos;
if (sweep->last_y == sweep->current_y)
return;
pos = sweep->head.next;
if (pos == &sweep->tail)
return;
do {
edge_t *left, *right;
left = pos;
do {
winding[left->a_or_b] += left->dir;
if (!is_zero (winding))
break;
if (left->next == &sweep->tail)
goto out;
if (unlikely (left->right != NULL))
end_box (sweep, left, top, out);
left = left->next;
} while (1);
right = left->next;
do {
if (unlikely (right->right != NULL))
end_box (sweep, right, top, out);
winding[right->a_or_b] += right->dir;
if (is_zero (winding)) {
/* skip co-linear edges */
if (likely (right->x != right->next->x))
break;
}
right = right->next;
} while (TRUE);
start_or_continue_box (sweep, left, right, top, out);
pos = right->next;
} while (pos != &sweep->tail);
out:
sweep->last_y = sweep->current_y;
}
static inline void
sweep_line_delete_edge (sweep_line_t *sweep_line, edge_t *edge, cairo_boxes_t *out)
{
if (edge->right != NULL) {
edge_t *next = edge->next;
if (next->x == edge->x) {
next->top = edge->top;
next->right = edge->right;
} else {
end_box (sweep_line, edge, sweep_line->current_y, out);
}
}
if (sweep_line->insert_left == edge)
sweep_line->insert_left = edge->next;
if (sweep_line->insert_right == edge)
sweep_line->insert_right = edge->next;
edge->prev->next = edge->next;
edge->next->prev = edge->prev;
}
static inline void
sweep_line_delete (sweep_line_t *sweep,
rectangle_t *rectangle,
cairo_boxes_t *out)
{
sweep_line_delete_edge (sweep, &rectangle->left, out);
sweep_line_delete_edge (sweep, &rectangle->right, out);
pqueue_pop (&sweep->pq);
}
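/* Splice `edge' into the x-sorted active list, scanning from the hint
 * `pos' in whichever direction is needed (the scan is unrolled with
 * UNROLL3) and inserting just before the first position that keeps the
 * list ordered. */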
static inline void
insert_edge (edge_t *edge, edge_t *pos)
{
if (pos->x != edge->x) {
if (pos->x > edge->x) {
do {
UNROLL3({
if (pos->prev->x <= edge->x)
break;
pos = pos->prev;
})
} while (TRUE);
} else {
do {
UNROLL3({
pos = pos->next;
if (pos->x >= edge->x)
break;
})
} while (TRUE);
}
}
pos->prev->next = edge;
edge->prev = pos->prev;
edge->next = pos;
pos->prev = edge;
}
static inline void
sweep_line_insert (sweep_line_t *sweep, rectangle_t *rectangle)
{
edge_t *pos;
/* right edge */
pos = sweep->insert_right;
insert_edge (&rectangle->right, pos);
sweep->insert_right = &rectangle->right;
/* left edge */
pos = sweep->insert_left;
if (pos->x > sweep->insert_right->x)
pos = sweep->insert_right->prev;
insert_edge (&rectangle->left, pos);
sweep->insert_left = &rectangle->left;
pqueue_push (sweep, rectangle);
}
static cairo_status_t
intersect (rectangle_t **rectangles, int num_rectangles, cairo_boxes_t *out)
{
sweep_line_t sweep_line;
rectangle_t *rectangle;
cairo_status_t status;
sweep_line_init (&sweep_line, rectangles, num_rectangles);
if ((status = setjmp (sweep_line.unwind)))
goto unwind;
rectangle = rectangle_pop_start (&sweep_line);
do {
if (rectangle->top != sweep_line.current_y) {
rectangle_t *stop;
stop = rectangle_peek_stop (&sweep_line);
while (stop != NULL && stop->bottom < rectangle->top) {
if (stop->bottom != sweep_line.current_y) {
active_edges (&sweep_line, out);
sweep_line.current_y = stop->bottom;
}
sweep_line_delete (&sweep_line, stop, out);
stop = rectangle_peek_stop (&sweep_line);
}
active_edges (&sweep_line, out);
sweep_line.current_y = rectangle->top;
}
sweep_line_insert (&sweep_line, rectangle);
} while ((rectangle = rectangle_pop_start (&sweep_line)) != NULL);
while ((rectangle = rectangle_peek_stop (&sweep_line)) != NULL) {
if (rectangle->bottom != sweep_line.current_y) {
active_edges (&sweep_line, out);
sweep_line.current_y = rectangle->bottom;
}
sweep_line_delete (&sweep_line, rectangle, out);
}
unwind:
sweep_line_fini (&sweep_line);
return status;
}
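/* Fast path for intersecting a box set with a single box: either clamp
 * every box in place and compact the chunks (when out == boxes), or
 * clear `out', install the box as its only limit and re-add each box so
 * that the limit performs the clipping. */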
static cairo_status_t
_cairo_boxes_intersect_with_box (const cairo_boxes_t *boxes,
const cairo_box_t *box,
cairo_boxes_t *out)
{
cairo_status_t status;
int i, j;
if (out == boxes) { /* inplace update */
struct _cairo_boxes_chunk *chunk;
out->num_boxes = 0;
for (chunk = &out->chunks; chunk != NULL; chunk = chunk->next) {
for (i = j = 0; i < chunk->count; i++) {
cairo_box_t *b = &chunk->base[i];
b->p1.x = MAX (b->p1.x, box->p1.x);
b->p1.y = MAX (b->p1.y, box->p1.y);
b->p2.x = MIN (b->p2.x, box->p2.x);
b->p2.y = MIN (b->p2.y, box->p2.y);
if (b->p1.x < b->p2.x && b->p1.y < b->p2.y) {
if (i != j)
chunk->base[j] = *b;
j++;
}
}
/* XXX unlink empty chains? */
chunk->count = j;
out->num_boxes += j;
}
} else {
const struct _cairo_boxes_chunk *chunk;
_cairo_boxes_clear (out);
_cairo_boxes_limit (out, box, 1);
for (chunk = &boxes->chunks; chunk != NULL; chunk = chunk->next) {
for (i = 0; i < chunk->count; i++) {
status = _cairo_boxes_add (out,
CAIRO_ANTIALIAS_DEFAULT,
&chunk->base[i]);
if (unlikely (status))
return status;
}
}
}
return CAIRO_STATUS_SUCCESS;
}
cairo_status_t
_cairo_boxes_intersect (const cairo_boxes_t *a,
const cairo_boxes_t *b,
cairo_boxes_t *out)
{
rectangle_t stack_rectangles[CAIRO_STACK_ARRAY_LENGTH (rectangle_t)];
rectangle_t *rectangles;
rectangle_t *stack_rectangles_ptrs[ARRAY_LENGTH (stack_rectangles) + 1];
rectangle_t **rectangles_ptrs;
const struct _cairo_boxes_chunk *chunk;
cairo_status_t status;
int i, j, count;
if (unlikely (a->num_boxes == 0 || b->num_boxes == 0)) {
_cairo_boxes_clear (out);
return CAIRO_STATUS_SUCCESS;
}
if (a->num_boxes == 1) {
cairo_box_t box = a->chunks.base[0];
return _cairo_boxes_intersect_with_box (b, &box, out);
}
if (b->num_boxes == 1) {
cairo_box_t box = b->chunks.base[0];
return _cairo_boxes_intersect_with_box (a, &box, out);
}
rectangles = stack_rectangles;
rectangles_ptrs = stack_rectangles_ptrs;
count = a->num_boxes + b->num_boxes;
if (count > ARRAY_LENGTH (stack_rectangles)) {
rectangles = _cairo_malloc_ab_plus_c (count,
sizeof (rectangle_t) +
sizeof (rectangle_t *),
sizeof (rectangle_t *));
if (unlikely (rectangles == NULL))
return _cairo_error (CAIRO_STATUS_NO_MEMORY);
rectangles_ptrs = (rectangle_t **) (rectangles + count);
}
j = 0;
for (chunk = &a->chunks; chunk != NULL; chunk = chunk->next) {
const cairo_box_t *box = chunk->base;
for (i = 0; i < chunk->count; i++) {
if (box[i].p1.x < box[i].p2.x) {
rectangles[j].left.x = box[i].p1.x;
rectangles[j].left.dir = 1;
rectangles[j].right.x = box[i].p2.x;
rectangles[j].right.dir = -1;
} else {
rectangles[j].right.x = box[i].p1.x;
rectangles[j].right.dir = 1;
rectangles[j].left.x = box[i].p2.x;
rectangles[j].left.dir = -1;
}
rectangles[j].left.a_or_b = 0;
rectangles[j].left.right = NULL;
rectangles[j].right.a_or_b = 0;
rectangles[j].right.right = NULL;
rectangles[j].top = box[i].p1.y;
rectangles[j].bottom = box[i].p2.y;
rectangles_ptrs[j] = &rectangles[j];
j++;
}
}
for (chunk = &b->chunks; chunk != NULL; chunk = chunk->next) {
const cairo_box_t *box = chunk->base;
for (i = 0; i < chunk->count; i++) {
if (box[i].p1.x < box[i].p2.x) {
rectangles[j].left.x = box[i].p1.x;
rectangles[j].left.dir = 1;
rectangles[j].right.x = box[i].p2.x;
rectangles[j].right.dir = -1;
} else {
rectangles[j].right.x = box[i].p1.x;
rectangles[j].right.dir = 1;
rectangles[j].left.x = box[i].p2.x;
rectangles[j].left.dir = -1;
}
rectangles[j].left.a_or_b = 1;
rectangles[j].left.right = NULL;
rectangles[j].right.a_or_b = 1;
rectangles[j].right.right = NULL;
rectangles[j].top = box[i].p1.y;
rectangles[j].bottom = box[i].p2.y;
rectangles_ptrs[j] = &rectangles[j];
j++;
}
}
assert (j == count);
_cairo_boxes_clear (out);
status = intersect (rectangles_ptrs, j, out);
if (rectangles != stack_rectangles)
free (rectangles);
return status;
}
| 0 |
D://workCode//uploadProject\awtk\3rd\cairo | D://workCode//uploadProject\awtk\3rd\cairo\cairo\cairo-boxes-private.h | /* cairo - a vector graphics library with display and print output
*
* Copyright © 2009 Intel Corporation
*
* This library is free software; you can redistribute it and/or
* modify it either under the terms of the GNU Lesser General Public
* License version 2.1 as published by the Free Software Foundation
* (the "LGPL") or, at your option, under the terms of the Mozilla
* Public License Version 1.1 (the "MPL"). If you do not alter this
* notice, a recipient may use your version of this file under either
* the MPL or the LGPL.
*
* You should have received a copy of the LGPL along with this library
* in the file COPYING-LGPL-2.1; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA
* You should have received a copy of the MPL along with this library
* in the file COPYING-MPL-1.1
*
* The contents of this file are subject to the Mozilla Public License
* Version 1.1 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
* OF ANY KIND, either express or implied. See the LGPL or the MPL for
* the specific language governing rights and limitations.
*
* The Original Code is the cairo graphics library.
*
* Contributor(s):
* Chris Wilson <chris@chris-wilson.co.uk>
*/
#ifndef CAIRO_BOXES_H
#define CAIRO_BOXES_H
#include "cairo-types-private.h"
#include "cairo-compiler-private.h"
#include <stdio.h>
#include <stdlib.h>
struct _cairo_boxes_t {
cairo_status_t status;
cairo_box_t limit;
const cairo_box_t *limits;
int num_limits;
int num_boxes;
unsigned int is_pixel_aligned;
struct _cairo_boxes_chunk {
struct _cairo_boxes_chunk *next;
cairo_box_t *base;
int count;
int size;
} chunks, *tail;
cairo_box_t boxes_embedded[32];
};
cairo_private void
_cairo_boxes_init (cairo_boxes_t *boxes);
cairo_private void
_cairo_boxes_init_with_clip (cairo_boxes_t *boxes,
cairo_clip_t *clip);
cairo_private void
_cairo_boxes_init_for_array (cairo_boxes_t *boxes,
cairo_box_t *array,
int num_boxes);
cairo_private void
_cairo_boxes_init_from_rectangle (cairo_boxes_t *boxes,
int x, int y, int w, int h);
cairo_private void
_cairo_boxes_limit (cairo_boxes_t *boxes,
const cairo_box_t *limits,
int num_limits);
cairo_private cairo_status_t
_cairo_boxes_add (cairo_boxes_t *boxes,
cairo_antialias_t antialias,
const cairo_box_t *box);
cairo_private void
_cairo_boxes_extents (const cairo_boxes_t *boxes,
cairo_box_t *box);
cairo_private cairo_box_t *
_cairo_boxes_to_array (const cairo_boxes_t *boxes,
int *num_boxes);
cairo_private cairo_status_t
_cairo_boxes_intersect (const cairo_boxes_t *a,
const cairo_boxes_t *b,
cairo_boxes_t *out);
cairo_private void
_cairo_boxes_clear (cairo_boxes_t *boxes);
cairo_private_no_warn cairo_bool_t
_cairo_boxes_for_each_box (cairo_boxes_t *boxes,
cairo_bool_t (*func) (cairo_box_t *box, void *data),
void *data);
cairo_private cairo_status_t
_cairo_rasterise_polygon_to_boxes (cairo_polygon_t *polygon,
cairo_fill_rule_t fill_rule,
cairo_boxes_t *boxes);
cairo_private void
_cairo_boxes_fini (cairo_boxes_t *boxes);
cairo_private void
_cairo_debug_print_boxes (FILE *stream,
const cairo_boxes_t *boxes);
#endif /* CAIRO_BOXES_H */
| 0 |
D://workCode//uploadProject\awtk\3rd\cairo | D://workCode//uploadProject\awtk\3rd\cairo\cairo\cairo-boxes.c | /* cairo - a vector graphics library with display and print output
*
* Copyright © 2009 Intel Corporation
*
* This library is free software; you can redistribute it and/or
* modify it either under the terms of the GNU Lesser General Public
* License version 2.1 as published by the Free Software Foundation
* (the "LGPL") or, at your option, under the terms of the Mozilla
* Public License Version 1.1 (the "MPL"). If you do not alter this
* notice, a recipient may use your version of this file under either
* the MPL or the LGPL.
*
* You should have received a copy of the LGPL along with this library
* in the file COPYING-LGPL-2.1; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA
* You should have received a copy of the MPL along with this library
* in the file COPYING-MPL-1.1
*
* The contents of this file are subject to the Mozilla Public License
* Version 1.1 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
* OF ANY KIND, either express or implied. See the LGPL or the MPL for
* the specific language governing rights and limitations.
*
* The Original Code is the cairo graphics library.
*
* Contributor(s):
* Chris Wilson <chris@chris-wilson.co.uk>
*/
#include "cairoint.h"
#include "cairo-box-inline.h"
#include "cairo-boxes-private.h"
#include "cairo-error-private.h"
void
_cairo_boxes_init (cairo_boxes_t *boxes)
{
boxes->status = CAIRO_STATUS_SUCCESS;
boxes->num_limits = 0;
boxes->num_boxes = 0;
boxes->tail = &boxes->chunks;
boxes->chunks.next = NULL;
boxes->chunks.base = boxes->boxes_embedded;
boxes->chunks.size = ARRAY_LENGTH (boxes->boxes_embedded);
boxes->chunks.count = 0;
boxes->is_pixel_aligned = TRUE;
}
void
_cairo_boxes_init_from_rectangle (cairo_boxes_t *boxes,
int x, int y, int w, int h)
{
_cairo_boxes_init (boxes);
_cairo_box_from_integers (&boxes->chunks.base[0], x, y, w, h);
boxes->num_boxes = 1;
}
void
_cairo_boxes_init_with_clip (cairo_boxes_t *boxes,
cairo_clip_t *clip)
{
_cairo_boxes_init (boxes);
if (clip)
_cairo_boxes_limit (boxes, clip->boxes, clip->num_boxes);
}
void
_cairo_boxes_init_for_array (cairo_boxes_t *boxes,
cairo_box_t *array,
int num_boxes)
{
int n;
boxes->status = CAIRO_STATUS_SUCCESS;
boxes->num_limits = 0;
boxes->num_boxes = num_boxes;
boxes->tail = &boxes->chunks;
boxes->chunks.next = NULL;
boxes->chunks.base = array;
boxes->chunks.size = num_boxes;
boxes->chunks.count = num_boxes;
for (n = 0; n < num_boxes; n++) {
if (! _cairo_fixed_is_integer (array[n].p1.x) ||
! _cairo_fixed_is_integer (array[n].p1.y) ||
! _cairo_fixed_is_integer (array[n].p2.x) ||
! _cairo_fixed_is_integer (array[n].p2.y))
{
break;
}
}
boxes->is_pixel_aligned = n == num_boxes;
}
/** _cairo_boxes_limit:
*
 * Computes the minimum bounding box of the given list of boxes and assigns
* it to the given boxes set. It also assigns that list as the list of
* limiting boxes in the box set.
*
* @param boxes the box set to be filled (return buffer)
* @param limits array of the limiting boxes to compute the bounding
* box from
* @param num_limits length of the limits array
*/
void
_cairo_boxes_limit (cairo_boxes_t *boxes,
const cairo_box_t *limits,
int num_limits)
{
int n;
boxes->limits = limits;
boxes->num_limits = num_limits;
if (boxes->num_limits) {
boxes->limit = limits[0];
for (n = 1; n < num_limits; n++) {
if (limits[n].p1.x < boxes->limit.p1.x)
boxes->limit.p1.x = limits[n].p1.x;
if (limits[n].p1.y < boxes->limit.p1.y)
boxes->limit.p1.y = limits[n].p1.y;
if (limits[n].p2.x > boxes->limit.p2.x)
boxes->limit.p2.x = limits[n].p2.x;
if (limits[n].p2.y > boxes->limit.p2.y)
boxes->limit.p2.y = limits[n].p2.y;
}
}
}
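/* Illustrative sketch only (not compiled): one way a caller might combine
 * _cairo_boxes_limit() with _cairo_boxes_add() so that every added box is
 * clipped against a limiting box. The function and variable names here are
 * hypothetical. */
#if 0
static void
example_limited_boxes (const cairo_box_t *limit, const cairo_box_t *candidate)
{
    cairo_boxes_t boxes;
    cairo_box_t extents;

    _cairo_boxes_init (&boxes);
    _cairo_boxes_limit (&boxes, limit, 1);

    /* Boxes wholly outside the limit are dropped; the rest are clipped. */
    (void) _cairo_boxes_add (&boxes, CAIRO_ANTIALIAS_DEFAULT, candidate);

    /* Bounding box of whatever survived the clipping. */
    _cairo_boxes_extents (&boxes, &extents);

    _cairo_boxes_fini (&boxes);
}
#endif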
static void
_cairo_boxes_add_internal (cairo_boxes_t *boxes,
const cairo_box_t *box)
{
struct _cairo_boxes_chunk *chunk;
if (unlikely (boxes->status))
return;
chunk = boxes->tail;
if (unlikely (chunk->count == chunk->size)) {
int size;
size = chunk->size * 2;
chunk->next = _cairo_malloc_ab_plus_c (size,
sizeof (cairo_box_t),
sizeof (struct _cairo_boxes_chunk));
if (unlikely (chunk->next == NULL)) {
boxes->status = _cairo_error (CAIRO_STATUS_NO_MEMORY);
return;
}
chunk = chunk->next;
boxes->tail = chunk;
chunk->next = NULL;
chunk->count = 0;
chunk->size = size;
chunk->base = (cairo_box_t *) (chunk + 1);
}
chunk->base[chunk->count++] = *box;
boxes->num_boxes++;
if (boxes->is_pixel_aligned)
boxes->is_pixel_aligned = _cairo_box_is_pixel_aligned (box);
}
cairo_status_t
_cairo_boxes_add (cairo_boxes_t *boxes,
cairo_antialias_t antialias,
const cairo_box_t *box)
{
cairo_box_t b;
if (antialias == CAIRO_ANTIALIAS_NONE) {
b.p1.x = _cairo_fixed_round_down (box->p1.x);
b.p1.y = _cairo_fixed_round_down (box->p1.y);
b.p2.x = _cairo_fixed_round_down (box->p2.x);
b.p2.y = _cairo_fixed_round_down (box->p2.y);
box = &b;
}
if (box->p1.y == box->p2.y)
return CAIRO_STATUS_SUCCESS;
if (box->p1.x == box->p2.x)
return CAIRO_STATUS_SUCCESS;
if (boxes->num_limits) {
cairo_point_t p1, p2;
cairo_bool_t reversed = FALSE;
int n;
/* support counter-clockwise winding for rectangular tessellation */
if (box->p1.x < box->p2.x) {
p1.x = box->p1.x;
p2.x = box->p2.x;
} else {
p2.x = box->p1.x;
p1.x = box->p2.x;
reversed = ! reversed;
}
if (p1.x >= boxes->limit.p2.x || p2.x <= boxes->limit.p1.x)
return CAIRO_STATUS_SUCCESS;
if (box->p1.y < box->p2.y) {
p1.y = box->p1.y;
p2.y = box->p2.y;
} else {
p2.y = box->p1.y;
p1.y = box->p2.y;
reversed = ! reversed;
}
if (p1.y >= boxes->limit.p2.y || p2.y <= boxes->limit.p1.y)
return CAIRO_STATUS_SUCCESS;
for (n = 0; n < boxes->num_limits; n++) {
const cairo_box_t *limits = &boxes->limits[n];
cairo_box_t _box;
cairo_point_t _p1, _p2;
if (p1.x >= limits->p2.x || p2.x <= limits->p1.x)
continue;
if (p1.y >= limits->p2.y || p2.y <= limits->p1.y)
continue;
/* Otherwise, clip the box to the limits. */
_p1 = p1;
if (_p1.x < limits->p1.x)
_p1.x = limits->p1.x;
if (_p1.y < limits->p1.y)
_p1.y = limits->p1.y;
_p2 = p2;
if (_p2.x > limits->p2.x)
_p2.x = limits->p2.x;
if (_p2.y > limits->p2.y)
_p2.y = limits->p2.y;
if (_p2.y <= _p1.y || _p2.x <= _p1.x)
continue;
_box.p1.y = _p1.y;
_box.p2.y = _p2.y;
if (reversed) {
_box.p1.x = _p2.x;
_box.p2.x = _p1.x;
} else {
_box.p1.x = _p1.x;
_box.p2.x = _p2.x;
}
_cairo_boxes_add_internal (boxes, &_box);
}
} else {
_cairo_boxes_add_internal (boxes, box);
}
return boxes->status;
}
/** _cairo_boxes_extents:
*
* Computes the minimum bounding box of the given box set and stores
* it in the given box.
*
 * @param boxes The box set whose minimum bounding box is computed.
* @param box Return buffer for the computed result.
*/
void
_cairo_boxes_extents (const cairo_boxes_t *boxes,
cairo_box_t *box)
{
const struct _cairo_boxes_chunk *chunk;
cairo_box_t b;
int i;
if (boxes->num_boxes == 0) {
box->p1.x = box->p1.y = box->p2.x = box->p2.y = 0;
return;
}
b = boxes->chunks.base[0];
for (chunk = &boxes->chunks; chunk != NULL; chunk = chunk->next) {
for (i = 0; i < chunk->count; i++) {
if (chunk->base[i].p1.x < b.p1.x)
b.p1.x = chunk->base[i].p1.x;
if (chunk->base[i].p1.y < b.p1.y)
b.p1.y = chunk->base[i].p1.y;
if (chunk->base[i].p2.x > b.p2.x)
b.p2.x = chunk->base[i].p2.x;
if (chunk->base[i].p2.y > b.p2.y)
b.p2.y = chunk->base[i].p2.y;
}
}
*box = b;
}
void
_cairo_boxes_clear (cairo_boxes_t *boxes)
{
struct _cairo_boxes_chunk *chunk, *next;
for (chunk = boxes->chunks.next; chunk != NULL; chunk = next) {
next = chunk->next;
free (chunk);
}
boxes->tail = &boxes->chunks;
boxes->chunks.next = 0;
boxes->chunks.count = 0;
boxes->chunks.base = boxes->boxes_embedded;
boxes->chunks.size = ARRAY_LENGTH (boxes->boxes_embedded);
boxes->num_boxes = 0;
boxes->is_pixel_aligned = TRUE;
}
/** _cairo_boxes_to_array:
*
 * Linearizes a box set of possibly multiple chunks into one big chunk
* and returns an array of boxes
*
* @param boxes The box set to be converted.
* @param num_boxes Return buffer for the number of boxes (array count).
* @return Pointer to the newly allocated array of boxes
 * (the number of elements is given in num_boxes).
*/
cairo_box_t *
_cairo_boxes_to_array (const cairo_boxes_t *boxes,
int *num_boxes)
{
const struct _cairo_boxes_chunk *chunk;
cairo_box_t *box;
int i, j;
*num_boxes = boxes->num_boxes;
box = _cairo_malloc_ab (boxes->num_boxes, sizeof (cairo_box_t));
if (box == NULL) {
_cairo_error_throw (CAIRO_STATUS_NO_MEMORY);
return NULL;
}
j = 0;
for (chunk = &boxes->chunks; chunk != NULL; chunk = chunk->next) {
for (i = 0; i < chunk->count; i++)
box[j++] = chunk->base[i];
}
return box;
}
void
_cairo_boxes_fini (cairo_boxes_t *boxes)
{
struct _cairo_boxes_chunk *chunk, *next;
for (chunk = boxes->chunks.next; chunk != NULL; chunk = next) {
next = chunk->next;
free (chunk);
}
}
cairo_bool_t
_cairo_boxes_for_each_box (cairo_boxes_t *boxes,
cairo_bool_t (*func) (cairo_box_t *box, void *data),
void *data)
{
struct _cairo_boxes_chunk *chunk;
int i;
for (chunk = &boxes->chunks; chunk != NULL; chunk = chunk->next) {
for (i = 0; i < chunk->count; i++)
if (! func (&chunk->base[i], data))
return FALSE;
}
return TRUE;
}
struct cairo_box_renderer {
cairo_span_renderer_t base;
cairo_boxes_t *boxes;
};
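/* Span-renderer callback: for each covered half-open span on the row
 * [y, y + h) add the corresponding box to the caller's cairo_boxes_t. */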
static cairo_status_t
span_to_boxes (void *abstract_renderer, int y, int h,
const cairo_half_open_span_t *spans, unsigned num_spans)
{
struct cairo_box_renderer *r = abstract_renderer;
cairo_status_t status = CAIRO_STATUS_SUCCESS;
cairo_box_t box;
if (num_spans == 0)
return CAIRO_STATUS_SUCCESS;
box.p1.y = _cairo_fixed_from_int (y);
box.p2.y = _cairo_fixed_from_int (y + h);
do {
if (spans[0].coverage) {
box.p1.x = _cairo_fixed_from_int(spans[0].x);
box.p2.x = _cairo_fixed_from_int(spans[1].x);
status = _cairo_boxes_add (r->boxes, CAIRO_ANTIALIAS_DEFAULT, &box);
}
spans++;
} while (--num_spans > 1 && status == CAIRO_STATUS_SUCCESS);
return status;
}
cairo_status_t
_cairo_rasterise_polygon_to_boxes (cairo_polygon_t *polygon,
cairo_fill_rule_t fill_rule,
cairo_boxes_t *boxes)
{
struct cairo_box_renderer renderer;
cairo_scan_converter_t *converter;
cairo_int_status_t status;
cairo_rectangle_int_t r;
TRACE ((stderr, "%s: fill_rule=%d\n", __FUNCTION__, fill_rule));
_cairo_box_round_to_rectangle (&polygon->extents, &r);
converter = _cairo_mono_scan_converter_create (r.x, r.y,
r.x + r.width,
r.y + r.height,
fill_rule);
status = _cairo_mono_scan_converter_add_polygon (converter, polygon);
if (unlikely (status))
goto cleanup_converter;
renderer.boxes = boxes;
renderer.base.render_rows = span_to_boxes;
status = converter->generate (converter, &renderer.base);
cleanup_converter:
converter->destroy (converter);
return status;
}
void
_cairo_debug_print_boxes (FILE *stream, const cairo_boxes_t *boxes)
{
const struct _cairo_boxes_chunk *chunk;
cairo_box_t extents;
int i;
_cairo_boxes_extents (boxes, &extents);
fprintf (stream, "boxes x %d: (%f, %f) x (%f, %f)\n",
boxes->num_boxes,
_cairo_fixed_to_double (extents.p1.x),
_cairo_fixed_to_double (extents.p1.y),
_cairo_fixed_to_double (extents.p2.x),
_cairo_fixed_to_double (extents.p2.y));
for (chunk = &boxes->chunks; chunk != NULL; chunk = chunk->next) {
for (i = 0; i < chunk->count; i++) {
fprintf (stderr, " box[%d]: (%f, %f), (%f, %f)\n", i,
_cairo_fixed_to_double (chunk->base[i].p1.x),
_cairo_fixed_to_double (chunk->base[i].p1.y),
_cairo_fixed_to_double (chunk->base[i].p2.x),
_cairo_fixed_to_double (chunk->base[i].p2.y));
}
}
}
| 0 |
D://workCode//uploadProject\awtk\3rd\cairo | D://workCode//uploadProject\awtk\3rd\cairo\cairo\cairo-cache-private.h | /* cairo - a vector graphics library with display and print output
*
* Copyright © 2004 Red Hat, Inc.
* Copyright © 2005 Red Hat, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it either under the terms of the GNU Lesser General Public
* License version 2.1 as published by the Free Software Foundation
* (the "LGPL") or, at your option, under the terms of the Mozilla
* Public License Version 1.1 (the "MPL"). If you do not alter this
* notice, a recipient may use your version of this file under either
* the MPL or the LGPL.
*
* You should have received a copy of the LGPL along with this library
* in the file COPYING-LGPL-2.1; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA
* You should have received a copy of the MPL along with this library
* in the file COPYING-MPL-1.1
*
* The contents of this file are subject to the Mozilla Public License
* Version 1.1 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
* OF ANY KIND, either express or implied. See the LGPL or the MPL for
* the specific language governing rights and limitations.
*
* The Original Code is the cairo graphics library.
*
* The Initial Developer of the Original Code is Red Hat, Inc.
*
* Contributor(s):
* Keith Packard <keithp@keithp.com>
* Graydon Hoare <graydon@redhat.com>
* Carl Worth <cworth@cworth.org>
*/
#ifndef CAIRO_CACHE_PRIVATE_H
#define CAIRO_CACHE_PRIVATE_H
#include "cairo-compiler-private.h"
#include "cairo-types-private.h"
/**
* _cairo_cache_entry:
*
* A #cairo_cache_entry_t contains both a key and a value for
* #cairo_cache_t. User-derived types for #cairo_cache_entry_t must
* have a #cairo_cache_entry_t as their first field. For example:
*
 * typedef struct _my_entry {
* cairo_cache_entry_t base;
* ... Remainder of key and value fields here ..
* } my_entry_t;
*
* which then allows a pointer to my_entry_t to be passed to any of
* the #cairo_cache_t functions as follows without requiring a cast:
*
* _cairo_cache_insert (cache, &my_entry->base, size);
*
* IMPORTANT: The caller is responsible for initializing
* my_entry->base.hash with a hash code derived from the key. The
* essential property of the hash code is that keys_equal must never
* return %TRUE for two keys that have different hashes. The best hash
* code will reduce the frequency of two keys with the same code for
* which keys_equal returns %FALSE.
*
* The user must also initialize my_entry->base.size to indicate
* the size of the current entry. What units to use for size is
* entirely up to the caller, (though the same units must be used for
* the max_size parameter passed to _cairo_cache_create()). If all
* entries are close to the same size, the simplest thing to do is to
* just use units of "entries", (eg. set size==1 in all entries and
* set max_size to the number of entries which you want to be saved
* in the cache).
*
* Which parts of the entry make up the "key" and which part make up
* the value are entirely up to the caller, (as determined by the
* computation going into base.hash as well as the keys_equal
* function). A few of the #cairo_cache_t functions accept an entry which
* will be used exclusively as a "key", (indicated by a parameter name
* of key). In these cases, the value-related fields of the entry need
* not be initialized if so desired.
**/
typedef struct _cairo_cache_entry {
unsigned long hash;
unsigned long size;
} cairo_cache_entry_t;
typedef cairo_bool_t (*cairo_cache_predicate_func_t) (const void *entry);
struct _cairo_cache {
cairo_hash_table_t *hash_table;
cairo_cache_predicate_func_t predicate;
cairo_destroy_func_t entry_destroy;
unsigned long max_size;
unsigned long size;
int freeze_count;
};
typedef cairo_bool_t
(*cairo_cache_keys_equal_func_t) (const void *key_a, const void *key_b);
typedef void
(*cairo_cache_callback_func_t) (void *entry,
void *closure);
cairo_private cairo_status_t
_cairo_cache_init (cairo_cache_t *cache,
cairo_cache_keys_equal_func_t keys_equal,
cairo_cache_predicate_func_t predicate,
cairo_destroy_func_t entry_destroy,
unsigned long max_size);
cairo_private void
_cairo_cache_fini (cairo_cache_t *cache);
cairo_private void
_cairo_cache_freeze (cairo_cache_t *cache);
cairo_private void
_cairo_cache_thaw (cairo_cache_t *cache);
cairo_private void *
_cairo_cache_lookup (cairo_cache_t *cache,
cairo_cache_entry_t *key);
cairo_private cairo_status_t
_cairo_cache_insert (cairo_cache_t *cache,
cairo_cache_entry_t *entry);
cairo_private void
_cairo_cache_remove (cairo_cache_t *cache,
cairo_cache_entry_t *entry);
cairo_private void
_cairo_cache_foreach (cairo_cache_t *cache,
cairo_cache_callback_func_t cache_callback,
void *closure);
#endif
| 0 |
D://workCode//uploadProject\awtk\3rd\cairo | D://workCode//uploadProject\awtk\3rd\cairo\cairo\cairo-cache.c | /* cairo - a vector graphics library with display and print output
*
* Copyright © 2004 Red Hat, Inc.
* Copyright © 2005 Red Hat, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it either under the terms of the GNU Lesser General Public
* License version 2.1 as published by the Free Software Foundation
* (the "LGPL") or, at your option, under the terms of the Mozilla
* Public License Version 1.1 (the "MPL"). If you do not alter this
* notice, a recipient may use your version of this file under either
* the MPL or the LGPL.
*
* You should have received a copy of the LGPL along with this library
* in the file COPYING-LGPL-2.1; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA
* You should have received a copy of the MPL along with this library
* in the file COPYING-MPL-1.1
*
* The contents of this file are subject to the Mozilla Public License
* Version 1.1 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
* OF ANY KIND, either express or implied. See the LGPL or the MPL for
* the specific language governing rights and limitations.
*
* The Original Code is the cairo graphics library.
*
* The Initial Developer of the Original Code is Red Hat, Inc.
*
* Contributor(s):
* Keith Packard <keithp@keithp.com>
* Graydon Hoare <graydon@redhat.com>
* Carl Worth <cworth@cworth.org>
*/
#include "cairoint.h"
#include "cairo-error-private.h"
static void
_cairo_cache_shrink_to_accommodate (cairo_cache_t *cache,
unsigned long additional);
static cairo_bool_t
_cairo_cache_entry_is_non_zero (const void *entry)
{
return ((const cairo_cache_entry_t *) entry)->size;
}
/**
* _cairo_cache_init:
* @cache: the #cairo_cache_t to initialise
* @keys_equal: a function to return %TRUE if two keys are equal
 * @predicate: a predicate used to select which entries may be ejected
 * automatically, or %NULL to allow ejection of any entry with a non-zero size
 * @entry_destroy: destroy notifier for cache entries
 * @max_size: the maximum size for this cache
 * Returns: %CAIRO_STATUS_SUCCESS on success, or %CAIRO_STATUS_NO_MEMORY
 * if the hash table for the cache could not be allocated
*
* Creates a new cache using the keys_equal() function to determine
* the equality of entries.
*
 * Data is provided to the cache in the form of a user-derived version
 * of #cairo_cache_entry_t. A cache entry must be able to hold a hash
* code, a size, and the key/value pair being stored in the
* cache. Sometimes only the key will be necessary, (as in
* _cairo_cache_lookup()), and in these cases the value portion of the
* entry need not be initialized.
*
* The units for max_size can be chosen by the caller, but should be
* consistent with the units of the size field of cache entries. When
* adding an entry with _cairo_cache_insert() if the total size of
* entries in the cache would exceed max_size then entries will be
* removed at random until the new entry would fit or the cache is
* empty. Then the new entry is inserted.
*
* There are cases in which the automatic removal of entries is
* undesired. If the cache entries have reference counts, then it is a
* simple matter to use the reference counts to ensure that entries
* continue to live even after being ejected from the cache. However,
* in some cases the memory overhead of adding a reference count to
* the entry would be objectionable. In such cases, the
* _cairo_cache_freeze() and _cairo_cache_thaw() calls can be
* used to establish a window during which no automatic removal of
* entries will occur.
**/
cairo_status_t
_cairo_cache_init (cairo_cache_t *cache,
cairo_cache_keys_equal_func_t keys_equal,
cairo_cache_predicate_func_t predicate,
cairo_destroy_func_t entry_destroy,
unsigned long max_size)
{
cache->hash_table = _cairo_hash_table_create (keys_equal);
if (unlikely (cache->hash_table == NULL))
return _cairo_error (CAIRO_STATUS_NO_MEMORY);
if (predicate == NULL)
predicate = _cairo_cache_entry_is_non_zero;
cache->predicate = predicate;
cache->entry_destroy = entry_destroy;
cache->max_size = max_size;
cache->size = 0;
cache->freeze_count = 0;
return CAIRO_STATUS_SUCCESS;
}
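/* Illustrative sketch only (not compiled): the intended calling pattern for
 * this cache, using a hypothetical entry type that embeds cairo_cache_entry_t
 * as its first member, as cairo-cache-private.h requires. */
#if 0
typedef struct _my_entry {
    cairo_cache_entry_t base;	/* base.hash and base.size must be set */
    int key;
} my_entry_t;

static cairo_bool_t
my_keys_equal (const void *a, const void *b)
{
    return ((const my_entry_t *) a)->key == ((const my_entry_t *) b)->key;
}

static void
example_cache_usage (void)
{
    cairo_cache_t cache;
    my_entry_t *entry, lookup;

    if (_cairo_cache_init (&cache, my_keys_equal, NULL, free, 16))
	return;

    entry = malloc (sizeof (my_entry_t));
    if (entry != NULL) {
	entry->base.hash = 42;	/* must be derived from the key */
	entry->base.size = 1;	/* size in caller-chosen units */
	entry->key = 42;
	if (_cairo_cache_insert (&cache, &entry->base))
	    free (entry);
    }

    lookup.base.hash = 42;
    lookup.key = 42;
    entry = _cairo_cache_lookup (&cache, &lookup.base);

    _cairo_cache_fini (&cache);	/* free() is called on the surviving entries */
}
#endif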
static void
_cairo_cache_pluck (void *entry, void *closure)
{
_cairo_cache_remove (closure, entry);
}
/**
* _cairo_cache_fini:
* @cache: a cache to destroy
*
* Immediately destroys the given cache, freeing all resources
* associated with it. As part of this process, the entry_destroy()
* function, (as passed to _cairo_cache_init()), will be called for
* each entry in the cache.
**/
void
_cairo_cache_fini (cairo_cache_t *cache)
{
_cairo_hash_table_foreach (cache->hash_table,
_cairo_cache_pluck,
cache);
assert (cache->size == 0);
_cairo_hash_table_destroy (cache->hash_table);
}
/**
* _cairo_cache_freeze:
* @cache: a cache with some precious entries in it (or about to be
* added)
*
* Disable the automatic ejection of entries from the cache. For as
* long as the cache is "frozen", calls to _cairo_cache_insert() will
* add new entries to the cache regardless of how large the cache
* grows. See _cairo_cache_thaw().
*
* Note: Multiple calls to _cairo_cache_freeze() will stack, in that
* the cache will remain "frozen" until a corresponding number of
* calls are made to _cairo_cache_thaw().
**/
void
_cairo_cache_freeze (cairo_cache_t *cache)
{
assert (cache->freeze_count >= 0);
cache->freeze_count++;
}
/**
* _cairo_cache_thaw:
* @cache: a cache, just after the entries in it have become less
* precious
*
* Cancels the effects of _cairo_cache_freeze().
*
* When a number of calls to _cairo_cache_thaw() is made corresponding
* to the number of calls to _cairo_cache_freeze() the cache will no
* longer be "frozen". If the cache had grown larger than max_size
 * while frozen, entries will immediately be ejected at random from
* the cache until the cache is smaller than max_size. Also, the
* automatic ejection of entries on _cairo_cache_insert() will resume.
**/
void
_cairo_cache_thaw (cairo_cache_t *cache)
{
assert (cache->freeze_count > 0);
if (--cache->freeze_count == 0)
_cairo_cache_shrink_to_accommodate (cache, 0);
}
/**
* _cairo_cache_lookup:
* @cache: a cache
* @key: the key of interest
*
* Performs a lookup in @cache looking for an entry which has a key
* that matches @key, (as determined by the keys_equal() function
* passed to _cairo_cache_init()).
*
 * Return value: a pointer to the entry in the cache that matches @key,
 * or %NULL if there is no matching entry.
**/
void *
_cairo_cache_lookup (cairo_cache_t *cache,
cairo_cache_entry_t *key)
{
return _cairo_hash_table_lookup (cache->hash_table,
(cairo_hash_entry_t *) key);
}
/**
* _cairo_cache_remove_random:
* @cache: a cache
*
* Remove a random entry from the cache.
*
* Return value: %TRUE if an entry was successfully removed.
* %FALSE if there are no entries that can be removed.
**/
static cairo_bool_t
_cairo_cache_remove_random (cairo_cache_t *cache)
{
cairo_cache_entry_t *entry;
entry = _cairo_hash_table_random_entry (cache->hash_table,
cache->predicate);
if (unlikely (entry == NULL))
return FALSE;
_cairo_cache_remove (cache, entry);
return TRUE;
}
/**
* _cairo_cache_shrink_to_accommodate:
* @cache: a cache
* @additional: additional size requested in bytes
*
* If cache is not frozen, eject entries randomly until the size of
* the cache is at least @additional bytes less than
* cache->max_size. That is, make enough room to accommodate a new
* entry of size @additional.
**/
static void
_cairo_cache_shrink_to_accommodate (cairo_cache_t *cache,
unsigned long additional)
{
while (cache->size + additional > cache->max_size) {
if (! _cairo_cache_remove_random (cache))
return;
}
}
/**
* _cairo_cache_insert:
* @cache: a cache
* @entry: an entry to be inserted
*
* Insert @entry into the cache. If an entry exists in the cache with
* a matching key, then the old entry will be removed first, (and the
* entry_destroy() callback will be called on it).
*
* Return value: %CAIRO_STATUS_SUCCESS if successful or
* %CAIRO_STATUS_NO_MEMORY if insufficient memory is available.
**/
cairo_status_t
_cairo_cache_insert (cairo_cache_t *cache,
cairo_cache_entry_t *entry)
{
cairo_status_t status;
if (entry->size && ! cache->freeze_count)
_cairo_cache_shrink_to_accommodate (cache, entry->size);
status = _cairo_hash_table_insert (cache->hash_table,
(cairo_hash_entry_t *) entry);
if (unlikely (status))
return status;
cache->size += entry->size;
return CAIRO_STATUS_SUCCESS;
}
/**
* _cairo_cache_remove:
* @cache: a cache
* @entry: an entry that exists in the cache
*
* Remove an existing entry from the cache.
**/
void
_cairo_cache_remove (cairo_cache_t *cache,
cairo_cache_entry_t *entry)
{
cache->size -= entry->size;
_cairo_hash_table_remove (cache->hash_table,
(cairo_hash_entry_t *) entry);
if (cache->entry_destroy)
cache->entry_destroy (entry);
}
/**
* _cairo_cache_foreach:
* @cache: a cache
* @cache_callback: function to be called for each entry
* @closure: additional argument to be passed to @cache_callback
*
* Call @cache_callback for each entry in the cache, in a
* non-specified order.
**/
void
_cairo_cache_foreach (cairo_cache_t *cache,
cairo_cache_callback_func_t cache_callback,
void *closure)
{
_cairo_hash_table_foreach (cache->hash_table,
cache_callback,
closure);
}
unsigned long
_cairo_hash_string (const char *c)
{
/* This is the djb2 hash. */
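    /* i.e. hash = hash * 33 + c; the shift-add below is the multiply by 33. */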
unsigned long hash = _CAIRO_HASH_INIT_VALUE;
while (c && *c)
hash = ((hash << 5) + hash) + *c++;
return hash;
}
unsigned long
_cairo_hash_bytes (unsigned long hash,
const void *ptr,
unsigned int length)
{
const uint8_t *bytes = ptr;
/* This is the djb2 hash. */
while (length--)
hash = ((hash << 5) + hash) + *bytes++;
return hash;
}
| 0 |
D://workCode//uploadProject\awtk\3rd\cairo | D://workCode//uploadProject\awtk\3rd\cairo\cairo\cairo-clip-boxes.c | /* -*- Mode: c; tab-width: 8; c-basic-offset: 4; indent-tabs-mode: t; -*- */
/* cairo - a vector graphics library with display and print output
*
* Copyright © 2002 University of Southern California
* Copyright © 2005 Red Hat, Inc.
* Copyright © 2009 Chris Wilson
*
* This library is free software; you can redistribute it and/or
* modify it either under the terms of the GNU Lesser General Public
* License version 2.1 as published by the Free Software Foundation
* (the "LGPL") or, at your option, under the terms of the Mozilla
* Public License Version 1.1 (the "MPL"). If you do not alter this
* notice, a recipient may use your version of this file under either
* the MPL or the LGPL.
*
* You should have received a copy of the LGPL along with this library
* in the file COPYING-LGPL-2.1; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA
* You should have received a copy of the MPL along with this library
* in the file COPYING-MPL-1.1
*
* The contents of this file are subject to the Mozilla Public License
* Version 1.1 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
* OF ANY KIND, either express or implied. See the LGPL or the MPL for
* the specific language governing rights and limitations.
*
* The Original Code is the cairo graphics library.
*
* The Initial Developer of the Original Code is University of Southern
* California.
*
* Contributor(s):
* Carl D. Worth <cworth@cworth.org>
* Kristian Høgsberg <krh@redhat.com>
* Chris Wilson <chris@chris-wilson.co.uk>
*/
#include "cairoint.h"
#include "cairo-box-inline.h"
#include "cairo-clip-inline.h"
#include "cairo-clip-private.h"
#include "cairo-error-private.h"
#include "cairo-freed-pool-private.h"
#include "cairo-gstate-private.h"
#include "cairo-path-fixed-private.h"
#include "cairo-pattern-private.h"
#include "cairo-composite-rectangles-private.h"
#include "cairo-region-private.h"
static inline int
pot (int v)
{
v--;
v |= v >> 1;
v |= v >> 2;
v |= v >> 4;
v |= v >> 8;
v |= v >> 16;
v++;
return v;
}
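/*
 * pot() rounds v up to the next power of two by smearing the highest set
 * bit into every lower bit and then adding one.  For example:
 *
 *	pot(1) == 1,  pot(5) == 8,  pot(8) == 8,  pot(1000) == 1024
 *
 * pot(0) stays 0: the initial v-- turns it into all ones and the final
 * v++ wraps it back.
 */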
static cairo_bool_t
_cairo_clip_contains_rectangle_box (const cairo_clip_t *clip,
const cairo_rectangle_int_t *rect,
const cairo_box_t *box)
{
int i;
/* clip == NULL means no clip, so the clip contains everything */
if (clip == NULL)
return TRUE;
if (_cairo_clip_is_all_clipped (clip))
return FALSE;
/* If we have a non-trivial path, just say no */
if (clip->path)
return FALSE;
if (! _cairo_rectangle_contains_rectangle (&clip->extents, rect))
return FALSE;
if (clip->num_boxes == 0)
return TRUE;
/* Check for a clip-box that wholly contains the rectangle */
for (i = 0; i < clip->num_boxes; i++) {
if (box->p1.x >= clip->boxes[i].p1.x &&
box->p1.y >= clip->boxes[i].p1.y &&
box->p2.x <= clip->boxes[i].p2.x &&
box->p2.y <= clip->boxes[i].p2.y)
{
return TRUE;
}
}
return FALSE;
}
cairo_bool_t
_cairo_clip_contains_box (const cairo_clip_t *clip,
const cairo_box_t *box)
{
cairo_rectangle_int_t rect;
_cairo_box_round_to_rectangle (box, &rect);
return _cairo_clip_contains_rectangle_box(clip, &rect, box);
}
cairo_bool_t
_cairo_clip_contains_rectangle (const cairo_clip_t *clip,
const cairo_rectangle_int_t *rect)
{
cairo_box_t box;
_cairo_box_from_rectangle_int (&box, rect);
return _cairo_clip_contains_rectangle_box (clip, rect, &box);
}
cairo_clip_t *
_cairo_clip_intersect_rectilinear_path (cairo_clip_t *clip,
const cairo_path_fixed_t *path,
cairo_fill_rule_t fill_rule,
cairo_antialias_t antialias)
{
cairo_status_t status;
cairo_boxes_t boxes;
_cairo_boxes_init (&boxes);
status = _cairo_path_fixed_fill_rectilinear_to_boxes (path,
fill_rule,
antialias,
&boxes);
if (likely (status == CAIRO_STATUS_SUCCESS && boxes.num_boxes))
clip = _cairo_clip_intersect_boxes (clip, &boxes);
else
clip = _cairo_clip_set_all_clipped (clip);
_cairo_boxes_fini (&boxes);
return clip;
}
static cairo_clip_t *
_cairo_clip_intersect_rectangle_box (cairo_clip_t *clip,
const cairo_rectangle_int_t *r,
const cairo_box_t *box)
{
cairo_box_t extents_box;
cairo_bool_t changed = FALSE;
int i, j;
if (clip == NULL) {
clip = _cairo_clip_create ();
if (clip == NULL)
return _cairo_clip_set_all_clipped (clip);
}
if (clip->num_boxes == 0) {
clip->boxes = &clip->embedded_box;
clip->boxes[0] = *box;
clip->num_boxes = 1;
if (clip->path == NULL) {
clip->extents = *r;
} else {
if (! _cairo_rectangle_intersect (&clip->extents, r))
return _cairo_clip_set_all_clipped (clip);
}
if (clip->path == NULL)
clip->is_region = _cairo_box_is_pixel_aligned (box);
return clip;
}
/* Does the new box wholly subsume the clip? Perform a cheap check
* for the common condition of a single clip rectangle.
*/
if (clip->num_boxes == 1 &&
clip->boxes[0].p1.x >= box->p1.x &&
clip->boxes[0].p1.y >= box->p1.y &&
clip->boxes[0].p2.x <= box->p2.x &&
clip->boxes[0].p2.y <= box->p2.y)
{
return clip;
}
for (i = j = 0; i < clip->num_boxes; i++) {
cairo_box_t *b = &clip->boxes[j];
if (j != i)
*b = clip->boxes[i];
if (box->p1.x > b->p1.x)
b->p1.x = box->p1.x, changed = TRUE;
if (box->p2.x < b->p2.x)
b->p2.x = box->p2.x, changed = TRUE;
if (box->p1.y > b->p1.y)
b->p1.y = box->p1.y, changed = TRUE;
if (box->p2.y < b->p2.y)
b->p2.y = box->p2.y, changed = TRUE;
j += b->p2.x > b->p1.x && b->p2.y > b->p1.y;
}
clip->num_boxes = j;
if (clip->num_boxes == 0)
return _cairo_clip_set_all_clipped (clip);
if (! changed)
return clip;
extents_box = clip->boxes[0];
for (i = 1; i < clip->num_boxes; i++) {
if (clip->boxes[i].p1.x < extents_box.p1.x)
extents_box.p1.x = clip->boxes[i].p1.x;
if (clip->boxes[i].p1.y < extents_box.p1.y)
extents_box.p1.y = clip->boxes[i].p1.y;
if (clip->boxes[i].p2.x > extents_box.p2.x)
extents_box.p2.x = clip->boxes[i].p2.x;
if (clip->boxes[i].p2.y > extents_box.p2.y)
extents_box.p2.y = clip->boxes[i].p2.y;
}
if (clip->path == NULL) {
_cairo_box_round_to_rectangle (&extents_box, &clip->extents);
} else {
cairo_rectangle_int_t extents_rect;
_cairo_box_round_to_rectangle (&extents_box, &extents_rect);
if (! _cairo_rectangle_intersect (&clip->extents, &extents_rect))
return _cairo_clip_set_all_clipped (clip);
}
if (clip->region) {
cairo_region_destroy (clip->region);
clip->region = NULL;
}
clip->is_region = FALSE;
return clip;
}
cairo_clip_t *
_cairo_clip_intersect_box (cairo_clip_t *clip,
const cairo_box_t *box)
{
cairo_rectangle_int_t r;
if (_cairo_clip_is_all_clipped (clip))
return clip;
_cairo_box_round_to_rectangle (box, &r);
if (r.width == 0 || r.height == 0)
return _cairo_clip_set_all_clipped (clip);
return _cairo_clip_intersect_rectangle_box (clip, &r, box);
}
/* Copy a box set to a clip
*
* @param boxes The box set to copy from.
* @param clip The clip to copy to (return buffer).
* @returns Zero if the allocation failed (the clip will be set to
* all-clipped), otherwise non-zero.
*/
static cairo_bool_t
_cairo_boxes_copy_to_clip (const cairo_boxes_t *boxes, cairo_clip_t *clip)
{
/* XXX cow-boxes? */
if (boxes->num_boxes == 1) {
clip->boxes = &clip->embedded_box;
clip->boxes[0] = boxes->chunks.base[0];
clip->num_boxes = 1;
return TRUE;
}
clip->boxes = _cairo_boxes_to_array (boxes, &clip->num_boxes);
if (unlikely (clip->boxes == NULL))
{
_cairo_clip_set_all_clipped (clip);
return FALSE;
}
return TRUE;
}
cairo_clip_t *
_cairo_clip_intersect_boxes (cairo_clip_t *clip,
const cairo_boxes_t *boxes)
{
cairo_boxes_t clip_boxes;
cairo_box_t limits;
cairo_rectangle_int_t extents;
if (_cairo_clip_is_all_clipped (clip))
return clip;
if (boxes->num_boxes == 0)
return _cairo_clip_set_all_clipped (clip);
if (boxes->num_boxes == 1)
return _cairo_clip_intersect_box (clip, boxes->chunks.base);
if (clip == NULL)
clip = _cairo_clip_create ();
if (clip->num_boxes) {
_cairo_boxes_init_for_array (&clip_boxes, clip->boxes, clip->num_boxes);
if (unlikely (_cairo_boxes_intersect (&clip_boxes, boxes, &clip_boxes))) {
clip = _cairo_clip_set_all_clipped (clip);
goto out;
}
if (clip->boxes != &clip->embedded_box)
free (clip->boxes);
clip->boxes = NULL;
boxes = &clip_boxes;
}
if (boxes->num_boxes == 0) {
clip = _cairo_clip_set_all_clipped (clip);
goto out;
}
_cairo_boxes_copy_to_clip (boxes, clip);
_cairo_boxes_extents (boxes, &limits);
_cairo_box_round_to_rectangle (&limits, &extents);
if (clip->path == NULL) {
clip->extents = extents;
} else if (! _cairo_rectangle_intersect (&clip->extents, &extents)) {
clip = _cairo_clip_set_all_clipped (clip);
goto out;
}
if (clip->region) {
cairo_region_destroy (clip->region);
clip->region = NULL;
}
clip->is_region = FALSE;
out:
if (boxes == &clip_boxes)
_cairo_boxes_fini (&clip_boxes);
return clip;
}
cairo_clip_t *
_cairo_clip_intersect_rectangle (cairo_clip_t *clip,
const cairo_rectangle_int_t *r)
{
cairo_box_t box;
if (_cairo_clip_is_all_clipped (clip))
return clip;
if (r->width == 0 || r->height == 0)
return _cairo_clip_set_all_clipped (clip);
_cairo_box_from_rectangle_int (&box, r);
return _cairo_clip_intersect_rectangle_box (clip, r, &box);
}
struct reduce {
cairo_clip_t *clip;
cairo_box_t limit;
cairo_box_t extents;
cairo_bool_t inside;
cairo_point_t current_point;
cairo_point_t last_move_to;
};
static void
_add_clipped_edge (struct reduce *r,
const cairo_point_t *p1,
const cairo_point_t *p2,
int y1, int y2)
{
cairo_fixed_t x;
x = _cairo_edge_compute_intersection_x_for_y (p1, p2, y1);
if (x < r->extents.p1.x)
r->extents.p1.x = x;
x = _cairo_edge_compute_intersection_x_for_y (p1, p2, y2);
if (x > r->extents.p2.x)
r->extents.p2.x = x;
if (y1 < r->extents.p1.y)
r->extents.p1.y = y1;
if (y2 > r->extents.p2.y)
r->extents.p2.y = y2;
r->inside = TRUE;
}
static void
_add_edge (struct reduce *r,
const cairo_point_t *p1,
const cairo_point_t *p2)
{
int top, bottom;
int top_y, bot_y;
int n;
if (p1->y < p2->y) {
top = p1->y;
bottom = p2->y;
} else {
top = p2->y;
bottom = p1->y;
}
if (bottom < r->limit.p1.y || top > r->limit.p2.y)
return;
if (p1->x > p2->x) {
const cairo_point_t *t = p1;
p1 = p2;
p2 = t;
}
if (p2->x <= r->limit.p1.x || p1->x >= r->limit.p2.x)
return;
for (n = 0; n < r->clip->num_boxes; n++) {
const cairo_box_t *limits = &r->clip->boxes[n];
if (bottom < limits->p1.y || top > limits->p2.y)
continue;
if (p2->x <= limits->p1.x || p1->x >= limits->p2.x)
continue;
if (p1->x >= limits->p1.x && p2->x <= limits->p1.x) {
top_y = top;
bot_y = bottom;
} else {
int p1_y, p2_y;
p1_y = _cairo_edge_compute_intersection_y_for_x (p1, p2,
limits->p1.x);
p2_y = _cairo_edge_compute_intersection_y_for_x (p1, p2,
limits->p2.x);
if (p1_y < p2_y) {
top_y = p1_y;
bot_y = p2_y;
} else {
top_y = p2_y;
bot_y = p1_y;
}
if (top_y < top)
top_y = top;
if (bot_y > bottom)
bot_y = bottom;
}
if (top_y < limits->p1.y)
top_y = limits->p1.y;
if (bot_y > limits->p2.y)
bot_y = limits->p2.y;
if (bot_y > top_y)
_add_clipped_edge (r, p1, p2, top_y, bot_y);
}
}
static cairo_status_t
_reduce_line_to (void *closure,
const cairo_point_t *point)
{
struct reduce *r = closure;
_add_edge (r, &r->current_point, point);
r->current_point = *point;
return CAIRO_STATUS_SUCCESS;
}
static cairo_status_t
_reduce_close (void *closure)
{
struct reduce *r = closure;
return _reduce_line_to (r, &r->last_move_to);
}
static cairo_status_t
_reduce_move_to (void *closure,
const cairo_point_t *point)
{
struct reduce *r = closure;
cairo_status_t status;
/* close current subpath */
status = _reduce_close (closure);
/* make sure that the closure represents a degenerate path */
r->current_point = *point;
r->last_move_to = *point;
return status;
}
static cairo_clip_t *
_cairo_clip_reduce_to_boxes (cairo_clip_t *clip)
{
struct reduce r;
cairo_clip_path_t *clip_path;
cairo_status_t status;
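    /* Note: the box reduction below is currently short-circuited; the
     * clip is returned unchanged and the remainder of this function is
     * unused. */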
return clip;
if (clip->path == NULL)
return clip;
r.clip = clip;
r.extents.p1.x = r.extents.p1.y = INT_MAX;
r.extents.p2.x = r.extents.p2.y = INT_MIN;
r.inside = FALSE;
r.limit.p1.x = _cairo_fixed_from_int (clip->extents.x);
r.limit.p1.y = _cairo_fixed_from_int (clip->extents.y);
r.limit.p2.x = _cairo_fixed_from_int (clip->extents.x + clip->extents.width);
r.limit.p2.y = _cairo_fixed_from_int (clip->extents.y + clip->extents.height);
clip_path = clip->path;
do {
r.current_point.x = 0;
r.current_point.y = 0;
r.last_move_to = r.current_point;
status = _cairo_path_fixed_interpret_flat (&clip_path->path,
_reduce_move_to,
_reduce_line_to,
_reduce_close,
&r,
clip_path->tolerance);
assert (status == CAIRO_STATUS_SUCCESS);
_reduce_close (&r);
} while ((clip_path = clip_path->prev));
if (! r.inside) {
_cairo_clip_path_destroy (clip->path);
clip->path = NULL;
}
return _cairo_clip_intersect_box (clip, &r.extents);
}
cairo_clip_t *
_cairo_clip_reduce_to_rectangle (const cairo_clip_t *clip,
const cairo_rectangle_int_t *r)
{
cairo_clip_t *copy;
if (_cairo_clip_is_all_clipped (clip))
return (cairo_clip_t *) clip;
if (_cairo_clip_contains_rectangle (clip, r))
return _cairo_clip_intersect_rectangle (NULL, r);
copy = _cairo_clip_copy_intersect_rectangle (clip, r);
if (_cairo_clip_is_all_clipped (copy))
return copy;
return _cairo_clip_reduce_to_boxes (copy);
}
cairo_clip_t *
_cairo_clip_reduce_for_composite (const cairo_clip_t *clip,
cairo_composite_rectangles_t *extents)
{
const cairo_rectangle_int_t *r;
r = extents->is_bounded ? &extents->bounded : &extents->unbounded;
return _cairo_clip_reduce_to_rectangle (clip, r);
}
cairo_clip_t *
_cairo_clip_from_boxes (const cairo_boxes_t *boxes)
{
cairo_box_t extents;
cairo_clip_t *clip = _cairo_clip_create ();
if (clip == NULL)
return _cairo_clip_set_all_clipped (clip);
if (unlikely (! _cairo_boxes_copy_to_clip (boxes, clip)))
return clip;
_cairo_boxes_extents (boxes, &extents);
_cairo_box_round_to_rectangle (&extents, &clip->extents);
return clip;
}
| 0 |
D://workCode//uploadProject\awtk\3rd\cairo | D://workCode//uploadProject\awtk\3rd\cairo\cairo\cairo-clip-inline.h | /* cairo - a vector graphics library with display and print output
*
* Copyright © 2005 Red Hat, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it either under the terms of the GNU Lesser General Public
* License version 2.1 as published by the Free Software Foundation
* (the "LGPL") or, at your option, under the terms of the Mozilla
* Public License Version 1.1 (the "MPL"). If you do not alter this
* notice, a recipient may use your version of this file under either
* the MPL or the LGPL.
*
* You should have received a copy of the LGPL along with this library
* in the file COPYING-LGPL-2.1; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA
* You should have received a copy of the MPL along with this library
* in the file COPYING-MPL-1.1
*
* The contents of this file are subject to the Mozilla Public License
* Version 1.1 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
* OF ANY KIND, either express or implied. See the LGPL or the MPL for
* the specific language governing rights and limitations.
*
* The Original Code is the cairo graphics library.
*
* The Initial Developer of the Original Code is Red Hat, Inc.
*
* Contributor(s):
* Kristian Høgsberg <krh@redhat.com>
* Chris Wilson <chris@chris-wilson.co.uk>
*/
#ifndef CAIRO_CLIP_INLINE_H
#define CAIRO_CLIP_INLINE_H
#include "cairo-clip-private.h"
static inline cairo_bool_t
_cairo_clip_is_all_clipped (const cairo_clip_t *clip)
{
return clip == &__cairo_clip_all;
}
static inline cairo_clip_t *
_cairo_clip_set_all_clipped (cairo_clip_t *clip)
{
_cairo_clip_destroy (clip);
return (cairo_clip_t *) &__cairo_clip_all;
}
static inline cairo_clip_t *
_cairo_clip_copy_intersect_rectangle (const cairo_clip_t *clip,
const cairo_rectangle_int_t *r)
{
return _cairo_clip_intersect_rectangle (_cairo_clip_copy (clip), r);
}
static inline cairo_clip_t *
_cairo_clip_copy_intersect_clip (const cairo_clip_t *clip,
const cairo_clip_t *other)
{
return _cairo_clip_intersect_clip (_cairo_clip_copy (clip), other);
}
static inline void
_cairo_clip_steal_boxes (cairo_clip_t *clip, cairo_boxes_t *boxes)
{
_cairo_boxes_init_for_array (boxes, clip->boxes, clip->num_boxes);
clip->boxes = NULL;
clip->num_boxes = 0;
}
static inline void
_cairo_clip_unsteal_boxes (cairo_clip_t *clip, cairo_boxes_t *boxes)
{
clip->boxes = boxes->chunks.base;
clip->num_boxes = boxes->num_boxes;
}
#endif /* CAIRO_CLIP_INLINE_H */
| 0 |
D://workCode//uploadProject\awtk\3rd\cairo | D://workCode//uploadProject\awtk\3rd\cairo\cairo\cairo-clip-polygon.c | /* -*- Mode: c; tab-width: 8; c-basic-offset: 4; indent-tabs-mode: t; -*- */
/* cairo - a vector graphics library with display and print output
*
* Copyright © 2011 Intel Corporation
*
* This library is free software; you can redistribute it and/or
* modify it either under the terms of the GNU Lesser General Public
* License version 2.1 as published by the Free Software Foundation
* (the "LGPL") or, at your option, under the terms of the Mozilla
* Public License Version 1.1 (the "MPL"). If you do not alter this
* notice, a recipient may use your version of this file under either
* the MPL or the LGPL.
*
* You should have received a copy of the LGPL along with this library
* in the file COPYING-LGPL-2.1; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA
* You should have received a copy of the MPL along with this library
* in the file COPYING-MPL-1.1
*
* The contents of this file are subject to the Mozilla Public License
* Version 1.1 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
* OF ANY KIND, either express or implied. See the LGPL or the MPL for
* the specific language governing rights and limitations.
*
* The Original Code is the cairo graphics library.
*
* The Initial Developer of the Original Code is University of Southern
* California.
*
* Contributor(s):
* Chris Wilson <chris@chris-wilson.co.uk>
*/
#include "cairoint.h"
#include "cairo-clip-inline.h"
#include "cairo-clip-private.h"
#include "cairo-error-private.h"
#include "cairo-freed-pool-private.h"
#include "cairo-gstate-private.h"
#include "cairo-path-fixed-private.h"
#include "cairo-pattern-private.h"
#include "cairo-composite-rectangles-private.h"
#include "cairo-region-private.h"
static cairo_bool_t
can_convert_to_polygon (const cairo_clip_t *clip)
{
cairo_clip_path_t *clip_path = clip->path;
cairo_antialias_t antialias = clip_path->antialias;
while ((clip_path = clip_path->prev) != NULL) {
if (clip_path->antialias != antialias)
return FALSE;
}
return TRUE;
}
cairo_int_status_t
_cairo_clip_get_polygon (const cairo_clip_t *clip,
cairo_polygon_t *polygon,
cairo_fill_rule_t *fill_rule,
cairo_antialias_t *antialias)
{
cairo_status_t status;
cairo_clip_path_t *clip_path;
if (_cairo_clip_is_all_clipped (clip)) {
_cairo_polygon_init (polygon, NULL, 0);
return CAIRO_INT_STATUS_SUCCESS;
}
/* If there is no clip, we need an infinite polygon */
assert (clip && (clip->path || clip->num_boxes));
if (clip->path == NULL) {
*fill_rule = CAIRO_FILL_RULE_WINDING;
*antialias = CAIRO_ANTIALIAS_DEFAULT;
return _cairo_polygon_init_box_array (polygon,
clip->boxes,
clip->num_boxes);
}
/* check that residual is all of the same type/tolerance */
if (! can_convert_to_polygon (clip))
return CAIRO_INT_STATUS_UNSUPPORTED;
if (clip->num_boxes < 2)
_cairo_polygon_init_with_clip (polygon, clip);
else
_cairo_polygon_init_with_clip (polygon, NULL);
clip_path = clip->path;
*fill_rule = clip_path->fill_rule;
*antialias = clip_path->antialias;
status = _cairo_path_fixed_fill_to_polygon (&clip_path->path,
clip_path->tolerance,
polygon);
if (unlikely (status))
goto err;
if (clip->num_boxes > 1) {
status = _cairo_polygon_intersect_with_boxes (polygon, fill_rule,
clip->boxes, clip->num_boxes);
if (unlikely (status))
goto err;
}
polygon->limits = NULL;
polygon->num_limits = 0;
while ((clip_path = clip_path->prev) != NULL) {
cairo_polygon_t next;
_cairo_polygon_init (&next, NULL, 0);
status = _cairo_path_fixed_fill_to_polygon (&clip_path->path,
clip_path->tolerance,
&next);
if (likely (status == CAIRO_STATUS_SUCCESS))
status = _cairo_polygon_intersect (polygon, *fill_rule,
&next, clip_path->fill_rule);
_cairo_polygon_fini (&next);
if (unlikely (status))
goto err;
*fill_rule = CAIRO_FILL_RULE_WINDING;
}
return CAIRO_STATUS_SUCCESS;
err:
_cairo_polygon_fini (polygon);
return status;
}
cairo_bool_t
_cairo_clip_is_polygon (const cairo_clip_t *clip)
{
if (_cairo_clip_is_all_clipped (clip))
return TRUE;
/* If there is no clip, we need an infinite polygon */
if (clip == NULL)
return FALSE;
if (clip->path == NULL)
return TRUE;
/* check that residual is all of the same type/tolerance */
return can_convert_to_polygon (clip);
}
| 0 |
D://workCode//uploadProject\awtk\3rd\cairo | D://workCode//uploadProject\awtk\3rd\cairo\cairo\cairo-clip-private.h | /* cairo - a vector graphics library with display and print output
*
* Copyright © 2005 Red Hat, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it either under the terms of the GNU Lesser General Public
* License version 2.1 as published by the Free Software Foundation
* (the "LGPL") or, at your option, under the terms of the Mozilla
* Public License Version 1.1 (the "MPL"). If you do not alter this
* notice, a recipient may use your version of this file under either
* the MPL or the LGPL.
*
* You should have received a copy of the LGPL along with this library
* in the file COPYING-LGPL-2.1; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA
* You should have received a copy of the MPL along with this library
* in the file COPYING-MPL-1.1
*
* The contents of this file are subject to the Mozilla Public License
* Version 1.1 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
* OF ANY KIND, either express or implied. See the LGPL or the MPL for
* the specific language governing rights and limitations.
*
* The Original Code is the cairo graphics library.
*
* The Initial Developer of the Original Code is Red Hat, Inc.
*
* Contributor(s):
* Kristian Høgsberg <krh@redhat.com>
* Chris Wilson <chris@chris-wilson.co.uk>
*/
#ifndef CAIRO_CLIP_PRIVATE_H
#define CAIRO_CLIP_PRIVATE_H
#include "cairo-types-private.h"
#include "cairo-boxes-private.h"
#include "cairo-error-private.h"
#include "cairo-compiler-private.h"
#include "cairo-error-private.h"
#include "cairo-path-fixed-private.h"
#include "cairo-reference-count-private.h"
extern const cairo_private cairo_rectangle_list_t _cairo_rectangles_nil;
struct _cairo_clip_path {
cairo_reference_count_t ref_count;
cairo_path_fixed_t path;
cairo_fill_rule_t fill_rule;
double tolerance;
cairo_antialias_t antialias;
cairo_clip_path_t *prev;
};
struct _cairo_clip {
cairo_rectangle_int_t extents;
cairo_clip_path_t *path;
cairo_box_t *boxes;
int num_boxes;
cairo_region_t *region;
cairo_bool_t is_region;
cairo_box_t embedded_box;
};
cairo_private cairo_clip_t *
_cairo_clip_create (void);
cairo_private cairo_clip_path_t *
_cairo_clip_path_reference (cairo_clip_path_t *clip_path);
cairo_private void
_cairo_clip_path_destroy (cairo_clip_path_t *clip_path);
cairo_private void
_cairo_clip_destroy (cairo_clip_t *clip);
cairo_private extern const cairo_clip_t __cairo_clip_all;
cairo_private cairo_clip_t *
_cairo_clip_copy (const cairo_clip_t *clip);
cairo_private cairo_clip_t *
_cairo_clip_copy_region (const cairo_clip_t *clip);
cairo_private cairo_clip_t *
_cairo_clip_copy_path (const cairo_clip_t *clip);
cairo_private cairo_clip_t *
_cairo_clip_translate (cairo_clip_t *clip, int tx, int ty);
cairo_private cairo_clip_t *
_cairo_clip_transform (cairo_clip_t *clip, const cairo_matrix_t *m);
cairo_private cairo_clip_t *
_cairo_clip_copy_with_translation (const cairo_clip_t *clip, int tx, int ty);
cairo_private cairo_bool_t
_cairo_clip_equal (const cairo_clip_t *clip_a,
const cairo_clip_t *clip_b);
cairo_private cairo_clip_t *
_cairo_clip_intersect_rectangle (cairo_clip_t *clip,
const cairo_rectangle_int_t *rectangle);
cairo_private cairo_clip_t *
_cairo_clip_intersect_clip (cairo_clip_t *clip,
const cairo_clip_t *other);
cairo_private cairo_clip_t *
_cairo_clip_intersect_box (cairo_clip_t *clip,
const cairo_box_t *box);
cairo_private cairo_clip_t *
_cairo_clip_intersect_boxes (cairo_clip_t *clip,
const cairo_boxes_t *boxes);
cairo_private cairo_clip_t *
_cairo_clip_intersect_rectilinear_path (cairo_clip_t *clip,
const cairo_path_fixed_t *path,
cairo_fill_rule_t fill_rule,
cairo_antialias_t antialias);
cairo_private cairo_clip_t *
_cairo_clip_intersect_path (cairo_clip_t *clip,
const cairo_path_fixed_t *path,
cairo_fill_rule_t fill_rule,
double tolerance,
cairo_antialias_t antialias);
cairo_private const cairo_rectangle_int_t *
_cairo_clip_get_extents (const cairo_clip_t *clip);
cairo_private cairo_surface_t *
_cairo_clip_get_surface (const cairo_clip_t *clip, cairo_surface_t *dst, int *tx, int *ty);
cairo_private cairo_surface_t *
_cairo_clip_get_image (const cairo_clip_t *clip,
cairo_surface_t *target,
const cairo_rectangle_int_t *extents);
cairo_private cairo_status_t
_cairo_clip_combine_with_surface (const cairo_clip_t *clip,
cairo_surface_t *dst,
int dst_x, int dst_y);
cairo_private cairo_clip_t *
_cairo_clip_from_boxes (const cairo_boxes_t *boxes);
cairo_private cairo_region_t *
_cairo_clip_get_region (const cairo_clip_t *clip);
cairo_private cairo_bool_t
_cairo_clip_is_region (const cairo_clip_t *clip);
cairo_private cairo_clip_t *
_cairo_clip_reduce_to_rectangle (const cairo_clip_t *clip,
const cairo_rectangle_int_t *r);
cairo_private cairo_clip_t *
_cairo_clip_reduce_for_composite (const cairo_clip_t *clip,
cairo_composite_rectangles_t *extents);
cairo_private cairo_bool_t
_cairo_clip_contains_rectangle (const cairo_clip_t *clip,
const cairo_rectangle_int_t *rect);
cairo_private cairo_bool_t
_cairo_clip_contains_box (const cairo_clip_t *clip,
const cairo_box_t *box);
cairo_private cairo_bool_t
_cairo_clip_contains_extents (const cairo_clip_t *clip,
const cairo_composite_rectangles_t *extents);
cairo_private cairo_rectangle_list_t*
_cairo_clip_copy_rectangle_list (cairo_clip_t *clip, cairo_gstate_t *gstate);
cairo_private cairo_rectangle_list_t *
_cairo_rectangle_list_create_in_error (cairo_status_t status);
cairo_private cairo_bool_t
_cairo_clip_is_polygon (const cairo_clip_t *clip);
cairo_private cairo_int_status_t
_cairo_clip_get_polygon (const cairo_clip_t *clip,
cairo_polygon_t *polygon,
cairo_fill_rule_t *fill_rule,
cairo_antialias_t *antialias);
#endif /* CAIRO_CLIP_PRIVATE_H */
| 0 |
D://workCode//uploadProject\awtk\3rd\cairo | D://workCode//uploadProject\awtk\3rd\cairo\cairo\cairo-clip-region.c | /* -*- Mode: c; tab-width: 8; c-basic-offset: 4; indent-tabs-mode: t; -*- */
/* cairo - a vector graphics library with display and print output
*
* Copyright © 2002 University of Southern California
* Copyright © 2005 Red Hat, Inc.
* Copyright © 2009 Chris Wilson
*
* This library is free software; you can redistribute it and/or
* modify it either under the terms of the GNU Lesser General Public
* License version 2.1 as published by the Free Software Foundation
* (the "LGPL") or, at your option, under the terms of the Mozilla
* Public License Version 1.1 (the "MPL"). If you do not alter this
* notice, a recipient may use your version of this file under either
* the MPL or the LGPL.
*
* You should have received a copy of the LGPL along with this library
* in the file COPYING-LGPL-2.1; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA
* You should have received a copy of the MPL along with this library
* in the file COPYING-MPL-1.1
*
* The contents of this file are subject to the Mozilla Public License
* Version 1.1 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
* OF ANY KIND, either express or implied. See the LGPL or the MPL for
* the specific language governing rights and limitations.
*
* The Original Code is the cairo graphics library.
*
* The Initial Developer of the Original Code is University of Southern
* California.
*
* Contributor(s):
* Carl D. Worth <cworth@cworth.org>
* Kristian Høgsberg <krh@redhat.com>
* Chris Wilson <chris@chris-wilson.co.uk>
*/
#include "cairoint.h"
#include "cairo-clip-private.h"
#include "cairo-error-private.h"
#include "cairo-freed-pool-private.h"
#include "cairo-gstate-private.h"
#include "cairo-path-fixed-private.h"
#include "cairo-pattern-private.h"
#include "cairo-composite-rectangles-private.h"
#include "cairo-region-private.h"
static void
_cairo_clip_extract_region (cairo_clip_t *clip)
{
cairo_rectangle_int_t stack_rects[CAIRO_STACK_ARRAY_LENGTH (cairo_rectangle_int_t)];
cairo_rectangle_int_t *r = stack_rects;
cairo_bool_t is_region;
int i;
if (clip->num_boxes == 0)
return;
if (clip->num_boxes > ARRAY_LENGTH (stack_rects)) {
r = _cairo_malloc_ab (clip->num_boxes, sizeof (cairo_rectangle_int_t));
if (r == NULL){
_cairo_error_throw (CAIRO_STATUS_NO_MEMORY);
return;
}
}
is_region = clip->path == NULL;
for (i = 0; i < clip->num_boxes; i++) {
cairo_box_t *b = &clip->boxes[i];
if (is_region)
is_region =
_cairo_fixed_is_integer (b->p1.x | b->p1.y | b->p2.x | b->p2.y);
r[i].x = _cairo_fixed_integer_floor (b->p1.x);
r[i].y = _cairo_fixed_integer_floor (b->p1.y);
r[i].width = _cairo_fixed_integer_ceil (b->p2.x) - r[i].x;
r[i].height = _cairo_fixed_integer_ceil (b->p2.y) - r[i].y;
}
clip->is_region = is_region;
clip->region = cairo_region_create_rectangles (r, i);
if (r != stack_rects)
free (r);
}
cairo_region_t *
_cairo_clip_get_region (const cairo_clip_t *clip)
{
if (clip == NULL)
return NULL;
if (clip->region == NULL)
_cairo_clip_extract_region ((cairo_clip_t *) clip);
return clip->region;
}
cairo_bool_t
_cairo_clip_is_region (const cairo_clip_t *clip)
{
if (clip == NULL)
return TRUE;
if (clip->is_region)
return TRUE;
/* XXX Geometric reduction? */
if (clip->path)
return FALSE;
if (clip->num_boxes == 0)
return TRUE;
if (clip->region == NULL)
_cairo_clip_extract_region ((cairo_clip_t *) clip);
return clip->is_region;
}
| 0 |
D://workCode//uploadProject\awtk\3rd\cairo | D://workCode//uploadProject\awtk\3rd\cairo\cairo\cairo-clip-surface.c | /* -*- Mode: c; tab-width: 8; c-basic-offset: 4; indent-tabs-mode: t; -*- */
/* cairo - a vector graphics library with display and print output
*
* Copyright © 2002 University of Southern California
* Copyright © 2005 Red Hat, Inc.
* Copyright © 2009 Chris Wilson
*
* This library is free software; you can redistribute it and/or
* modify it either under the terms of the GNU Lesser General Public
* License version 2.1 as published by the Free Software Foundation
* (the "LGPL") or, at your option, under the terms of the Mozilla
* Public License Version 1.1 (the "MPL"). If you do not alter this
* notice, a recipient may use your version of this file under either
* the MPL or the LGPL.
*
* You should have received a copy of the LGPL along with this library
* in the file COPYING-LGPL-2.1; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA
* You should have received a copy of the MPL along with this library
* in the file COPYING-MPL-1.1
*
* The contents of this file are subject to the Mozilla Public License
* Version 1.1 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
* OF ANY KIND, either express or implied. See the LGPL or the MPL for
* the specific language governing rights and limitations.
*
* The Original Code is the cairo graphics library.
*
* The Initial Developer of the Original Code is University of Southern
* California.
*
* Contributor(s):
* Carl D. Worth <cworth@cworth.org>
* Kristian Høgsberg <krh@redhat.com>
* Chris Wilson <chris@chris-wilson.co.uk>
*/
#include "cairoint.h"
#include "cairo-clip-private.h"
#include "cairo-error-private.h"
#include "cairo-freed-pool-private.h"
#include "cairo-gstate-private.h"
#include "cairo-path-fixed-private.h"
#include "cairo-pattern-private.h"
#include "cairo-composite-rectangles-private.h"
#include "cairo-region-private.h"
cairo_status_t
_cairo_clip_combine_with_surface (const cairo_clip_t *clip,
cairo_surface_t *dst,
int dst_x, int dst_y)
{
cairo_clip_path_t *copy_path;
cairo_clip_path_t *clip_path;
cairo_clip_t *copy;
cairo_status_t status = CAIRO_STATUS_SUCCESS;
copy = _cairo_clip_copy_with_translation (clip, -dst_x, -dst_y);
copy_path = copy->path;
copy->path = NULL;
if (copy->boxes) {
status = _cairo_surface_paint (dst,
CAIRO_OPERATOR_IN,
&_cairo_pattern_white.base,
copy);
}
clip = NULL;
if (_cairo_clip_is_region (copy))
clip = copy;
clip_path = copy_path;
while (status == CAIRO_STATUS_SUCCESS && clip_path) {
status = _cairo_surface_fill (dst,
CAIRO_OPERATOR_IN,
&_cairo_pattern_white.base,
&clip_path->path,
clip_path->fill_rule,
clip_path->tolerance,
clip_path->antialias,
clip);
clip_path = clip_path->prev;
}
copy->path = copy_path;
_cairo_clip_destroy (copy);
return status;
}
static cairo_status_t
_cairo_path_fixed_add_box (cairo_path_fixed_t *path,
const cairo_box_t *box,
cairo_fixed_t fx,
cairo_fixed_t fy)
{
cairo_status_t status;
status = _cairo_path_fixed_move_to (path, box->p1.x + fx, box->p1.y + fy);
if (unlikely (status))
return status;
status = _cairo_path_fixed_line_to (path, box->p2.x + fx, box->p1.y + fy);
if (unlikely (status))
return status;
status = _cairo_path_fixed_line_to (path, box->p2.x + fx, box->p2.y + fy);
if (unlikely (status))
return status;
status = _cairo_path_fixed_line_to (path, box->p1.x + fx, box->p2.y + fy);
if (unlikely (status))
return status;
return _cairo_path_fixed_close_path (path);
}
cairo_surface_t *
_cairo_clip_get_surface (const cairo_clip_t *clip,
cairo_surface_t *target,
int *tx, int *ty)
{
cairo_surface_t *surface;
cairo_status_t status;
cairo_clip_t *copy, *region;
cairo_clip_path_t *copy_path, *clip_path;
if (clip->num_boxes) {
cairo_path_fixed_t path;
int i;
surface = _cairo_surface_create_scratch (target,
CAIRO_CONTENT_ALPHA,
clip->extents.width,
clip->extents.height,
CAIRO_COLOR_TRANSPARENT);
if (unlikely (surface->status))
return surface;
_cairo_path_fixed_init (&path);
status = CAIRO_STATUS_SUCCESS;
for (i = 0; status == CAIRO_STATUS_SUCCESS && i < clip->num_boxes; i++) {
status = _cairo_path_fixed_add_box (&path, &clip->boxes[i],
-_cairo_fixed_from_int (clip->extents.x),
-_cairo_fixed_from_int (clip->extents.y));
}
if (status == CAIRO_STATUS_SUCCESS)
status = _cairo_surface_fill (surface,
CAIRO_OPERATOR_ADD,
&_cairo_pattern_white.base,
&path,
CAIRO_FILL_RULE_WINDING,
1.,
CAIRO_ANTIALIAS_DEFAULT,
NULL);
_cairo_path_fixed_fini (&path);
if (unlikely (status)) {
cairo_surface_destroy (surface);
return _cairo_surface_create_in_error (status);
}
} else {
surface = _cairo_surface_create_scratch (target,
CAIRO_CONTENT_ALPHA,
clip->extents.width,
clip->extents.height,
CAIRO_COLOR_WHITE);
if (unlikely (surface->status))
return surface;
}
copy = _cairo_clip_copy_with_translation (clip,
-clip->extents.x,
-clip->extents.y);
copy_path = copy->path;
copy->path = NULL;
region = copy;
if (! _cairo_clip_is_region (copy))
region = _cairo_clip_copy_region (copy);
status = CAIRO_STATUS_SUCCESS;
clip_path = copy_path;
while (status == CAIRO_STATUS_SUCCESS && clip_path) {
status = _cairo_surface_fill (surface,
CAIRO_OPERATOR_IN,
&_cairo_pattern_white.base,
&clip_path->path,
clip_path->fill_rule,
clip_path->tolerance,
clip_path->antialias,
region);
clip_path = clip_path->prev;
}
copy->path = copy_path;
_cairo_clip_destroy (copy);
if (region != copy)
_cairo_clip_destroy (region);
if (unlikely (status)) {
cairo_surface_destroy (surface);
return _cairo_surface_create_in_error (status);
}
*tx = clip->extents.x;
*ty = clip->extents.y;
return surface;
}
cairo_surface_t *
_cairo_clip_get_image (const cairo_clip_t *clip,
cairo_surface_t *target,
const cairo_rectangle_int_t *extents)
{
cairo_surface_t *surface;
cairo_status_t status;
surface = cairo_surface_create_similar_image (target,
CAIRO_FORMAT_A8,
extents->width,
extents->height);
if (unlikely (surface->status))
return surface;
status = _cairo_surface_paint (surface, CAIRO_OPERATOR_SOURCE,
&_cairo_pattern_white.base, NULL);
if (likely (status == CAIRO_STATUS_SUCCESS))
status = _cairo_clip_combine_with_surface (clip, surface,
extents->x, extents->y);
if (unlikely (status)) {
cairo_surface_destroy (surface);
surface = _cairo_surface_create_in_error (status);
}
return surface;
}
| 0 |
D://workCode//uploadProject\awtk\3rd\cairo | D://workCode//uploadProject\awtk\3rd\cairo\cairo\cairo-clip-tor-scan-converter.c | /* -*- Mode: c; tab-width: 8; c-basic-offset: 4; indent-tabs-mode: t; -*- */
/* glitter-paths - polygon scan converter
*
* Copyright (c) 2008 M Joonas Pihlaja
* Copyright (c) 2007 David Turner
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
/* This is the Glitter paths scan converter incorporated into cairo.
* The source is from commit 734c53237a867a773640bd5b64816249fa1730f8
* of
*
* https://gitweb.freedesktop.org/?p=users/joonas/glitter-paths
*/
/* Glitter-paths is a stand alone polygon rasteriser derived from
 * David Turner's reimplementation of Tor Andersson's 15x17
* supersampling rasteriser from the Apparition graphics library. The
* main new feature here is cheaply choosing per-scan line between
* doing fully analytical coverage computation for an entire row at a
* time vs. using a supersampling approach.
*
* David Turner's code can be found at
*
* http://david.freetype.org/rasterizer-shootout/raster-comparison-20070813.tar.bz2
*
* In particular this file incorporates large parts of ftgrays_tor10.h
* from raster-comparison-20070813.tar.bz2
*/
/* Overview
*
 * A scan converter's basic purpose is to take polygon edges and convert
 * them into an RLE compressed A8 mask.  This one works in two phases:
 * gathering edges and generating spans.
 *
 * 1) As the user feeds the scan converter edges they are vertically
 * clipped and bucketed into a _polygon_ data structure.  The edges
* are also snapped from the user's coordinates to the subpixel grid
* coordinates used during scan conversion.
*
* user
* |
* | edges
* V
* polygon buckets
*
* 2) Generating spans works by performing a vertical sweep of pixel
* rows from top to bottom and maintaining an _active_list_ of edges
* that intersect the row. From the active list the fill rule
* determines which edges are the left and right edges of the start of
* each span, and their contribution is then accumulated into a pixel
* coverage list (_cell_list_) as coverage deltas. Once the coverage
* deltas of all edges are known we can form spans of constant pixel
* coverage by summing the deltas during a traversal of the cell list.
* At the end of a pixel row the cell list is sent to a coverage
* blitter for rendering to some target surface.
*
* The pixel coverages are computed by either supersampling the row
* and box filtering a mono rasterisation, or by computing the exact
* coverages of edges in the active list. The supersampling method is
* used whenever some edge starts or stops within the row or there are
* edge intersections in the row.
*
* polygon bucket for \
* current pixel row |
* | |
* | activate new edges | Repeat GRID_Y times if we
* V \ are supersampling this row,
* active list / or just once if we're computing
* | | analytical coverage.
* | coverage deltas |
* V |
* pixel coverage list /
* |
* V
* coverage blitter
*/
#include "cairoint.h"
#include "cairo-spans-private.h"
#include "cairo-error-private.h"
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <setjmp.h>
/* The input coordinate scale and the rasterisation grid scales. */
#define GLITTER_INPUT_BITS CAIRO_FIXED_FRAC_BITS
#define GRID_X_BITS CAIRO_FIXED_FRAC_BITS
#define GRID_Y 15
/* Set glitter up to use a cairo span renderer to do the coverage
* blitting. */
struct pool;
struct cell_list;
/*-------------------------------------------------------------------------
* glitter-paths.h
*/
/* "Input scaled" numbers are fixed precision reals with multiplier
* 2**GLITTER_INPUT_BITS. Input coordinates are given to glitter as
* pixel scaled numbers. These get converted to the internal grid
* scaled numbers as soon as possible. Internal overflow is possible
* if GRID_X/Y inside glitter-paths.c is larger than
* 1<<GLITTER_INPUT_BITS. */
#ifndef GLITTER_INPUT_BITS
# define GLITTER_INPUT_BITS 8
#endif
#define GLITTER_INPUT_SCALE (1<<GLITTER_INPUT_BITS)
typedef int glitter_input_scaled_t;
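/* For example, with the default CAIRO_FIXED_FRAC_BITS of 8 the user space
 * coordinate 2.5 arrives here as the input scaled value
 * 2.5 * GLITTER_INPUT_SCALE == 640. */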
/* Opaque type for scan converting. */
typedef struct glitter_scan_converter glitter_scan_converter_t;
/*-------------------------------------------------------------------------
* glitter-paths.c: Implementation internal types
*/
#include <stdlib.h>
#include <string.h>
#include <limits.h>
/* All polygon coordinates are snapped onto a subsample grid. "Grid
* scaled" numbers are fixed precision reals with multiplier GRID_X or
* GRID_Y. */
typedef int grid_scaled_t;
typedef int grid_scaled_x_t;
typedef int grid_scaled_y_t;
/* Default x/y scale factors.
* You can either define GRID_X/Y_BITS to get a power-of-two scale
* or define GRID_X/Y separately. */
#if !defined(GRID_X) && !defined(GRID_X_BITS)
# define GRID_X_BITS 8
#endif
#if !defined(GRID_Y) && !defined(GRID_Y_BITS)
# define GRID_Y 15
#endif
/* Use GRID_X/Y_BITS to define GRID_X/Y if they're available. */
#ifdef GRID_X_BITS
# define GRID_X (1 << GRID_X_BITS)
#endif
#ifdef GRID_Y_BITS
# define GRID_Y (1 << GRID_Y_BITS)
#endif
/* The GRID_X_TO_INT_FRAC macro splits a grid scaled coordinate into
* integer and fractional parts. The integer part is floored. */
#if defined(GRID_X_TO_INT_FRAC)
/* do nothing */
#elif defined(GRID_X_BITS)
# define GRID_X_TO_INT_FRAC(x, i, f) \
_GRID_TO_INT_FRAC_shift(x, i, f, GRID_X_BITS)
#else
# define GRID_X_TO_INT_FRAC(x, i, f) \
_GRID_TO_INT_FRAC_general(x, i, f, GRID_X)
#endif
#define _GRID_TO_INT_FRAC_general(t, i, f, m) do { \
(i) = (t) / (m); \
(f) = (t) % (m); \
if ((f) < 0) { \
--(i); \
(f) += (m); \
} \
} while (0)
#define _GRID_TO_INT_FRAC_shift(t, i, f, b) do { \
(f) = (t) & ((1 << (b)) - 1); \
(i) = (t) >> (b); \
} while (0)
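/*
 * Worked example with GRID_X_BITS == 8 (so GRID_X == 256): the grid scaled
 * coordinate 515 splits into i == 2, f == 3, and -3 splits into i == -1,
 * f == 253.  Both the shift and the general variant floor the integer
 * part, so the fraction always ends up in [0, GRID_X).
 */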
/* A grid area is a real in [0,1] scaled by 2*GRID_X*GRID_Y. We want
* to be able to represent exactly areas of subpixel trapezoids whose
* vertices are given in grid scaled coordinates. The scale factor
* comes from needing to accurately represent the area 0.5*dx*dy of a
* triangle with base dx and height dy in grid scaled numbers. */
typedef int grid_area_t;
#define GRID_XY (2*GRID_X*GRID_Y) /* Unit area on the grid. */
/* GRID_AREA_TO_ALPHA(area): map [0,GRID_XY] to [0,255]. */
#if GRID_XY == 510
# define GRID_AREA_TO_ALPHA(c) (((c)+1) >> 1)
#elif GRID_XY == 255
# define GRID_AREA_TO_ALPHA(c) (c)
#elif GRID_XY == 64
# define GRID_AREA_TO_ALPHA(c) (((c) << 2) | -(((c) & 0x40) >> 6))
#elif GRID_XY == 128
# define GRID_AREA_TO_ALPHA(c) ((((c) << 1) | -((c) >> 7)) & 255)
#elif GRID_XY == 256
# define GRID_AREA_TO_ALPHA(c) (((c) | -((c) >> 8)) & 255)
#elif GRID_XY == 15
# define GRID_AREA_TO_ALPHA(c) (((c) << 4) + (c))
#elif GRID_XY == 2*256*15
# define GRID_AREA_TO_ALPHA(c) (((c) + ((c)<<4) + 256) >> 9)
#else
# define GRID_AREA_TO_ALPHA(c) (((c)*255 + GRID_XY/2) / GRID_XY)
#endif
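/*
 * With the grid used in this file (GRID_X == 256 given the default
 * CAIRO_FIXED_FRAC_BITS of 8, and GRID_Y == 15, so GRID_XY == 7680) the
 * 2*256*15 branch above applies.  It maps areas to alpha essentially as
 * area * 255 / GRID_XY using only shifts and adds:
 *
 *	GRID_AREA_TO_ALPHA(0)    == 0
 *	GRID_AREA_TO_ALPHA(3840) == 128
 *	GRID_AREA_TO_ALPHA(7680) == 255
 */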
#define UNROLL3(x) x x x
struct quorem {
int32_t quo;
int32_t rem;
};
/* Header for a chunk of memory in a memory pool. */
struct _pool_chunk {
/* # bytes used in this chunk. */
size_t size;
/* # bytes total in this chunk */
size_t capacity;
/* Pointer to the previous chunk or %NULL if this is the sentinel
* chunk in the pool header. */
struct _pool_chunk *prev_chunk;
/* Actual data starts here. Well aligned for pointers. */
};
/* A memory pool. This is supposed to be embedded on the stack or
* within some other structure. It may optionally be followed by an
* embedded array from which requests are fulfilled until
* malloc needs to be called to allocate a first real chunk. */
struct pool {
/* Chunk we're allocating from. */
struct _pool_chunk *current;
jmp_buf *jmp;
/* Free list of previously allocated chunks. All have >= default
* capacity. */
struct _pool_chunk *first_free;
/* The default capacity of a chunk. */
size_t default_capacity;
/* Header for the sentinel chunk. Directly following the pool
* struct should be some space for embedded elements from which
 * the sentinel chunk allocates. */
struct _pool_chunk sentinel[1];
};
/* A polygon edge. */
struct edge {
/* Next in y-bucket or active list. */
struct edge *next;
/* Current x coordinate while the edge is on the active
* list. Initialised to the x coordinate of the top of the
* edge. The quotient is in grid_scaled_x_t units and the
* remainder is mod dy in grid_scaled_y_t units.*/
struct quorem x;
/* Advance of the current x when moving down a subsample line. */
struct quorem dxdy;
/* Advance of the current x when moving down a full pixel
* row. Only initialised when the height of the edge is large
* enough that there's a chance the edge could be stepped by a
* full row's worth of subsample rows at a time. */
struct quorem dxdy_full;
/* The clipped y of the top of the edge. */
grid_scaled_y_t ytop;
/* y2-y1 after orienting the edge downwards. */
grid_scaled_y_t dy;
    /* Number of subsample rows of this edge remaining to be scan
     * converted. */
grid_scaled_y_t height_left;
/* Original sign of the edge: +1 for downwards, -1 for upwards
* edges. */
int dir;
int vertical;
int clip;
};
/* Number of subsample rows per y-bucket. Must be GRID_Y. */
#define EDGE_Y_BUCKET_HEIGHT GRID_Y
#define EDGE_Y_BUCKET_INDEX(y, ymin) (((y) - (ymin))/EDGE_Y_BUCKET_HEIGHT)
/* A collection of sorted and vertically clipped edges of the polygon.
* Edges are moved from the polygon to an active list while scan
* converting. */
struct polygon {
/* The vertical clip extents. */
grid_scaled_y_t ymin, ymax;
/* Array of edges all starting in the same bucket. An edge is put
* into bucket EDGE_BUCKET_INDEX(edge->ytop, polygon->ymin) when
* it is added to the polygon. */
struct edge **y_buckets;
struct edge *y_buckets_embedded[64];
struct {
struct pool base[1];
struct edge embedded[32];
} edge_pool;
};
/* A cell records the effect on pixel coverage of polygon edges
* passing through a pixel. It contains two accumulators of pixel
* coverage.
*
* Consider the effects of a polygon edge on the coverage of a pixel
* it intersects and that of the following one. The coverage of the
* following pixel is the height of the edge multiplied by the width
* of the pixel, and the coverage of the pixel itself is the area of
* the trapezoid formed by the edge and the right side of the pixel.
*
* +-----------------------+-----------------------+
* | | |
* | | |
* |_______________________|_______________________|
* | \...................|.......................|\
* | \..................|.......................| |
* | \.................|.......................| |
* | \....covered.....|.......................| |
* | \....area.......|.......................| } covered height
* | \..............|.......................| |
* |uncovered\.............|.......................| |
* | area \............|.......................| |
* |___________\...........|.......................|/
* | | |
* | | |
* | | |
* +-----------------------+-----------------------+
*
* Since the coverage of the following pixel will always be a multiple
* of the width of the pixel, we can store the height of the covered
* area instead. The coverage of the pixel itself is the total
* coverage minus the area of the uncovered area to the left of the
* edge. As it's faster to compute the uncovered area we only store
* that and subtract it from the total coverage later when forming
* spans to blit.
*
* The heights and areas are signed, with left edges of the polygon
* having positive sign and right edges having negative sign. When
* two edges intersect they swap their left/rightness so their
* contribution above and below the intersection point must be
* computed separately. */
struct cell {
struct cell *next;
int x;
grid_area_t uncovered_area;
grid_scaled_y_t covered_height;
grid_scaled_y_t clipped_height;
};
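/*
 * A worked example of the accounting above (a sketch; the exact span
 * arithmetic lives in the blitting code further below): a single left edge
 * (sign +1) running straight down the middle of a pixel for the whole row
 * stores covered_height == GRID_Y in that pixel's cell together with an
 * uncovered_area of 2 * (GRID_X/2) * GRID_Y == GRID_XY/2, i.e. half the
 * unit area.  That yields full coverage for the pixels to its right (until
 * a matching right edge is met) and roughly 50% coverage for the pixel the
 * edge passes through.
 */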
/* A cell list represents the scan line sparsely as cells ordered by
* ascending x. It is geared towards scanning the cells in order
* using an internal cursor. */
struct cell_list {
/* Sentinel nodes */
struct cell head, tail;
/* Cursor state for iterating through the cell list. */
struct cell *cursor;
/* Cells in the cell list are owned by the cell list and are
* allocated from this pool. */
struct {
struct pool base[1];
struct cell embedded[32];
} cell_pool;
};
struct cell_pair {
struct cell *cell1;
struct cell *cell2;
};
/* The active list contains edges in the current scan line ordered by
* the x-coordinate of the intercept of the edge and the scan line. */
struct active_list {
/* Leftmost edge on the current scan line. */
struct edge *head;
/* A lower bound on the height of the active edges is used to
* estimate how soon some active edge ends. We can't advance the
* scan conversion by a full pixel row if an edge ends somewhere
* within it. */
grid_scaled_y_t min_height;
};
struct glitter_scan_converter {
struct polygon polygon[1];
struct active_list active[1];
struct cell_list coverages[1];
/* Clip box. */
grid_scaled_y_t ymin, ymax;
};
/* Compute the floored division a/b. Assumes / and % perform symmetric
* division. */
inline static struct quorem
floored_divrem(int a, int b)
{
struct quorem qr;
qr.quo = a/b;
qr.rem = a%b;
if ((a^b)<0 && qr.rem) {
qr.quo -= 1;
qr.rem += b;
}
return qr;
}
/* Compute the floored division (x*a)/b. Assumes / and % perform symmetric
* division. */
static struct quorem
floored_muldivrem(int x, int a, int b)
{
struct quorem qr;
long long xa = (long long)x*a;
qr.quo = xa/b;
qr.rem = xa%b;
if ((xa>=0) != (b>=0) && qr.rem) {
qr.quo -= 1;
qr.rem += b;
}
return qr;
}
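/*
 * Floored division keeps the remainder non-negative when b > 0, unlike
 * C99's truncating division.  For example floored_divrem(-7, 4) yields
 * quo == -2, rem == 1 (and indeed -2*4 + 1 == -7), whereas -7/4 and -7%4
 * give -1 and -3.  floored_muldivrem(x, a, b) is the same operation
 * applied to the 64-bit product x*a.
 */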
static struct _pool_chunk *
_pool_chunk_init(
struct _pool_chunk *p,
struct _pool_chunk *prev_chunk,
size_t capacity)
{
p->prev_chunk = prev_chunk;
p->size = 0;
p->capacity = capacity;
return p;
}
static struct _pool_chunk *
_pool_chunk_create(struct pool *pool, size_t size)
{
struct _pool_chunk *p;
p = _cairo_malloc (size + sizeof(struct _pool_chunk));
if (unlikely (NULL == p))
longjmp (*pool->jmp, _cairo_error (CAIRO_STATUS_NO_MEMORY));
return _pool_chunk_init(p, pool->current, size);
}
static void
pool_init(struct pool *pool,
jmp_buf *jmp,
size_t default_capacity,
size_t embedded_capacity)
{
pool->jmp = jmp;
pool->current = pool->sentinel;
pool->first_free = NULL;
pool->default_capacity = default_capacity;
_pool_chunk_init(pool->sentinel, NULL, embedded_capacity);
}
static void
pool_fini(struct pool *pool)
{
struct _pool_chunk *p = pool->current;
do {
while (NULL != p) {
struct _pool_chunk *prev = p->prev_chunk;
if (p != pool->sentinel)
free(p);
p = prev;
}
p = pool->first_free;
pool->first_free = NULL;
} while (NULL != p);
}
/* Satisfy an allocation by first allocating a new large enough chunk
* and adding it to the head of the pool's chunk list. This function
* is called as a fallback if pool_alloc() couldn't do a quick
* allocation from the current chunk in the pool. */
static void *
_pool_alloc_from_new_chunk(
struct pool *pool,
size_t size)
{
struct _pool_chunk *chunk;
void *obj;
size_t capacity;
/* If the allocation is smaller than the default chunk size then
* try getting a chunk off the free list. Force alloc of a new
* chunk for large requests. */
capacity = size;
chunk = NULL;
if (size < pool->default_capacity) {
capacity = pool->default_capacity;
chunk = pool->first_free;
if (chunk) {
pool->first_free = chunk->prev_chunk;
_pool_chunk_init(chunk, pool->current, chunk->capacity);
}
}
if (NULL == chunk)
chunk = _pool_chunk_create (pool, capacity);
pool->current = chunk;
obj = ((unsigned char*)chunk + sizeof(*chunk) + chunk->size);
chunk->size += size;
return obj;
}
/* Allocate size bytes from the pool. The first allocated address
* returned from a pool is aligned to sizeof(void*). Subsequent
* addresses will maintain alignment as long as multiples of void* are
 * allocated.  Returns the address of a new memory area; on allocation
 * failure it does not return, but longjmps out through the pool's jmp_buf.
 * The pool retains ownership of the returned memory. */
inline static void *
pool_alloc (struct pool *pool, size_t size)
{
struct _pool_chunk *chunk = pool->current;
if (size <= chunk->capacity - chunk->size) {
void *obj = ((unsigned char*)chunk + sizeof(*chunk) + chunk->size);
chunk->size += size;
return obj;
} else {
return _pool_alloc_from_new_chunk(pool, size);
}
}
/* Relinquish all pool_alloced memory back to the pool. */
static void
pool_reset (struct pool *pool)
{
/* Transfer all used chunks to the chunk free list. */
struct _pool_chunk *chunk = pool->current;
if (chunk != pool->sentinel) {
while (chunk->prev_chunk != pool->sentinel) {
chunk = chunk->prev_chunk;
}
chunk->prev_chunk = pool->first_free;
pool->first_free = pool->current;
}
/* Reset the sentinel as the current chunk. */
pool->current = pool->sentinel;
pool->sentinel->size = 0;
}
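/*
 * Illustrative sketch (not part of cairo) of the pool protocol used by the
 * cell list and polygon below: allocation failure surfaces through the
 * caller's jmp_buf rather than through return values, so individual
 * pool_alloc() sites need no error checks.  my_pool_t and my_use_pool are
 * hypothetical names used only for this example.
 */
#if 0
typedef struct {
    struct pool base[1];
    struct cell embedded[16];	/* handed out by the sentinel chunk first */
} my_pool_t;

static cairo_status_t
my_use_pool (my_pool_t *pool)
{
    jmp_buf jmp;
    cairo_status_t status = CAIRO_STATUS_SUCCESS;

    pool_init (pool->base, &jmp,
	       256 * sizeof (struct cell),	/* default malloc()ed chunk size */
	       sizeof (pool->embedded));	/* capacity of the embedded array */

    if (setjmp (jmp) == 0) {
	struct cell *c = pool_alloc (pool->base, sizeof (struct cell));
	c->x = 0;			/* never NULL: OOM longjmps instead */
	pool_reset (pool->base);	/* hand every allocation back at once */
    } else {
	status = CAIRO_STATUS_NO_MEMORY; /* reached via longjmp from the pool */
    }

    pool_fini (pool->base);	/* release any malloc()ed chunks */
    return status;
}
#endif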
/* Rewinds the cell list's cursor to the beginning. After rewinding
 * we're good to cell_list_find() the cell for any x coordinate. */
inline static void
cell_list_rewind (struct cell_list *cells)
{
cells->cursor = &cells->head;
}
/* Rewind the cell list if its cursor has been advanced past x. */
inline static void
cell_list_maybe_rewind (struct cell_list *cells, int x)
{
struct cell *tail = cells->cursor;
if (tail->x > x)
cell_list_rewind (cells);
}
static void
cell_list_init(struct cell_list *cells, jmp_buf *jmp)
{
pool_init(cells->cell_pool.base, jmp,
256*sizeof(struct cell),
sizeof(cells->cell_pool.embedded));
cells->tail.next = NULL;
cells->tail.x = INT_MAX;
cells->head.x = INT_MIN;
cells->head.next = &cells->tail;
cell_list_rewind (cells);
}
static void
cell_list_fini(struct cell_list *cells)
{
pool_fini (cells->cell_pool.base);
}
/* Empty the cell list. This is called at the start of every pixel
* row. */
inline static void
cell_list_reset (struct cell_list *cells)
{
cell_list_rewind (cells);
cells->head.next = &cells->tail;
pool_reset (cells->cell_pool.base);
}
static struct cell *
cell_list_alloc (struct cell_list *cells,
struct cell *tail,
int x)
{
struct cell *cell;
cell = pool_alloc (cells->cell_pool.base, sizeof (struct cell));
cell->next = tail->next;
tail->next = cell;
cell->x = x;
cell->uncovered_area = 0;
cell->covered_height = 0;
cell->clipped_height = 0;
return cell;
}
/* Find a cell at the given x-coordinate, allocating a new cell if one is
 * not already present (allocation failure longjmps out through the pool's
 * jmp_buf).  Cells must be found with
* non-decreasing x-coordinate until the cell list is rewound using
* cell_list_rewind(). Ownership of the returned cell is retained by
* the cell list. */
inline static struct cell *
cell_list_find (struct cell_list *cells, int x)
{
struct cell *tail = cells->cursor;
while (1) {
UNROLL3({
if (tail->next->x > x)
break;
tail = tail->next;
});
}
if (tail->x != x)
tail = cell_list_alloc (cells, tail, x);
return cells->cursor = tail;
}
/* Find two cells at x1 and x2. This is exactly equivalent
* to
*
* pair.cell1 = cell_list_find(cells, x1);
* pair.cell2 = cell_list_find(cells, x2);
*
* except with less function call overhead. */
inline static struct cell_pair
cell_list_find_pair(struct cell_list *cells, int x1, int x2)
{
struct cell_pair pair;
pair.cell1 = cells->cursor;
while (1) {
UNROLL3({
if (pair.cell1->next->x > x1)
break;
pair.cell1 = pair.cell1->next;
});
}
if (pair.cell1->x != x1) {
struct cell *cell = pool_alloc (cells->cell_pool.base,
sizeof (struct cell));
cell->x = x1;
cell->uncovered_area = 0;
cell->covered_height = 0;
cell->clipped_height = 0;
cell->next = pair.cell1->next;
pair.cell1->next = cell;
pair.cell1 = cell;
}
pair.cell2 = pair.cell1;
while (1) {
UNROLL3({
if (pair.cell2->next->x > x2)
break;
pair.cell2 = pair.cell2->next;
});
}
if (pair.cell2->x != x2) {
struct cell *cell = pool_alloc (cells->cell_pool.base,
sizeof (struct cell));
cell->uncovered_area = 0;
cell->covered_height = 0;
cell->clipped_height = 0;
cell->x = x2;
cell->next = pair.cell2->next;
pair.cell2->next = cell;
pair.cell2 = cell;
}
cells->cursor = pair.cell2;
return pair;
}
/* Add a subpixel span covering [x1, x2) to the coverage cells. */
inline static void
cell_list_add_subspan(struct cell_list *cells,
grid_scaled_x_t x1,
grid_scaled_x_t x2)
{
int ix1, fx1;
int ix2, fx2;
GRID_X_TO_INT_FRAC(x1, ix1, fx1);
GRID_X_TO_INT_FRAC(x2, ix2, fx2);
if (ix1 != ix2) {
struct cell_pair p;
p = cell_list_find_pair(cells, ix1, ix2);
p.cell1->uncovered_area += 2*fx1;
++p.cell1->covered_height;
p.cell2->uncovered_area -= 2*fx2;
--p.cell2->covered_height;
} else {
struct cell *cell = cell_list_find(cells, ix1);
cell->uncovered_area += 2*(fx1-fx2);
}
}
/* Adds the analytical coverage of an edge crossing the current pixel
* row to the coverage cells and advances the edge's x position to the
* following row.
*
* This function is only called when we know that during this pixel row:
*
* 1) The relative order of all edges on the active list doesn't
* change. In particular, no edges intersect within this row to pixel
* precision.
*
* 2) No new edges start in this row.
*
* 3) No existing edges end mid-row.
*
* This function depends on being called with all edges from the
* active list in the order they appear on the list (i.e. with
* non-decreasing x-coordinate.) */
static void
cell_list_render_edge(
struct cell_list *cells,
struct edge *edge,
int sign)
{
grid_scaled_y_t y1, y2, dy;
grid_scaled_x_t dx;
int ix1, ix2;
grid_scaled_x_t fx1, fx2;
struct quorem x1 = edge->x;
struct quorem x2 = x1;
if (! edge->vertical) {
x2.quo += edge->dxdy_full.quo;
x2.rem += edge->dxdy_full.rem;
if (x2.rem >= 0) {
++x2.quo;
x2.rem -= edge->dy;
}
edge->x = x2;
}
GRID_X_TO_INT_FRAC(x1.quo, ix1, fx1);
GRID_X_TO_INT_FRAC(x2.quo, ix2, fx2);
/* Edge is entirely within a column? */
if (ix1 == ix2) {
/* We always know that ix1 is >= the cell list cursor in this
* case due to the no-intersections precondition. */
struct cell *cell = cell_list_find(cells, ix1);
cell->covered_height += sign*GRID_Y;
cell->uncovered_area += sign*(fx1 + fx2)*GRID_Y;
return;
}
/* Orient the edge left-to-right. */
dx = x2.quo - x1.quo;
if (dx >= 0) {
y1 = 0;
y2 = GRID_Y;
} else {
int tmp;
tmp = ix1; ix1 = ix2; ix2 = tmp;
tmp = fx1; fx1 = fx2; fx2 = tmp;
dx = -dx;
sign = -sign;
y1 = GRID_Y;
y2 = 0;
}
dy = y2 - y1;
/* Add coverage for all pixels [ix1,ix2] on this row crossed
* by the edge. */
{
struct cell_pair pair;
struct quorem y = floored_divrem((GRID_X - fx1)*dy, dx);
/* When rendering a previous edge on the active list we may
* advance the cell list cursor past the leftmost pixel of the
* current edge even though the two edges don't intersect.
* e.g. consider two edges going down and rightwards:
*
* --\_+---\_+-----+-----+----
* \_ \_ | |
* | \_ | \_ | |
* | \_| \_| |
* | \_ \_ |
* ----+-----+-\---+-\---+----
*
* The left edge touches cells past the starting cell of the
* right edge. Fortunately such cases are rare.
*
* The rewinding is never necessary if the current edge stays
* within a single column because we've checked before calling
* this function that the active list order won't change. */
cell_list_maybe_rewind(cells, ix1);
pair = cell_list_find_pair(cells, ix1, ix1+1);
pair.cell1->uncovered_area += sign*y.quo*(GRID_X + fx1);
pair.cell1->covered_height += sign*y.quo;
y.quo += y1;
if (ix1+1 < ix2) {
struct quorem dydx_full = floored_divrem(GRID_X*dy, dx);
struct cell *cell = pair.cell2;
++ix1;
do {
grid_scaled_y_t y_skip = dydx_full.quo;
y.rem += dydx_full.rem;
if (y.rem >= dx) {
++y_skip;
y.rem -= dx;
}
y.quo += y_skip;
y_skip *= sign;
cell->uncovered_area += y_skip*GRID_X;
cell->covered_height += y_skip;
++ix1;
cell = cell_list_find(cells, ix1);
} while (ix1 != ix2);
pair.cell2 = cell;
}
pair.cell2->uncovered_area += sign*(y2 - y.quo)*fx2;
pair.cell2->covered_height += sign*(y2 - y.quo);
}
}
static void
polygon_init (struct polygon *polygon, jmp_buf *jmp)
{
polygon->ymin = polygon->ymax = 0;
polygon->y_buckets = polygon->y_buckets_embedded;
pool_init (polygon->edge_pool.base, jmp,
8192 - sizeof (struct _pool_chunk),
sizeof (polygon->edge_pool.embedded));
}
static void
polygon_fini (struct polygon *polygon)
{
if (polygon->y_buckets != polygon->y_buckets_embedded)
free (polygon->y_buckets);
pool_fini (polygon->edge_pool.base);
}
/* Empties the polygon of all edges. The polygon is then prepared to
* receive new edges and clip them to the vertical range
* [ymin,ymax). */
static cairo_status_t
polygon_reset (struct polygon *polygon,
grid_scaled_y_t ymin,
grid_scaled_y_t ymax)
{
unsigned h = ymax - ymin;
unsigned num_buckets = EDGE_Y_BUCKET_INDEX(ymax + EDGE_Y_BUCKET_HEIGHT-1,
ymin);
pool_reset(polygon->edge_pool.base);
if (unlikely (h > 0x7FFFFFFFU - EDGE_Y_BUCKET_HEIGHT))
goto bail_no_mem; /* even if you could, you wouldn't want to. */
if (polygon->y_buckets != polygon->y_buckets_embedded)
free (polygon->y_buckets);
polygon->y_buckets = polygon->y_buckets_embedded;
if (num_buckets > ARRAY_LENGTH (polygon->y_buckets_embedded)) {
polygon->y_buckets = _cairo_malloc_ab (num_buckets,
sizeof (struct edge *));
if (unlikely (NULL == polygon->y_buckets))
goto bail_no_mem;
}
memset (polygon->y_buckets, 0, num_buckets * sizeof (struct edge *));
polygon->ymin = ymin;
polygon->ymax = ymax;
return CAIRO_STATUS_SUCCESS;
bail_no_mem:
polygon->ymin = 0;
polygon->ymax = 0;
return CAIRO_STATUS_NO_MEMORY;
}
static void
_polygon_insert_edge_into_its_y_bucket(
struct polygon *polygon,
struct edge *e)
{
unsigned ix = EDGE_Y_BUCKET_INDEX(e->ytop, polygon->ymin);
struct edge **ptail = &polygon->y_buckets[ix];
e->next = *ptail;
*ptail = e;
}
inline static void
polygon_add_edge (struct polygon *polygon,
const cairo_edge_t *edge,
int clip)
{
struct edge *e;
grid_scaled_x_t dx;
grid_scaled_y_t dy;
grid_scaled_y_t ytop, ybot;
grid_scaled_y_t ymin = polygon->ymin;
grid_scaled_y_t ymax = polygon->ymax;
assert (edge->bottom > edge->top);
if (unlikely (edge->top >= ymax || edge->bottom <= ymin))
return;
e = pool_alloc (polygon->edge_pool.base, sizeof (struct edge));
dx = edge->line.p2.x - edge->line.p1.x;
dy = edge->line.p2.y - edge->line.p1.y;
e->dy = dy;
e->dir = edge->dir;
e->clip = clip;
ytop = edge->top >= ymin ? edge->top : ymin;
ybot = edge->bottom <= ymax ? edge->bottom : ymax;
e->ytop = ytop;
e->height_left = ybot - ytop;
if (dx == 0) {
e->vertical = TRUE;
e->x.quo = edge->line.p1.x;
e->x.rem = 0;
e->dxdy.quo = 0;
e->dxdy.rem = 0;
e->dxdy_full.quo = 0;
e->dxdy_full.rem = 0;
} else {
e->vertical = FALSE;
e->dxdy = floored_divrem (dx, dy);
if (ytop == edge->line.p1.y) {
e->x.quo = edge->line.p1.x;
e->x.rem = 0;
} else {
e->x = floored_muldivrem (ytop - edge->line.p1.y, dx, dy);
e->x.quo += edge->line.p1.x;
}
if (e->height_left >= GRID_Y) {
e->dxdy_full = floored_muldivrem (GRID_Y, dx, dy);
} else {
e->dxdy_full.quo = 0;
e->dxdy_full.rem = 0;
}
}
_polygon_insert_edge_into_its_y_bucket (polygon, e);
e->x.rem -= dy; /* Bias the remainder for faster
* edge advancement. */
}
static void
active_list_reset (struct active_list *active)
{
active->head = NULL;
active->min_height = 0;
}
static void
active_list_init(struct active_list *active)
{
active_list_reset(active);
}
/*
* Merge two sorted edge lists.
* Input:
* - head_a: The head of the first list.
* - head_b: The head of the second list; head_b cannot be NULL.
* Output:
* Returns the head of the merged list.
*
* Implementation notes:
* To make it fast (in particular, to reduce to an insertion sort whenever
* one of the two input lists only has a single element) we iterate through
* a list until its head becomes greater than the head of the other list,
* then we switch their roles. As soon as one of the two lists is empty, we
* just attach the other one to the current list and exit.
* Writes to memory are only needed to "switch" lists (as it also requires
* attaching to the output list the list which we will be iterating next) and
* to attach the last non-empty list.
*/
static struct edge *
merge_sorted_edges (struct edge *head_a, struct edge *head_b)
{
struct edge *head, **next;
int32_t x;
if (head_a == NULL)
return head_b;
next = &head;
if (head_a->x.quo <= head_b->x.quo) {
head = head_a;
} else {
head = head_b;
goto start_with_b;
}
do {
x = head_b->x.quo;
while (head_a != NULL && head_a->x.quo <= x) {
next = &head_a->next;
head_a = head_a->next;
}
*next = head_b;
if (head_a == NULL)
return head;
start_with_b:
x = head_a->x.quo;
while (head_b != NULL && head_b->x.quo <= x) {
next = &head_b->next;
head_b = head_b->next;
}
*next = head_a;
if (head_b == NULL)
return head;
} while (1);
}
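/* Worked illustration (added for clarity, not from the original source;
 * edge x.quo values only): merging the sorted runs (1 3 7) and (2 4 5)
 * starts on the first list because 1 <= 2, walks it while its head stays
 * <= 2 (taking 1), links the second list in, walks that while its head
 * stays <= 3 (taking 2), links the first list back in, and so on,
 * yielding 1 2 3 4 5 7 with only four pointer writes. */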
/*
* Sort (part of) a list.
* Input:
* - list: The list to be sorted; list cannot be NULL.
* - level: Recursion limit.
* Output:
* - head_out: The head of the sorted list containing the first 2^(level+1) elements of the
* input list; if the input list has fewer elements, head_out will be a sorted list
* containing all the elements of the input list.
* Returns the head of the list of unprocessed elements (NULL if the sorted list contains
* all the elements of the input list).
*
* Implementation notes:
* Special case single element list, unroll/inline the sorting of the first two elements.
* Some tail recursion is used since we iterate on the bottom-up solution of the problem
* (we start with a small sorted list and keep merging other lists of the same size to it).
*/
static struct edge *
sort_edges (struct edge *list,
unsigned int level,
struct edge **head_out)
{
struct edge *head_other, *remaining;
unsigned int i;
head_other = list->next;
/* Single element list -> return */
if (head_other == NULL) {
*head_out = list;
return NULL;
}
/* Unroll the first iteration of the following loop (halves the number of calls to merge_sorted_edges):
* - Initialize remaining to be the list containing the elements after the second in the input list.
* - Initialize *head_out to be the sorted list containing the first two elements.
*/
remaining = head_other->next;
if (list->x.quo <= head_other->x.quo) {
*head_out = list;
/* list->next = head_other; */ /* The input list is already like this. */
head_other->next = NULL;
} else {
*head_out = head_other;
head_other->next = list;
list->next = NULL;
}
for (i = 0; i < level && remaining; i++) {
/* Extract a sorted list of the same size as *head_out
* (2^(i+1) elements) from the list of remaining elements. */
remaining = sort_edges (remaining, i, &head_other);
*head_out = merge_sorted_edges (*head_out, head_other);
}
/* *head_out now contains (at most) 2^(level+1) elements. */
return remaining;
}
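/* Note (added for clarity, not part of the original source): with
 * level == 0 the loop above runs zero times, so only the first two
 * elements are sorted into *head_out and the rest are returned
 * unprocessed.  The callers in this file always pass UINT_MAX as the
 * level, so the remainder returned to them is NULL and *head_out is the
 * fully sorted list. */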
/* Test if the edges on the active list can be safely advanced by a
* full row without intersections or any edges ending. */
inline static int
active_list_can_step_full_row (struct active_list *active)
{
const struct edge *e;
int prev_x = INT_MIN;
/* Recomputes the minimum height of all edges on the active
* list if we have been dropping edges. */
if (active->min_height <= 0) {
int min_height = INT_MAX;
e = active->head;
while (NULL != e) {
if (e->height_left < min_height)
min_height = e->height_left;
e = e->next;
}
active->min_height = min_height;
}
if (active->min_height < GRID_Y)
return 0;
/* Check for intersections as no edges end during the next row. */
e = active->head;
while (NULL != e) {
struct quorem x = e->x;
if (! e->vertical) {
x.quo += e->dxdy_full.quo;
x.rem += e->dxdy_full.rem;
if (x.rem >= 0)
++x.quo;
}
if (x.quo <= prev_x)
return 0;
prev_x = x.quo;
e = e->next;
}
return 1;
}
/* Merges edges on the given subpixel row from the polygon to the
* active_list. */
inline static void
active_list_merge_edges_from_polygon(struct active_list *active,
struct edge **ptail,
grid_scaled_y_t y,
struct polygon *polygon)
{
/* Split off the edges on the current subrow and merge them into
* the active list. */
int min_height = active->min_height;
struct edge *subrow_edges = NULL;
struct edge *tail = *ptail;
do {
struct edge *next = tail->next;
if (y == tail->ytop) {
tail->next = subrow_edges;
subrow_edges = tail;
if (tail->height_left < min_height)
min_height = tail->height_left;
*ptail = next;
} else
ptail = &tail->next;
tail = next;
} while (tail);
if (subrow_edges) {
sort_edges (subrow_edges, UINT_MAX, &subrow_edges);
active->head = merge_sorted_edges (active->head, subrow_edges);
active->min_height = min_height;
}
}
/* Advance the edges on the active list by one subsample row by
* updating their x positions. Drop edges from the list that end. */
inline static void
active_list_substep_edges(struct active_list *active)
{
struct edge **cursor = &active->head;
grid_scaled_x_t prev_x = INT_MIN;
struct edge *unsorted = NULL;
struct edge *edge = *cursor;
do {
UNROLL3({
struct edge *next;
if (NULL == edge)
break;
next = edge->next;
if (--edge->height_left) {
edge->x.quo += edge->dxdy.quo;
edge->x.rem += edge->dxdy.rem;
if (edge->x.rem >= 0) {
++edge->x.quo;
edge->x.rem -= edge->dy;
}
if (edge->x.quo < prev_x) {
*cursor = next;
edge->next = unsorted;
unsorted = edge;
} else {
prev_x = edge->x.quo;
cursor = &edge->next;
}
} else {
*cursor = next;
}
edge = next;
})
} while (1);
if (unsorted) {
sort_edges (unsorted, UINT_MAX, &unsorted);
active->head = merge_sorted_edges (active->head, unsorted);
}
}
inline static void
apply_nonzero_fill_rule_for_subrow (struct active_list *active,
struct cell_list *coverages)
{
struct edge *edge = active->head;
int winding = 0;
int xstart;
int xend;
cell_list_rewind (coverages);
while (NULL != edge) {
xstart = edge->x.quo;
winding = edge->dir;
while (1) {
edge = edge->next;
if (NULL == edge) {
ASSERT_NOT_REACHED;
return;
}
winding += edge->dir;
if (0 == winding) {
if (edge->next == NULL || edge->next->x.quo != edge->x.quo)
break;
}
}
xend = edge->x.quo;
cell_list_add_subspan (coverages, xstart, xend);
edge = edge->next;
}
}
static void
apply_evenodd_fill_rule_for_subrow (struct active_list *active,
struct cell_list *coverages)
{
struct edge *edge = active->head;
int xstart;
int xend;
cell_list_rewind (coverages);
while (NULL != edge) {
xstart = edge->x.quo;
while (1) {
edge = edge->next;
if (NULL == edge) {
ASSERT_NOT_REACHED;
return;
}
if (edge->next == NULL || edge->next->x.quo != edge->x.quo)
break;
edge = edge->next;
}
xend = edge->x.quo;
cell_list_add_subspan (coverages, xstart, xend);
edge = edge->next;
}
}
static void
apply_nonzero_fill_rule_and_step_edges (struct active_list *active,
struct cell_list *coverages)
{
struct edge **cursor = &active->head;
struct edge *left_edge;
left_edge = *cursor;
while (NULL != left_edge) {
struct edge *right_edge;
int winding = left_edge->dir;
left_edge->height_left -= GRID_Y;
if (left_edge->height_left)
cursor = &left_edge->next;
else
*cursor = left_edge->next;
while (1) {
right_edge = *cursor;
if (NULL == right_edge) {
cell_list_render_edge (coverages, left_edge, +1);
return;
}
right_edge->height_left -= GRID_Y;
if (right_edge->height_left)
cursor = &right_edge->next;
else
*cursor = right_edge->next;
winding += right_edge->dir;
if (0 == winding) {
if (right_edge->next == NULL ||
right_edge->next->x.quo != right_edge->x.quo)
{
break;
}
}
if (! right_edge->vertical) {
right_edge->x.quo += right_edge->dxdy_full.quo;
right_edge->x.rem += right_edge->dxdy_full.rem;
if (right_edge->x.rem >= 0) {
++right_edge->x.quo;
right_edge->x.rem -= right_edge->dy;
}
}
}
cell_list_render_edge (coverages, left_edge, +1);
cell_list_render_edge (coverages, right_edge, -1);
left_edge = *cursor;
}
}
static void
apply_evenodd_fill_rule_and_step_edges (struct active_list *active,
struct cell_list *coverages)
{
struct edge **cursor = &active->head;
struct edge *left_edge;
left_edge = *cursor;
while (NULL != left_edge) {
struct edge *right_edge;
left_edge->height_left -= GRID_Y;
if (left_edge->height_left)
cursor = &left_edge->next;
else
*cursor = left_edge->next;
while (1) {
right_edge = *cursor;
if (NULL == right_edge) {
cell_list_render_edge (coverages, left_edge, +1);
return;
}
right_edge->height_left -= GRID_Y;
if (right_edge->height_left)
cursor = &right_edge->next;
else
*cursor = right_edge->next;
if (right_edge->next == NULL ||
right_edge->next->x.quo != right_edge->x.quo)
{
break;
}
if (! right_edge->vertical) {
right_edge->x.quo += right_edge->dxdy_full.quo;
right_edge->x.rem += right_edge->dxdy_full.rem;
if (right_edge->x.rem >= 0) {
++right_edge->x.quo;
right_edge->x.rem -= right_edge->dy;
}
}
}
cell_list_render_edge (coverages, left_edge, +1);
cell_list_render_edge (coverages, right_edge, -1);
left_edge = *cursor;
}
}
static void
_glitter_scan_converter_init(glitter_scan_converter_t *converter, jmp_buf *jmp)
{
polygon_init(converter->polygon, jmp);
active_list_init(converter->active);
cell_list_init(converter->coverages, jmp);
converter->ymin=0;
converter->ymax=0;
}
static void
_glitter_scan_converter_fini(glitter_scan_converter_t *converter)
{
polygon_fini(converter->polygon);
cell_list_fini(converter->coverages);
converter->ymin=0;
converter->ymax=0;
}
static grid_scaled_t
int_to_grid_scaled(int i, int scale)
{
/* Clamp to max/min representable scaled number. */
if (i >= 0) {
if (i >= INT_MAX/scale)
i = INT_MAX/scale;
}
else {
if (i <= INT_MIN/scale)
i = INT_MIN/scale;
}
return i*scale;
}
#define int_to_grid_scaled_x(x) int_to_grid_scaled((x), GRID_X)
#define int_to_grid_scaled_y(x) int_to_grid_scaled((x), GRID_Y)
static cairo_status_t
glitter_scan_converter_reset(glitter_scan_converter_t *converter,
int ymin, int ymax)
{
cairo_status_t status;
converter->ymin = 0;
converter->ymax = 0;
ymin = int_to_grid_scaled_y(ymin);
ymax = int_to_grid_scaled_y(ymax);
active_list_reset(converter->active);
cell_list_reset(converter->coverages);
status = polygon_reset(converter->polygon, ymin, ymax);
if (status)
return status;
converter->ymin = ymin;
converter->ymax = ymax;
return CAIRO_STATUS_SUCCESS;
}
/* INPUT_TO_GRID_X/Y (in_coord, out_grid_scaled, grid_scale)
* These macros convert an input coordinate in the client's
* device space to the rasterisation grid.
*/
/* Gah.. this bit of ugly defines INPUT_TO_GRID_X/Y so as to use
* shifts if possible, and something saneish if not.
*/
#if !defined(INPUT_TO_GRID_Y) && defined(GRID_Y_BITS) && GRID_Y_BITS <= GLITTER_INPUT_BITS
# define INPUT_TO_GRID_Y(in, out) (out) = (in) >> (GLITTER_INPUT_BITS - GRID_Y_BITS)
#else
# define INPUT_TO_GRID_Y(in, out) INPUT_TO_GRID_general(in, out, GRID_Y)
#endif
#if !defined(INPUT_TO_GRID_X) && defined(GRID_X_BITS) && GRID_X_BITS <= GLITTER_INPUT_BITS
# define INPUT_TO_GRID_X(in, out) (out) = (in) >> (GLITTER_INPUT_BITS - GRID_X_BITS)
#else
# define INPUT_TO_GRID_X(in, out) INPUT_TO_GRID_general(in, out, GRID_X)
#endif
#define INPUT_TO_GRID_general(in, out, grid_scale) do { \
long long tmp__ = (long long)(grid_scale) * (in); \
tmp__ >>= GLITTER_INPUT_BITS; \
(out) = tmp__; \
} while (0)
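/* Worked example (illustrative values, not taken from this file's
 * configuration): with GLITTER_INPUT_BITS == 8 and GRID_X_BITS == 4 the
 * shift form maps an input of 384 (1.5 pixels in 24.8 fixed point) to
 * 384 >> 4 == 24, i.e. 1.5 * 16 grid units.  With a non-power-of-two
 * scale such as GRID_Y == 15 the general form computes
 * (15 * 384) >> 8 == 22, i.e. 1.5 * 15 truncated towards zero. */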
static void
glitter_scan_converter_add_edge (glitter_scan_converter_t *converter,
const cairo_edge_t *edge,
int clip)
{
cairo_edge_t e;
INPUT_TO_GRID_Y (edge->top, e.top);
INPUT_TO_GRID_Y (edge->bottom, e.bottom);
if (e.top >= e.bottom)
return;
/* XXX: possible overflows if GRID_X/Y > 2**GLITTER_INPUT_BITS */
INPUT_TO_GRID_Y (edge->line.p1.y, e.line.p1.y);
INPUT_TO_GRID_Y (edge->line.p2.y, e.line.p2.y);
if (e.line.p1.y == e.line.p2.y)
return;
INPUT_TO_GRID_X (edge->line.p1.x, e.line.p1.x);
INPUT_TO_GRID_X (edge->line.p2.x, e.line.p2.x);
e.dir = edge->dir;
polygon_add_edge (converter->polygon, &e, clip);
}
static cairo_bool_t
active_list_is_vertical (struct active_list *active)
{
struct edge *e;
for (e = active->head; e != NULL; e = e->next) {
if (! e->vertical)
return FALSE;
}
return TRUE;
}
static void
step_edges (struct active_list *active, int count)
{
struct edge **cursor = &active->head;
struct edge *edge;
for (edge = *cursor; edge != NULL; edge = *cursor) {
edge->height_left -= GRID_Y * count;
if (edge->height_left)
cursor = &edge->next;
else
*cursor = edge->next;
}
}
static cairo_status_t
blit_coverages (struct cell_list *cells,
cairo_span_renderer_t *renderer,
struct pool *span_pool,
int y, int height)
{
struct cell *cell = cells->head.next;
int prev_x = -1;
int cover = 0, last_cover = 0;
int clip = 0;
cairo_half_open_span_t *spans;
unsigned num_spans;
assert (cell != &cells->tail);
/* Count number of cells remaining. */
{
struct cell *next = cell;
num_spans = 2;
while (next->next) {
next = next->next;
++num_spans;
}
num_spans = 2*num_spans;
}
/* Allocate enough spans for the row. */
pool_reset (span_pool);
spans = pool_alloc (span_pool, sizeof(spans[0])*num_spans);
num_spans = 0;
/* Form the spans from the coverages and areas. */
for (; cell->next; cell = cell->next) {
int x = cell->x;
int area;
if (x > prev_x && cover != last_cover) {
spans[num_spans].x = prev_x;
spans[num_spans].coverage = GRID_AREA_TO_ALPHA (cover);
spans[num_spans].inverse = 0;
last_cover = cover;
++num_spans;
}
cover += cell->covered_height*GRID_X*2;
clip += cell->covered_height*GRID_X*2;
area = cover - cell->uncovered_area;
if (area != last_cover) {
spans[num_spans].x = x;
spans[num_spans].coverage = GRID_AREA_TO_ALPHA (area);
spans[num_spans].inverse = 0;
last_cover = area;
++num_spans;
}
prev_x = x+1;
}
/* Dump them into the renderer. */
return renderer->render_rows (renderer, y, height, spans, num_spans);
}
static void
glitter_scan_converter_render(glitter_scan_converter_t *converter,
int nonzero_fill,
cairo_span_renderer_t *span_renderer,
struct pool *span_pool)
{
int i, j;
int ymax_i = converter->ymax / GRID_Y;
int ymin_i = converter->ymin / GRID_Y;
int h = ymax_i - ymin_i;
struct polygon *polygon = converter->polygon;
struct cell_list *coverages = converter->coverages;
struct active_list *active = converter->active;
/* Render each pixel row. */
for (i = 0; i < h; i = j) {
int do_full_step = 0;
j = i + 1;
/* Determine if we can ignore this row or use the full pixel
* stepper. */
if (GRID_Y == EDGE_Y_BUCKET_HEIGHT && ! polygon->y_buckets[i]) {
if (! active->head) {
for (; j < h && ! polygon->y_buckets[j]; j++)
;
continue;
}
do_full_step = active_list_can_step_full_row (active);
}
if (do_full_step) {
/* Step by a full pixel row's worth. */
if (nonzero_fill)
apply_nonzero_fill_rule_and_step_edges (active, coverages);
else
apply_evenodd_fill_rule_and_step_edges (active, coverages);
if (active_list_is_vertical (active)) {
while (j < h &&
polygon->y_buckets[j] == NULL &&
active->min_height >= 2*GRID_Y)
{
active->min_height -= GRID_Y;
j++;
}
if (j != i + 1)
step_edges (active, j - (i + 1));
}
} else {
grid_scaled_y_t suby;
/* Subsample this row. */
for (suby = 0; suby < GRID_Y; suby++) {
grid_scaled_y_t y = (i+ymin_i)*GRID_Y + suby;
if (polygon->y_buckets[i]) {
active_list_merge_edges_from_polygon (active,
&polygon->y_buckets[i], y,
polygon);
}
if (nonzero_fill)
apply_nonzero_fill_rule_for_subrow (active, coverages);
else
apply_evenodd_fill_rule_for_subrow (active, coverages);
active_list_substep_edges(active);
}
}
blit_coverages (coverages, span_renderer, span_pool, i+ymin_i, j - i);
cell_list_reset (coverages);
if (! active->head)
active->min_height = INT_MAX;
else
active->min_height -= GRID_Y;
}
}
struct _cairo_clip_tor_scan_converter {
cairo_scan_converter_t base;
glitter_scan_converter_t converter[1];
cairo_fill_rule_t fill_rule;
cairo_antialias_t antialias;
cairo_fill_rule_t clip_fill_rule;
cairo_antialias_t clip_antialias;
jmp_buf jmp;
struct {
struct pool base[1];
cairo_half_open_span_t embedded[32];
} span_pool;
};
typedef struct _cairo_clip_tor_scan_converter cairo_clip_tor_scan_converter_t;
static void
_cairo_clip_tor_scan_converter_destroy (void *converter)
{
cairo_clip_tor_scan_converter_t *self = converter;
if (self == NULL) {
return;
}
_glitter_scan_converter_fini (self->converter);
pool_fini (self->span_pool.base);
free(self);
}
static cairo_status_t
_cairo_clip_tor_scan_converter_generate (void *converter,
cairo_span_renderer_t *renderer)
{
cairo_clip_tor_scan_converter_t *self = converter;
cairo_status_t status;
if ((status = setjmp (self->jmp)))
return _cairo_scan_converter_set_error (self, _cairo_error (status));
glitter_scan_converter_render (self->converter,
self->fill_rule == CAIRO_FILL_RULE_WINDING,
renderer,
self->span_pool.base);
return CAIRO_STATUS_SUCCESS;
}
cairo_scan_converter_t *
_cairo_clip_tor_scan_converter_create (cairo_clip_t *clip,
cairo_polygon_t *polygon,
cairo_fill_rule_t fill_rule,
cairo_antialias_t antialias)
{
cairo_clip_tor_scan_converter_t *self;
cairo_polygon_t clipper;
cairo_status_t status;
int i;
self = calloc (1, sizeof(struct _cairo_clip_tor_scan_converter));
if (unlikely (self == NULL)) {
status = _cairo_error (CAIRO_STATUS_NO_MEMORY);
goto bail_nomem;
}
self->base.destroy = _cairo_clip_tor_scan_converter_destroy;
self->base.generate = _cairo_clip_tor_scan_converter_generate;
pool_init (self->span_pool.base, &self->jmp,
250 * sizeof(self->span_pool.embedded[0]),
sizeof(self->span_pool.embedded));
_glitter_scan_converter_init (self->converter, &self->jmp);
status = glitter_scan_converter_reset (self->converter,
clip->extents.y,
clip->extents.y + clip->extents.height);
if (unlikely (status))
goto bail;
self->fill_rule = fill_rule;
self->antialias = antialias;
for (i = 0; i < polygon->num_edges; i++)
glitter_scan_converter_add_edge (self->converter,
&polygon->edges[i],
FALSE);
status = _cairo_clip_get_polygon (clip,
&clipper,
&self->clip_fill_rule,
&self->clip_antialias);
if (unlikely (status))
goto bail;
for (i = 0; i < clipper.num_edges; i++)
glitter_scan_converter_add_edge (self->converter,
&clipper.edges[i],
TRUE);
_cairo_polygon_fini (&clipper);
return &self->base;
bail:
self->base.destroy(&self->base);
bail_nomem:
return _cairo_scan_converter_create_in_error (status);
}
| 0 |
D://workCode//uploadProject\awtk\3rd\cairo | D://workCode//uploadProject\awtk\3rd\cairo\cairo\cairo-clip.c | /* -*- Mode: c; tab-width: 8; c-basic-offset: 4; indent-tabs-mode: t; -*- */
/* cairo - a vector graphics library with display and print output
*
* Copyright © 2002 University of Southern California
* Copyright © 2005 Red Hat, Inc.
* Copyright © 2009 Chris Wilson
*
* This library is free software; you can redistribute it and/or
* modify it either under the terms of the GNU Lesser General Public
* License version 2.1 as published by the Free Software Foundation
* (the "LGPL") or, at your option, under the terms of the Mozilla
* Public License Version 1.1 (the "MPL"). If you do not alter this
* notice, a recipient may use your version of this file under either
* the MPL or the LGPL.
*
* You should have received a copy of the LGPL along with this library
* in the file COPYING-LGPL-2.1; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA
* You should have received a copy of the MPL along with this library
* in the file COPYING-MPL-1.1
*
* The contents of this file are subject to the Mozilla Public License
* Version 1.1 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
* OF ANY KIND, either express or implied. See the LGPL or the MPL for
* the specific language governing rights and limitations.
*
* The Original Code is the cairo graphics library.
*
* The Initial Developer of the Original Code is University of Southern
* California.
*
* Contributor(s):
* Carl D. Worth <cworth@cworth.org>
* Kristian Høgsberg <krh@redhat.com>
* Chris Wilson <chris@chris-wilson.co.uk>
*/
#include "cairoint.h"
#include "cairo-clip-inline.h"
#include "cairo-clip-private.h"
#include "cairo-error-private.h"
#include "cairo-freed-pool-private.h"
#include "cairo-gstate-private.h"
#include "cairo-path-fixed-private.h"
#include "cairo-pattern-private.h"
#include "cairo-composite-rectangles-private.h"
#include "cairo-region-private.h"
static freed_pool_t clip_path_pool;
static freed_pool_t clip_pool;
const cairo_clip_t __cairo_clip_all;
static cairo_clip_path_t *
_cairo_clip_path_create (cairo_clip_t *clip)
{
cairo_clip_path_t *clip_path;
clip_path = _freed_pool_get (&clip_path_pool);
if (unlikely (clip_path == NULL)) {
clip_path = _cairo_malloc (sizeof (cairo_clip_path_t));
if (unlikely (clip_path == NULL))
return NULL;
}
CAIRO_REFERENCE_COUNT_INIT (&clip_path->ref_count, 1);
clip_path->prev = clip->path;
clip->path = clip_path;
return clip_path;
}
cairo_clip_path_t *
_cairo_clip_path_reference (cairo_clip_path_t *clip_path)
{
assert (CAIRO_REFERENCE_COUNT_HAS_REFERENCE (&clip_path->ref_count));
_cairo_reference_count_inc (&clip_path->ref_count);
return clip_path;
}
void
_cairo_clip_path_destroy (cairo_clip_path_t *clip_path)
{
assert (CAIRO_REFERENCE_COUNT_HAS_REFERENCE (&clip_path->ref_count));
if (! _cairo_reference_count_dec_and_test (&clip_path->ref_count))
return;
_cairo_path_fixed_fini (&clip_path->path);
if (clip_path->prev != NULL)
_cairo_clip_path_destroy (clip_path->prev);
_freed_pool_put (&clip_path_pool, clip_path);
}
cairo_clip_t *
_cairo_clip_create (void)
{
cairo_clip_t *clip;
clip = _freed_pool_get (&clip_pool);
if (unlikely (clip == NULL)) {
clip = _cairo_malloc (sizeof (cairo_clip_t));
if (unlikely (clip == NULL))
return NULL;
}
clip->extents = _cairo_unbounded_rectangle;
clip->path = NULL;
clip->boxes = NULL;
clip->num_boxes = 0;
clip->region = NULL;
clip->is_region = FALSE;
return clip;
}
void
_cairo_clip_destroy (cairo_clip_t *clip)
{
if (clip == NULL || _cairo_clip_is_all_clipped (clip))
return;
if (clip->path != NULL)
_cairo_clip_path_destroy (clip->path);
if (clip->boxes != &clip->embedded_box)
free (clip->boxes);
cairo_region_destroy (clip->region);
_freed_pool_put (&clip_pool, clip);
}
cairo_clip_t *
_cairo_clip_copy (const cairo_clip_t *clip)
{
cairo_clip_t *copy;
if (clip == NULL || _cairo_clip_is_all_clipped (clip))
return (cairo_clip_t *) clip;
copy = _cairo_clip_create ();
if (clip->path)
copy->path = _cairo_clip_path_reference (clip->path);
if (clip->num_boxes) {
if (clip->num_boxes == 1) {
copy->boxes = &copy->embedded_box;
} else {
copy->boxes = _cairo_malloc_ab (clip->num_boxes, sizeof (cairo_box_t));
if (unlikely (copy->boxes == NULL))
return _cairo_clip_set_all_clipped (copy);
}
memcpy (copy->boxes, clip->boxes,
clip->num_boxes * sizeof (cairo_box_t));
copy->num_boxes = clip->num_boxes;
}
copy->extents = clip->extents;
copy->region = cairo_region_reference (clip->region);
copy->is_region = clip->is_region;
return copy;
}
cairo_clip_t *
_cairo_clip_copy_path (const cairo_clip_t *clip)
{
cairo_clip_t *copy;
if (clip == NULL || _cairo_clip_is_all_clipped (clip))
return (cairo_clip_t *) clip;
assert (clip->num_boxes);
copy = _cairo_clip_create ();
copy->extents = clip->extents;
if (clip->path)
copy->path = _cairo_clip_path_reference (clip->path);
return copy;
}
cairo_clip_t *
_cairo_clip_copy_region (const cairo_clip_t *clip)
{
cairo_clip_t *copy;
int i;
if (clip == NULL || _cairo_clip_is_all_clipped (clip))
return (cairo_clip_t *) clip;
assert (clip->num_boxes);
copy = _cairo_clip_create ();
copy->extents = clip->extents;
if (clip->num_boxes == 1) {
copy->boxes = &copy->embedded_box;
} else {
copy->boxes = _cairo_malloc_ab (clip->num_boxes, sizeof (cairo_box_t));
if (unlikely (copy->boxes == NULL))
return _cairo_clip_set_all_clipped (copy);
}
for (i = 0; i < clip->num_boxes; i++) {
copy->boxes[i].p1.x = _cairo_fixed_floor (clip->boxes[i].p1.x);
copy->boxes[i].p1.y = _cairo_fixed_floor (clip->boxes[i].p1.y);
copy->boxes[i].p2.x = _cairo_fixed_ceil (clip->boxes[i].p2.x);
copy->boxes[i].p2.y = _cairo_fixed_ceil (clip->boxes[i].p2.y);
}
copy->num_boxes = clip->num_boxes;
copy->region = cairo_region_reference (clip->region);
copy->is_region = TRUE;
return copy;
}
cairo_clip_t *
_cairo_clip_intersect_path (cairo_clip_t *clip,
const cairo_path_fixed_t *path,
cairo_fill_rule_t fill_rule,
double tolerance,
cairo_antialias_t antialias)
{
cairo_clip_path_t *clip_path;
cairo_status_t status;
cairo_rectangle_int_t extents;
cairo_box_t box;
if (_cairo_clip_is_all_clipped (clip))
return clip;
/* catch the empty clip path */
if (_cairo_path_fixed_fill_is_empty (path))
return _cairo_clip_set_all_clipped (clip);
if (_cairo_path_fixed_is_box (path, &box)) {
if (antialias == CAIRO_ANTIALIAS_NONE) {
box.p1.x = _cairo_fixed_round_down (box.p1.x);
box.p1.y = _cairo_fixed_round_down (box.p1.y);
box.p2.x = _cairo_fixed_round_down (box.p2.x);
box.p2.y = _cairo_fixed_round_down (box.p2.y);
}
return _cairo_clip_intersect_box (clip, &box);
}
if (_cairo_path_fixed_fill_is_rectilinear (path))
return _cairo_clip_intersect_rectilinear_path (clip, path,
fill_rule, antialias);
_cairo_path_fixed_approximate_clip_extents (path, &extents);
if (extents.width == 0 || extents.height == 0)
return _cairo_clip_set_all_clipped (clip);
clip = _cairo_clip_intersect_rectangle (clip, &extents);
if (_cairo_clip_is_all_clipped (clip))
return clip;
clip_path = _cairo_clip_path_create (clip);
if (unlikely (clip_path == NULL))
return _cairo_clip_set_all_clipped (clip);
status = _cairo_path_fixed_init_copy (&clip_path->path, path);
if (unlikely (status))
return _cairo_clip_set_all_clipped (clip);
clip_path->fill_rule = fill_rule;
clip_path->tolerance = tolerance;
clip_path->antialias = antialias;
if (clip->region) {
cairo_region_destroy (clip->region);
clip->region = NULL;
}
clip->is_region = FALSE;
return clip;
}
static cairo_clip_t *
_cairo_clip_intersect_clip_path (cairo_clip_t *clip,
const cairo_clip_path_t *clip_path)
{
if (clip_path->prev)
clip = _cairo_clip_intersect_clip_path (clip, clip_path->prev);
return _cairo_clip_intersect_path (clip,
&clip_path->path,
clip_path->fill_rule,
clip_path->tolerance,
clip_path->antialias);
}
cairo_clip_t *
_cairo_clip_intersect_clip (cairo_clip_t *clip,
const cairo_clip_t *other)
{
if (_cairo_clip_is_all_clipped (clip))
return clip;
if (other == NULL)
return clip;
if (clip == NULL)
return _cairo_clip_copy (other);
if (_cairo_clip_is_all_clipped (other))
return _cairo_clip_set_all_clipped (clip);
if (! _cairo_rectangle_intersect (&clip->extents, &other->extents))
return _cairo_clip_set_all_clipped (clip);
if (other->num_boxes) {
cairo_boxes_t boxes;
_cairo_boxes_init_for_array (&boxes, other->boxes, other->num_boxes);
clip = _cairo_clip_intersect_boxes (clip, &boxes);
}
if (! _cairo_clip_is_all_clipped (clip)) {
if (other->path) {
if (clip->path == NULL)
clip->path = _cairo_clip_path_reference (other->path);
else
clip = _cairo_clip_intersect_clip_path (clip, other->path);
}
}
if (clip->region) {
cairo_region_destroy (clip->region);
clip->region = NULL;
}
clip->is_region = FALSE;
return clip;
}
cairo_bool_t
_cairo_clip_equal (const cairo_clip_t *clip_a,
const cairo_clip_t *clip_b)
{
const cairo_clip_path_t *cp_a, *cp_b;
/* are both all-clipped or no-clip? */
if (clip_a == clip_b)
return TRUE;
/* or just one of them? */
if (clip_a == NULL || clip_b == NULL ||
_cairo_clip_is_all_clipped (clip_a) ||
_cairo_clip_is_all_clipped (clip_b))
{
return FALSE;
}
/* We have a pair of normal clips, check their contents */
if (clip_a->num_boxes != clip_b->num_boxes)
return FALSE;
if (memcmp (clip_a->boxes, clip_b->boxes,
sizeof (cairo_box_t) * clip_a->num_boxes))
return FALSE;
cp_a = clip_a->path;
cp_b = clip_b->path;
while (cp_a && cp_b) {
if (cp_a == cp_b)
return TRUE;
/* XXX compare reduced polygons? */
if (cp_a->antialias != cp_b->antialias)
return FALSE;
if (cp_a->tolerance != cp_b->tolerance)
return FALSE;
if (cp_a->fill_rule != cp_b->fill_rule)
return FALSE;
if (! _cairo_path_fixed_equal (&cp_a->path,
&cp_b->path))
return FALSE;
cp_a = cp_a->prev;
cp_b = cp_b->prev;
}
return cp_a == NULL && cp_b == NULL;
}
static cairo_clip_t *
_cairo_clip_path_copy_with_translation (cairo_clip_t *clip,
cairo_clip_path_t *other_path,
int fx, int fy)
{
cairo_status_t status;
cairo_clip_path_t *clip_path;
if (other_path->prev != NULL)
clip = _cairo_clip_path_copy_with_translation (clip, other_path->prev,
fx, fy);
if (_cairo_clip_is_all_clipped (clip))
return clip;
clip_path = _cairo_clip_path_create (clip);
if (unlikely (clip_path == NULL))
return _cairo_clip_set_all_clipped (clip);
status = _cairo_path_fixed_init_copy (&clip_path->path,
&other_path->path);
if (unlikely (status))
return _cairo_clip_set_all_clipped (clip);
_cairo_path_fixed_translate (&clip_path->path, fx, fy);
clip_path->fill_rule = other_path->fill_rule;
clip_path->tolerance = other_path->tolerance;
clip_path->antialias = other_path->antialias;
return clip;
}
cairo_clip_t *
_cairo_clip_translate (cairo_clip_t *clip, int tx, int ty)
{
int fx, fy, i;
cairo_clip_path_t *clip_path;
if (clip == NULL || _cairo_clip_is_all_clipped (clip))
return clip;
if (tx == 0 && ty == 0)
return clip;
fx = _cairo_fixed_from_int (tx);
fy = _cairo_fixed_from_int (ty);
for (i = 0; i < clip->num_boxes; i++) {
clip->boxes[i].p1.x += fx;
clip->boxes[i].p2.x += fx;
clip->boxes[i].p1.y += fy;
clip->boxes[i].p2.y += fy;
}
clip->extents.x += tx;
clip->extents.y += ty;
if (clip->path == NULL)
return clip;
clip_path = clip->path;
clip->path = NULL;
clip = _cairo_clip_path_copy_with_translation (clip, clip_path, fx, fy);
_cairo_clip_path_destroy (clip_path);
return clip;
}
static cairo_status_t
_cairo_path_fixed_add_box (cairo_path_fixed_t *path,
const cairo_box_t *box)
{
cairo_status_t status;
status = _cairo_path_fixed_move_to (path, box->p1.x, box->p1.y);
if (unlikely (status))
return status;
status = _cairo_path_fixed_line_to (path, box->p2.x, box->p1.y);
if (unlikely (status))
return status;
status = _cairo_path_fixed_line_to (path, box->p2.x, box->p2.y);
if (unlikely (status))
return status;
status = _cairo_path_fixed_line_to (path, box->p1.x, box->p2.y);
if (unlikely (status))
return status;
return _cairo_path_fixed_close_path (path);
}
static cairo_status_t
_cairo_path_fixed_init_from_boxes (cairo_path_fixed_t *path,
const cairo_boxes_t *boxes)
{
cairo_status_t status;
const struct _cairo_boxes_chunk *chunk;
int i;
_cairo_path_fixed_init (path);
if (boxes->num_boxes == 0)
return CAIRO_STATUS_SUCCESS;
for (chunk = &boxes->chunks; chunk; chunk = chunk->next) {
for (i = 0; i < chunk->count; i++) {
status = _cairo_path_fixed_add_box (path, &chunk->base[i]);
if (unlikely (status)) {
_cairo_path_fixed_fini (path);
return status;
}
}
}
return CAIRO_STATUS_SUCCESS;
}
static cairo_clip_t *
_cairo_clip_intersect_clip_path_transformed (cairo_clip_t *clip,
const cairo_clip_path_t *clip_path,
const cairo_matrix_t *m)
{
cairo_path_fixed_t path;
if (clip_path->prev)
clip = _cairo_clip_intersect_clip_path_transformed (clip,
clip_path->prev,
m);
if (_cairo_path_fixed_init_copy (&path, &clip_path->path))
return _cairo_clip_set_all_clipped (clip);
_cairo_path_fixed_transform (&path, m);
clip = _cairo_clip_intersect_path (clip,
&path,
clip_path->fill_rule,
clip_path->tolerance,
clip_path->antialias);
_cairo_path_fixed_fini (&path);
return clip;
}
cairo_clip_t *
_cairo_clip_transform (cairo_clip_t *clip, const cairo_matrix_t *m)
{
cairo_clip_t *copy;
if (clip == NULL || _cairo_clip_is_all_clipped (clip))
return clip;
if (_cairo_matrix_is_translation (m))
return _cairo_clip_translate (clip, m->x0, m->y0);
copy = _cairo_clip_create ();
if (clip->num_boxes) {
cairo_path_fixed_t path;
cairo_boxes_t boxes;
_cairo_boxes_init_for_array (&boxes, clip->boxes, clip->num_boxes);
_cairo_path_fixed_init_from_boxes (&path, &boxes);
_cairo_path_fixed_transform (&path, m);
copy = _cairo_clip_intersect_path (copy, &path,
CAIRO_FILL_RULE_WINDING,
0.1,
CAIRO_ANTIALIAS_DEFAULT);
_cairo_path_fixed_fini (&path);
}
if (clip->path)
copy = _cairo_clip_intersect_clip_path_transformed (copy, clip->path,m);
_cairo_clip_destroy (clip);
return copy;
}
cairo_clip_t *
_cairo_clip_copy_with_translation (const cairo_clip_t *clip, int tx, int ty)
{
cairo_clip_t *copy;
int fx, fy, i;
if (clip == NULL || _cairo_clip_is_all_clipped (clip))
return (cairo_clip_t *)clip;
if (tx == 0 && ty == 0)
return _cairo_clip_copy (clip);
copy = _cairo_clip_create ();
if (copy == NULL)
return _cairo_clip_set_all_clipped (copy);
fx = _cairo_fixed_from_int (tx);
fy = _cairo_fixed_from_int (ty);
if (clip->num_boxes) {
if (clip->num_boxes == 1) {
copy->boxes = &copy->embedded_box;
} else {
copy->boxes = _cairo_malloc_ab (clip->num_boxes, sizeof (cairo_box_t));
if (unlikely (copy->boxes == NULL))
return _cairo_clip_set_all_clipped (copy);
}
for (i = 0; i < clip->num_boxes; i++) {
copy->boxes[i].p1.x = clip->boxes[i].p1.x + fx;
copy->boxes[i].p2.x = clip->boxes[i].p2.x + fx;
copy->boxes[i].p1.y = clip->boxes[i].p1.y + fy;
copy->boxes[i].p2.y = clip->boxes[i].p2.y + fy;
}
copy->num_boxes = clip->num_boxes;
}
copy->extents = clip->extents;
copy->extents.x += tx;
copy->extents.y += ty;
if (clip->path == NULL)
return copy;
return _cairo_clip_path_copy_with_translation (copy, clip->path, fx, fy);
}
cairo_bool_t
_cairo_clip_contains_extents (const cairo_clip_t *clip,
const cairo_composite_rectangles_t *extents)
{
const cairo_rectangle_int_t *rect;
rect = extents->is_bounded ? &extents->bounded : &extents->unbounded;
return _cairo_clip_contains_rectangle (clip, rect);
}
void
_cairo_debug_print_clip (FILE *stream, const cairo_clip_t *clip)
{
int i;
if (clip == NULL) {
fprintf (stream, "no clip\n");
return;
}
if (_cairo_clip_is_all_clipped (clip)) {
fprintf (stream, "clip: all-clipped\n");
return;
}
fprintf (stream, "clip:\n");
fprintf (stream, " extents: (%d, %d) x (%d, %d), is-region? %d",
clip->extents.x, clip->extents.y,
clip->extents.width, clip->extents.height,
clip->is_region);
fprintf (stream, " num_boxes = %d\n", clip->num_boxes);
for (i = 0; i < clip->num_boxes; i++) {
fprintf (stream, " [%d] = (%f, %f), (%f, %f)\n", i,
_cairo_fixed_to_double (clip->boxes[i].p1.x),
_cairo_fixed_to_double (clip->boxes[i].p1.y),
_cairo_fixed_to_double (clip->boxes[i].p2.x),
_cairo_fixed_to_double (clip->boxes[i].p2.y));
}
if (clip->path) {
cairo_clip_path_t *clip_path = clip->path;
do {
fprintf (stream, "path: aa=%d, tolerance=%f, rule=%d: ",
clip_path->antialias,
clip_path->tolerance,
clip_path->fill_rule);
_cairo_debug_print_path (stream, &clip_path->path);
fprintf (stream, "\n");
} while ((clip_path = clip_path->prev) != NULL);
}
}
const cairo_rectangle_int_t *
_cairo_clip_get_extents (const cairo_clip_t *clip)
{
if (clip == NULL)
return &_cairo_unbounded_rectangle;
if (_cairo_clip_is_all_clipped (clip))
return &_cairo_empty_rectangle;
return &clip->extents;
}
const cairo_rectangle_list_t _cairo_rectangles_nil =
{ CAIRO_STATUS_NO_MEMORY, NULL, 0 };
static const cairo_rectangle_list_t _cairo_rectangles_not_representable =
{ CAIRO_STATUS_CLIP_NOT_REPRESENTABLE, NULL, 0 };
static cairo_bool_t
_cairo_clip_int_rect_to_user (cairo_gstate_t *gstate,
cairo_rectangle_int_t *clip_rect,
cairo_rectangle_t *user_rect)
{
cairo_bool_t is_tight;
double x1 = clip_rect->x;
double y1 = clip_rect->y;
double x2 = clip_rect->x + (int) clip_rect->width;
double y2 = clip_rect->y + (int) clip_rect->height;
_cairo_gstate_backend_to_user_rectangle (gstate,
&x1, &y1, &x2, &y2,
&is_tight);
user_rect->x = x1;
user_rect->y = y1;
user_rect->width = x2 - x1;
user_rect->height = y2 - y1;
return is_tight;
}
cairo_rectangle_list_t *
_cairo_rectangle_list_create_in_error (cairo_status_t status)
{
cairo_rectangle_list_t *list;
if (status == CAIRO_STATUS_NO_MEMORY)
return (cairo_rectangle_list_t*) &_cairo_rectangles_nil;
if (status == CAIRO_STATUS_CLIP_NOT_REPRESENTABLE)
return (cairo_rectangle_list_t*) &_cairo_rectangles_not_representable;
list = _cairo_malloc (sizeof (*list));
if (unlikely (list == NULL)) {
status = _cairo_error (CAIRO_STATUS_NO_MEMORY);
return (cairo_rectangle_list_t*) &_cairo_rectangles_nil;
}
list->status = status;
list->rectangles = NULL;
list->num_rectangles = 0;
return list;
}
cairo_rectangle_list_t *
_cairo_clip_copy_rectangle_list (cairo_clip_t *clip, cairo_gstate_t *gstate)
{
#define ERROR_LIST(S) _cairo_rectangle_list_create_in_error (_cairo_error (S))
cairo_rectangle_list_t *list;
cairo_rectangle_t *rectangles = NULL;
cairo_region_t *region = NULL;
int n_rects = 0;
int i;
if (clip == NULL)
return ERROR_LIST (CAIRO_STATUS_CLIP_NOT_REPRESENTABLE);
if (_cairo_clip_is_all_clipped (clip))
goto DONE;
if (! _cairo_clip_is_region (clip))
return ERROR_LIST (CAIRO_STATUS_CLIP_NOT_REPRESENTABLE);
region = _cairo_clip_get_region (clip);
if (region == NULL)
return ERROR_LIST (CAIRO_STATUS_NO_MEMORY);
n_rects = cairo_region_num_rectangles (region);
if (n_rects) {
rectangles = _cairo_malloc_ab (n_rects, sizeof (cairo_rectangle_t));
if (unlikely (rectangles == NULL)) {
return ERROR_LIST (CAIRO_STATUS_NO_MEMORY);
}
for (i = 0; i < n_rects; ++i) {
cairo_rectangle_int_t clip_rect;
cairo_region_get_rectangle (region, i, &clip_rect);
if (! _cairo_clip_int_rect_to_user (gstate,
&clip_rect,
&rectangles[i]))
{
free (rectangles);
return ERROR_LIST (CAIRO_STATUS_CLIP_NOT_REPRESENTABLE);
}
}
}
DONE:
list = _cairo_malloc (sizeof (cairo_rectangle_list_t));
if (unlikely (list == NULL)) {
free (rectangles);
return ERROR_LIST (CAIRO_STATUS_NO_MEMORY);
}
list->status = CAIRO_STATUS_SUCCESS;
list->rectangles = rectangles;
list->num_rectangles = n_rects;
return list;
#undef ERROR_LIST
}
/**
* cairo_rectangle_list_destroy:
* @rectangle_list: a rectangle list, as obtained from cairo_copy_clip_rectangle_list()
*
* Unconditionally frees @rectangle_list and all associated
* references. After this call, the @rectangle_list pointer must not
* be dereferenced.
*
* Since: 1.4
**/
void
cairo_rectangle_list_destroy (cairo_rectangle_list_t *rectangle_list)
{
if (rectangle_list == NULL || rectangle_list == &_cairo_rectangles_nil ||
rectangle_list == &_cairo_rectangles_not_representable)
return;
free (rectangle_list->rectangles);
free (rectangle_list);
}
void
_cairo_clip_reset_static_data (void)
{
_freed_pool_reset (&clip_path_pool);
_freed_pool_reset (&clip_pool);
}
| 0 |
D://workCode//uploadProject\awtk\3rd\cairo | D://workCode//uploadProject\awtk\3rd\cairo\cairo\cairo-color.c | /* cairo - a vector graphics library with display and print output
*
* Copyright © 2002 University of Southern California
* Copyright © 2005 Red Hat, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it either under the terms of the GNU Lesser General Public
* License version 2.1 as published by the Free Software Foundation
* (the "LGPL") or, at your option, under the terms of the Mozilla
* Public License Version 1.1 (the "MPL"). If you do not alter this
* notice, a recipient may use your version of this file under either
* the MPL or the LGPL.
*
* You should have received a copy of the LGPL along with this library
* in the file COPYING-LGPL-2.1; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA
* You should have received a copy of the MPL along with this library
* in the file COPYING-MPL-1.1
*
* The contents of this file are subject to the Mozilla Public License
* Version 1.1 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
* OF ANY KIND, either express or implied. See the LGPL or the MPL for
* the specific language governing rights and limitations.
*
* The Original Code is the cairo graphics library.
*
* The Initial Developer of the Original Code is University of Southern
* California.
*
* Contributor(s):
* Carl D. Worth <cworth@cworth.org>
*/
#include "cairoint.h"
static cairo_color_t const cairo_color_white = {
1.0, 1.0, 1.0, 1.0,
0xffff, 0xffff, 0xffff, 0xffff
};
static cairo_color_t const cairo_color_black = {
0.0, 0.0, 0.0, 1.0,
0x0, 0x0, 0x0, 0xffff
};
static cairo_color_t const cairo_color_transparent = {
0.0, 0.0, 0.0, 0.0,
0x0, 0x0, 0x0, 0x0
};
static cairo_color_t const cairo_color_magenta = {
1.0, 0.0, 1.0, 1.0,
0xffff, 0x0, 0xffff, 0xffff
};
const cairo_color_t *
_cairo_stock_color (cairo_stock_t stock)
{
switch (stock) {
case CAIRO_STOCK_WHITE:
return &cairo_color_white;
case CAIRO_STOCK_BLACK:
return &cairo_color_black;
case CAIRO_STOCK_TRANSPARENT:
return &cairo_color_transparent;
case CAIRO_STOCK_NUM_COLORS:
default:
ASSERT_NOT_REACHED;
/* If the user can get here somehow, give a color that indicates a
* problem. */
return &cairo_color_magenta;
}
}
/* Convert a double in [0.0, 1.0] to an integer in [0, 65535]
* The conversion is designed to choose the integer i such that
* i / 65535.0 is as close as possible to the input value.
*/
uint16_t
_cairo_color_double_to_short (double d)
{
return d * 65535.0 + 0.5;
}
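/* Worked example (added for illustration): for d == 0.5 the expression
 * is 0.5 * 65535.0 + 0.5 == 32768.0, which truncates to 32768;
 * 32768 / 65535.0 ~= 0.500004 is indeed the closest representable value
 * to 0.5.  For d == 1.0 the result is 65535.5, truncating to 65535, so
 * the full input range maps without overflow. */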
static void
_cairo_color_compute_shorts (cairo_color_t *color)
{
color->red_short = _cairo_color_double_to_short (color->red * color->alpha);
color->green_short = _cairo_color_double_to_short (color->green * color->alpha);
color->blue_short = _cairo_color_double_to_short (color->blue * color->alpha);
color->alpha_short = _cairo_color_double_to_short (color->alpha);
}
void
_cairo_color_init_rgba (cairo_color_t *color,
double red, double green, double blue,
double alpha)
{
color->red = red;
color->green = green;
color->blue = blue;
color->alpha = alpha;
_cairo_color_compute_shorts (color);
}
void
_cairo_color_multiply_alpha (cairo_color_t *color,
double alpha)
{
color->alpha *= alpha;
_cairo_color_compute_shorts (color);
}
void
_cairo_color_get_rgba (cairo_color_t *color,
double *red,
double *green,
double *blue,
double *alpha)
{
*red = color->red;
*green = color->green;
*blue = color->blue;
*alpha = color->alpha;
}
void
_cairo_color_get_rgba_premultiplied (cairo_color_t *color,
double *red,
double *green,
double *blue,
double *alpha)
{
*red = color->red * color->alpha;
*green = color->green * color->alpha;
*blue = color->blue * color->alpha;
*alpha = color->alpha;
}
/* NB: This function works both for unmultiplied and premultiplied colors */
cairo_bool_t
_cairo_color_equal (const cairo_color_t *color_a,
const cairo_color_t *color_b)
{
if (color_a == color_b)
return TRUE;
if (color_a->alpha_short != color_b->alpha_short)
return FALSE;
if (color_a->alpha_short == 0)
return TRUE;
return color_a->red_short == color_b->red_short &&
color_a->green_short == color_b->green_short &&
color_a->blue_short == color_b->blue_short;
}
cairo_bool_t
_cairo_color_stop_equal (const cairo_color_stop_t *color_a,
const cairo_color_stop_t *color_b)
{
if (color_a == color_b)
return TRUE;
return color_a->alpha_short == color_b->alpha_short &&
color_a->red_short == color_b->red_short &&
color_a->green_short == color_b->green_short &&
color_a->blue_short == color_b->blue_short;
}
cairo_content_t
_cairo_color_get_content (const cairo_color_t *color)
{
if (CAIRO_COLOR_IS_OPAQUE (color))
return CAIRO_CONTENT_COLOR;
if (color->red_short == 0 &&
color->green_short == 0 &&
color->blue_short == 0)
{
return CAIRO_CONTENT_ALPHA;
}
return CAIRO_CONTENT_COLOR_ALPHA;
}
| 0 |
D://workCode//uploadProject\awtk\3rd\cairo | D://workCode//uploadProject\awtk\3rd\cairo\cairo\cairo-combsort-inline.h | /*
* Copyright © 2008 Chris Wilson
*
* This library is free software; you can redistribute it and/or
* modify it either under the terms of the GNU Lesser General Public
* License version 2.1 as published by the Free Software Foundation
* (the "LGPL") or, at your option, under the terms of the Mozilla
* Public License Version 1.1 (the "MPL"). If you do not alter this
* notice, a recipient may use your version of this file under either
* the MPL or the LGPL.
*
* You should have received a copy of the LGPL along with this library
* in the file COPYING-LGPL-2.1; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA
* You should have received a copy of the MPL along with this library
* in the file COPYING-MPL-1.1
*
* The contents of this file are subject to the Mozilla Public License
* Version 1.1 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
* OF ANY KIND, either express or implied. See the LGPL or the MPL for
* the specific language governing rights and limitations.
*
* The Original Code is the cairo graphics library.
*
* The Initial Developer of the Original Code is Chris Wilson
*
* Contributor(s):
* Chris Wilson <chris@chris-wilson.co.uk>
*/
/* This fragment implements a comb sort (specifically combsort11) */
#ifndef _HAVE_CAIRO_COMBSORT_NEWGAP
#define _HAVE_CAIRO_COMBSORT_NEWGAP
static inline unsigned int
_cairo_combsort_newgap (unsigned int gap)
{
gap = 10 * gap / 13;
if (gap == 9 || gap == 10)
gap = 11;
if (gap < 1)
gap = 1;
return gap;
}
#endif
#define CAIRO_COMBSORT_DECLARE(NAME, TYPE, CMP) \
static void \
NAME (TYPE *base, unsigned int nmemb) \
{ \
unsigned int gap = nmemb; \
unsigned int i, j; \
int swapped; \
do { \
gap = _cairo_combsort_newgap (gap); \
swapped = gap > 1; \
for (i = 0; i < nmemb-gap ; i++) { \
j = i + gap; \
if (CMP (base[i], base[j]) > 0 ) { \
TYPE tmp; \
tmp = base[i]; \
base[i] = base[j]; \
base[j] = tmp; \
swapped = 1; \
} \
} \
} while (swapped); \
}
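/* Usage sketch (hypothetical names, for illustration only):
 *
 *     static int _cmp_int (int a, int b) { return a - b; }
 *     CAIRO_COMBSORT_DECLARE (_sort_ints, int, _cmp_int)
 *
 *     int values[4] = { 3, 1, 2, 0 };
 *     _sort_ints (values, 4);
 *
 * CMP may be any function or macro usable as CMP (base[i], base[j])
 * that returns > 0 when the two elements are out of order. */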
#define CAIRO_COMBSORT_DECLARE_WITH_DATA(NAME, TYPE, CMP) \
static void \
NAME (TYPE *base, unsigned int nmemb, void *data) \
{ \
unsigned int gap = nmemb; \
unsigned int i, j; \
int swapped; \
do { \
gap = _cairo_combsort_newgap (gap); \
swapped = gap > 1; \
for (i = 0; i < nmemb-gap ; i++) { \
j = i + gap; \
if (CMP (base[i], base[j], data) > 0 ) { \
TYPE tmp; \
tmp = base[i]; \
base[i] = base[j]; \
base[j] = tmp; \
swapped = 1; \
} \
} \
} while (swapped); \
}
| 0 |
D://workCode//uploadProject\awtk\3rd\cairo | D://workCode//uploadProject\awtk\3rd\cairo\cairo\cairo-compiler-private.h | /* cairo - a vector graphics library with display and print output
*
* Copyright © 2002 University of Southern California
* Copyright © 2005 Red Hat, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it either under the terms of the GNU Lesser General Public
* License version 2.1 as published by the Free Software Foundation
* (the "LGPL") or, at your option, under the terms of the Mozilla
* Public License Version 1.1 (the "MPL"). If you do not alter this
* notice, a recipient may use your version of this file under either
* the MPL or the LGPL.
*
* You should have received a copy of the LGPL along with this library
* in the file COPYING-LGPL-2.1; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA
* You should have received a copy of the MPL along with this library
* in the file COPYING-MPL-1.1
*
* The contents of this file are subject to the Mozilla Public License
* Version 1.1 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
* OF ANY KIND, either express or implied. See the LGPL or the MPL for
* the specific language governing rights and limitations.
*
* The Original Code is the cairo graphics library.
*
* The Initial Developer of the Original Code is University of Southern
* California.
*
* Contributor(s):
* Carl D. Worth <cworth@cworth.org>
*/
#ifndef CAIRO_COMPILER_PRIVATE_H
#define CAIRO_COMPILER_PRIVATE_H
#include "cairo.h"
#if HAVE_CONFIG_H
#include "config.h"
#endif
/* Size in bytes of buffer to use off the stack per function.
* Mostly used by text functions. For larger allocations, they'll
* malloc(). */
#ifndef CAIRO_STACK_BUFFER_SIZE
#define CAIRO_STACK_BUFFER_SIZE (512 * sizeof (int))
#endif
#define CAIRO_STACK_ARRAY_LENGTH(T) (CAIRO_STACK_BUFFER_SIZE / sizeof(T))
/*
* The goal of this block is to define the following macros for
* providing faster linkage to functions in the public API for calls
* from within cairo.
*
* slim_hidden_proto(f)
* slim_hidden_proto_no_warn(f)
*
* Declares `f' as a library internal function and hides the
* function from the global symbol table. This macro must be
* expanded after `f' has been declared with a prototype but before
* any calls to the function are seen by the compiler. The no_warn
* variant inhibits warnings about the return value being unused at
* call sites. The macro works by renaming `f' to an internal name
* in the symbol table and hiding that. As far as cairo internal
* calls are concerned they're calling a library internal function
* and thus don't need to bounce via the procedure linkage table (PLT).
*
* slim_hidden_def(f)
*
* Exports `f' back to the global symbol table. This macro must be
* expanded right after the function definition and only for symbols
* hidden previously with slim_hidden_proto(). The macro works by
* adding a global entry to the symbol table which points at the
* internal name of `f' created by slim_hidden_proto().
*
* Functions in the public API which aren't called by the library
* don't need to be hidden and re-exported using the slim hidden
* macros.
*/
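/* Usage sketch (hypothetical function name, for illustration only):
 *
 *     In a private header, after the public prototype:
 *         slim_hidden_proto (cairo_example_func);
 *     Immediately after the function's definition in its .c file:
 *         slim_hidden_def (cairo_example_func);
 *
 * Internal callers then bind to the hidden alias directly instead of
 * bouncing through the PLT. */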
#if __GNUC__ >= 3 && defined(__ELF__) && !defined(__sun)
# define slim_hidden_proto(name) slim_hidden_proto1(name, slim_hidden_int_name(name)) cairo_private
# define slim_hidden_proto_no_warn(name) slim_hidden_proto1(name, slim_hidden_int_name(name)) cairo_private_no_warn
# define slim_hidden_def(name) slim_hidden_def1(name, slim_hidden_int_name(name))
# define slim_hidden_int_name(name) INT_##name
# define slim_hidden_proto1(name, internal) \
extern __typeof (name) name \
__asm__ (slim_hidden_asmname (internal))
# define slim_hidden_def1(name, internal) \
extern __typeof (name) EXT_##name __asm__(slim_hidden_asmname(name)) \
__attribute__((__alias__(slim_hidden_asmname(internal))))
# define slim_hidden_ulp slim_hidden_ulp1(__USER_LABEL_PREFIX__)
# define slim_hidden_ulp1(x) slim_hidden_ulp2(x)
# define slim_hidden_ulp2(x) #x
# define slim_hidden_asmname(name) slim_hidden_asmname1(name)
# define slim_hidden_asmname1(name) slim_hidden_ulp #name
#else
# define slim_hidden_proto(name) int _cairo_dummy_prototype(void)
# define slim_hidden_proto_no_warn(name) int _cairo_dummy_prototype(void)
# define slim_hidden_def(name) int _cairo_dummy_prototype(void)
#endif
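/* Illustrative sketch (cairo_foo_bar is a hypothetical public function, not
 * one declared in this tree): after the public prototype, an internal header
 * adds
 *
 *     slim_hidden_proto (cairo_foo_bar);
 *
 * and the translation unit providing the definition adds, right after it,
 *
 *     void
 *     cairo_foo_bar (void)
 *     {
 *         ...
 *     }
 *     slim_hidden_def (cairo_foo_bar);
 *
 * Internal callers then resolve to the hidden INT_cairo_foo_bar symbol and
 * skip the PLT, while slim_hidden_def() re-exports the public name as an
 * alias for it.
 */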
#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ > 4)
#define CAIRO_PRINTF_FORMAT(fmt_index, va_index) \
__attribute__((__format__(__printf__, fmt_index, va_index)))
#else
#define CAIRO_PRINTF_FORMAT(fmt_index, va_index)
#endif
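/* Illustrative sketch (hypothetical prototype): a printf-style helper is
 * annotated so that gcc can check its format string against the variadic
 * arguments, e.g.
 *
 *     cairo_private void
 *     _cairo_debug_print (const char *fmt, ...) CAIRO_PRINTF_FORMAT (1, 2);
 *
 * where 1 is the position of the format string and 2 the position of the
 * first argument to check against it.
 */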
/* slim_internal.h */
#define CAIRO_HAS_HIDDEN_SYMBOLS 1
#if (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 3)) && \
(defined(__ELF__) || defined(__APPLE__)) && \
!defined(__sun)
#define cairo_private_no_warn __attribute__((__visibility__("hidden")))
#elif defined(__SUNPRO_C) && (__SUNPRO_C >= 0x550)
#define cairo_private_no_warn __hidden
#else /* not gcc >= 3.3 and not Sun Studio >= 8 */
#define cairo_private_no_warn
#undef CAIRO_HAS_HIDDEN_SYMBOLS
#endif
#ifndef WARN_UNUSED_RESULT
#define WARN_UNUSED_RESULT
#endif
/* Add attribute(warn_unused_result) if supported */
#define cairo_warn WARN_UNUSED_RESULT
#define cairo_private cairo_private_no_warn cairo_warn
/* This macro allows us to deprecate a function by providing an alias
for the old function name to the new function name. With this
macro, binary compatibility is preserved. The macro only works on
some platforms --- tough.
Meanwhile, new definitions in the public header file break the
source code so that it will no longer link against the old
symbols. Instead it will give a descriptive error message
indicating that the old function has been deprecated by the new
function.
*/
#if __GNUC__ >= 2 && defined(__ELF__)
# define CAIRO_FUNCTION_ALIAS(old, new) \
extern __typeof (new) old \
__asm__ ("" #old) \
__attribute__((__alias__("" #new)))
#else
# define CAIRO_FUNCTION_ALIAS(old, new)
#endif
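/* Illustrative sketch (hypothetical names): if cairo_old_name() has been
 * superseded by cairo_new_name(), the implementation file can keep the old
 * binary symbol alive with
 *
 *     CAIRO_FUNCTION_ALIAS (cairo_old_name, cairo_new_name)
 *
 * which emits cairo_old_name as an ELF alias of cairo_new_name, preserving
 * binary compatibility while the public headers steer newly compiled code to
 * the new name.
 */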
/*
* Cairo uses the following function attributes in order to improve the
* generated code (effectively by manual inter-procedural analysis).
*
* 'cairo_pure': The function is only allowed to read from its arguments
* and global memory (i.e. following a pointer argument or
* accessing a shared variable). The return value should
* only depend on its arguments, and for an identical set of
* arguments should return the same value.
*
* 'cairo_const': The function is only allowed to read from its arguments.
* It is not allowed to access global memory. The return
 * value should only depend on its arguments, and for an
* identical set of arguments should return the same value.
* This is currently the most strict function attribute.
*
* Both these function attributes allow gcc to perform CSE and
 * constant-folding, with 'cairo_const' also guaranteeing that pointer contents
* do not change across the function call.
*/
#if __GNUC__ >= 3
#define cairo_pure __attribute__((pure))
#define cairo_const __attribute__((const))
#define cairo_always_inline inline __attribute__((always_inline))
#else
#define cairo_pure
#define cairo_const
#define cairo_always_inline inline
#endif
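/* Illustrative sketch (hypothetical prototypes): the attributes are appended
 * to declarations of side-effect-free helpers, e.g.
 *
 *     cairo_private int
 *     _cairo_format_bits (cairo_format_t format) cairo_const;
 *
 *     cairo_private cairo_bool_t
 *     _cairo_pattern_looks_opaque (const cairo_pattern_t *pattern) cairo_pure;
 *
 * The 'cairo_const' helper may inspect only the argument values themselves,
 * while the 'cairo_pure' helper may also follow the pointer and read global
 * state; neither may write to memory.
 */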
#if defined(__GNUC__) && (__GNUC__ > 2) && defined(__OPTIMIZE__)
#define likely(expr) (__builtin_expect (!!(expr), 1))
#define unlikely(expr) (__builtin_expect (!!(expr), 0))
#else
#define likely(expr) (expr)
#define unlikely(expr) (expr)
#endif
#ifndef __GNUC__
#undef __attribute__
#define __attribute__(x)
#endif
#if (defined(__WIN32__) && !defined(__WINE__)) || defined(_MSC_VER)
#define access _access
#define fdopen _fdopen
#define hypot _hypot
#define pclose _pclose
#define popen _popen
#define snprintf _snprintf
#define strdup _strdup
#define unlink _unlink
#define vsnprintf _vsnprintf
#endif
#ifdef _MSC_VER
#ifndef __cplusplus
#undef inline
#define inline __inline
#endif
#endif
#if defined(_MSC_VER) && defined(_M_IX86)
/* When compiling with /Gy and /OPT:ICF, identical functions will be folded
   together. The CAIRO_ENSURE_UNIQUE macro ensures that a function is always
   unique and will never be folded into another one. Something like this might
   eventually be needed for GCC but it seems fine for now. */
#define CAIRO_ENSURE_UNIQUE \
do { \
char file[] = __FILE__; \
__asm { \
__asm jmp __internal_skip_line_no \
__asm _emit (__COUNTER__ & 0xff) \
__asm _emit ((__COUNTER__>>8) & 0xff) \
__asm _emit ((__COUNTER__>>16) & 0xff)\
__asm _emit ((__COUNTER__>>24) & 0xff)\
__asm lea eax, dword ptr file \
__asm __internal_skip_line_no: \
}; \
} while (0)
#else
#define CAIRO_ENSURE_UNIQUE do { } while (0)
#endif
#ifdef __STRICT_ANSI__
#undef inline
#define inline __inline__
#endif
#endif
| 0 |
D://workCode//uploadProject\awtk\3rd\cairo | D://workCode//uploadProject\awtk\3rd\cairo\cairo\cairo-composite-rectangles-private.h | /* cairo - a vector graphics library with display and print output
*
* Copyright © 2009 Intel Corporation
*
* This library is free software; you can redistribute it and/or
* modify it either under the terms of the GNU Lesser General Public
* License version 2.1 as published by the Free Software Foundation
* (the "LGPL") or, at your option, under the terms of the Mozilla
* Public License Version 1.1 (the "MPL"). If you do not alter this
* notice, a recipient may use your version of this file under either
* the MPL or the LGPL.
*
* You should have received a copy of the LGPL along with this library
* in the file COPYING-LGPL-2.1; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA
* You should have received a copy of the MPL along with this library
* in the file COPYING-MPL-1.1
*
* The contents of this file are subject to the Mozilla Public License
* Version 1.1 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
* OF ANY KIND, either express or implied. See the LGPL or the MPL for
* the specific language governing rights and limitations.
*
* The Original Code is the cairo graphics library.
*
* The Initial Developer of the Original Code is University of Southern
* California.
*
* Contributor(s):
 *  Chris Wilson <chris@chris-wilson.co.uk>
*/
#ifndef CAIRO_COMPOSITE_RECTANGLES_PRIVATE_H
#define CAIRO_COMPOSITE_RECTANGLES_PRIVATE_H
#include "cairo-types-private.h"
#include "cairo-error-private.h"
#include "cairo-pattern-private.h"
CAIRO_BEGIN_DECLS
/* Rectangles that take part in a composite operation.
 *
 * The source and mask track the extents of the respective patterns in device
 * space. The unbounded rectangle is essentially the clip rectangle (the
 * destination intersected with the clip), and the intersection of all of them
 * is the bounded rectangle, the minimum extents the operation may require.
 * Whether the operation is actually bounded by the source and/or mask is
 * tracked in the is_bounded flags.
 */
struct _cairo_composite_rectangles {
cairo_surface_t *surface;
cairo_operator_t op;
cairo_rectangle_int_t source;
cairo_rectangle_int_t mask;
cairo_rectangle_int_t destination;
cairo_rectangle_int_t bounded; /* source? IN mask? IN unbounded */
cairo_rectangle_int_t unbounded; /* destination IN clip */
uint32_t is_bounded;
cairo_rectangle_int_t source_sample_area;
cairo_rectangle_int_t mask_sample_area;
cairo_pattern_union_t source_pattern;
cairo_pattern_union_t mask_pattern;
const cairo_pattern_t *original_source_pattern;
const cairo_pattern_t *original_mask_pattern;
cairo_clip_t *clip; /* clip will be reduced to the minimal container */
};
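/* Illustrative usage sketch (hypothetical surrounding code; it mirrors the
 * pattern used by cairo-compositor.c): a compositor initialises the extents,
 * bails out early if there is nothing to do, renders within the computed
 * rectangles and then releases the reduced clip:
 *
 *     cairo_composite_rectangles_t extents;
 *     cairo_int_status_t status;
 *
 *     status = _cairo_composite_rectangles_init_for_paint (&extents, surface,
 *                                                          op, source, clip);
 *     if (unlikely (status))
 *         return status;
 *
 *     ... render using extents.bounded, extents.unbounded and extents.clip ...
 *
 *     _cairo_composite_rectangles_fini (&extents);
 *
 * The init functions return CAIRO_INT_STATUS_NOTHING_TO_DO when the operation
 * is completely clipped away.
 */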
cairo_private cairo_int_status_t
_cairo_composite_rectangles_init_for_paint (cairo_composite_rectangles_t *extents,
cairo_surface_t *surface,
cairo_operator_t op,
const cairo_pattern_t *source,
const cairo_clip_t *clip);
cairo_private cairo_int_status_t
_cairo_composite_rectangles_init_for_mask (cairo_composite_rectangles_t *extents,
cairo_surface_t *surface,
cairo_operator_t op,
const cairo_pattern_t *source,
const cairo_pattern_t *mask,
const cairo_clip_t *clip);
cairo_private cairo_int_status_t
_cairo_composite_rectangles_init_for_stroke (cairo_composite_rectangles_t *extents,
cairo_surface_t *surface,
cairo_operator_t op,
const cairo_pattern_t *source,
const cairo_path_fixed_t *path,
const cairo_stroke_style_t *style,
const cairo_matrix_t *ctm,
const cairo_clip_t *clip);
cairo_private cairo_int_status_t
_cairo_composite_rectangles_init_for_fill (cairo_composite_rectangles_t *extents,
cairo_surface_t *surface,
cairo_operator_t op,
const cairo_pattern_t *source,
const cairo_path_fixed_t *path,
const cairo_clip_t *clip);
cairo_private cairo_int_status_t
_cairo_composite_rectangles_init_for_boxes (cairo_composite_rectangles_t *extents,
cairo_surface_t *surface,
cairo_operator_t op,
const cairo_pattern_t *source,
const cairo_boxes_t *boxes,
const cairo_clip_t *clip);
cairo_private cairo_int_status_t
_cairo_composite_rectangles_init_for_polygon (cairo_composite_rectangles_t *extents,
cairo_surface_t *surface,
cairo_operator_t op,
const cairo_pattern_t *source,
const cairo_polygon_t *polygon,
const cairo_clip_t *clip);
cairo_private cairo_int_status_t
_cairo_composite_rectangles_init_for_glyphs (cairo_composite_rectangles_t *extents,
cairo_surface_t *surface,
cairo_operator_t op,
const cairo_pattern_t *source,
cairo_scaled_font_t *scaled_font,
cairo_glyph_t *glyphs,
int num_glyphs,
const cairo_clip_t *clip,
cairo_bool_t *overlap);
cairo_private cairo_int_status_t
_cairo_composite_rectangles_intersect_source_extents (cairo_composite_rectangles_t *extents,
const cairo_box_t *box);
cairo_private cairo_int_status_t
_cairo_composite_rectangles_intersect_mask_extents (cairo_composite_rectangles_t *extents,
const cairo_box_t *box);
cairo_private cairo_bool_t
_cairo_composite_rectangles_can_reduce_clip (cairo_composite_rectangles_t *composite,
cairo_clip_t *clip);
cairo_private cairo_int_status_t
_cairo_composite_rectangles_add_to_damage (cairo_composite_rectangles_t *composite,
cairo_boxes_t *damage);
cairo_private void
_cairo_composite_rectangles_fini (cairo_composite_rectangles_t *extents);
CAIRO_END_DECLS
#endif /* CAIRO_COMPOSITE_RECTANGLES_PRIVATE_H */
| 0 |
D://workCode//uploadProject\awtk\3rd\cairo | D://workCode//uploadProject\awtk\3rd\cairo\cairo\cairo-composite-rectangles.c | /* cairo - a vector graphics library with display and print output
*
* Copyright © 2009 Intel Corporation
*
* This library is free software; you can redistribute it and/or
* modify it either under the terms of the GNU Lesser General Public
* License version 2.1 as published by the Free Software Foundation
* (the "LGPL") or, at your option, under the terms of the Mozilla
* Public License Version 1.1 (the "MPL"). If you do not alter this
* notice, a recipient may use your version of this file under either
* the MPL or the LGPL.
*
* You should have received a copy of the LGPL along with this library
* in the file COPYING-LGPL-2.1; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA
* You should have received a copy of the MPL along with this library
* in the file COPYING-MPL-1.1
*
* The contents of this file are subject to the Mozilla Public License
* Version 1.1 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
* OF ANY KIND, either express or implied. See the LGPL or the MPL for
* the specific language governing rights and limitations.
*
* The Original Code is the cairo graphics library.
*
* The Initial Developer of the Original Code is Red Hat, Inc.
*
* Contributor(s):
* Chris Wilson <chris@chris-wilson.co.uk>
*/
#include "cairoint.h"
#include "cairo-clip-inline.h"
#include "cairo-error-private.h"
#include "cairo-composite-rectangles-private.h"
#include "cairo-pattern-private.h"
/* A collection of routines to facilitate writing compositors. */
void _cairo_composite_rectangles_fini (cairo_composite_rectangles_t *extents)
{
_cairo_clip_destroy (extents->clip);
}
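/* Reduce a pattern for compositing: take a cheap static copy, compute the
 * effective filter, and, if the pattern matrix is a translation that pixman
 * can handle for that filter, snap the offsets to integers so later stages
 * can treat it as a simple translation. */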
static void
_cairo_composite_reduce_pattern (const cairo_pattern_t *src,
cairo_pattern_union_t *dst)
{
int tx, ty;
_cairo_pattern_init_static_copy (&dst->base, src);
if (dst->base.type == CAIRO_PATTERN_TYPE_SOLID)
return;
dst->base.filter = _cairo_pattern_analyze_filter (&dst->base);
tx = ty = 0;
if (_cairo_matrix_is_pixman_translation (&dst->base.matrix,
dst->base.filter,
&tx, &ty))
{
dst->base.matrix.x0 = tx;
dst->base.matrix.y0 = ty;
}
}
static inline cairo_bool_t
_cairo_composite_rectangles_init (cairo_composite_rectangles_t *extents,
cairo_surface_t *surface,
cairo_operator_t op,
const cairo_pattern_t *source,
const cairo_clip_t *clip)
{
if (_cairo_clip_is_all_clipped (clip))
return FALSE;
extents->surface = surface;
extents->op = op;
_cairo_surface_get_extents (surface, &extents->destination);
extents->clip = NULL;
extents->unbounded = extents->destination;
if (clip && ! _cairo_rectangle_intersect (&extents->unbounded,
_cairo_clip_get_extents (clip)))
return FALSE;
extents->bounded = extents->unbounded;
extents->is_bounded = _cairo_operator_bounded_by_either (op);
extents->original_source_pattern = source;
_cairo_composite_reduce_pattern (source, &extents->source_pattern);
_cairo_pattern_get_extents (&extents->source_pattern.base,
&extents->source,
surface->is_vector);
if (extents->is_bounded & CAIRO_OPERATOR_BOUND_BY_SOURCE) {
if (! _cairo_rectangle_intersect (&extents->bounded, &extents->source))
return FALSE;
}
extents->original_mask_pattern = NULL;
extents->mask_pattern.base.type = CAIRO_PATTERN_TYPE_SOLID;
extents->mask_pattern.solid.color.alpha = 1.; /* XXX full initialisation? */
extents->mask_pattern.solid.color.alpha_short = 0xffff;
return TRUE;
}
cairo_int_status_t
_cairo_composite_rectangles_init_for_paint (cairo_composite_rectangles_t *extents,
cairo_surface_t *surface,
cairo_operator_t op,
const cairo_pattern_t *source,
const cairo_clip_t *clip)
{
if (! _cairo_composite_rectangles_init (extents,
surface, op, source, clip))
{
return CAIRO_INT_STATUS_NOTHING_TO_DO;
}
extents->mask = extents->destination;
extents->clip = _cairo_clip_reduce_for_composite (clip, extents);
if (_cairo_clip_is_all_clipped (extents->clip))
return CAIRO_INT_STATUS_NOTHING_TO_DO;
if (! _cairo_rectangle_intersect (&extents->unbounded,
_cairo_clip_get_extents (extents->clip)))
return CAIRO_INT_STATUS_NOTHING_TO_DO;
if (extents->source_pattern.base.type != CAIRO_PATTERN_TYPE_SOLID)
_cairo_pattern_sampled_area (&extents->source_pattern.base,
&extents->bounded,
&extents->source_sample_area);
return CAIRO_STATUS_SUCCESS;
}
static cairo_int_status_t
_cairo_composite_rectangles_intersect (cairo_composite_rectangles_t *extents,
const cairo_clip_t *clip)
{
if ((!_cairo_rectangle_intersect (&extents->bounded, &extents->mask)) &&
(extents->is_bounded & CAIRO_OPERATOR_BOUND_BY_MASK))
return CAIRO_INT_STATUS_NOTHING_TO_DO;
if (extents->is_bounded == (CAIRO_OPERATOR_BOUND_BY_MASK | CAIRO_OPERATOR_BOUND_BY_SOURCE)) {
extents->unbounded = extents->bounded;
} else if (extents->is_bounded & CAIRO_OPERATOR_BOUND_BY_MASK) {
if (!_cairo_rectangle_intersect (&extents->unbounded, &extents->mask))
return CAIRO_INT_STATUS_NOTHING_TO_DO;
}
extents->clip = _cairo_clip_reduce_for_composite (clip, extents);
if (_cairo_clip_is_all_clipped (extents->clip))
return CAIRO_INT_STATUS_NOTHING_TO_DO;
if (! _cairo_rectangle_intersect (&extents->unbounded,
_cairo_clip_get_extents (extents->clip)))
return CAIRO_INT_STATUS_NOTHING_TO_DO;
if (! _cairo_rectangle_intersect (&extents->bounded,
_cairo_clip_get_extents (extents->clip)) &&
extents->is_bounded & CAIRO_OPERATOR_BOUND_BY_MASK)
{
return CAIRO_INT_STATUS_NOTHING_TO_DO;
}
if (extents->source_pattern.base.type != CAIRO_PATTERN_TYPE_SOLID)
_cairo_pattern_sampled_area (&extents->source_pattern.base,
&extents->bounded,
&extents->source_sample_area);
if (extents->mask_pattern.base.type != CAIRO_PATTERN_TYPE_SOLID) {
_cairo_pattern_sampled_area (&extents->mask_pattern.base,
&extents->bounded,
&extents->mask_sample_area);
if (extents->mask_sample_area.width == 0 ||
extents->mask_sample_area.height == 0) {
_cairo_composite_rectangles_fini (extents);
return CAIRO_INT_STATUS_NOTHING_TO_DO;
}
}
return CAIRO_STATUS_SUCCESS;
}
cairo_int_status_t
_cairo_composite_rectangles_intersect_source_extents (cairo_composite_rectangles_t *extents,
const cairo_box_t *box)
{
cairo_rectangle_int_t rect;
cairo_clip_t *clip;
_cairo_box_round_to_rectangle (box, &rect);
if (rect.x == extents->source.x &&
rect.y == extents->source.y &&
rect.width == extents->source.width &&
rect.height == extents->source.height)
{
return CAIRO_INT_STATUS_SUCCESS;
}
_cairo_rectangle_intersect (&extents->source, &rect);
rect = extents->bounded;
if (! _cairo_rectangle_intersect (&extents->bounded, &extents->source) &&
extents->is_bounded & CAIRO_OPERATOR_BOUND_BY_SOURCE)
return CAIRO_INT_STATUS_NOTHING_TO_DO;
if (rect.width == extents->bounded.width &&
rect.height == extents->bounded.height)
return CAIRO_INT_STATUS_SUCCESS;
if (extents->is_bounded == (CAIRO_OPERATOR_BOUND_BY_MASK | CAIRO_OPERATOR_BOUND_BY_SOURCE)) {
extents->unbounded = extents->bounded;
} else if (extents->is_bounded & CAIRO_OPERATOR_BOUND_BY_MASK) {
if (!_cairo_rectangle_intersect (&extents->unbounded, &extents->mask))
return CAIRO_INT_STATUS_NOTHING_TO_DO;
}
clip = extents->clip;
extents->clip = _cairo_clip_reduce_for_composite (clip, extents);
if (clip != extents->clip)
_cairo_clip_destroy (clip);
if (_cairo_clip_is_all_clipped (extents->clip))
return CAIRO_INT_STATUS_NOTHING_TO_DO;
if (! _cairo_rectangle_intersect (&extents->unbounded,
_cairo_clip_get_extents (extents->clip)))
return CAIRO_INT_STATUS_NOTHING_TO_DO;
if (extents->source_pattern.base.type != CAIRO_PATTERN_TYPE_SOLID)
_cairo_pattern_sampled_area (&extents->source_pattern.base,
&extents->bounded,
&extents->source_sample_area);
if (extents->mask_pattern.base.type != CAIRO_PATTERN_TYPE_SOLID) {
_cairo_pattern_sampled_area (&extents->mask_pattern.base,
&extents->bounded,
&extents->mask_sample_area);
if (extents->mask_sample_area.width == 0 ||
extents->mask_sample_area.height == 0)
return CAIRO_INT_STATUS_NOTHING_TO_DO;
}
return CAIRO_INT_STATUS_SUCCESS;
}
cairo_int_status_t
_cairo_composite_rectangles_intersect_mask_extents (cairo_composite_rectangles_t *extents,
const cairo_box_t *box)
{
cairo_rectangle_int_t mask;
cairo_clip_t *clip;
_cairo_box_round_to_rectangle (box, &mask);
if (mask.x == extents->mask.x &&
mask.y == extents->mask.y &&
mask.width == extents->mask.width &&
mask.height == extents->mask.height)
{
return CAIRO_INT_STATUS_SUCCESS;
}
_cairo_rectangle_intersect (&extents->mask, &mask);
mask = extents->bounded;
if (! _cairo_rectangle_intersect (&extents->bounded, &extents->mask) &&
extents->is_bounded & CAIRO_OPERATOR_BOUND_BY_MASK)
return CAIRO_INT_STATUS_NOTHING_TO_DO;
if (mask.width == extents->bounded.width &&
mask.height == extents->bounded.height)
return CAIRO_INT_STATUS_SUCCESS;
if (extents->is_bounded == (CAIRO_OPERATOR_BOUND_BY_MASK | CAIRO_OPERATOR_BOUND_BY_SOURCE)) {
extents->unbounded = extents->bounded;
} else if (extents->is_bounded & CAIRO_OPERATOR_BOUND_BY_MASK) {
if (!_cairo_rectangle_intersect (&extents->unbounded, &extents->mask))
return CAIRO_INT_STATUS_NOTHING_TO_DO;
}
clip = extents->clip;
extents->clip = _cairo_clip_reduce_for_composite (clip, extents);
if (clip != extents->clip)
_cairo_clip_destroy (clip);
if (_cairo_clip_is_all_clipped (extents->clip))
return CAIRO_INT_STATUS_NOTHING_TO_DO;
if (! _cairo_rectangle_intersect (&extents->unbounded,
_cairo_clip_get_extents (extents->clip)))
return CAIRO_INT_STATUS_NOTHING_TO_DO;
if (extents->source_pattern.base.type != CAIRO_PATTERN_TYPE_SOLID)
_cairo_pattern_sampled_area (&extents->source_pattern.base,
&extents->bounded,
&extents->source_sample_area);
if (extents->mask_pattern.base.type != CAIRO_PATTERN_TYPE_SOLID) {
_cairo_pattern_sampled_area (&extents->mask_pattern.base,
&extents->bounded,
&extents->mask_sample_area);
if (extents->mask_sample_area.width == 0 ||
extents->mask_sample_area.height == 0)
return CAIRO_INT_STATUS_NOTHING_TO_DO;
}
return CAIRO_INT_STATUS_SUCCESS;
}
cairo_int_status_t
_cairo_composite_rectangles_init_for_mask (cairo_composite_rectangles_t *extents,
cairo_surface_t *surface,
cairo_operator_t op,
const cairo_pattern_t *source,
const cairo_pattern_t *mask,
const cairo_clip_t *clip)
{
if (! _cairo_composite_rectangles_init (extents,
surface, op, source, clip))
{
return CAIRO_INT_STATUS_NOTHING_TO_DO;
}
extents->original_mask_pattern = mask;
_cairo_composite_reduce_pattern (mask, &extents->mask_pattern);
_cairo_pattern_get_extents (&extents->mask_pattern.base, &extents->mask, surface->is_vector);
return _cairo_composite_rectangles_intersect (extents, clip);
}
cairo_int_status_t
_cairo_composite_rectangles_init_for_stroke (cairo_composite_rectangles_t *extents,
cairo_surface_t *surface,
cairo_operator_t op,
const cairo_pattern_t *source,
const cairo_path_fixed_t *path,
const cairo_stroke_style_t *style,
const cairo_matrix_t *ctm,
const cairo_clip_t *clip)
{
if (! _cairo_composite_rectangles_init (extents,
surface, op, source, clip))
{
return CAIRO_INT_STATUS_NOTHING_TO_DO;
}
_cairo_path_fixed_approximate_stroke_extents (path, style, ctm, surface->is_vector, &extents->mask);
return _cairo_composite_rectangles_intersect (extents, clip);
}
cairo_int_status_t
_cairo_composite_rectangles_init_for_fill (cairo_composite_rectangles_t *extents,
cairo_surface_t *surface,
cairo_operator_t op,
const cairo_pattern_t *source,
const cairo_path_fixed_t *path,
const cairo_clip_t *clip)
{
if (! _cairo_composite_rectangles_init (extents,
surface, op, source, clip))
{
return CAIRO_INT_STATUS_NOTHING_TO_DO;
}
_cairo_path_fixed_approximate_fill_extents (path, &extents->mask);
return _cairo_composite_rectangles_intersect (extents, clip);
}
cairo_int_status_t
_cairo_composite_rectangles_init_for_polygon (cairo_composite_rectangles_t *extents,
cairo_surface_t *surface,
cairo_operator_t op,
const cairo_pattern_t *source,
const cairo_polygon_t *polygon,
const cairo_clip_t *clip)
{
if (! _cairo_composite_rectangles_init (extents,
surface, op, source, clip))
{
return CAIRO_INT_STATUS_NOTHING_TO_DO;
}
_cairo_box_round_to_rectangle (&polygon->extents, &extents->mask);
return _cairo_composite_rectangles_intersect (extents, clip);
}
cairo_int_status_t
_cairo_composite_rectangles_init_for_boxes (cairo_composite_rectangles_t *extents,
cairo_surface_t *surface,
cairo_operator_t op,
const cairo_pattern_t *source,
const cairo_boxes_t *boxes,
const cairo_clip_t *clip)
{
cairo_box_t box;
if (! _cairo_composite_rectangles_init (extents,
surface, op, source, clip))
{
return CAIRO_INT_STATUS_NOTHING_TO_DO;
}
_cairo_boxes_extents (boxes, &box);
_cairo_box_round_to_rectangle (&box, &extents->mask);
return _cairo_composite_rectangles_intersect (extents, clip);
}
cairo_int_status_t
_cairo_composite_rectangles_init_for_glyphs (cairo_composite_rectangles_t *extents,
cairo_surface_t *surface,
cairo_operator_t op,
const cairo_pattern_t *source,
cairo_scaled_font_t *scaled_font,
cairo_glyph_t *glyphs,
int num_glyphs,
const cairo_clip_t *clip,
cairo_bool_t *overlap)
{
cairo_status_t status;
if (! _cairo_composite_rectangles_init (extents, surface, op, source, clip))
return CAIRO_INT_STATUS_NOTHING_TO_DO;
/* Computing the exact bbox and the overlap is expensive.
* First perform a cheap test to see if the glyphs are all clipped out.
*/
if (extents->is_bounded & CAIRO_OPERATOR_BOUND_BY_MASK &&
_cairo_scaled_font_glyph_approximate_extents (scaled_font,
glyphs, num_glyphs,
&extents->mask))
{
if (! _cairo_rectangle_intersect (&extents->bounded, &extents->mask))
return CAIRO_INT_STATUS_NOTHING_TO_DO;
}
status = _cairo_scaled_font_glyph_device_extents (scaled_font,
glyphs, num_glyphs,
&extents->mask,
overlap);
if (unlikely (status))
return status;
if (overlap && *overlap &&
scaled_font->options.antialias == CAIRO_ANTIALIAS_NONE &&
_cairo_pattern_is_opaque_solid (&extents->source_pattern.base))
{
*overlap = FALSE;
}
return _cairo_composite_rectangles_intersect (extents, clip);
}
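/* Return TRUE if the clip is redundant for this composite operation, i.e. it
 * already contains the (bounded) operation extents and therefore does not
 * need to be applied separately. */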
cairo_bool_t
_cairo_composite_rectangles_can_reduce_clip (cairo_composite_rectangles_t *composite,
cairo_clip_t *clip)
{
cairo_rectangle_int_t extents;
cairo_box_t box;
if (clip == NULL)
return TRUE;
extents = composite->destination;
if (composite->is_bounded & CAIRO_OPERATOR_BOUND_BY_SOURCE)
_cairo_rectangle_intersect (&extents, &composite->source);
if (composite->is_bounded & CAIRO_OPERATOR_BOUND_BY_MASK)
_cairo_rectangle_intersect (&extents, &composite->mask);
_cairo_box_from_rectangle (&box, &extents);
return _cairo_clip_contains_box (clip, &box);
}
cairo_int_status_t
_cairo_composite_rectangles_add_to_damage (cairo_composite_rectangles_t *composite,
cairo_boxes_t *damage)
{
cairo_int_status_t status;
int n;
for (n = 0; n < composite->clip->num_boxes; n++) {
status = _cairo_boxes_add (damage,
CAIRO_ANTIALIAS_NONE,
&composite->clip->boxes[n]);
if (unlikely (status))
return status;
}
return CAIRO_INT_STATUS_SUCCESS;
}
| 0 |
D://workCode//uploadProject\awtk\3rd\cairo | D://workCode//uploadProject\awtk\3rd\cairo\cairo\cairo-compositor-private.h | /* -*- Mode: c; tab-width: 8; c-basic-offset: 4; indent-tabs-mode: t; -*- */
/* cairo - a vector graphics library with display and print output
*
* Copyright © 2011 Intel Corporation
*
* This library is free software; you can redistribute it and/or
* modify it either under the terms of the GNU Lesser General Public
* License version 2.1 as published by the Free Software Foundation
* (the "LGPL") or, at your option, under the terms of the Mozilla
* Public License Version 1.1 (the "MPL"). If you do not alter this
* notice, a recipient may use your version of this file under either
* the MPL or the LGPL.
*
* You should have received a copy of the LGPL along with this library
* in the file COPYING-LGPL-2.1; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA
* You should have received a copy of the MPL along with this library
* in the file COPYING-MPL-1.1
*
* The contents of this file are subject to the Mozilla Public License
* Version 1.1 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
* OF ANY KIND, either express or implied. See the LGPL or the MPL for
* the specific language governing rights and limitations.
*
* The Original Code is the cairo graphics library.
*
* The Initial Developer of the Original Code is University of Southern
* California.
*
* Contributor(s):
* Chris Wilson <chris@chris-wilson.co.uk>
*/
#ifndef CAIRO_COMPOSITOR_PRIVATE_H
#define CAIRO_COMPOSITOR_PRIVATE_H
#include "cairo-composite-rectangles-private.h"
CAIRO_BEGIN_DECLS
typedef struct {
cairo_scaled_font_t *font;
cairo_glyph_t *glyphs;
int num_glyphs;
cairo_bool_t use_mask;
cairo_rectangle_int_t extents;
} cairo_composite_glyphs_info_t;
struct cairo_compositor {
const cairo_compositor_t *delegate;
cairo_warn cairo_int_status_t
(*paint) (const cairo_compositor_t *compositor,
cairo_composite_rectangles_t *extents);
cairo_warn cairo_int_status_t
(*mask) (const cairo_compositor_t *compositor,
cairo_composite_rectangles_t *extents);
cairo_warn cairo_int_status_t
(*stroke) (const cairo_compositor_t *compositor,
cairo_composite_rectangles_t *extents,
const cairo_path_fixed_t *path,
const cairo_stroke_style_t *style,
const cairo_matrix_t *ctm,
const cairo_matrix_t *ctm_inverse,
double tolerance,
cairo_antialias_t antialias);
cairo_warn cairo_int_status_t
(*fill) (const cairo_compositor_t *compositor,
cairo_composite_rectangles_t *extents,
const cairo_path_fixed_t *path,
cairo_fill_rule_t fill_rule,
double tolerance,
cairo_antialias_t antialias);
cairo_warn cairo_int_status_t
(*glyphs) (const cairo_compositor_t *compositor,
cairo_composite_rectangles_t *extents,
cairo_scaled_font_t *scaled_font,
cairo_glyph_t *glyphs,
int num_glyphs,
cairo_bool_t overlap);
};
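/* Illustrative note (the loop below mirrors cairo-compositor.c): compositors
 * form a delegation chain through 'delegate'.  Callers walk the chain until
 * they find a non-NULL method and keep falling back while a compositor
 * reports CAIRO_INT_STATUS_UNSUPPORTED:
 *
 *     do {
 *         while (compositor->fill == NULL)
 *             compositor = compositor->delegate;
 *         status = compositor->fill (compositor, &extents, path,
 *                                    fill_rule, tolerance, antialias);
 *         compositor = compositor->delegate;
 *     } while (status == CAIRO_INT_STATUS_UNSUPPORTED);
 *
 * The mask and traps compositors below embed this struct as their 'base' and
 * are hooked up to a delegate via the _cairo_*_compositor_init() helpers
 * declared at the end of this header.
 */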
struct cairo_mask_compositor {
cairo_compositor_t base;
cairo_int_status_t (*acquire) (void *surface);
cairo_int_status_t (*release) (void *surface);
cairo_int_status_t (*set_clip_region) (void *surface,
cairo_region_t *clip_region);
cairo_surface_t * (*pattern_to_surface) (cairo_surface_t *dst,
const cairo_pattern_t *pattern,
cairo_bool_t is_mask,
const cairo_rectangle_int_t *extents,
const cairo_rectangle_int_t *sample,
int *src_x, int *src_y);
cairo_int_status_t (*draw_image_boxes) (void *surface,
cairo_image_surface_t *image,
cairo_boxes_t *boxes,
int dx, int dy);
cairo_int_status_t (*copy_boxes) (void *surface,
cairo_surface_t *src,
cairo_boxes_t *boxes,
const cairo_rectangle_int_t *extents,
int dx, int dy);
cairo_int_status_t
(*fill_rectangles) (void *surface,
cairo_operator_t op,
const cairo_color_t *color,
cairo_rectangle_int_t *rectangles,
int num_rects);
cairo_int_status_t
(*fill_boxes) (void *surface,
cairo_operator_t op,
const cairo_color_t *color,
cairo_boxes_t *boxes);
cairo_int_status_t
(*check_composite) (const cairo_composite_rectangles_t *extents);
cairo_int_status_t
(*composite) (void *dst,
cairo_operator_t op,
cairo_surface_t *src,
cairo_surface_t *mask,
int src_x,
int src_y,
int mask_x,
int mask_y,
int dst_x,
int dst_y,
unsigned int width,
unsigned int height);
cairo_int_status_t
(*composite_boxes) (void *surface,
cairo_operator_t op,
cairo_surface_t *source,
cairo_surface_t *mask,
int src_x,
int src_y,
int mask_x,
int mask_y,
int dst_x,
int dst_y,
cairo_boxes_t *boxes,
const cairo_rectangle_int_t *extents);
cairo_int_status_t
(*check_composite_glyphs) (const cairo_composite_rectangles_t *extents,
cairo_scaled_font_t *scaled_font,
cairo_glyph_t *glyphs,
int *num_glyphs);
cairo_int_status_t
(*composite_glyphs) (void *surface,
cairo_operator_t op,
cairo_surface_t *src,
int src_x,
int src_y,
int dst_x,
int dst_y,
cairo_composite_glyphs_info_t *info);
};
struct cairo_traps_compositor {
cairo_compositor_t base;
cairo_int_status_t
(*acquire) (void *surface);
cairo_int_status_t
(*release) (void *surface);
cairo_int_status_t
(*set_clip_region) (void *surface,
cairo_region_t *clip_region);
cairo_surface_t *
(*pattern_to_surface) (cairo_surface_t *dst,
const cairo_pattern_t *pattern,
cairo_bool_t is_mask,
const cairo_rectangle_int_t *extents,
const cairo_rectangle_int_t *sample,
int *src_x, int *src_y);
cairo_int_status_t (*draw_image_boxes) (void *surface,
cairo_image_surface_t *image,
cairo_boxes_t *boxes,
int dx, int dy);
cairo_int_status_t (*copy_boxes) (void *surface,
cairo_surface_t *src,
cairo_boxes_t *boxes,
const cairo_rectangle_int_t *extents,
int dx, int dy);
cairo_int_status_t
(*fill_boxes) (void *surface,
cairo_operator_t op,
const cairo_color_t *color,
cairo_boxes_t *boxes);
cairo_int_status_t
(*check_composite) (const cairo_composite_rectangles_t *extents);
cairo_int_status_t
(*composite) (void *dst,
cairo_operator_t op,
cairo_surface_t *src,
cairo_surface_t *mask,
int src_x,
int src_y,
int mask_x,
int mask_y,
int dst_x,
int dst_y,
unsigned int width,
unsigned int height);
cairo_int_status_t
(*lerp) (void *_dst,
cairo_surface_t *abstract_src,
cairo_surface_t *abstract_mask,
int src_x,
int src_y,
int mask_x,
int mask_y,
int dst_x,
int dst_y,
unsigned int width,
unsigned int height);
cairo_int_status_t
(*composite_boxes) (void *surface,
cairo_operator_t op,
cairo_surface_t *source,
cairo_surface_t *mask,
int src_x,
int src_y,
int mask_x,
int mask_y,
int dst_x,
int dst_y,
cairo_boxes_t *boxes,
const cairo_rectangle_int_t *extents);
cairo_int_status_t
(*composite_traps) (void *dst,
cairo_operator_t op,
cairo_surface_t *source,
int src_x,
int src_y,
int dst_x,
int dst_y,
const cairo_rectangle_int_t *extents,
cairo_antialias_t antialias,
cairo_traps_t *traps);
cairo_int_status_t
(*composite_tristrip) (void *dst,
cairo_operator_t op,
cairo_surface_t *source,
int src_x,
int src_y,
int dst_x,
int dst_y,
const cairo_rectangle_int_t *extents,
cairo_antialias_t antialias,
cairo_tristrip_t *tristrip);
cairo_int_status_t
(*check_composite_glyphs) (const cairo_composite_rectangles_t *extents,
cairo_scaled_font_t *scaled_font,
cairo_glyph_t *glyphs,
int *num_glyphs);
cairo_int_status_t
(*composite_glyphs) (void *surface,
cairo_operator_t op,
cairo_surface_t *src,
int src_x,
int src_y,
int dst_x,
int dst_y,
cairo_composite_glyphs_info_t *info);
};
cairo_private extern const cairo_compositor_t __cairo_no_compositor;
cairo_private extern const cairo_compositor_t _cairo_fallback_compositor;
cairo_private void
_cairo_mask_compositor_init (cairo_mask_compositor_t *compositor,
const cairo_compositor_t *delegate);
cairo_private void
_cairo_shape_mask_compositor_init (cairo_compositor_t *compositor,
const cairo_compositor_t *delegate);
cairo_private void
_cairo_traps_compositor_init (cairo_traps_compositor_t *compositor,
const cairo_compositor_t *delegate);
cairo_private cairo_int_status_t
_cairo_compositor_paint (const cairo_compositor_t *compositor,
cairo_surface_t *surface,
cairo_operator_t op,
const cairo_pattern_t *source,
const cairo_clip_t *clip);
cairo_private cairo_int_status_t
_cairo_compositor_mask (const cairo_compositor_t *compositor,
cairo_surface_t *surface,
cairo_operator_t op,
const cairo_pattern_t *source,
const cairo_pattern_t *mask,
const cairo_clip_t *clip);
cairo_private cairo_int_status_t
_cairo_compositor_stroke (const cairo_compositor_t *compositor,
cairo_surface_t *surface,
cairo_operator_t op,
const cairo_pattern_t *source,
const cairo_path_fixed_t *path,
const cairo_stroke_style_t *style,
const cairo_matrix_t *ctm,
const cairo_matrix_t *ctm_inverse,
double tolerance,
cairo_antialias_t antialias,
const cairo_clip_t *clip);
cairo_private cairo_int_status_t
_cairo_compositor_fill (const cairo_compositor_t *compositor,
cairo_surface_t *surface,
cairo_operator_t op,
const cairo_pattern_t *source,
const cairo_path_fixed_t *path,
cairo_fill_rule_t fill_rule,
double tolerance,
cairo_antialias_t antialias,
const cairo_clip_t *clip);
cairo_private cairo_int_status_t
_cairo_compositor_glyphs (const cairo_compositor_t *compositor,
cairo_surface_t *surface,
cairo_operator_t op,
const cairo_pattern_t *source,
cairo_glyph_t *glyphs,
int num_glyphs,
cairo_scaled_font_t *scaled_font,
const cairo_clip_t *clip);
CAIRO_END_DECLS
#endif /* CAIRO_COMPOSITOR_PRIVATE_H */
| 0 |
D://workCode//uploadProject\awtk\3rd\cairo | D://workCode//uploadProject\awtk\3rd\cairo\cairo\cairo-compositor.c | /* -*- Mode: c; tab-width: 8; c-basic-offset: 4; indent-tabs-mode: t; -*- */
/* cairo - a vector graphics library with display and print output
*
* Copyright © 2011 Intel Corporation
*
* This library is free software; you can redistribute it and/or
* modify it either under the terms of the GNU Lesser General Public
* License version 2.1 as published by the Free Software Foundation
* (the "LGPL") or, at your option, under the terms of the Mozilla
* Public License Version 1.1 (the "MPL"). If you do not alter this
* notice, a recipient may use your version of this file under either
* the MPL or the LGPL.
*
* You should have received a copy of the LGPL along with this library
* in the file COPYING-LGPL-2.1; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA
* You should have received a copy of the MPL along with this library
* in the file COPYING-MPL-1.1
*
* The contents of this file are subject to the Mozilla Public License
* Version 1.1 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
* OF ANY KIND, either express or implied. See the LGPL or the MPL for
* the specific language governing rights and limitations.
*
* The Original Code is the cairo graphics library.
*
* The Initial Developer of the Original Code is University of Southern
* California.
*
* Contributor(s):
* Chris Wilson <chris@chris-wilson.co.uk>
*/
#include "cairoint.h"
#include "cairo-compositor-private.h"
#include "cairo-damage-private.h"
#include "cairo-error-private.h"
cairo_int_status_t
_cairo_compositor_paint (const cairo_compositor_t *compositor,
cairo_surface_t *surface,
cairo_operator_t op,
const cairo_pattern_t *source,
const cairo_clip_t *clip)
{
cairo_composite_rectangles_t extents;
cairo_int_status_t status;
TRACE ((stderr, "%s\n", __FUNCTION__));
status = _cairo_composite_rectangles_init_for_paint (&extents, surface,
op, source,
clip);
if (unlikely (status))
return status;
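    /* Walk the delegation chain: skip compositors that do not implement
     * paint and fall back to the next delegate whenever one reports
     * CAIRO_INT_STATUS_UNSUPPORTED.  The same pattern is used by mask,
     * stroke, fill and glyphs below. */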
do {
while (compositor->paint == NULL)
compositor = compositor->delegate;
status = compositor->paint (compositor, &extents);
compositor = compositor->delegate;
} while (status == CAIRO_INT_STATUS_UNSUPPORTED);
if (status == CAIRO_INT_STATUS_SUCCESS && surface->damage) {
TRACE ((stderr, "%s: applying damage (%d,%d)x(%d, %d)\n",
__FUNCTION__,
extents.unbounded.x, extents.unbounded.y,
extents.unbounded.width, extents.unbounded.height));
surface->damage = _cairo_damage_add_rectangle (surface->damage,
&extents.unbounded);
}
_cairo_composite_rectangles_fini (&extents);
return status;
}
cairo_int_status_t
_cairo_compositor_mask (const cairo_compositor_t *compositor,
cairo_surface_t *surface,
cairo_operator_t op,
const cairo_pattern_t *source,
const cairo_pattern_t *mask,
const cairo_clip_t *clip)
{
cairo_composite_rectangles_t extents;
cairo_int_status_t status;
TRACE ((stderr, "%s\n", __FUNCTION__));
status = _cairo_composite_rectangles_init_for_mask (&extents, surface,
op, source, mask,
clip);
if (unlikely (status))
return status;
do {
while (compositor->mask == NULL)
compositor = compositor->delegate;
status = compositor->mask (compositor, &extents);
compositor = compositor->delegate;
} while (status == CAIRO_INT_STATUS_UNSUPPORTED);
if (status == CAIRO_INT_STATUS_SUCCESS && surface->damage) {
TRACE ((stderr, "%s: applying damage (%d,%d)x(%d, %d)\n",
__FUNCTION__,
extents.unbounded.x, extents.unbounded.y,
extents.unbounded.width, extents.unbounded.height));
surface->damage = _cairo_damage_add_rectangle (surface->damage,
&extents.unbounded);
}
_cairo_composite_rectangles_fini (&extents);
return status;
}
cairo_int_status_t
_cairo_compositor_stroke (const cairo_compositor_t *compositor,
cairo_surface_t *surface,
cairo_operator_t op,
const cairo_pattern_t *source,
const cairo_path_fixed_t *path,
const cairo_stroke_style_t *style,
const cairo_matrix_t *ctm,
const cairo_matrix_t *ctm_inverse,
double tolerance,
cairo_antialias_t antialias,
const cairo_clip_t *clip)
{
cairo_composite_rectangles_t extents;
cairo_int_status_t status;
TRACE ((stderr, "%s\n", __FUNCTION__));
if (_cairo_pen_vertices_needed (tolerance, style->line_width/2, ctm) <= 1)
return CAIRO_INT_STATUS_NOTHING_TO_DO;
status = _cairo_composite_rectangles_init_for_stroke (&extents, surface,
op, source,
path, style, ctm,
clip);
if (unlikely (status))
return status;
do {
while (compositor->stroke == NULL)
compositor = compositor->delegate;
status = compositor->stroke (compositor, &extents,
path, style, ctm, ctm_inverse,
tolerance, antialias);
compositor = compositor->delegate;
} while (status == CAIRO_INT_STATUS_UNSUPPORTED);
if (status == CAIRO_INT_STATUS_SUCCESS && surface->damage) {
TRACE ((stderr, "%s: applying damage (%d,%d)x(%d, %d)\n",
__FUNCTION__,
extents.unbounded.x, extents.unbounded.y,
extents.unbounded.width, extents.unbounded.height));
surface->damage = _cairo_damage_add_rectangle (surface->damage,
&extents.unbounded);
}
_cairo_composite_rectangles_fini (&extents);
return status;
}
cairo_int_status_t
_cairo_compositor_fill (const cairo_compositor_t *compositor,
cairo_surface_t *surface,
cairo_operator_t op,
const cairo_pattern_t *source,
const cairo_path_fixed_t *path,
cairo_fill_rule_t fill_rule,
double tolerance,
cairo_antialias_t antialias,
const cairo_clip_t *clip)
{
cairo_composite_rectangles_t extents;
cairo_int_status_t status;
TRACE ((stderr, "%s\n", __FUNCTION__));
status = _cairo_composite_rectangles_init_for_fill (&extents, surface,
op, source, path,
clip);
if (unlikely (status))
return status;
do {
while (compositor->fill == NULL)
compositor = compositor->delegate;
status = compositor->fill (compositor, &extents,
path, fill_rule, tolerance, antialias);
compositor = compositor->delegate;
} while (status == CAIRO_INT_STATUS_UNSUPPORTED);
if (status == CAIRO_INT_STATUS_SUCCESS && surface->damage) {
TRACE ((stderr, "%s: applying damage (%d,%d)x(%d, %d)\n",
__FUNCTION__,
extents.unbounded.x, extents.unbounded.y,
extents.unbounded.width, extents.unbounded.height));
surface->damage = _cairo_damage_add_rectangle (surface->damage,
&extents.unbounded);
}
_cairo_composite_rectangles_fini (&extents);
return status;
}
cairo_int_status_t
_cairo_compositor_glyphs (const cairo_compositor_t *compositor,
cairo_surface_t *surface,
cairo_operator_t op,
const cairo_pattern_t *source,
cairo_glyph_t *glyphs,
int num_glyphs,
cairo_scaled_font_t *scaled_font,
const cairo_clip_t *clip)
{
cairo_composite_rectangles_t extents;
cairo_bool_t overlap;
cairo_int_status_t status;
TRACE ((stderr, "%s\n", __FUNCTION__));
status = _cairo_composite_rectangles_init_for_glyphs (&extents, surface,
op, source,
scaled_font,
glyphs, num_glyphs,
clip, &overlap);
if (unlikely (status))
return status;
do {
while (compositor->glyphs == NULL)
compositor = compositor->delegate;
status = compositor->glyphs (compositor, &extents,
scaled_font, glyphs, num_glyphs, overlap);
compositor = compositor->delegate;
} while (status == CAIRO_INT_STATUS_UNSUPPORTED);
if (status == CAIRO_INT_STATUS_SUCCESS && surface->damage) {
TRACE ((stderr, "%s: applying damage (%d,%d)x(%d, %d)\n",
__FUNCTION__,
extents.unbounded.x, extents.unbounded.y,
extents.unbounded.width, extents.unbounded.height));
surface->damage = _cairo_damage_add_rectangle (surface->damage,
&extents.unbounded);
}
_cairo_composite_rectangles_fini (&extents);
return status;
}
| 0 |