#define CACHE_LINE 16

/*
 * DECLARE_ALIGNED_MATRIX(name, sizex, sizey, type, alignment)
 *
 * Declare a sizex*sizey scratch matrix of `type` whose storage is aligned
 * to `alignment` bytes (`alignment` must be a power of two).
 *
 * Old MSVC (<= 6.0, _MSC_VER <= 1200) lacks __declspec(align), so we
 * over-allocate by (alignment - 1) elements and round the pointer up to
 * the next alignment boundary.  Newer compilers align the array directly.
 */
#if _MSC_VER <= 1200
/* NOTE: the rounding arithmetic goes through uintptr_t so the pointer is
 * not truncated on 64-bit targets (a plain int32_t cast would be). */
#define DECLARE_ALIGNED_MATRIX(name,sizex,sizey,type,alignment) \
	type name##_storage[(sizex)*(sizey)+(alignment)-1]; \
	type * name = (type *) (((uintptr_t) name##_storage+(alignment - 1)) & ~((uintptr_t)(alignment)-1))
#else
#define DECLARE_ALIGNED_MATRIX(name,sizex,sizey,type,alignment) \
	__declspec(align(alignment)) type name[(sizex)*(sizey)]
#endif
/* needed for bitstream.h */

/* Byte-swap the 32-bit lvalue `a` in place using the PowerPC
 * load-byte-reversed instruction; `eieio` orders the storage access.
 * NOTE(review): PPC-only — presumably guarded by an arch #if above this
 * chunk; confirm against the full file. */
#define BSWAP(a) __asm__ ( "lwbrx %0,0,%1; eieio" : "=r" (a) : \
                           "r" (&(a)), "m" (a) )

/* PPC has no MMX state to clear, so EMMS is a no-op here. */
#define EMMS()
static __inline unsigned long get_tbl(void) { |
118 |
|
unsigned long tbl; |
119 |
|
asm volatile("mftb %0" : "=r" (tbl)); |
120 |
|
return tbl; |
121 |
|
} |
122 |
|
static __inline unsigned long get_tbu(void) { |
123 |
|
unsigned long tbl; |
124 |
|
asm volatile("mftbu %0" : "=r" (tbl)); |
125 |
|
return tbl; |
126 |
|
} |
/* Return the full 64-bit PowerPC time base.
 *
 * Uses the canonical read-upper / read-lower / re-read-upper sequence: if
 * the upper word changed while we read the lower word, the lower word
 * wrapped between the reads and the pair is inconsistent, so retry.
 * (The previous code re-checked the LOW word instead, which ticks on
 * every time-base increment and neither guarantees consistency nor
 * avoids needless retries.) */
static __inline int64_t read_counter(void) {
	unsigned long tb, tu;
	do {
		tu = get_tbu();
		tb = get_tbl();
	} while (tu != get_tbu());
	return (((int64_t)tu) << 32) | (int64_t)tb;
}
#else
/* x86: byte-swap the 32-bit lvalue `a` in place. */
#define BSWAP(a) __asm__ ( "bswapl %0\n" : "=r" (a) : "0" (a) )
/* Clear MMX state so subsequent x87 FPU code is safe. */
#define EMMS() __asm__("emms\n\t")