From 96a32415ab43377cf1575bd3f4f2980f58028209 Mon Sep 17 00:00:00 2001 From: Determinant Date: Fri, 14 Aug 2015 11:51:42 +0800 Subject: add implementation for kaldi io (by ymz) --- .../tools/ATLAS/include/contrib/ATL_gemv_ger_SSE.h | 188 ++ kaldi_io/src/tools/ATLAS/include/contrib/Make.ext | 39 + .../src/tools/ATLAS/include/contrib/SSE3Dnow.h | 709 +++++ .../src/tools/ATLAS/include/contrib/camm_dpa.h | 1626 +++++++++++ .../src/tools/ATLAS/include/contrib/camm_pipe3.h | 295 ++ .../src/tools/ATLAS/include/contrib/camm_scale.h | 215 ++ .../src/tools/ATLAS/include/contrib/camm_strat1.h | 2982 ++++++++++++++++++++ .../src/tools/ATLAS/include/contrib/camm_tpipe.h | 331 +++ .../src/tools/ATLAS/include/contrib/camm_util.h | 508 ++++ 9 files changed, 6893 insertions(+) create mode 100644 kaldi_io/src/tools/ATLAS/include/contrib/ATL_gemv_ger_SSE.h create mode 100644 kaldi_io/src/tools/ATLAS/include/contrib/Make.ext create mode 100644 kaldi_io/src/tools/ATLAS/include/contrib/SSE3Dnow.h create mode 100644 kaldi_io/src/tools/ATLAS/include/contrib/camm_dpa.h create mode 100644 kaldi_io/src/tools/ATLAS/include/contrib/camm_pipe3.h create mode 100644 kaldi_io/src/tools/ATLAS/include/contrib/camm_scale.h create mode 100644 kaldi_io/src/tools/ATLAS/include/contrib/camm_strat1.h create mode 100644 kaldi_io/src/tools/ATLAS/include/contrib/camm_tpipe.h create mode 100644 kaldi_io/src/tools/ATLAS/include/contrib/camm_util.h (limited to 'kaldi_io/src/tools/ATLAS/include/contrib') diff --git a/kaldi_io/src/tools/ATLAS/include/contrib/ATL_gemv_ger_SSE.h b/kaldi_io/src/tools/ATLAS/include/contrib/ATL_gemv_ger_SSE.h new file mode 100644 index 0000000..118d3de --- /dev/null +++ b/kaldi_io/src/tools/ATLAS/include/contrib/ATL_gemv_ger_SSE.h @@ -0,0 +1,188 @@ +#ifdef GER +#undef NO_TRANSPOSE +#define NO_TRANSPOSE +#endif + + +#if NDPM > 4 +#error Max NDPM is 4 +#endif + +#if !defined(ATL_SSE1) && ( defined(SREAL) || defined(SCPLX) ) +#error This routine needs ATL_SSE1 defined +#endif + +#if !defined(ATL_SSE2) && ( defined(DREAL) || defined(DCPLX) ) +#error This routine needs ATL_SSE2 defined +#endif + +#include +#include + +#include "camm_util.h" + +#ifndef GER +#if defined(BETAX) || defined(BETAXI0) +#include "camm_scale.h" +#endif +#endif + +#if NDPM >= 4 +#define EXT4 Mjoin(4dp,BLC) +#undef NDP +#define NDP 4 +#undef EXT +#define EXT EXT4 +#include "camm_dpa.h" +#endif + +#if NDPM >= 3 +#define EXT3 Mjoin(3dp,BLC) +#undef NDP +#define NDP 3 +#undef EXT +#define EXT EXT3 +#include "camm_dpa.h" +#endif + +#if NDPM >= 2 +#define EXT2 Mjoin(2dp,BLC) +#undef NDP +#define NDP 2 +#undef EXT +#define EXT EXT2 +#include "camm_dpa.h" +#endif + +#define EXT1 Mjoin(1dp,BLC) +#undef NDP +#define NDP 1 +#undef EXT +#define EXT EXT1 +#include "camm_dpa.h" + +#undef NDP +#define NDP NDPM +#undef EXT +#define EXT Mjoin(Mjoin(NDP,Mjoin(dp,BLC)),m) +#include "camm_dpa.h" + +#ifdef GER +#if defined(SCPLX) || defined(DCPLX) +#ifdef Conj_ +#define IM 1c +#else +#define IM 1u +#endif +#else +#define IM 1 +#endif + + +#define FN Mjoin(Mjoin(Mjoin(ATL_,PREC),Mjoin(ger,IM)),_a1_x1_yX) + +#undef MY_FUNCTION +#define MY_FUNCTION FN + +void +MY_FUNCTION(int m,int n, const SCALAR alpha,const TYPE *c, + int cinc,const TYPE *b,int binc, + TYPE *a,int lda) { + +#else + + +#define FN Mjoin(Mjoin(Mjoin(ATL_,PREC),gemv),Mjoin(FEXT,Mjoin(_a1_x1_,Mjoin(BL,_y1)))) + +#undef MY_FUNCTION +#define MY_FUNCTION FN + +void +MY_FUNCTION(int m,int n, const SCALAR alpha,const TYPE *a, + int lda,const TYPE *b,int binc, + const SCALAR beta,TYPE *c,int cinc) { + 
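+/*
+ * Name-composition example (hypothetical instantiation, not from the
+ * original source): with PREC=s, FEXT=T and BL=b1, the Mjoin chain
+ * above expands FN to
+ *     ATL_sgemvT_a1_x1_b1_y1
+ * i.e. single-precision transposed GEMV with alpha=1, incX=1, the b1
+ * beta case and incY=1; the GER branch composes names such as
+ * ATL_sger1_a1_x1_yX in the same way.
+ */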
+#endif + + int i,mm,nn; + const TYPE *ae; +#ifdef NO_TRANSPOSE + int len=m,w=n; +#define zz b +#else + int len=n,w=m; +#define zz c +#endif + +#ifdef GER +#define zzinc binc +#else +#define zzinc 1 + + +#if defined(NO_TRANSPOSE) && defined(BETA0) + memset(c,0,m*sizeof(*c)); +#endif + +#if defined(BETAX) || defined(BETAXI0) +#if defined(SCPLX) || defined(DCPLX) + SCALE(beta,c,m); +#endif +#if defined(SREAL) || defined(DREAL) + SCALE(&beta,c,m); +#endif +#endif + +#endif + + ae=a+w*lda; + nn=STRIDE*lda; + + +#if NDPM == 1 + for (;a 1 + if (((ae-a)/lda)%STRIDE) + mm++; +#endif + + if (mm == 1) + Mjoin(dp,EXT1)(a,nn,b,c,STRIDE*zzinc,len); + +#if ( NDPM == 2 && STRIDE > 1 ) || NDPM > 2 + else if (mm == 2) + Mjoin(dp,EXT2)(a,nn,b,c,STRIDE*zzinc,len); +#endif + +#if ( NDPM == 3 && STRIDE > 1 ) || NDPM > 3 + else if (mm == 3) + Mjoin(dp,EXT3)(a,nn,b,c,STRIDE*zzinc,len); +#endif + +#if ( NDPM == 4 && STRIDE > 1 ) || NDPM > 4 + else if (mm == 4) + Mjoin(dp,EXT4)(a,nn,b,c,STRIDE*zzinc,len); +#endif + + + } + +#endif + +} + diff --git a/kaldi_io/src/tools/ATLAS/include/contrib/Make.ext b/kaldi_io/src/tools/ATLAS/include/contrib/Make.ext new file mode 100644 index 0000000..f7f9a0a --- /dev/null +++ b/kaldi_io/src/tools/ATLAS/include/contrib/Make.ext @@ -0,0 +1,39 @@ + +topd = /home/whaley/atlas3.8/AtlasBase +incs = -def topd /home/whaley/atlas3.8/AtlasBase \ + -def incd /home/whaley/atlas3.8/AtlasBase/Clint \ + -def BASEdir /home/whaley/atlas3.8/AtlasBase/Antoine/ \ + -def basd /home/whaley/atlas3.8/AtlasBase/Clint +ext = extract +extF = $(ext) -langF -lnlen71 -Remtblank -llwarn2 -LAPACK1 $(incs) +extC = $(ext) -langC -lnlen79 -Remtblank -llwarn2 $(incs) +extM = $(ext) -langM -lnlen79 -llwarn2 $(incs) + +default: all +force_build: +basd = /home/whaley/atlas3.8/AtlasBase/Clint +basdRCW = /home/whaley/atlas3.8/AtlasBase/Clint +basdAPP = /home/whaley/atlas3.8/AtlasBase/Antoine +incf = /home/whaley/atlas3.8/AtlasBase/gen.inc + +files = ATL_gemv_ger_SSE.h SSE3Dnow.h camm_dpa.h camm_pipe3.h camm_scale.h \ + camm_strat1.h camm_tpipe.h camm_util.h + +all : $(files) + +camm_strat1.h : $(topd)/kernel/CammMaguire/camm_strat1.h + cp $(topd)/kernel/CammMaguire/camm_strat1.h . +camm_tpipe.h : $(topd)/kernel/CammMaguire/camm_tpipe.h + cp $(topd)/kernel/CammMaguire/camm_tpipe.h . +camm_pipe3.h : $(topd)/kernel/CammMaguire/camm_pipe3.h + cp $(topd)/kernel/CammMaguire/camm_pipe3.h . +ATL_gemv_ger_SSE.h : $(topd)/kernel/CammMaguire/ATL_gemv_ger_SSE.h + cp $(topd)/kernel/CammMaguire/ATL_gemv_ger_SSE.h . +camm_util.h : $(topd)/kernel/CammMaguire/camm_util.h + cp $(topd)/kernel/CammMaguire/camm_util.h . +camm_scale.h : $(topd)/kernel/CammMaguire/camm_scale.h + cp $(topd)/kernel/CammMaguire/camm_scale.h . +camm_dpa.h : $(topd)/kernel/CammMaguire/camm_dpa.h + cp $(topd)/kernel/CammMaguire/camm_dpa.h . +SSE3Dnow.h : $(topd)/kernel/PeterSoendergaard/SSE3Dnow.h + cp $(topd)/kernel/PeterSoendergaard/SSE3Dnow.h . diff --git a/kaldi_io/src/tools/ATLAS/include/contrib/SSE3Dnow.h b/kaldi_io/src/tools/ATLAS/include/contrib/SSE3Dnow.h new file mode 100644 index 0000000..a783749 --- /dev/null +++ b/kaldi_io/src/tools/ATLAS/include/contrib/SSE3Dnow.h @@ -0,0 +1,709 @@ +#if !defined(ATL_GAS_x8632) && !defined(ATL_GAS_x8664) + #error "This kernel requires gas x86 assembler!" 
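+/*
+ * A minimal sketch of why the two-level Mstr/Mstr2 pair defined just
+ * below is needed: '# m' stringizes its argument before macro
+ * expansion, so the extra level forces the value through one round of
+ * expansion first.  With the register names used in this file:
+ *     #define reg0 %%xmm0
+ *     Mstr2(reg0)  ->  "reg0"
+ *     Mstr(reg0)   ->  Mstr2(%%xmm0)  ->  "%%xmm0"
+ */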
+#endif +#ifndef Mstr /* Added by RCW to make multiline macros work */ + #define Mstr2(m) # m + #define Mstr(m) Mstr2(m) +#endif +/* The mening of the defined macros is as follows: + * VECLEN: The length of a singleprecision vector register + * vec_add: Add to single precision vectors. + * vec_mul: Multiply to single precision vectors. + * vec_mov: Moves data around + * vec_mov1: Load one element in a vector and zero all other entries! + * vec_splat: Load one element relpicated in all positions in the vector. + * vec_load_apart: Load elements from different memory positions into a register. + * vec_sum: Sums a register. + * vec_store_one: Stores lowest element in vector to memory, no zero-extend! + * Meaning of suffixes is as follows: + * mr means memory to register + * rr means register to register + * rm means register to memory + * a means that instruction needs aligned data + * 1 means that the instructions only operates on the lowest element of the + * vector. + * + * The _1 instructions work under one important assumption: That you never mix + * them with regular instructions, e.g. loading into a register with a normal + * mov, and then using add_rr_1 will not work under 3dnow! since it is in + * reality a normal add. However, if using a mov_1 first, the upper part of + * the register will be zeroed, and it will therefore work. The _1 system is + * more robust under SSE, but other architectures might be implemented the + * same way as 3dnow! + * + * RCW: I added the following functionality for SSE only (note that vw may + * be overwritten with intermediate results, but is not used as input, + * and that all input array may be overwritten wt intermediate results. + * VL : vector length -1): + * vec_red(vd, vw) : vd[0] = sum(vd[0:VL]) + * vec_red2(v1, v2, vw) : v1[0] = sum(v1[0:VL]); v1[1] = sum(v2[0:VL]) + * vec_red4(v0, v1, v2, v3 vw1, vw2) : + * v0[0] = sum(v0[0:VL]); v0[1] = sum(v1[0:VL]) + * if type = double: + * v2[0] = sum(v2[0:VL]); v2[1] = sum(v3[0:VL]) + * else + * v0[2] = sum(v2[0:VL]); v0[3] = sum(v3[0:VL]) + * vec_zero(vd) : vd[0:VL] = 0.0 + */ + + +/* Things to try: + * Non-temporal stores + * Sequences of instructions instead of movups + * + * + * + * + */ + + + +#define gen_vec_rr(op,reg1,reg2) \ + __asm__ __volatile__ (#op " " #reg1 ", " #reg2 \ + : /* nothing */ \ + : /* nothing */) + + +#define w(p) p + +#define nop() __asm__ __volatile__ ("nop") + +#define rep() __asm__ __volatile__ ("rep") + +#define align() __asm__ __volatile__ (".align 16") + + +#ifdef x87double + +#define st0 %%st(0) +#define st1 %%st(1) +#define st2 %%st(2) +#define st3 %%st(3) +#define st4 %%st(4) +#define st5 %%st(5) +#define st6 %%st(6) +#define st7 %%st(7) + + +#define gen_stack_rt(op,reg) \ + __asm__ __volatile__ (#op " " #reg \ + : /* nothing */ \ + : /* nothing */) + +#define gen_stack_tr(op,reg) \ + __asm__ __volatile__ (#op " %%st(0)," #reg \ + : \ + : ) + + +#define gen_stack_rr(op,reg1,reg2) \ + __asm__ __volatile__ (#op " " #reg1 ", " #reg2 \ + : /* nothing */ \ + : /* nothing */) + +#define gen_stack_t(op) \ + __asm__ __volatile__ (#op \ + : /* nothing */ \ + : /* nothing */) + + +#define gen_stack_tm(op,mem) \ + __asm__ __volatile__ (#op " %0" \ + : "=m" (((mem)[0])) \ + : ) + +#define gen_stack_mt(op,mem) \ + __asm__ __volatile__ (#op " %0" \ + : \ + : "m" (((mem)[0]))) + + +#define stack_mov_mt_push(mem) gen_stack_mt(fldl,mem) + +#define stack_add_tr_pop(reg) gen_stack_tr(faddp,reg) +#define stack_add_mt(mem) gen_stack_mt(faddl,mem) + +#define stack_mul_tr(reg) 
gen_stack_tr(fmul,reg) +#define stack_mul_tr_pop(reg) gen_stack_tr(fmulp,reg) +#define stack_mul_mt(mem) gen_stack_mt(fmul,mem) + +#define stack_mov_tm_pop(mem) gen_stack_tm(fstpl,mem) + +#define stack_zero_push() gen_stack_t(fldz) + +#endif /* x87double */ + +#ifdef SSE + +/* Peculiarities of SSE: Alignment is good, but not mandatory. It is possible to + * load/store from misaligned adresses using movups at a cost of some cycles. Loading + * using mul/add must always be aligned. Alignment is 16 bytes. + * No muladd. + */ + + + +#define gen_vec_mr(op,mem,reg) \ + __asm__ __volatile__ (#op " %0, " #reg \ + : /* nothing */ \ + : "m" (((mem)[0])), "m" (((mem)[1])), "m" (((mem)[2])), "m" (((mem)[3]))) + + +#define gen_vec_rm(op,reg,mem) \ + __asm__ __volatile__ (#op " " #reg ", %0" \ + : "=m" (((mem)[0])), "=m" (((mem)[1])), "=m" (((mem)[2])), "=m" (((mem)[3])) \ + : /* nothing */ ) + + + + +#define VECLEN 4 + +#define reg0 %%xmm0 +#define reg1 %%xmm1 +#define reg2 %%xmm2 +#define reg3 %%xmm3 +#define reg4 %%xmm4 +#define reg5 %%xmm5 +#define reg6 %%xmm6 +#define reg7 %%xmm7 +#ifdef ATL_GAS_x8664 + #define reg8 %%xmm8 + #define reg9 %%xmm9 + #define reg10 %%xmm10 + #define reg11 %%xmm11 + #define reg12 %%xmm12 + #define reg13 %%xmm13 + #define reg14 %%xmm14 + #define reg15 %%xmm15 +#endif + +#define vec_mov_mr(mem,reg) gen_vec_mr(movups,mem,reg) +#define vec_mov_rm(reg,mem) gen_vec_rm(movups,reg,mem) +#define vec_mov_mr_a(mem,reg) gen_vec_mr(movaps,mem,reg) +#define vec_mov_rm_a(reg,mem) gen_vec_rm(movaps,reg,mem) +#define vec_mov_rr(reg1,reg2) gen_vec_rr(movaps,reg1,reg2) + +#define vec_add_mr_a(mem,reg) gen_vec_mr(addps,mem,reg) +#define vec_mul_mr_a(mem,reg) gen_vec_mr(mulps,mem,reg) + +#define vec_add_rr(mem,reg) gen_vec_rr(addps,mem,reg) +#define vec_mul_rr(mem,reg) gen_vec_rr(mulps,mem,reg) + +#define vec_mov_mr_1(mem,reg) gen_vec_mr(movss,mem,reg) +#define vec_mov_rm_1(reg,mem) gen_vec_rm(movss,reg,mem) +#define vec_mov_rr_1(reg1,reg2) gen_vec_rr(movss,reg1,reg2) + +#define vec_add_mr_1(mem,reg) gen_vec_mr(addss,mem,reg) +#define vec_add_rr_1(reg1,reg2) gen_vec_rr(addss,reg1,reg2) + +#define vec_mul_mr_1(mem,reg) gen_vec_mr(mulss,mem,reg) +#define vec_mul_rr_1(reg1,reg2) gen_vec_rr(mulss,reg1,reg2) + +#define vec_unpack_low(reg1,reg2) gen_vec_rr(unpcklps,reg1,reg2) +#define vec_unpack_high(reg1,reg2) gen_vec_rr(unpckhps,reg1,reg2) +#define vec_shuffle(mode,reg1,reg2) vec_shuffle_wrap(mode,reg1,reg2) +#define vec_shuffle_wrap(mode,reg1,reg2) \ + __asm__ __volatile__ ("shufps " #mode ", " #reg1 ", " #reg2 \ + : /* nothing */\ + : /* nothing */) + +/* Hack! */ +/* To use this instruction be sure that register 7 is not in use!!! */ +/* It must be possible to reduce this sequence to only four instructions. + * please tell me how! 
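+ * (Per the RCW additions below, SSE3 answers this: two haddps
+ * instructions reduce all four lanes, and the vec_red/vec_red2/
+ * vec_red4 macros defined there also avoid clobbering xmm7.  A usage
+ * sketch, assuming partial sums were accumulated in reg0..reg3 and a
+ * float array out:
+ *     vec_red4(reg0, reg1, reg2, reg3, reg6, reg7);
+ *     vec_mov_rm(reg0, out);    [ out[i] = lane-sum of reg_i ]
+ * reg6/reg7 are scratch and may be overwritten.)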
*/ +#define vec_sum(reg) vec_sum_wrap(reg) +#define vec_sum_wrap(reg) \ + __asm__ __volatile__ ("movhlps " #reg ", %%xmm7\n"\ + "addps " #reg ", %%xmm7\n"\ + "movaps %%xmm7, " #reg "\n"\ + "shufps $1, " #reg ", %%xmm7\n"\ + "addss %%xmm7, " #reg "\n"\ + : /* nothing */\ + : /* nothing */) + +/* RCW: added to safely replace vec_sum (vec reduce), and use SSE3 when avail */ +#define vec_zero(vd) __asm__ __volatile__("xorps " Mstr(vd) ", " Mstr(vd) ::) +#ifdef ATL_SSE3 + #define vec_red(vr, vwrk) \ + __asm__ __volatile__("haddps " Mstr(vr) ", " Mstr(vr) "\n"\ + "haddps " Mstr(vr) ", " Mstr(vr) "\n" ::) +/* + * haddps v1 v0 # v0 = {v1cd, v1ab, v0cd, v0ab} + * haddps v0 v0 # v0 = {v1abcd, v0abcd, v1abcd, v0abcd} + */ + #define vec_red2(v0, v1, vwork) \ + __asm__ __volatile__("haddps " Mstr(v1) ", " Mstr(v0) "\n"\ + "haddps " Mstr(v0) ", " Mstr(v0) "\n" ::) +/* + * haddps v1, v0 # v0 = {v1cd,v1ab,v0cd,v0ab} + * haddps v3, v2 # v2 = {v3cd,v3ab,v2cd,v2ab} + * haddps v2, v0 # v0 = {v3abcd,v2abcd,v1abcd, v0abcd} + */ + #define vec_red4(v0, v1, v2, v3, w0, w1) \ + __asm__ __volatile__("haddps " Mstr(v1) ", " Mstr(v0) "\n"\ + "haddps " Mstr(v3) ", " Mstr(v2) "\n"\ + "haddps " Mstr(v2) ", " Mstr(v0) "\n" ::) +#elif defined(ATL_SSE2) + #define vec_red(vr, vwrk) \ + __asm__ __volatile__ ("pshufd $0xEE, " Mstr(vr) ", " Mstr(vwrk) "\n"\ + "addps " Mstr(vwrk) ", " Mstr(vr) "\n"\ + "pshufd $0xE5, " Mstr(vr) ", " Mstr(vwrk) "\n"\ + "addss " Mstr(vwrk) ", " Mstr(vr) "\n"\ + ::) +#else + #define vec_red(vr, vwrk) \ + __asm__ __volatile__ ("movhlps " Mstr(vr) ", " Mstr(vwrk) "\n"\ + "addps " Mstr(vwrk) ", " Mstr(vr) "\n"\ + "movaps " Mstr(vr) ", " Mstr(vwrk) "\n"\ + "shufps $0xE5, " Mstr(vr) ", " Mstr(vr) "\n"\ + "addss " Mstr(vwrk) ", " Mstr(vr) "\n"\ + ::) +#endif +#ifndef ATL_SSE3 /* codes that are the same for SSE2 and SSE1 */ +/* + # v0 = {v0d,v0c,v0b,v0a} + # v1 = {v1d,v1c,v1b,v1a} + movaps v0, vw # vw = {v0d,v0c,v0b,v0a} + unpacklps v1, v0 # v0 = {v1b,v0b,v1a,v0a} + unpackhps v1, vw # vw = {v1d,v0d,v1c,v0c} + addps vw, v0 # v0 = {v1bd,v0bd,v1ac,v0ac} + movhlps v0, vw # vw = {X , X,v1bd,v0bd} + addps vw, v0 # v0 = {X , X,v1abcd,v0abcd} +*/ + #define vec_red2(v0, v1, vw) \ + __asm__ __volatile__ ("movaps " Mstr(v0) ", " Mstr(vw) "\n"\ + "unpcklps " Mstr(v1) ", " Mstr(v0) "\n"\ + "unpckhps " Mstr(v1) ", " Mstr(vw) "\n"\ + "addps " Mstr(vw) ", " Mstr(v0) "\n"\ + "movhlps " Mstr(v0) ", " Mstr(vw) "\n"\ + "addps " Mstr(vw) ", " Mstr(v0) "\n"\ + ::) +/* + * movaps v0, w0 # w0 = {v0d, v0c, v0b, v0a} + * unpcklps v1, v0 # v0 = {v1b, v0b, v1a, v0a} + * movaps v2, w1 # w1 = {v2d, v2c, v2b, v2a} + * unpckhps v1, w0 # w0 = {v1d, v0d, v1c, v0c} + * unpcklps v3, v2 # v2 = {v3b, v2b, v3a, v2a} + * addps w0, v0 # v0 = {v1bd, v0bd, v1ac, v0ac} + * unpckhps v3, w1 # w1 = {v3d, v2d, v3c, v2c} + * movaps v0, w0 # w0 = {v1bd, v0bd, v1ac, v0ac} + * addps w1, v2 # v2 = {v3bd, v2bd, v3ac, v2ac} + * shufps $0x44,v2,v0 # v0 = {v3ac, v2ac, v1ac, v0ac} + * shufps $0xEE,v2,w0 # w0 = {v3bd, v2bd, v1bd, v0bd} + * addps w0, v0 # v0 = {v3abcd, v2abcd, v1abcd, v0abcd} + */ + #define vec_red4(v0, v1, v2, v3, w0, w1) \ + __asm__ __volatile__ ("movaps " Mstr(v0) ", " Mstr(w0) "\n"\ + "unpcklps " Mstr(v1) ", " Mstr(v0) "\n"\ + "movaps " Mstr(v2) ", " Mstr(w1) "\n"\ + "unpckhps " Mstr(v1) ", " Mstr(w0) "\n"\ + "unpcklps " Mstr(v3) ", " Mstr(v2) "\n"\ + "addps " Mstr(w0) ", " Mstr(v0) "\n"\ + "unpckhps " Mstr(v3) ", " Mstr(w1) "\n"\ + "movaps " Mstr(v0) ", " Mstr(w0) "\n"\ + "addps " Mstr(w1) ", " Mstr(v2) "\n"\ + "shufps $0x44, " Mstr(v2) ", " 
Mstr(v0) "\n"\ + "shufps $0xEE, " Mstr(v2) ", " Mstr(w0) "\n"\ + "addps " Mstr(w0) ", " Mstr(v0) "\n"\ + ::) +#endif + +#define vec_splat(mem,reg) vec_splat_wrap(mem,reg) +#define vec_splat_wrap(mem,reg) \ + __asm__ __volatile__ ("movss %0, " #reg "\n"\ + "unpcklps " #reg ", " #reg "\n"\ + "movlhps " #reg ", " #reg "\n"\ + : /* nothing */ \ + : "m" ((mem)[0])) + + +/* This instruction sequence appears courtesy of Camm Maguire. */ +#define vec_sum_full(reg0,reg1,reg2,reg3,regout,empty0,empty1) vec_sum_full_wrap(reg0,reg1,reg2,reg3,regout,empty0,empty1) +#define vec_sum_full_wrap(reg0,reg1,reg2,reg3,regout,empty0,empty1) \ + __asm__ __volatile__ ("movaps " #reg0 "," #empty0 "\n"\ + "unpcklps " #reg1 "," #reg0 "\n"\ + "movaps " #reg2 "," #empty1 "\n"\ + "unpckhps " #reg1 "," #empty0 "\n"\ + "unpcklps " #reg3 "," #reg2 "\n"\ + "addps " #empty0 "," #reg0 "\n"\ + "unpckhps " #reg3 "," #empty1 "\n"\ + "movaps " #reg0 "," #regout "\n"\ + "addps " #empty1 "," #reg2 "\n"\ + "shufps $0x44," #reg2 "," #reg0 "\n"\ + "shufps $0xee," #reg2 "," #regout "\n"\ + "addps " #reg0 "," #regout "\n"\ + : /* nothing */ \ + : /* nothing */) + + + +typedef float vector[VECLEN]; + +#endif /* end ifdef SSE */ + + +#ifdef SSE2 + +/* Peculiarities of SSE: Alignment is good, but not mandatory. It is possible to + * load/store from misaligned adresses using movups at a cost of some cycles. Loading + * using mul/add must always be aligned. Alignment is 16 bytes. + * No muladd. + */ + + + +#define gen_vec_mr(op,mem,reg) \ + __asm__ __volatile__ (#op " %0, " #reg \ + : /* nothing */ \ + : "m" (((mem)[0])), "m" (((mem)[1]))) + + +#define gen_vec_rm(op,reg,mem) \ + __asm__ __volatile__ (#op " " #reg ", %0" \ + : "=m" (((mem)[0])), "=m" (((mem)[1])) \ + : /* nothing */ ) + + + + +#define VECLEN 2 + +#define reg0 %%xmm0 +#define reg1 %%xmm1 +#define reg2 %%xmm2 +#define reg3 %%xmm3 +#define reg4 %%xmm4 +#define reg5 %%xmm5 +#define reg6 %%xmm6 +#define reg7 %%xmm7 +#ifdef ATL_GAS_x8664 + #define reg8 %%xmm8 + #define reg9 %%xmm9 + #define reg10 %%xmm10 + #define reg11 %%xmm11 + #define reg12 %%xmm12 + #define reg13 %%xmm13 + #define reg14 %%xmm14 + #define reg15 %%xmm15 +#endif + + +#define vec_mov_mr(mem,reg) gen_vec_mr(movupd,mem,reg) +#define vec_mov_rm(reg,mem) gen_vec_rm(movupd,reg,mem) +#define vec_mov_mr_a(mem,reg) gen_vec_mr(movapd,mem,reg) +#define vec_mov_rm_a(reg,mem) gen_vec_rm(movapd,reg,mem) +#define vec_mov_rr(reg1,reg2) gen_vec_rr(movapd,reg1,reg2) + +#define vec_add_mr_a(mem,reg) gen_vec_mr(addpd,mem,reg) +#define vec_mul_mr_a(mem,reg) gen_vec_mr(mulpd,mem,reg) + +#define vec_add_rr(mem,reg) gen_vec_rr(addpd,mem,reg) +#define vec_mul_rr(mem,reg) gen_vec_rr(mulpd,mem,reg) + +#define vec_mov_mr_1(mem,reg) gen_vec_mr(movsd,mem,reg) +#define vec_mov_rm_1(reg,mem) gen_vec_rm(movsd,reg,mem) +#define vec_mov_rr_1(reg1,reg2) gen_vec_rr(movsd,reg1,reg2) + +#define vec_add_mr_1(mem,reg) gen_vec_mr(addsd,mem,reg) +#define vec_add_rr_1(reg1,reg2) gen_vec_rr(addsd,reg1,reg2) + +#define vec_mul_mr_1(mem,reg) gen_vec_mr(mulsd,mem,reg) +#define vec_mul_rr_1(reg1,reg2) gen_vec_rr(mulsd,reg1,reg2) + +#define vec_splat(mem,reg) vec_splat_wrap(mem,reg) +#define vec_splat_wrap(mem,reg) \ + __asm__ __volatile__ ("movsd %0, " #reg "\n"\ + "unpcklpd " #reg ", " #reg \ + : /* nothing */ \ + : "m" ((mem)[0])) + +/* Hack! */ +/* To use this instruction be sure that register 7 is not in use!!! 
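+ * (With VECLEN == 2 the reduction is a single cross-half add: movhlps
+ * copies the high double of reg into xmm7, and the addpd leaves
+ * lo+hi in the low element of reg; only that low element is
+ * meaningful afterwards.)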
*/ +#define vec_sum(reg) vec_sum_wrap(reg) +#define vec_sum_wrap(reg) \ + __asm__ __volatile__ ("movhlps " #reg ", %%xmm7\n"\ + "addpd %%xmm7, " #reg "\n"\ + : /* nothing */\ + : /* nothing */) +/* + * Added by RCW to improve performance and avoid xmm7 hack (replace vec_sum) + */ +#define vec_zero(vd) __asm__ __volatile__("xorps " Mstr(vd) ", " Mstr(vd) ::) +#ifdef ATL_SSE3 + #define vec_red(vr, vwrk) \ + __asm__ __volatile__("haddpd " Mstr(vr) ", " Mstr(vr) "\n" ::) + #define vec_red2(v0, v1, vw) \ + __asm__ __volatile__("haddpd " Mstr(v1) ", " Mstr(v0) "\n" ::) + #define vec_red4(v0, v1, v2, v3, w0, w1) \ + __asm__ __volatile__("haddpd " Mstr(v1) ", " Mstr(v0) "\n"\ + "haddpd " Mstr(v3) ", " Mstr(v2) "\n"\ + ::) +#else + #define vec_red(vr, vwrk) \ + __asm__ __volatile__ ("pshufd $0xEE, " Mstr(vr) ", " Mstr(vwrk) "\n"\ + "addsd " Mstr(vwrk) ", " Mstr(vr) "\n" ::) +/* + * movapd v0, vw # vw = {v0b, v0a} + * unpcklpd v1,v0 # v0 = {v1a, v0a} + * unpckhpd v1, vw # vw = {v1b, v0b} + * addpd vw, v0 # v0 = {v1ab,v0ab} + */ + #define vec_red2(v0, v1, vw) \ + __asm__ __volatile__("movapd " Mstr(v0) ", " Mstr(vw) "\n"\ + "unpcklpd " Mstr(v1) ", " Mstr(v0) "\n"\ + "unpckhpd " Mstr(v1) ", " Mstr(vw) "\n"\ + "addpd " Mstr(vw) ", " Mstr(v0) "\n"\ + ::) +/* + * movapd v0, w0 # w0 = {v0b, v0a} + * movapd v2, w1 # w1 = {v2b, v2a} + * unpcklpd v1, v0 # v0 = {v1a, v0a} + * unpcklpd v3, v2 # v2 = {v3a, v2a} + * unpckhpd v1, w0 # w0 = {v1b, v0b} + * unpckhpd v3, w1 # w1 = {v3b, v2b} + * addpd w0, v0 # v0 = {v1ab, v0ab} + * addpd w1, v2 # v2 = {v3ab, v2ab} + */ + #define vec_red4(v0, v1, v2, v3, w0, w1) \ + __asm__ __volatile__("movapd " Mstr(v0) ", " Mstr(w0) "\n"\ + "movapd " Mstr(v2) ", " Mstr(w1) "\n"\ + "unpcklpd " Mstr(v1) ", " Mstr(v0) "\n"\ + "unpcklpd " Mstr(v3) ", " Mstr(v2) "\n"\ + "unpckhpd " Mstr(v1) ", " Mstr(w0) "\n"\ + "unpckhpd " Mstr(v3) ", " Mstr(w1) "\n"\ + "addpd " Mstr(w0) ", " Mstr(v0) "\n"\ + "addpd " Mstr(w1) ", " Mstr(v2) "\n"\ + ::) +#endif + +#define vec_sum_full(reg1,reg2,empty1) vec_sum_full_wrap(reg1,reg2,empty1) +#define vec_sum_full_wrap(reg1,reg2,empty1) \ + __asm__ __volatile__ ("movhlps " #reg2 ", " #empty1 "\n"\ + "movlhps " #reg2 ", " #empty1 "\n"\ + "addpd " #empty1 ", " #reg1 "\n"\ + : /* nothing */\ + : /* nothing */) + + +typedef double vector[VECLEN]; + +#endif /* end ifdef SSE2 */ + + +#ifdef THREEDNOW + +/* Peculiarities of 3DNOW. Alignment is not an issue, + * all alignments are legal, however alignment gives a speed increase. + * The vec_acc instruction can be used to sum to registers at once more efficiently + * than a series of vec_sum and vec_store_one + * No muladd. 
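+ * ("sum two registers at once": in the AT&T operand order used here,
+ * pfacc reg1,reg2 leaves reg2[0]+reg2[1] in the low half of reg2 and
+ * reg1[0]+reg1[1] in its high half, so vec_sum_full below reduces two
+ * registers with one instruction.)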
+ */ + + +#define gen_vec_mr(op,mem,reg) \ + __asm__ __volatile__ (#op " %0, " #reg \ + : /* nothing */ \ + : "m" (((mem)[0])), "m" (((mem)[1]))) + +#define gen_vec_rm(op,reg,mem) \ + __asm__ __volatile__ (#op " " #reg ", %0" \ + : "=m" (((mem)[0])), "=m" (((mem)[1])) \ + : /* nothing */ ) + + + + +#define VECLEN 2 + +#define reg0 %%mm0 +#define reg1 %%mm1 +#define reg2 %%mm2 +#define reg3 %%mm3 +#define reg4 %%mm4 +#define reg5 %%mm5 +#define reg6 %%mm6 +#define reg7 %%mm7 + +#define vec_add_mr(mem,reg) gen_vec_mr(pfadd,mem,reg) +#define vec_mul_mr(mem,reg) gen_vec_mr(pfmul,mem,reg) +#define vec_mov_mr(mem,reg) gen_vec_mr(movq,mem,reg) +#define vec_mov_rm(reg,mem) gen_vec_rm(movq,reg,mem) +#define vec_add_rr(reg1,reg2) gen_vec_rr(pfadd,reg1,reg2) +#define vec_mul_rr(reg1,reg2) gen_vec_rr(pfmul,reg1,reg2) +#define vec_acc_rr(reg1,reg2) gen_vec_rr(pfacc,reg1,reg2) +#define vec_mov_rr(reg1,reg2) gen_vec_rr(movq,reg1,reg2) + +#define vec_sum(reg) gen_vec_rr(pfacc,reg,reg) +#define vec_sum_full(reg1,reg2) gen_vec_rr(pfacc,reg1,reg2) + +#define vec_mov_mr_1(mem,reg) gen_vec_mr(movd,mem,reg) +#define vec_mov_rm_1(reg,mem) gen_vec_rm(movd,reg,mem) +#define vec_mov_rr_1(reg1,reg2) gen_vec_rr(movd,reg1,reg2) + +#define vec_add_rr_1(reg1,reg2) gen_vec_rr(pfadd,reg1,reg2) +#define vec_mul_rr_1(reg1,reg2) gen_vec_rr(pfmul,reg1,reg2) + + +#define vec_splat(mem,reg) vec_splat_wrap(mem,reg) +#define vec_splat_wrap(mem,reg) \ + __asm__ __volatile__ ("movd %0, " #reg "\n"\ + "punpckldq " #reg ", " #reg \ + : /* nothing */ \ + : "m" ((mem)[0])) + + +#define vec_load_apart(mem1,mem2,reg) vec_load_apart_wrap(mem1,mem2,reg) +#define vec_load_apart_wrap(mem1,mem2,reg) \ + __asm__ __volatile__ ("movd %0, " #reg "\n"\ + "punpckldq %1, " #reg \ + : /* nothing */ \ + : "m" ((mem1)[0]), "m" (((mem2)[0]))) + + +#define vec_zero(reg) gen_vec_rr(pxor,reg,reg) + +#define vec_enter() __asm__ __volatile__ ("femms") +#define vec_exit() __asm__ __volatile__ ("femms") + +#define align() __asm__ __volatile__ (".align 16") + + +typedef float vector[VECLEN]; + +#endif + + + + + +#ifdef ALTIVEC + +#define VECLEN 4 + +#define reg0 %%vr0 +#define reg1 %%vr1 +#define reg2 %%vr2 +#define reg3 %%vr3 +#define reg4 %%vr4 +#define reg5 %%vr5 +#define reg6 %%vr6 +#define reg7 %%vr7 +#define reg8 %%vr8 +#define reg9 %%vr9 +#define reg10 %%vr10 +#define reg11 %%vr11 +#define reg12 %%vr12 +#define reg13 %%vr13 +#define reg14 %%vr14 +#define reg15 %%vr15 +#define reg16 %%vr16 +#define reg17 %%vr17 +#define reg18 %%vr18 +#define reg19 %%vr19 +#define reg20 %%vr20 +#define reg21 %%vr21 +#define reg22 %%vr22 +#define reg23 %%vr23 +#define reg24 %%vr24 +#define reg25 %%vr25 +#define reg26 %%vr26 +#define reg27 %%vr27 +#define reg28 %%vr28 +#define reg29 %%vr29 +#define reg30 %%vr30 +#define reg31 %%vr31 + +#define gen_vec_mr(op,mem,reg) \ + __asm__ __volatile__ (#op " %0, " #reg \ + : /* nothing */ \ + : "m" (((mem)[0])), "m" (((mem)[1])), "m" (((mem)[2])), "m" (((mem)[3]))) + + +#define gen_vec_rm(op,reg,mem) \ + __asm__ __volatile__ (#op " " #reg ", %0" \ + : "=m" (((mem)[0])), "=m" (((mem)[1])), "=m" (((mem)[2])), "=m" (((mem)[3])) \ + : /* nothing */ ) + + +#define gen_alti3(op,reg1,reg2,regout) \ + __asm__ __volatile__ (#op " " #reg1 ", " #reg2 ", " #regout \ + : /* nothing */ \ + : /* nothing */) + +#define gen_alti_muladd(op,reg1,reg2,regout) \ + __asm__ __volatile__ (#op " " #reg1 ", " #reg2 ", " #regout ", " #regout \ + : /* nothing */ \ + : /* nothing */) + + + +#define vec_mov_mr_a(mem,reg) gen_vec_mr(lvx,mem,reg) +#define 
vec_mov_rm_a(reg,mem) gen_vec_rm(svx,reg,mem) +#define vec_muladd(reg1,reg2,regout) gen_alti3(vmaddfp,reg1,reg2,regout) + +#define vec_zero(reg) gen_alti3(vxor,reg,reg,reg) + + +typedef float vector[VECLEN]; + +#endif + + +#ifdef ALTIVEC_C + +/* These macros have been written by, or greatly inspired by, + * Nicholas A. Coult . Thanks. + */ + +/* assumes that last four registers are not in use! */ +#define transpose(x0,x1,x2,x3) \ +reg28 = vec_mergeh(x0,x2); \ +reg29 = vec_mergeh(x1,x3); \ +reg30 = vec_mergel(x0,x2); \ +reg31 = vec_mergel(x1,x3); \ +x0 = vec_mergeh(reg28,reg29); \ +x1 = vec_mergel(reg28,reg29); \ +x2 = vec_mergeh(reg30,reg31); \ +x3 = vec_mergel(reg30,reg31) + +#define vec_mov_rm(v, where) \ +low = vec_ld(0, (where)); \ +high = vec_ld(16, (where)); \ +p_vector = vec_lvsr(0, (int *)(where)); \ +mask = vec_perm((vector unsigned char)(0), (vector unsigned char)(-1), p_vector); \ +v = vec_perm(v, v, p_vector); \ +low = vec_sel(low, v, mask); \ +high = vec_sel(v, high, mask); \ +vec_st(low, 0, (where)); \ +vec_st(high, 16, (where)) + +#define vec_mov_mr_a(mem,reg) reg = vec_ld(0, mem) + +#define vec_mov_mr(u,v) \ +p_vector = (vector unsigned char)vec_lvsl(0, (int*)(v)); \ +low = (vector unsigned char)vec_ld(0, (v)); \ +high = (vector unsigned char)vec_ld(16, (v)); \ +u=(vector float)vec_perm(low, high, p_vector) + +#define vec_muladd(reg1,reg2,regout) regout = vec_madd(reg1,reg2,regout) +#define vec_add_rr(reg1,reg2) reg2 = vec_add(reg1,reg2) + +#define vec_zero(reg) reg = vec_xor(reg,reg) + +#define vec_sum_full(reg0,reg1,reg2,reg3,regout,empty0,empty1) \ +transpose(reg0, reg1,reg2,reg3,regout,empty0,empty1); \ +empty0 = vec_add(reg0,reg1); \ +empty1 = vec_add(reg2,reg3); \ +regout = vec_add(empty0,empty1) + + +#endif /* ALTIVEC_C */ + + + + + + + + diff --git a/kaldi_io/src/tools/ATLAS/include/contrib/camm_dpa.h b/kaldi_io/src/tools/ATLAS/include/contrib/camm_dpa.h new file mode 100644 index 0000000..af9c6b1 --- /dev/null +++ b/kaldi_io/src/tools/ATLAS/include/contrib/camm_dpa.h @@ -0,0 +1,1626 @@ +#include +#include +#include + +#include "camm_util.h" + + +#if defined(ALIGN) +#if( defined(SCPLX) || defined(DCPLX)) +#error Cannot align complex routines +#endif +#if defined(SREAL) && ( NDPM != 1 ) && ( STRIDE % 4 != 0) +#error Can only align SREAL with NDPM 1 or STRIDE % 4 = 0 +#endif +#if defined(DREAL) && ( NDPM != 1 ) && ( STRIDE % 2 != 0) +#error Can only align DREAL with NDPM 1 or STRIDE % 2 = 0 +#endif +#endif + +/****************************************************************************** + * Single Precision Complex Macros + ******************************************************************************/ + +#ifdef SCPLX + +#ifdef NO_TRANSPOSE + +#if NDPM > 3 +#error Max NDPM is 3 for SCPLX NO_TRANSPOSE +#endif + +#undef plax +#define plax + +#undef R1 +#define R1 2 +#undef R2 +#define R2 4 +#undef R3 +#define R3 6 +#undef R4 +#define R4 6 + +#undef TREG +#define TREG 1 +#undef SREG +#define SREG 0 +#undef CREG +#define CREG 0 + +#ifdef GER +#undef AREG +#define AREG 0 +#undef targ +#define targ(a_) AREG +#undef wb +#define wb(a_,b_) pu(AREG,a_,b_) +#undef wbd +#define wbd(a_,b_) pud(AREG,a_,b_) +#undef w +#define w(a_) +#undef w1_2 +#define w1_2(a_) +#else +#undef AREG +#define AREG TREG +#undef targ +#define targ(a_) CREG +#undef wb +#define wb(a_,b_) +#undef wbd +#define wbd(a_,b_) +#undef w +#define w(a_) pu(CREG,a_ ## 0,si) +#undef w1_2 +#define w1_2(a_) pud(CREG,a_ ## 0,si) +#endif + +#undef src +#define src(a_) a_ +#undef mpx +#define mpx(a_) pls(0,si,a_) 
ps(0,a_,a_) pls(4,si,P(a_,1)) \ + ps(0,P(a_,1),P(a_,1)) sign(a_) +#undef madd +#define madd(a_,b_,c_) pas(a_,b_,c_) +#undef ulfa +#define ulfa(a_) + +#else + +#undef R1 +#define R1 4 +#undef R2 +#define R2 5 +#undef R3 +#define R3 6 +#undef R4 +#define R4 7 + +#undef TREG +#define TREG 3 +#undef SREG +#define SREG 2 +#undef CREG +#define CREG 0 +#undef targ +#define targ(a_) a_ +#undef src +#define src(a_) 0 +#undef w +#define w(a_) +#undef w1_2 +#define w1_2(a_) +#undef mpx +#define mpx(a_) px(a_) +#ifdef BETA0 +#undef ulfa +#define ulfa(a_) phl(a_,0) pa(0,a_) pud(a_,0,si) +#else +#undef ulfa +#define ulfa(a_) pld(0,si,TREG) phl(a_,0) pa(0,a_) pa(TREG,a_) pud(a_,0,si) +#endif +#undef AREG +#define AREG TREG +#undef wb +#define wb(a_,b_) +#undef wbd +#define wbd(a_,b_) +#undef wbs +#define wbs(a_,b_) + + +#undef plax +#define plax pc(CREG,1) ps(160,CREG,CREG) ps(245,1,1) sign(CREG) + + + +#endif + +#if defined(Conj_) && ! defined(GER) +#undef sign +#define sign(a_) pm(SREG,a_) +#else +#undef sign +#define sign(a_) pm(SREG,P(a_,1)) +#endif + + + +#undef plb +#define plb(a_,b_) pl(a_,b_,AREG) +#undef plbd +#define plbd(a_,b_) px(AREG) pld(a_,b_,AREG) + +#undef dpr +#define dpr(a_) pm(src(a_),TREG) pa(TREG,targ(a_)) +#undef dprp +#define dprp(a_,b_,c_) pf(b_,c_) pm(src(a_),TREG) pa(TREG,targ(a_)) +#undef dpi +#define dpi(a_) pm(P(src(a_),1),TREG) ps(177,TREG,TREG) pa(TREG,targ(a_)) + +#ifndef GER + + +#undef plaa +#define plaa(a_) pl(a_ ## 0,si,CREG) plax +#undef wa +#define wa(a_) w(a_) +#undef dp +#define dp(a_,b_,c_) plb(a_ ## 0,b_) dpr(c_) plb(a_ ## 0,b_) dpi(c_) +#undef dpp +#define dpp(a_,b_,c_,d_,e_) plb(a_ ## 0,b_) dprp(c_,d_,e_) plb(a_ ## 0,b_) dpi(c_) +#undef ddp +#define ddp(a_,b_,c_) dp(a_,b_,c_) +#undef ddpp +#define ddpp(a_,b_,c_,d_,e_) dpp(a_,b_,c_,d_,e_) + +#undef plaa1_2 +#define plaa1_2(a_) px(CREG) pld(a_ ## 0,si,CREG) plax +#undef wa1_2 +#define wa1_2(a_) w1_2(a_) +#undef dp1_2 +#define dp1_2(a_,b_,c_) plbd(a_ ## 0,b_) dpr(c_) plbd(a_ ## 0,b_) dpi(c_) +#undef dpp1_2 +#define dpp1_2(a_,b_,c_,d_,e_) plbd(a_ ## 0,b_) dprp(c_,d_,e_) plbd(a_ ## 0,b_) dpi(c_) +#undef ddp1_2 +#define ddp1_2(a_,b_,c_) dp1_2(a_,b_,c_) +#undef ddpp1_2 +#define ddpp1_2(a_,b_,c_,d_,e_) dpp1_2(a_,b_,c_,d_,e_) + + +#else + +#undef lqc +#define lqc(a_) pl(a_ ## 0,si,TREG) +#undef lqc1 +#define lqc1_2(a_) px(TREG) pld(a_ ## 0,si,TREG) + + +#undef plaa +#define plaa(a_) +#undef wa +#define wa(a_) +#undef dp +#define dp(a_,b_,c_) lqc(a_) plb(a_ ## 0,b_) dpr(c_) \ + lqc(a_) dpi(c_) wb(a_ ## 0,b_) +#undef dpp +#define dpp(a_,b_,c_,d_,e_) lqc(a_) plb(a_ ## 0,b_) dpr(c_) pf(d_,e_) \ + lqc(a_) dpi(c_) wb(a_ ## 0,b_) +#undef ddp +#define ddp(a_,b_,c_) dp(a_,b_,c_) +#undef ddpp +#define ddpp(a_,b_,c_,d_,e_) dpp(a_,b_,c_,d_,e_) + +#undef plaa1_2 +#define plaa1_2(a_) +#undef wa1_2 +#define wa1_2(a_) +#undef dp1_2 +#define dp1_2(a_,b_,c_) lqc1_2(a_) plbd(a_ ## 0,b_) dpr(c_) \ + lqc1_2(a_) dpi(c_) wbd(a_ ## 0,b_) +#undef dpp1_2 +#define dpp1_2(a_,b_,c_,d_,e_) lqc1_2(a_) plbd(a_ ## 0,b_) dpr(c_) pf(d_,e_) \ + lqc1_2(a_) dpi(c_) wbd(a_ ## 0,b_) +#undef ddp1_2 +#define ddp1_2(a_,b_,c_) dp1_2(a_,b_,c_) +#undef ddpp1_2 +#define ddpp1_2(a_,b_,c_,d_,e_) dpp1_2(a_,b_,c_,d_,e_) + +#endif + +#endif + +/****************************************************************************** + * Single Precision Real Macros + ******************************************************************************/ + +#ifdef SREAL + +#ifdef NO_TRANSPOSE + +#undef mpx +#define mpx(a_) pls(0,si,a_) ps(0,a_,a_) +#undef madd +#define madd(a_,b_,c_) 
pas(a_,b_,c_) +#undef TREG +#define TREG 1 +#undef targ +#define targ(a_) 0 +#undef src +#define src(a_) a_ +#undef ulfa +#define ulfa(a_) + +#ifdef GER +#undef w +#define w(a_) +#undef w1_2 +#define w1_2(a_) +#undef w1_4 +#define w1_4(a_) +#undef CREG +#define CREG 2 +#undef AREG +#define AREG 0 +#undef cp +#define cp pc(CREG,TREG) +#undef wb +#define wb(a_,b_) pu(AREG,a_,b_) +#undef wbd +#define wbd(a_,b_) pud(AREG,a_,b_) +#undef wbs +#define wbs(a_,b_) pus(AREG,a_,b_) +#else +#undef CREG +#define CREG 0 +#undef AREG +#define AREG TREG +#undef cp +#define cp +#undef wb +#define wb(a_,b_) +#undef wbd +#define wbd(a_,b_) +#undef wbs +#define wbs(a_,b_) +#undef w +#define w(a_) pu(CREG,a_ ## 0,si) +#undef w1_2 +#define w1_2(a_) pud(CREG,a_ ## 0,si) +#undef w1_4 +#define w1_4(a_) pus(CREG,a_ ## 0,si) +#endif + +#else + +#undef mpx +#define mpx(a_) px(a_) +#ifdef BETA0 +#undef madd +#define madd(a_,b_,c_) +#else +#undef madd +#define madd(a_,b_,c_) pas(a_,b_,c_) +#endif +#undef TREG +#define TREG 3 +#undef targ +#define targ(a_) a_ +#undef src +#define src(a_) 0 +#undef w +#define w(a_) +#undef w1_2 +#define w1_2(a_) +#undef w1_4 +#define w1_4(a_) +#undef ulfa +#undef ulfa +#define ulfa(a_) phl(a_,0) pa(0,a_) pc(a_,0) ps(1,0,0) pa(0,a_) \ + madd(0,si,a_) pus(a_,0,si) + +#undef CREG +#define CREG 0 +#undef AREG +#define AREG TREG +#undef cp +#define cp +#undef wb +#define wb(a_,b_) +#undef wbd +#define wbd(a_,b_) +#undef wbs +#define wbs(a_,b_) + +#endif + +#if defined(ALIGN) +#undef plb +#define plb(a_,b_) pla(a_,b_,AREG) +#else +#undef plb +#define plb(a_,b_) pl(a_,b_,AREG) +#endif +#undef plbd +#define plbd(a_,b_) px(AREG) pld(a_,b_,AREG) +#undef plbs +#define plbs(a_,b_) pls(a_,b_,AREG) +#undef dpr +#define dpr(a_) pm(src(a_),TREG) pa(TREG,targ(a_)) +#undef dprp +#define dprp(a_,b_,c_) pf(b_,c_) pm(src(a_),TREG) pa(TREG,targ(a_)) +#undef dprs +#define dprs(a_) pmsr(src(a_),TREG) pasr(TREG,targ(a_)) +#undef dprps +#define dprps(a_,b_,c_) pf(b_,c_) pmsr(src(a_),TREG) pasr(TREG,targ(a_)) + +#undef plaa +#define plaa(a_) pl(a_ ## 0,si,CREG) +#undef wa +#define wa(a_) w(a_) +#undef dp +#define dp(a_,b_,c_) cp plb(a_ ## 0,b_) dpr(c_) wb(a_ ## 0,b_) +#undef dpp +#define dpp(a_,b_,c_,d_,e_) cp plb(a_ ## 0,b_) dprp(c_,d_,e_) wb(a_ ## 0,b_) +#undef ddp +#define ddp(a_,b_,c_) dp(a_,b_,c_) +#undef ddpp +#define ddpp(a_,b_,c_,d_,e_) dpp(a_,b_,c_,d_,e_) + +#undef plaa1_2 +#define plaa1_2(a_) px(CREG) pld(a_ ## 0,si,CREG) +#undef wa1_2 +#define wa1_2(a_) w1_2(a_) +#undef dp1_2 +#define dp1_2(a_,b_,c_) cp plbd(a_ ## 0,b_) dpr(c_) wbd(a_ ## 0,b_) +#undef dpp1_2 +#define dpp1_2(a_,b_,c_,d_,e_) cp plbd(a_ ## 0,b_) dprp(c_,d_,e_) wbd(a_ ## 0,b_) +#undef ddp1_2 +#define ddp1_2(a_,b_,c_) dp1_2(a_,b_,c_) +#undef ddpp1_2 +#define ddpp1_2(a_,b_,c_,d_,e_) dpp1_2(a_,b_,c_,d_,e_) + +#undef plaa1_4 +#define plaa1_4(a_) pls(a_ ## 0,si,CREG) +#undef wa1_4 +#define wa1_4(a_) w1_4(a_) +#undef dp1_4 +#define dp1_4(a_,b_,c_) cp plbs(a_ ## 0,b_) dprs(c_) wbs(a_ ## 0,b_) +#undef dpp1_4 +#define dpp1_4(a_,b_,c_,d_,e_) cp plbs(a_ ## 0,b_) dprps(c_,d_,e_) wbs(a_ ## 0,b_) +#undef ddp1_4 +#define ddp1_4(a_,b_,c_) dp1_4(a_,b_,c_) +#undef ddpp1_4 +#define ddpp1_4(a_,b_,c_,d_,e_) dpp1_4(a_,b_,c_,d_,e_) + + + +#undef R1 +#define R1 4 +#undef R2 +#define R2 5 +#undef R3 +#define R3 6 +#undef R4 +#define R4 7 + +#endif + +/****************************************************************************** + * Double Precision Real Macros + ******************************************************************************/ + +#ifdef DREAL + 
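+/*
+ * Two DREAL paths follow.  With ATL_SSE2 the macros mirror the SREAL
+ * xmm versions above, two doubles per register; the #else branch
+ * instead builds scalar code from the fl/fm/fap/fp primitives of
+ * camm_util.h (x87 stack operations, judging by the fld/fmul/faddp
+ * style naming), which is why its dp/ddp macros look so different.
+ */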
+#ifdef ATL_SSE2 + +#ifdef NO_TRANSPOSE + +#undef mpx +#define mpx(a_) pls(0,si,a_) ps(0,a_,a_) +#undef madd +#define madd(a_,b_,c_) pas(a_,b_,c_) +#undef TREG +#define TREG 1 +#undef targ +#define targ(a_) 0 +#undef src +#define src(a_) a_ +#undef ulfa +#define ulfa(a_) + +#ifdef GER +#undef w +#define w(a_) +#undef w1_2 +#define w1_2(a_) +#undef w1_4 +#define w1_4(a_) +#undef CREG +#define CREG 2 +#undef AREG +#define AREG 0 +#undef cp +#define cp pc(CREG,TREG) +#undef wb +#define wb(a_,b_) pu(AREG,a_,b_) +#undef wbd +#define wbd(a_,b_) pus(AREG,a_,b_) +#undef wbs +/* #define wbs(a_,b_) pus(AREG,a_,b_) */ +#else +#undef CREG +#define CREG 0 +#undef AREG +#define AREG TREG +#undef cp +#define cp +#undef wb +#define wb(a_,b_) +#undef wbd +#define wbd(a_,b_) +#undef wbs +/* #define wbs(a_,b_) */ +#undef w +#define w(a_) pu(CREG,a_ ## 0,si) +#undef w1_2 +#define w1_2(a_) pus(CREG,a_ ## 0,si) +#undef w1_4 +/* #define w1_4(a_) pus(CREG,a_ ## 0,si) */ +#endif + +#else + +#undef mpx +#define mpx(a_) px(a_) +#ifdef BETA0 +#undef madd +#define madd(a_,b_,c_) +#else +#undef madd +#define madd(a_,b_,c_) pas(a_,b_,c_) +#endif +#undef TREG +#define TREG 3 +#undef targ +#define targ(a_) a_ +#undef src +#define src(a_) 0 +#undef w +#define w(a_) +#undef w1_2 +#define w1_2(a_) +#undef w1_4 +#define w1_4(a_) +#undef ulfa +#undef ulfa +#define ulfa(a_) /* phl(a_,0) pa(0,a_) */ pc(a_,0) ps(1,0,0) pa(0,a_) \ + madd(0,si,a_) pus(a_,0,si) + +#undef CREG +#define CREG 0 +#undef AREG +#define AREG TREG +#undef cp +#define cp +#undef wb +#define wb(a_,b_) +#undef wbd +#define wbd(a_,b_) +#undef wbs +#define wbs(a_,b_) + +#endif + +#if defined(ALIGN) +#undef plb +#define plb(a_,b_) pla(a_,b_,AREG) +#else +#undef plb +#define plb(a_,b_) pl(a_,b_,AREG) +#endif +#undef plbd +#define plbd(a_,b_) /* px(AREG) */pls(a_,b_,AREG) +#undef plbs +/* #define plbs(a_,b_) pls(a_,b_,AREG) */ +#undef dpr +#define dpr(a_) pm(src(a_),TREG) pa(TREG,targ(a_)) +#undef dprp +#define dprp(a_,b_,c_) pf(b_,c_) pm(src(a_),TREG) pa(TREG,targ(a_)) +#undef dprs +#define dprs(a_) pmsr(src(a_),TREG) pasr(TREG,targ(a_)) +#undef dprps +#define dprps(a_,b_,c_) pf(b_,c_) pmsr(src(a_),TREG) pasr(TREG,targ(a_)) + +#undef plaa +#define plaa(a_) pl(a_ ## 0,si,CREG) +#undef wa +#define wa(a_) w(a_) +#undef dp +#define dp(a_,b_,c_) cp plb(a_ ## 0,b_) dpr(c_) wb(a_ ## 0,b_) +#undef dpp +#define dpp(a_,b_,c_,d_,e_) cp plb(a_ ## 0,b_) dprp(c_,d_,e_) wb(a_ ## 0,b_) +#undef ddp +#define ddp(a_,b_,c_) dp(a_,b_,c_) +#undef ddpp +#define ddpp(a_,b_,c_,d_,e_) dpp(a_,b_,c_,d_,e_) + +#undef plaa1_2 +#define plaa1_2(a_) /* px(CREG) */pls(a_ ## 0,si,CREG) +#undef wa1_2 +#define wa1_2(a_) w1_2(a_) +#undef dp1_2 +#define dp1_2(a_,b_,c_) cp plbd(a_ ## 0,b_) dprs(c_) wbd(a_ ## 0,b_) +#undef dpp1_2 +#define dpp1_2(a_,b_,c_,d_,e_) cp plbd(a_ ## 0,b_) dprps(c_,d_,e_) wbd(a_ ## 0,b_) +#undef ddp1_2 +#define ddp1_2(a_,b_,c_) dp1_2(a_,b_,c_) +#undef ddpp1_2 +#define ddpp1_2(a_,b_,c_,d_,e_) dpp1_2(a_,b_,c_,d_,e_) + +#undef plaa1_4 +/* #define plaa1_4(a_) pls(a_ ## 0,si,CREG) */ +#undef wa1_4 +/* #define wa1_4(a_) w1_4(a_) */ +#undef dp1_4 +/* #define dp1_4(a_,b_,c_) cp plbs(a_ ## 0,b_) dprs(c_) wbs(a_ ## 0,b_) */ +#undef dpp1_4 +/* #define dpp1_4(a_,b_,c_,d_,e_) cp plbs(a_ ## 0,b_) dprps(c_,d_,e_) wbs(a_ ## 0,b_) */ +#undef ddp1_4 +/* #define ddp1_4(a_,b_,c_) dp1_4(a_,b_,c_) */ +#undef ddpp1_4 +/* #define ddpp1_4(a_,b_,c_,d_,e_) dpp1_4(a_,b_,c_,d_,e_) */ + + + +#undef R1 +#define R1 4 +#undef R2 +#define R2 5 +#undef R3 +#define R3 6 +#undef R4 +#define R4 7 + +#else + +#ifdef 
NO_TRANSPOSE + +#undef t0 +#define t0(a_) 1 +#undef s0 +#define s0(a_) a_ +#undef t8 +#define t8(a_) 2 +#undef s8 +#define s8(a_) a_ +#undef w +#define w(a_) fp(a_ ## 0,si) fp(a_ ## 8,si) +#undef w1_2 +#define w1_2(a_) fp(a_ ## 0,si) +#undef mpx +#define mpx(a_) fl(0,si) fc(M(a_,2)) +#undef madd +#define madd(a_,b_,c_) faa(a_,b_) +#undef ulfa +#define ulfa(a_) fc(0) + +#else + +#undef t0 +#define t0(a_) a_ +#undef s0 +#define s0(a_) 1 +#undef t8 +#define t8(a_) a_ +#undef s8 +#define s8(a_) 2 +#undef w +#define w(a_) +#undef w1_2 +#define w1_2(a_) +#undef mpx +#define mpx(a_) fz +#ifdef BETA0 +#undef madd +#define madd(a_,b_,c_) +#else +#undef madd +#define madd(a_,b_,c_) faa(a_,b_) +#endif +#undef ulfa +#define ulfa(a_) madd(0,si,a_) fp(0,si) + +#endif + + +#ifndef GER + +#undef plaa1_2 +#define plaa1_2(a_) fl(a_ ## 0,si) +#undef wa1_2 +#define wa1_2(a_) w1_2(a_) +#ifdef NO_TRANSPOSE +#undef ddp1_2 +#define ddp1_2(a_,b_,c_) fl(a_ ## 0,b_) fm(M(s0(c_),1),0) fap(0,t0(c_)) +#undef dp1_2 +#define dp1_2(a_,b_,c_) ddp1_2(a_,b_,c_) +#else +#undef ddp1_2 +#define ddp1_2(a_,b_,c_) fl(a_ ## 0,b_) fm(s0(c_),0) fap(0,M(t0(c_),1)) +#undef dp1_2 +#define dp1_2(a_,b_,c_) fl(a_ ## 0,b_) fmp(0,s0(c_)) fap(0,M(t0(c_),2)) +#endif + +#else + +#undef plaa1_2 +#define plaa1_2(a_) fl(a_ ## 0,si) +#undef wa1_2 +#define wa1_2(a_) +#undef ddp1_2 +#define ddp1_2(a_,b_,c_) fd(M(s0(c_),2)) fm(t0(c_),0) faa(a_ ## 0,b_) fp(a_ ## 0,b_) +#undef dp1_2 +#define dp1_2(a_,b_,c_) fm(M(s0(c_),2),0) faa(a_ ## 0,b_) fp(a_ ## 0,b_) + +#endif + + + +#undef plaa +#define plaa(a_) fl(a_ ## 0,si) fl(a_ ## 8,si) fx1 + +#ifndef GER + + +#undef wa +#define wa(a_) w(a_) + + +#undef ddp +#define ddp(a_,b_,c_) fl(a_ ## 0,b_) fm(s0(c_),0) fl(a_ ## 8,b_) \ + fm(P(s8(c_),1),0) fx1 fap(0,P(t0(c_),1)) \ + fap(0,t8(c_)) +#undef ddpp +#define ddpp(a_,b_,c_,d_,e_) fl(a_ ## 0,b_) fm(s0(c_),0) fl(a_ ## 8,b_) \ + fm(P(s8(c_),1),0) pf(d_,e_) fx1 fap(0,P(t0(c_),1)) \ + fap(0,t8(c_)) + +/* #define ddp(a_,b_,c_) fd(M(s0(c_),1)) fma(a_ ## 0,b_) fap(0,t0(c_)) \ */ +/* fd(M(s8(c_),1)) fma(a_ ## 8,b_) fap(0,t8(c_)) */ +/* #define ddpp(a_,b_,c_,d_,e_) fd(M(s0(c_),1)) fma(a_ ## 0,b_) fap(0,t0(c_)) \ */ +/* \ */ +/* fd(M(s8(c_),1)) fma(a_ ## 8,b_) fap(0,t8(c_)) pf(d_,e_) */ + +#ifdef NO_TRANSPOSE + +#undef dp +#define dp(a_,b_,c_) ddp(a_,b_,c_) +#undef dpp +#define dpp(a_,b_,c_,d_,e_) ddpp(a_,b_,c_,d_,e_) + +#else + +#undef dp +#define dp(a_,b_,c_) fl(a_ ## 0,b_) fmp(0,s0(c_)) fl(a_ ## 8,b_) \ + fmp(0,s8(c_)) fap(0,M(t0(c_),1)) fap(0,M(t8(c_),2)) +#undef dpp +#define dpp(a_,b_,c_,d_,e_) fl(a_ ## 0,b_) pf(d_ ,e_) fmp(0,s0(c_)) fl(a_ ## 8,b_) \ + fmp(0,s8(c_)) fap(0,M(t0(c_),1)) fap(0,M(t8(c_),2)) + +/* #define dp(a_,b_,c_) fma(a_ ## 0,b_) fap(0,M(t0(c_),1)) \ */ +/* fma(a_ ## 8,b_) fap(0,M(t8(c_),2)) */ +/* #define dpp(a_,b_,c_,d_,e_) fma(a_ ## 0,b_) fap(0,M(t0(c_),1)) \ */ +/* \ */ +/* fma(a_ ## 8,b_) fap(0,M(t8(c_),2)) pf(d_,e_) */ + +#endif + + +#else + +#undef wa +#define wa(a_) +#undef ddp +#define ddp(a_,b_,c_) fd(M(s0(c_),1)) fm(t0(c_),0) faa(a_ ## 0,b_) fp(a_ ## 0,b_) \ + fd(M(s8(c_),1)) fm(t8(c_),0) faa(a_ ## 8,b_) fp(a_ ## 8,b_) +#undef ddpp +#define ddpp(a_,b_,c_,d_,e_) fd(M(s0(c_),1)) fm(t0(c_),0) faa(a_ ## 0,b_) fp(a_ ## 0,b_) \ + fd(M(s8(c_),1)) fm(t8(c_),0) faa(a_ ## 8,b_) fp(a_ ## 8,b_) pf(d_,e_) + +#undef dp +#define dp(a_,b_,c_) fm(M(s0(c_),1),0) faa(a_ ## 0,b_) fp(a_ ## 0,b_) \ + fm(M(s8(c_),2),0) faa(a_ ## 8,b_) fp(a_ ## 8,b_) +#undef dpp +#define dpp(a_,b_,c_,d_,e_) fm(M(s0(c_),1),0) faa(a_ ## 0,b_) fp(a_ ## 0,b_) \ + fm(M(s8(c_),2),0) 
faa(a_ ## 8,b_) fp(a_ ## 8,b_) pf(d_,e_) + +#endif + + +#undef R1 +#define R1 3 +#undef R2 +#define R2 4 +#undef R3 +#define R3 5 +#undef R4 +#define R4 6 + +#endif + +#endif + +/****************************************************************************** + * Double Precision Complex Macros + ******************************************************************************/ + +#ifdef DCPLX + +#ifdef ATL_SSE2 +#ifdef NO_TRANSPOSE + +#if NDPM > 3 +#error Max NDPM is 3 for DCPLX NO_TRANSPOSE +#endif + +#undef plax +#define plax + +#undef R1 +#define R1 2 +#undef R2 +#define R2 4 +#undef R3 +#define R3 6 +#undef R4 +#define R4 6 + +#undef TREG +#define TREG 1 +#undef SREG +#define SREG 0 +#undef CREG +#define CREG 0 + +#ifdef GER +#undef AREG +#define AREG 0 +#undef targ +#define targ(a_) AREG +#undef wb +#define wb(a_,b_) pu(AREG,a_,b_) +#undef wbd +/* #define wbd(a_,b_) pud(AREG,a_,b_) */ +#undef w +#define w(a_) +#undef w1_2 +/* #define w1_2(a_) */ +#else +#undef AREG +#define AREG TREG +#undef targ +#define targ(a_) CREG +#undef wb +#define wb(a_,b_) +#undef wbd +/* #define wbd(a_,b_) */ +#undef w +#define w(a_) pu(CREG,a_ ## 0,si) +#undef w1_2 +/* #define w1_2(a_) pud(CREG,a_ ## 0,si) */ +#endif + +#undef src +#define src(a_) a_ +#undef mpx +#define mpx(a_) pls(0,si,a_) ps(0,a_,a_) pls(8,si,P(a_,1)) \ + ps(0,P(a_,1),P(a_,1)) sign(a_) +#undef madd +#define madd(a_,b_,c_) pas(a_,b_,c_) +#undef ulfa +#define ulfa(a_) + +#else + +#undef R1 +#define R1 4 +#undef R2 +#define R2 5 +#undef R3 +#define R3 6 +#undef R4 +#define R4 7 + +#undef TREG +#define TREG 3 +#undef SREG +#define SREG 2 +#undef CREG +#define CREG 0 +#undef targ +#define targ(a_) a_ +#undef src +#define src(a_) 0 +#undef w +#define w(a_) +#undef w1_2 +#define w1_2(a_) +#undef mpx +#define mpx(a_) px(a_) +#ifdef BETA0 +#undef ulfa +#define ulfa(a_) /* phl(a_,0) pa(0,a_) */pu(a_,0,si) +#else +#undef ulfa +#define ulfa(a_) pl(0,si,TREG) /* phl(a_,0) pa(0,a_) */ pa(TREG,a_) pu(a_,0,si) +#endif +#undef AREG +#define AREG TREG +#undef wb +#define wb(a_,b_) +#undef wbd +#define wbd(a_,b_) +#undef wbs +#define wbs(a_,b_) + + +#undef plax +#define plax pc(CREG,1) ps(0,CREG,CREG) ps(3,1,1) sign(CREG) + + + +#endif + +#if defined(Conj_) && ! 
defined(GER) +#undef sign +#define sign(a_) pm(SREG,a_) +#else +#undef sign +#define sign(a_) pm(SREG,P(a_,1)) +#endif + + + +#undef plb +#define plb(a_,b_) pl(a_,b_,AREG) +#undef plbd +/* #define plbd(a_,b_) px(AREG) pld(a_,b_,AREG) */ + +#undef dpr +#define dpr(a_) pm(src(a_),TREG) pa(TREG,targ(a_)) +#undef dprp +#define dprp(a_,b_,c_) pf(b_,c_) pm(src(a_),TREG) pa(TREG,targ(a_)) +#undef dpi +#define dpi(a_) pm(P(src(a_),1),TREG) ps(1,TREG,TREG) pa(TREG,targ(a_)) + +#ifndef GER + +#undef plaa +#define plaa(a_) pl(a_ ## 0,si,CREG) plax +#undef wa +#define wa(a_) w(a_) +#undef dp +#define dp(a_,b_,c_) plb(a_ ## 0,b_) dpr(c_) plb(a_ ## 0,b_) dpi(c_) +#undef dpp +#define dpp(a_,b_,c_,d_,e_) plb(a_ ## 0,b_) dprp(c_,d_,e_) plb(a_ ## 0,b_) dpi(c_) +#undef ddp +#define ddp(a_,b_,c_) dp(a_,b_,c_) +#undef ddpp +#define ddpp(a_,b_,c_,d_,e_) dpp(a_,b_,c_,d_,e_) + +#undef plaa1_2 +/* #define plaa1_2(a_) px(CREG) pld(a_ ## 0,si,CREG) plax */ +#undef wa1_2 +/* #define wa1_2(a_) w1_2(a_) */ +#undef dp1_2 +/* #define dp1_2(a_,b_,c_) plbd(a_ ## 0,b_) dpr(c_) plbd(a_ ## 0,b_) dpi(c_) */ +#undef dpp1_2 +/* #define dpp1_2(a_,b_,c_,d_,e_) plbd(a_ ## 0,b_) dprp(c_,d_,e_) plbd(a_ ## 0,b_) dpi(c_) */ +#undef ddp1_2 +/* #define ddp1_2(a_,b_,c_) dp1_2(a_,b_,c_) */ +#undef ddpp1_2 +/* #define ddpp1_2(a_,b_,c_,d_,e_) dpp1_2(a_,b_,c_,d_,e_) */ + + +#else + +#undef lqc +#define lqc(a_) pl(a_ ## 0,si,TREG) +#undef lqc1 +/* #define lqc1_2(a_) px(TREG) pld(a_ ## 0,si,TREG) */ + + +#undef plaa +#define plaa(a_) +#undef wa +#define wa(a_) +#undef dp +#define dp(a_,b_,c_) lqc(a_) plb(a_ ## 0,b_) dpr(c_) \ + lqc(a_) dpi(c_) wb(a_ ## 0,b_) +#undef dpp +#define dpp(a_,b_,c_,d_,e_) lqc(a_) plb(a_ ## 0,b_) dpr(c_) pf(d_,e_) \ + lqc(a_) dpi(c_) wb(a_ ## 0,b_) +#undef ddp +#define ddp(a_,b_,c_) dp(a_,b_,c_) +#undef ddpp +#define ddpp(a_,b_,c_,d_,e_) dpp(a_,b_,c_,d_,e_) + +#undef plaa1_2 +/* #define plaa1_2(a_) */ +#undef wa1_2 +/* #define wa1_2(a_) */ +#undef dp1_2 +/* #define dp1_2(a_,b_,c_) lqc1_2(a_) plbd(a_ ## 0,b_) dpr(c_) \ */ +/* lqc1_2(a_) dpi(c_) wbd(a_ ## 0,b_) */ +#undef dpp1_2 +/* #define dpp1_2(a_,b_,c_,d_,e_) lqc1_2(a_) plbd(a_ ## 0,b_) dpr(c_) pf(d_,e_) \ */ +/* lqc1_2(a_) dpi(c_) wbd(a_ ## 0,b_) */ +#undef ddp1_2 +/* #define ddp1_2(a_,b_,c_) dp1_2(a_,b_,c_) */ +#undef ddpp1_2 +/* #define ddpp1_2(a_,b_,c_,d_,e_) dpp1_2(a_,b_,c_,d_,e_) */ + +#endif + +#else + +#if NDPM > 2 +#error Max NDPM is 2 for DCPLX +#endif + +#undef TREG +#define TREG 2 + +#ifdef NO_TRANSPOSE + +#undef w +#define w(a_) fp(a_ ## 0,si) fp(a_ ## 8,si) +#undef plax +#define plax fx1 +#undef srr +#define srr(a_) a_ +#undef sri +#define sri(a_) a_ +#undef sir +#define sir(a_) a_ +#undef sii +#define sii(a_) a_ +#undef trr +#define trr(a_) P(TREG,1) +#undef tri +#define tri(a_) M(TREG,1) +#undef tir +#define tir(a_) TREG +#undef tii +#define tii(a_) TREG +#undef mpx +#define mpx(a_) fl(0,si) fl(8,si) fc(M(a_,2)) fc(M(a_,2)) +#undef madd +#define madd(a_,b_,c_) faa(a_,b_) +#undef ulfa +#define ulfa(a_) fc(0) fc(0) + +#else + +#undef srr +#define srr(a_) P(TREG,1) +#undef sri +#define sri(a_) M(TREG,1) +#undef sir +#define sir(a_) TREG +#undef sii +#define sii(a_) TREG +#undef trr +#define trr(a_) a_ +#undef tri +#define tri(a_) a_ +#undef tir +#define tir(a_) a_ +#undef tii +#define tii(a_) a_ +#undef w +#define w(a_) +#undef plax +#define plax +#undef mpx +#define mpx(a_) fz fz +#ifdef BETA0 +#undef madd +#define madd(a_,b_,c_) +#else +#undef madd +#define madd(a_,b_,c_) faa(a_,b_) +#endif +#undef ulfa +#define ulfa(a_) madd(0,si,a_) fp(0,si) 
madd(8,si,a_) fp(8,si) + +#endif + + + +#ifdef Conj_ +#undef fapi +#define fapi(a_,b_) fsp(b_) +#undef fspi +#define fspi(a_,b_) fap(a_,b_) +#else +#undef fapi +#define fapi(a_,b_) fap(a_,b_) +#undef fspi +#define fspi(a_,b_) fsp(b_) +#endif + +#ifndef GER + + +#undef plaa +#define plaa(a_) fl(a_ ## 0,si) fl(a_ ## 8,si) plax +#undef wa +#define wa(a_) w(a_) +#undef ddp +#define ddp(a_,b_,c_) fl(a_ ## 0,b_) fd(0) fm(srr(c_),0) fap(0,trr(c_)) \ + fm(sri(c_),0) fap(0,tri(c_))\ + fl(a_ ## 8,b_) fd(0) fm(sir(c_),0) fspi(0,tir(c_)) \ + fm(sii(c_),0) fapi(0,tii(c_)) +#undef ddpp +#define ddpp(a_,b_,c_,d_,e_) fl(a_ ## 0,b_) fd(0) fm(srr(c_),0) fap(0,trr(c_)) \ + fm(sri(c_),0) fap(0,tri(c_))\ + fl(a_ ## 8,b_) fd(0) pf(d_,e_) fm(sir(c_),0) fspi(0,tir(c_))\ + fm(sii(c_),0) fapi(0,tii(c_)) + + + +#ifdef NO_TRANSPOSE + + + +#undef dp +#define dp(a_,b_,c_) ddp(a_,b_,c_) +#undef dpp +#define dpp(a_,b_,c_,d_,e_) ddpp(a_,b_,c_,d_,e_) + + + +#else + +#undef dp +#define dp(a_,b_,c_) fl(a_ ## 0,b_) fd(0) fm(srr(c_),0) fap(0,trr(c_)) \ + fm(sri(c_),0) fap(0,tri(c_))\ + fl(a_ ## 8,b_) fm(0,sir(c_)) fmp(0,M(sir(c_),1)) \ + fspi(0,M(tir(c_),2)) fapi(0,M(tii(c_),2)) + +#undef dpp +#define dpp(a_,b_,c_,d_,e_) fl(a_ ## 0,b_) fd(0) fm(srr(c_),0) fap(0,trr(c_)) \ + pf(d_,e_) fm(sri(c_),0) fap(0,tri(c_))\ + fl(a_ ## 8,b_) fm(0,sir(c_)) fmp(0,M(sir(c_),1)) \ + fspi(0,M(tir(c_),2)) fapi(0,M(tii(c_),2)) + + +#endif + +#else + +#undef plaa +#define plaa(a_) fl(a_ ## 0,si) fl(a_ ## 8,si) plax +#undef wa +#define wa(a_) + +#undef ddprr +#define ddprr(a_,b_,c_) fl(a_ ## 0,b_) \ + fd(tri(c_)) fm(P(sri(c_),1),0) fap(0,1) \ + fd(M(trr(c_),1)) fm(srr(c_),0) fspi(0,1) \ + fp(a_ ## 0,b_) +#undef ddpri +#define ddpri(a_,b_,c_) fl(a_ ## 8,b_) \ + fd(tii(c_)) fm(P(sii(c_),1),0) fap(0,1) \ + fd(M(tir(c_),1)) fm(sir(c_),0) fapi(0,1) \ + fp(a_ ## 8,b_) +#undef dpri +#define dpri(a_,b_,c_) fl(a_ ## 8,b_) \ + fx(2) fm(sir(c_),0) fap(0,2) \ + fm(M(sii(c_),2),0) fapi(0,1) \ + fp(a_ ## 8,b_) + + +#undef ddpp +#define ddpp(a_,b_,c_,d_,e_) ddprr(a_,b_,c_) pf(d_,e_) ddpri(a_,b_,c_) +#undef ddp +#define ddp(a_,b_,c_) ddprr(a_,b_,c_) ddpri(a_,b_,c_) +#undef dpp +#define dpp(a_,b_,c_,d_,e_) ddprr(a_,b_,c_) pf(d_,e_) dpri(a_,b_,c_) +#undef dp +#define dp(a_,b_,c_) ddprr(a_,b_,c_) dpri(a_,b_,c_) + +#endif + + +#undef R1 +#define R1 4 +#undef R2 +#define R2 6 +#undef R3 +#define R3 6 +#undef R4 +#define R4 6 + +#endif + +#endif + + +/****************************************************************************** + * General Macros + ******************************************************************************/ + + + + +#undef bla1 +#define bla1(a_,b_) plaa(a_) dpp(a_,ax,R1,b_,si) wa(a_) +#undef blb1 +#define blb1(a_,b_) plaa(a_) dpp(a_,ax,R1,b_,ax) wa(a_) + +#undef bla2 +#undef bla2 +#define bla2(a_,b_) pf(b_,si) plaa(a_) ddp(a_,ax,R1) pf(b_,ax) dp(a_,bx,R2) wa(a_) +#undef blb2 +#undef blb2 +#define blb2(a_,b_) plaa(a_) ddpp(a_,ax,R1,b_,bx) dp(a_,bx,R2) wa(a_) + +#undef bla3 +#define bla3(a_,b_) plaa(a_) ddpp(a_,ax,R1,b_,si) ddp(a_,bx,R2) \ + dpp(a_,cx,R3,b_,ax) wa(a_) +#undef blb3 +#define blb3(a_,b_) plaa(a_) ddpp(a_,ax,R1,b_,bx) ddp(a_,bx,R2) \ + dpp(a_,cx,R3,b_,cx) wa(a_) + +#undef bla4 +#define bla4(a_,b_) plaa(a_) ddpp(a_,ax,R1,b_,si) ddpp(a_,bx,R2,b_,ax) \ + ddp(a_,cx,R3) dpp(a_,dx,R4,b_,bx) wa(a_) +#undef blb4 +#define blb4(a_,b_) plaa(a_) ddp(a_,ax,R1) ddpp(a_,bx,R2,b_,cx) \ + ddp(a_,cx,R3) dpp(a_,dx,R4,b_,dx) wa(a_) + +#undef bla +#define bla(a_,b_) Mjoin(bla,NDP)(a_,b_) +#undef blb +#define blb(a_,b_) Mjoin(blb,NDP)(a_,b_) + + + +#undef 
bla11_2 +#define bla11_2(a_) plaa1_2(a_) dp1_2(a_,ax,R1) wa1_2(a_) +#undef bla21_2 +#define bla21_2(a_) plaa1_2(a_) ddp1_2(a_,ax,R1) dp1_2(a_,bx,R2) wa1_2(a_) +#undef bla31_2 +#define bla31_2(a_) plaa1_2(a_) ddp1_2(a_,ax,R1) ddp1_2(a_,bx,R2) \ + dp1_2(a_,cx,R3) wa1_2(a_) +#undef bla41_2 +#define bla41_2(a_) plaa1_2(a_) ddp1_2(a_,ax,R1) ddp1_2(a_,bx,R2) \ + ddp1_2(a_,cx,R3) dp1_2(a_,dx,R4) wa1_2(a_) + +#undef bla1_2 +#define bla1_2(a_) Mjoin(Mjoin(bla,NDP),1_2)(a_) + + + +#undef bla11_4 +#define bla11_4(a_) plaa1_4(a_) dp1_4(a_,ax,R1) wa1_4(a_) +#undef bla21_4 +#define bla21_4(a_) plaa1_4(a_) ddp1_4(a_,ax,R1) dp1_4(a_,bx,R2) wa1_4(a_) +#undef bla31_4 +#define bla31_4(a_) plaa1_4(a_) ddp1_4(a_,ax,R1) ddp1_4(a_,bx,R2) \ + dp1_4(a_,cx,R3) wa1_4(a_) +#undef bla41_4 +#define bla41_4(a_) plaa1_4(a_) ddp1_4(a_,ax,R1) ddp1_4(a_,bx,R2) \ + ddp1_4(a_,cx,R3) dp1_4(a_,dx,R4) wa1_4(a_) + +#undef bla1_4 +#define bla1_4(a_) Mjoin(Mjoin(bla,NDP),1_4)(a_) + + + +#undef inc1 +#define inc1(a_) a(a_,si) a(a_,ax) +#undef inc2 +#define inc2(a_) inc1(a_) a(a_,bx) +#undef inc3 +#define inc3(a_) inc2(a_) a(a_,cx) +#undef inc4 +#define inc4(a_) inc3(a_) a(a_,dx) + +#undef inc +#define inc(a_) Mjoin(inc,NDP)(a_) + + +#ifdef PREFETCH +/* #include "camm_arith.h" */ +#undef S +#define S(a_,b_) (a_) + (b_) +#undef PF1 +#define PF1 PREFETCH +#undef PF2 +#define PF2 S(PF1,32) +#undef PF3 +#define PF3 S(PF1,64) +#undef PF4 +#define PF4 S(PF1,96) +#undef PF5 +#define PF5 S(PF1,128) +#undef PF6 +#define PF6 S(PF1,160) +#undef PF7 +#define PF7 S(PF1,192) +#undef PF8 +#define PF8 S(PF1,224) +#else +#undef PF1 +#define PF1 64 +#undef PF2 +#define PF2 96 +#undef PF3 +#define PF3 128 +#undef PF4 +#define PF4 160 +#undef PF5 +#define PF5 192 +#undef PF6 +#define PF6 224 +#undef PF7 +#define PF7 256 +#undef PF8 +#define PF8 288 +#endif + + +#if defined(NO_TRANSPOSE) && !defined(SREAL) && !defined(GER) +#undef pf +#define pf(a_,b_) f(t0,a_,b_) +#else +#undef pf +#define pf(a_,b_) f(nta,a_,b_) +#endif + +#undef bl1 +#define bl1 bla1_4(0x0) inc(4) +#undef bl2 +#define bl2 bla1_2(0x0) inc(8) +#undef bl4 +#define bl4 bla(0x0,PF1) inc(16) +#undef bl8 +#define bl8 bla(0x0,PF1) blb(0x1,PF1) inc(32) +#undef bl16 +#define bl16 bla(0x0,PF1) blb(0x1,PF1) bla(0x2,PF2) blb(0x3,PF2) inc(64) +#undef bl32 +#define bl32 bla(0x0,PF1) blb(0x1,PF1) bla(0x2,PF2) blb(0x3,PF2) \ + bla(0x4,PF3) blb(0x5,PF3) bla(0x6,PF4) blb(0x7,PF4) inc(128) +#undef bl64 +#define bl64 bla(0x0,PF1) blb(0x1,PF1) bla(0x2,PF2) blb(0x3,PF2) \ + bla(0x4,PF3) blb(0x5,PF3) bla(0x6,PF4) blb(0x7,PF4) \ + bla(0x8,PF5) blb(0x9,PF5) bla(0xa,PF6) blb(0xb,PF6) \ + bla(0xc,PF7) blb(0xd,PF7) bla(0xe,PF8) blb(0xf,PF8) inc(256) + +/* #define in2 inc(8) */ +/* #define in4 inc(16) */ +/* #define in8 inc(32) */ +/* #define in16 inc(64) */ + +#undef in2 +#define in2 +#undef in4 +#define in4 +#undef in8 +#define in8 +#undef in16 +#define in16 + +#ifdef NO_TRANSPOSE +#undef incf +#define incf ra(di,si) +#else +#undef incf