diff --git a/security/clamav-devel/Makefile b/security/clamav-devel/Makefile
index dfcf072b72f2..a1eb9dddfca4 100644
--- a/security/clamav-devel/Makefile
+++ b/security/clamav-devel/Makefile
@@ -2,7 +2,7 @@
 # $FreeBSD$
 
 PORTNAME=	clamav
-PORTVERSION=	20130125
+PORTVERSION=	20130209
 CATEGORIES=	security
 MASTER_SITES=	http://www.clamav.net/snapshot/ \
 		LOCAL/garga/clamav-devel
@@ -20,7 +20,7 @@ ARC_DESC=	Enable arch archives support
 ARJ_DESC=	Enable arj archives support
 LHA_DESC=	Enable lha archives support
 UNZOO_DESC=	Enable zoo archives support
-UNRAR_DESC=	Enable rar archives support
+UNRAR_DESC=	Enable rar archives support (req. for unit tests)
 LLVM_DESC=	Enable JIT Bytecode compiler (bundled LLVM)
 TESTS_DESC=	Run compile-time tests (req. python)
 MILTER_DESC=	Compile the milter interface
@@ -214,7 +214,7 @@ pre-configure:
 	fi
 .endif
 
-.if ${PORT_OPTIONS:MTESTS} && ${OSVERSION} >= 700000
+.if ${PORT_OPTIONS:MTESTS} && ${PORT_OPTIONS:MUNRAR} && ${OSVERSION} >= 700000
 post-build:
 	@if [ ! -f "${PY_NO_THREAD}" ]; then \
 		${GMAKE} -C ${WRKSRC} check; \
diff --git a/security/clamav-devel/distinfo b/security/clamav-devel/distinfo
index 3f83968bf1cc..350b08ef8a86 100644
--- a/security/clamav-devel/distinfo
+++ b/security/clamav-devel/distinfo
@@ -1,2 +1,2 @@
-SHA256 (clamav-devel-20130125.tar.gz) = 288d39babcf097438a8f252d65c58debe6fa243d7d80b1002c3789bcac879b79
-SIZE (clamav-devel-20130125.tar.gz) = 37000592
+SHA256 (clamav-devel-20130209.tar.gz) = 44159451330b527df6f1fd02084dd5aa1e176ad95f89f8cd5dac036e650dff6f
+SIZE (clamav-devel-20130209.tar.gz) = 37002027
diff --git a/security/clamav-devel/files/patch-b__libclamav__tomsfastmath__mont__fp_montgomery_reduce.c b/security/clamav-devel/files/patch-b__libclamav__tomsfastmath__mont__fp_montgomery_reduce.c
new file mode 100644
index 000000000000..321f45158bce
--- /dev/null
+++ b/security/clamav-devel/files/patch-b__libclamav__tomsfastmath__mont__fp_montgomery_reduce.c
@@ -0,0 +1,119 @@
+--- libclamav/tomsfastmath/mont/fp_montgomery_reduce.c
++++ libclamav/tomsfastmath/mont/fp_montgomery_reduce.c
+@@ -30,7 +30,7 @@ asm( \
+    "movl %%edx,%1 \n\t" \
+ :"=g"(_c[LO]), "=r"(cy) \
+ :"0"(_c[LO]), "1"(cy), "g"(mu), "g"(*tmpm++) \
+-: "%eax", "%edx", "%cc")
++: "%eax", "%edx", "cc")
+ 
+ #define PROPCARRY \
+ asm( \
+@@ -39,7 +39,7 @@ asm( \
+    "movzbl %%al,%1 \n\t" \
+ :"=g"(_c[LO]), "=r"(cy) \
+ :"0"(_c[LO]), "1"(cy) \
+-: "%eax", "%cc")
++: "%eax", "cc")
+ 
+ /******************************************************************/
+ #elif defined(TFM_X86_64)
+@@ -62,7 +62,7 @@ asm( \
+    "movq %%rdx,%1 \n\t" \
+ :"=g"(_c[LO]), "=r"(cy) \
+ :"0"(_c[LO]), "1"(cy), "r"(mu), "r"(*tmpm++) \
+-: "%rax", "%rdx", "%cc")
++: "%rax", "%rdx", "cc")
+ 
+ #define INNERMUL8 \
+ asm( \
+@@ -155,7 +155,7 @@ asm( \
+ \
+ :"=r"(_c), "=r"(cy) \
+ : "0"(_c), "1"(cy), "g"(mu), "r"(tmpm)\
+-: "%rax", "%rdx", "%r10", "%r11", "%cc")
++: "%rax", "%rdx", "%r10", "%r11", "cc")
+ 
+ 
+ #define PROPCARRY \
+@@ -165,7 +165,7 @@ asm( \
+    "movzbq %%al,%1 \n\t" \
+ :"=g"(_c[LO]), "=r"(cy) \
+ :"0"(_c[LO]), "1"(cy) \
+-: "%rax", "%cc")
++: "%rax", "cc")
+ 
+ /******************************************************************/
+ #elif defined(TFM_SSE2)
+@@ -280,7 +280,7 @@ asm( \
+    "movzbl %%al,%1 \n\t" \
+ :"=g"(_c[LO]), "=r"(cy) \
+ :"0"(_c[LO]), "1"(cy) \
+-: "%eax", "%cc")
++: "%eax", "cc")
+ 
+ /******************************************************************/
+ #elif defined(TFM_ARM)
+@@ -300,7 +300,7 @@ asm( \
+    " MOVCC %0,#0 \n\t" \
+    " UMLAL r0,%0,%3,%4 \n\t" \
+    " STR r0,%1 \n\t" \
+-:"=r"(cy),"=m"(_c[0]):"0"(cy),"r"(mu),"r"(*tmpm++),"1"(_c[0]):"r0","%cc"); ++:"=r"(cy),"=m"(_c[0]):"0"(cy),"r"(mu),"r"(*tmpm++),"1"(_c[0]):"r0","cc"); + + #define PROPCARRY \ + asm( \ +@@ -309,7 +309,7 @@ asm( \ + " STR r0,%1 \n\t" \ + " MOVCS %0,#1 \n\t" \ + " MOVCC %0,#0 \n\t" \ +-:"=r"(cy),"=m"(_c[0]):"0"(cy),"1"(_c[0]):"r0","%cc"); ++:"=r"(cy),"=m"(_c[0]):"0"(cy),"1"(_c[0]):"r0","cc"); + + /******************************************************************/ + #elif defined(TFM_PPC32) +@@ -331,7 +331,7 @@ asm( \ + " addc 16,16,18 \n\t" \ + " addze %0,17 \n\t" \ + " stw 16,%1 \n\t" \ +-:"=r"(cy),"=m"(_c[0]):"0"(cy),"r"(mu),"r"(tmpm[0]),"1"(_c[0]):"16", "17", "18","%cc"); ++tmpm; ++:"=r"(cy),"=m"(_c[0]):"0"(cy),"r"(mu),"r"(tmpm[0]),"1"(_c[0]):"16", "17", "18","cc"); ++tmpm; + + #define PROPCARRY \ + asm( \ +@@ -340,7 +340,7 @@ asm( \ + " stw 16,%1 \n\t" \ + " xor %0,%0,%0 \n\t" \ + " addze %0,%0 \n\t" \ +-:"=r"(cy),"=m"(_c[0]):"0"(cy),"1"(_c[0]):"16","%cc"); ++:"=r"(cy),"=m"(_c[0]):"0"(cy),"1"(_c[0]):"16","cc"); + + /******************************************************************/ + #elif defined(TFM_PPC64) +@@ -362,7 +362,7 @@ asm( \ + " addc r16,r16,r18 \n\t" \ + " addze %0,r17 \n\t" \ + " sdx r16,0,%1 \n\t" \ +-:"=r"(cy),"=m"(_c[0]):"0"(cy),"r"(mu),"r"(tmpm[0]),"1"(_c[0]):"r16", "r17", "r18","%cc"); ++tmpm; ++:"=r"(cy),"=m"(_c[0]):"0"(cy),"r"(mu),"r"(tmpm[0]),"1"(_c[0]):"r16", "r17", "r18","cc"); ++tmpm; + + #define PROPCARRY \ + asm( \ +@@ -371,7 +371,7 @@ asm( \ + " sdx r16,0,%1 \n\t" \ + " xor %0,%0,%0 \n\t" \ + " addze %0,%0 \n\t" \ +-:"=r"(cy),"=m"(_c[0]):"0"(cy),"1"(_c[0]):"r16","%cc"); ++:"=r"(cy),"=m"(_c[0]):"0"(cy),"1"(_c[0]):"r16","cc"); + + /******************************************************************/ + #elif defined(TFM_AVR32) +@@ -401,7 +401,7 @@ asm( \ + " st.w %1,r2 \n\t" \ + " eor %0,%0 \n\t" \ + " acr %0 \n\t" \ +-:"=r"(cy),"=r"(&_c[0]):"0"(cy),"1"(&_c[0]):"r2","%cc"); ++:"=r"(cy),"=r"(&_c[0]):"0"(cy),"1"(&_c[0]):"r2","cc"); + + /******************************************************************/ + #elif defined(TFM_MIPS) diff --git a/security/clamav-devel/files/patch-b__libclamav__tomsfastmath__mul__fp_mul_comba.c b/security/clamav-devel/files/patch-b__libclamav__tomsfastmath__mul__fp_mul_comba.c new file mode 100644 index 000000000000..4bb3388f548b --- /dev/null +++ b/security/clamav-devel/files/patch-b__libclamav__tomsfastmath__mul__fp_mul_comba.c @@ -0,0 +1,38 @@ +--- libclamav/tomsfastmath/mul/fp_mul_comba.c ++++ libclamav/tomsfastmath/mul/fp_mul_comba.c +@@ -53,7 +53,7 @@ asm( \ + "addl %%eax,%0 \n\t" \ + "adcl %%edx,%1 \n\t" \ + "adcl $0,%2 \n\t" \ +- :"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "m"(i), "m"(j) :"%eax","%edx","%cc"); ++ :"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "m"(i), "m"(j) :"%eax","%edx","cc"); + + #elif defined(TFM_X86_64) + /* x86-64 optimized */ +@@ -88,7 +88,7 @@ asm ( \ + "addq %%rax,%0 \n\t" \ + "adcq %%rdx,%1 \n\t" \ + "adcq $0,%2 \n\t" \ +- :"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "g"(i), "g"(j) :"%rax","%rdx","%cc"); ++ :"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "g"(i), "g"(j) :"%rax","%rdx","cc"); + + #elif defined(TFM_SSE2) + /* use SSE2 optimizations */ +@@ -128,7 +128,7 @@ asm( \ + "movd %%mm0,%%eax \n\t" \ + "adcl %%eax,%1 \n\t" \ + "adcl $0,%2 \n\t" \ +- :"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "m"(i), "m"(j) :"%eax","%cc"); ++ :"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "m"(i), "m"(j) :"%eax","cc"); + + #elif defined(TFM_ARM) 
+ /* ARM code */
+@@ -155,7 +155,7 @@ asm( \
+    " ADDS %0,%0,r0 \n\t" \
+    " ADCS %1,%1,r1 \n\t" \
+    " ADC %2,%2,#0 \n\t" \
+-:"=r"(c0), "=r"(c1), "=r"(c2) : "0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j) : "r0", "r1", "%cc");
++:"=r"(c0), "=r"(c1), "=r"(c2) : "0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j) : "r0", "r1", "cc");
+ 
+ #elif defined(TFM_PPC32)
+ /* For 32-bit PPC */
diff --git a/security/clamav-devel/files/patch-b__libclamav__tomsfastmath__sqr__fp_sqr_comba.c b/security/clamav-devel/files/patch-b__libclamav__tomsfastmath__sqr__fp_sqr_comba.c
new file mode 100644
index 000000000000..25d024499db4
--- /dev/null
+++ b/security/clamav-devel/files/patch-b__libclamav__tomsfastmath__sqr__fp_sqr_comba.c
@@ -0,0 +1,277 @@
+--- libclamav/tomsfastmath/sqr/fp_sqr_comba.c
++++ libclamav/tomsfastmath/sqr/fp_sqr_comba.c
+@@ -41,7 +41,7 @@ asm( \
+    "addl %%eax,%0 \n\t" \
+    "adcl %%edx,%1 \n\t" \
+    "adcl $0,%2 \n\t" \
+-   :"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "m"(i) :"%eax","%edx","%cc");
++   :"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "m"(i) :"%eax","%edx","cc");
+ 
+ #define SQRADD2(i, j) \
+ asm( \
+@@ -53,7 +53,7 @@ asm( \
+    "addl %%eax,%0 \n\t" \
+    "adcl %%edx,%1 \n\t" \
+    "adcl $0,%2 \n\t" \
+-   :"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "m"(i), "m"(j) :"%eax","%edx","%cc");
++   :"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "m"(i), "m"(j) :"%eax","%edx","cc");
+ 
+ #define SQRADDSC(i, j) \
+ asm( \
+@@ -62,7 +62,7 @@ asm( \
+    "movl %%eax,%0 \n\t" \
+    "movl %%edx,%1 \n\t" \
+    "xorl %2,%2 \n\t" \
+-   :"=r"(sc0), "=r"(sc1), "=r"(sc2): "0"(sc0), "1"(sc1), "2"(sc2), "g"(i), "g"(j) :"%eax","%edx","%cc");
++   :"=r"(sc0), "=r"(sc1), "=r"(sc2): "0"(sc0), "1"(sc1), "2"(sc2), "g"(i), "g"(j) :"%eax","%edx","cc");
+ 
+ #define SQRADDAC(i, j) \
+ asm( \
+@@ -71,7 +71,7 @@ asm( \
+    "addl %%eax,%0 \n\t" \
+    "adcl %%edx,%1 \n\t" \
+    "adcl $0,%2 \n\t" \
+-   :"=r"(sc0), "=r"(sc1), "=r"(sc2): "0"(sc0), "1"(sc1), "2"(sc2), "g"(i), "g"(j) :"%eax","%edx","%cc");
++   :"=r"(sc0), "=r"(sc1), "=r"(sc2): "0"(sc0), "1"(sc1), "2"(sc2), "g"(i), "g"(j) :"%eax","%edx","cc");
+ 
+ #define SQRADDDB \
+ asm( \
+@@ -81,7 +81,7 @@ asm( \
+    "addl %6,%0 \n\t" \
+    "adcl %7,%1 \n\t" \
+    "adcl %8,%2 \n\t" \
+-   :"=r"(c0), "=r"(c1), "=r"(c2) : "0"(c0), "1"(c1), "2"(c2), "r"(sc0), "r"(sc1), "r"(sc2) : "%cc");
++   :"=r"(c0), "=r"(c1), "=r"(c2) : "0"(c0), "1"(c1), "2"(c2), "r"(sc0), "r"(sc1), "r"(sc2) : "cc");
+ 
+ #elif defined(TFM_X86_64)
+ /* x86-64 optimized */
+@@ -109,7 +109,7 @@ asm( \
+    "addq %%rax,%0 \n\t" \
+    "adcq %%rdx,%1 \n\t" \
+    "adcq $0,%2 \n\t" \
+-   :"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "g"(i) :"%rax","%rdx","%cc");
++   :"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "g"(i) :"%rax","%rdx","cc");
+ 
+ #define SQRADD2(i, j) \
+ asm( \
+@@ -121,7 +121,7 @@ asm( \
+    "addq %%rax,%0 \n\t" \
+    "adcq %%rdx,%1 \n\t" \
+    "adcq $0,%2 \n\t" \
+-   :"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "g"(i), "g"(j) :"%rax","%rdx","%cc");
++   :"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "g"(i), "g"(j) :"%rax","%rdx","cc");
+ 
+ #define SQRADDSC(i, j) \
+ asm( \
+@@ -130,7 +130,7 @@ asm( \
+    "movq %%rax,%0 \n\t" \
+    "movq %%rdx,%1 \n\t" \
+    "xorq %2,%2 \n\t" \
+-   :"=r"(sc0), "=r"(sc1), "=r"(sc2): "0"(sc0), "1"(sc1), "2"(sc2), "g"(i), "g"(j) :"%rax","%rdx","%cc");
++   :"=r"(sc0), "=r"(sc1), "=r"(sc2): "0"(sc0), "1"(sc1), "2"(sc2), "g"(i), "g"(j) :"%rax","%rdx","cc");
+ 
+ #define SQRADDAC(i, j) \
+ asm( \
+@@ -139,7 +139,7 @@ asm( \
+    "addq %%rax,%0 \n\t" \
"adcq %%rdx,%1 \n\t" \ + "adcq $0,%2 \n\t" \ +- :"=r"(sc0), "=r"(sc1), "=r"(sc2): "0"(sc0), "1"(sc1), "2"(sc2), "g"(i), "g"(j) :"%rax","%rdx","%cc"); ++ :"=r"(sc0), "=r"(sc1), "=r"(sc2): "0"(sc0), "1"(sc1), "2"(sc2), "g"(i), "g"(j) :"%rax","%rdx","cc"); + + #define SQRADDDB \ + asm( \ +@@ -149,7 +149,7 @@ asm( \ + "addq %6,%0 \n\t" \ + "adcq %7,%1 \n\t" \ + "adcq %8,%2 \n\t" \ +- :"=r"(c0), "=r"(c1), "=r"(c2) : "0"(c0), "1"(c1), "2"(c2), "r"(sc0), "r"(sc1), "r"(sc2) : "%cc"); ++ :"=r"(c0), "=r"(c1), "=r"(c2) : "0"(c0), "1"(c1), "2"(c2), "r"(sc0), "r"(sc1), "r"(sc2) : "cc"); + + #elif defined(TFM_SSE2) + +@@ -181,7 +181,7 @@ asm( \ + "movd %%mm0,%%eax \n\t" \ + "adcl %%eax,%1 \n\t" \ + "adcl $0,%2 \n\t" \ +- :"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "m"(i) :"%eax","%cc"); ++ :"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "m"(i) :"%eax","cc"); + + #define SQRADD2(i, j) \ + asm( \ +@@ -197,7 +197,7 @@ asm( \ + "addl %%eax,%0 \n\t" \ + "adcl %%edx,%1 \n\t" \ + "adcl $0,%2 \n\t" \ +- :"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "m"(i), "m"(j) :"%eax","%edx","%cc"); ++ :"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "m"(i), "m"(j) :"%eax","%edx","cc"); + + #define SQRADDSC(i, j) \ + asm( \ +@@ -221,7 +221,7 @@ asm( \ + "addl %%eax,%0 \n\t" \ + "adcl %%edx,%1 \n\t" \ + "adcl $0,%2 \n\t" \ +- :"=r"(sc0), "=r"(sc1), "=r"(sc2): "0"(sc0), "1"(sc1), "2"(sc2), "m"(i), "m"(j) :"%eax","%edx","%cc"); ++ :"=r"(sc0), "=r"(sc1), "=r"(sc2): "0"(sc0), "1"(sc1), "2"(sc2), "m"(i), "m"(j) :"%eax","%edx","cc"); + + #define SQRADDDB \ + asm( \ +@@ -231,7 +231,7 @@ asm( \ + "addl %6,%0 \n\t" \ + "adcl %7,%1 \n\t" \ + "adcl %8,%2 \n\t" \ +- :"=r"(c0), "=r"(c1), "=r"(c2) : "0"(c0), "1"(c1), "2"(c2), "r"(sc0), "r"(sc1), "r"(sc2) : "%cc"); ++ :"=r"(c0), "=r"(c1), "=r"(c2) : "0"(c0), "1"(c1), "2"(c2), "r"(sc0), "r"(sc1), "r"(sc2) : "cc"); + + #elif defined(TFM_ARM) + +@@ -260,7 +260,7 @@ asm( \ + " ADDS %0,%0,r0 \n\t" \ + " ADCS %1,%1,r1 \n\t" \ + " ADC %2,%2,#0 \n\t" \ +-:"=r"(c0), "=r"(c1), "=r"(c2) : "0"(c0), "1"(c1), "2"(c2), "r"(i) : "r0", "r1", "%cc"); ++:"=r"(c0), "=r"(c1), "=r"(c2) : "0"(c0), "1"(c1), "2"(c2), "r"(i) : "r0", "r1", "cc"); + + /* for squaring some of the terms are doubled... 
+ #define SQRADD2(i, j) \
+@@ -272,13 +272,13 @@ asm( \
+    " ADDS %0,%0,r0 \n\t" \
+    " ADCS %1,%1,r1 \n\t" \
+    " ADC %2,%2,#0 \n\t" \
+-:"=r"(c0), "=r"(c1), "=r"(c2) : "0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j) : "r0", "r1", "%cc");
++:"=r"(c0), "=r"(c1), "=r"(c2) : "0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j) : "r0", "r1", "cc");
+ 
+ #define SQRADDSC(i, j) \
+ asm( \
+    " UMULL %0,%1,%6,%7 \n\t" \
+    " SUB %2,%2,%2 \n\t" \
+-:"=r"(sc0), "=r"(sc1), "=r"(sc2) : "0"(sc0), "1"(sc1), "2"(sc2), "r"(i), "r"(j) : "%cc");
++:"=r"(sc0), "=r"(sc1), "=r"(sc2) : "0"(sc0), "1"(sc1), "2"(sc2), "r"(i), "r"(j) : "cc");
+ 
+ #define SQRADDAC(i, j) \
+ asm( \
+@@ -286,7 +286,7 @@ asm( \
+    " ADDS %0,%0,r0 \n\t" \
+    " ADCS %1,%1,r1 \n\t" \
+    " ADC %2,%2,#0 \n\t" \
+-:"=r"(sc0), "=r"(sc1), "=r"(sc2) : "0"(sc0), "1"(sc1), "2"(sc2), "r"(i), "r"(j) : "r0", "r1", "%cc");
++:"=r"(sc0), "=r"(sc1), "=r"(sc2) : "0"(sc0), "1"(sc1), "2"(sc2), "r"(i), "r"(j) : "r0", "r1", "cc");
+ 
+ #define SQRADDDB \
+ asm( \
+@@ -296,7 +296,7 @@ asm( \
+    " ADDS %0,%0,%3 \n\t" \
+    " ADCS %1,%1,%4 \n\t" \
+    " ADC %2,%2,%5 \n\t" \
+-:"=r"(c0), "=r"(c1), "=r"(c2) : "r"(sc0), "r"(sc1), "r"(sc2), "0"(c0), "1"(c1), "2"(c2) : "%cc");
++:"=r"(c0), "=r"(c1), "=r"(c2) : "r"(sc0), "r"(sc1), "r"(sc2), "0"(c0), "1"(c1), "2"(c2) : "cc");
+ 
+ #elif defined(TFM_PPC32)
+ 
+@@ -326,7 +326,7 @@ asm( \
+    " mulhwu 16,%6,%6 \n\t" \
+    " adde %1,%1,16 \n\t" \
+    " addze %2,%2 \n\t" \
+-:"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i):"16","%cc");
++:"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i):"16","cc");
+ 
+ /* for squaring some of the terms are doubled... */
+ #define SQRADD2(i, j) \
+@@ -339,14 +339,14 @@ asm( \
+    " addc %0,%0,16 \n\t" \
+    " adde %1,%1,17 \n\t" \
+    " addze %2,%2 \n\t" \
+-:"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j):"16", "17","%cc");
++:"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j):"16", "17","cc");
+ 
+ #define SQRADDSC(i, j) \
+ asm( \
+    " mullw %0,%6,%7 \n\t" \
+    " mulhwu %1,%6,%7 \n\t" \
+    " xor %2,%2,%2 \n\t" \
+-:"=r"(sc0), "=r"(sc1), "=r"(sc2):"0"(sc0), "1"(sc1), "2"(sc2), "r"(i),"r"(j) : "%cc");
++:"=r"(sc0), "=r"(sc1), "=r"(sc2):"0"(sc0), "1"(sc1), "2"(sc2), "r"(i),"r"(j) : "cc");
+ 
+ #define SQRADDAC(i, j) \
+ asm( \
+@@ -355,7 +355,7 @@ asm( \
+    " mulhwu 16,%6,%7 \n\t" \
+    " adde %1,%1,16 \n\t" \
+    " addze %2,%2 \n\t" \
+-:"=r"(sc0), "=r"(sc1), "=r"(sc2):"0"(sc0), "1"(sc1), "2"(sc2), "r"(i), "r"(j):"16", "%cc");
++:"=r"(sc0), "=r"(sc1), "=r"(sc2):"0"(sc0), "1"(sc1), "2"(sc2), "r"(i), "r"(j):"16", "cc");
+ 
+ #define SQRADDDB \
+ asm( \
+@@ -365,7 +365,7 @@ asm( \
+    " addc %0,%0,%3 \n\t" \
+    " adde %1,%1,%4 \n\t" \
+    " adde %2,%2,%5 \n\t" \
+-:"=r"(c0), "=r"(c1), "=r"(c2) : "r"(sc0), "r"(sc1), "r"(sc2), "0"(c0), "1"(c1), "2"(c2) : "%cc");
++:"=r"(c0), "=r"(c1), "=r"(c2) : "r"(sc0), "r"(sc1), "r"(sc2), "0"(c0), "1"(c1), "2"(c2) : "cc");
+ 
+ #elif defined(TFM_PPC64)
+ /* PPC64 */
+@@ -394,7 +394,7 @@ asm( \
+    " mulhdu r16,%6,%6 \n\t" \
+    " adde %1,%1,r16 \n\t" \
+    " addze %2,%2 \n\t" \
+-:"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i):"r16","%cc");
++:"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i):"r16","cc");
+ 
+ /* for squaring some of the terms are doubled... */
+ #define SQRADD2(i, j) \
+@@ -407,14 +407,14 @@ asm( \
+    " addc %0,%0,r16 \n\t" \
+    " adde %1,%1,r17 \n\t" \
+    " addze %2,%2 \n\t" \
+-:"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j):"r16", "r17","%cc");
++:"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j):"r16", "r17","cc");
+ 
+ #define SQRADDSC(i, j) \
+ asm( \
+    " mulld %0,%6,%7 \n\t" \
+    " mulhdu %1,%6,%7 \n\t" \
+    " xor %2,%2,%2 \n\t" \
+-:"=r"(sc0), "=r"(sc1), "=r"(sc2):"0"(sc0), "1"(sc1), "2"(sc2), "r"(i),"r"(j) : "%cc");
++:"=r"(sc0), "=r"(sc1), "=r"(sc2):"0"(sc0), "1"(sc1), "2"(sc2), "r"(i),"r"(j) : "cc");
+ 
+ #define SQRADDAC(i, j) \
+ asm( \
+@@ -423,7 +423,7 @@ asm( \
+    " mulhdu r16,%6,%7 \n\t" \
+    " adde %1,%1,r16 \n\t" \
+    " addze %2,%2 \n\t" \
+-:"=r"(sc0), "=r"(sc1), "=r"(sc2):"0"(sc0), "1"(sc1), "2"(sc2), "r"(i), "r"(j):"r16", "%cc");
++:"=r"(sc0), "=r"(sc1), "=r"(sc2):"0"(sc0), "1"(sc1), "2"(sc2), "r"(i), "r"(j):"r16", "cc");
+ 
+ #define SQRADDDB \
+ asm( \
+@@ -433,7 +433,7 @@ asm( \
+    " addc %0,%0,%3 \n\t" \
+    " adde %1,%1,%4 \n\t" \
+    " adde %2,%2,%5 \n\t" \
+-:"=r"(c0), "=r"(c1), "=r"(c2) : "r"(sc0), "r"(sc1), "r"(sc2), "0"(c0), "1"(c1), "2"(c2) : "%cc");
++:"=r"(c0), "=r"(c1), "=r"(c2) : "r"(sc0), "r"(sc1), "r"(sc2), "0"(c0), "1"(c1), "2"(c2) : "cc");
+ 
+ 
+ #elif defined(TFM_AVR32)
+@@ -501,7 +501,7 @@ asm( \
+    " add %0,%0,%3 \n\t" \
+    " adc %1,%1,%4 \n\t" \
+    " adc %2,%2,%5 \n\t" \
+-:"=r"(c0), "=r"(c1), "=r"(c2) : "r"(sc0), "r"(sc1), "r"(sc2), "0"(c0), "1"(c1), "2"(c2) : "%cc");
++:"=r"(c0), "=r"(c1), "=r"(c2) : "r"(sc0), "r"(sc1), "r"(sc2), "0"(c0), "1"(c1), "2"(c2) : "cc");
+ 
+ #elif defined(TFM_MIPS)
+ 
+@@ -571,7 +571,7 @@ asm( \
+    " mflo %0 \n\t" \
+    " mfhi %1 \n\t" \
+    " xor %2,%2,%2 \n\t" \
+-:"=r"(sc0), "=r"(sc1), "=r"(sc2):"0"(sc0), "1"(sc1), "2"(sc2), "r"(i),"r"(j) : "%cc");
++:"=r"(sc0), "=r"(sc1), "=r"(sc2):"0"(sc0), "1"(sc1), "2"(sc2), "r"(i),"r"(j) : "cc");
+ 
+ #define SQRADDAC(i, j) \
+ asm( \
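
Reviewer note, not part of the patch: every hunk in the three new tomsfastmath patches makes the same one-character fix, spelling the inline-asm condition-code clobber "cc" instead of "%cc". GCC historically tolerated the stray "%", but clang rejects it as an unknown register name, so the bundled tomsfastmath sources fail to build with clang until patched. A minimal standalone sketch of the corrected idiom follows; the function name add_with_carry and the test harness are hypothetical, for illustration only.

/* Illustrative sketch (not from the patch): the flags clobber must be
 * written "cc"; clang errors out on "%cc" with "unknown register name". */
#include <stdio.h>

static unsigned add_with_carry(unsigned a, unsigned b, unsigned *carry)
{
    unsigned sum = a;
#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
    unsigned cy;
    __asm__("addl %3,%0 \n\t"        /* sum += b, sets the carry flag */
            "movl $0,%1 \n\t"        /* cy = 0 (mov leaves flags alone) */
            "adcl $0,%1 \n\t"        /* cy += carry flag */
            : "=r"(sum), "=&r"(cy)
            : "0"(sum), "r"(b)
            : "cc");                 /* flags clobbered: "cc", not "%cc" */
    *carry = cy;
#else
    sum = a + b;                     /* portable fallback */
    *carry = (sum < a);
#endif
    return sum;
}

int main(void)
{
    unsigned cy;
    unsigned s = add_with_carry(0xffffffffu, 2u, &cy);
    printf("sum=%u carry=%u\n", s, cy);   /* prints: sum=1 carry=1 */
    return 0;
}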