X-Git-Url: http://git.efficios.com/?a=blobdiff_plain;f=include%2Flonglong.h;h=1f0ce4204255945d2c19ba73c0a4910d10145a93;hb=99a5596592eda72c5c60b45cdfabb47e426132d5;hp=d7ef671226cd6f545c2fd3ede22b693cd213c934;hpb=f4936735c5ed55abd37062791f60ba2754c89c31;p=deliverable%2Fbinutils-gdb.git

diff --git a/include/longlong.h b/include/longlong.h
index d7ef671226..1f0ce42042 100644
--- a/include/longlong.h
+++ b/include/longlong.h
@@ -1,5 +1,5 @@
 /* longlong.h -- definitions for mixed size 32/64 bit arithmetic.
-   Copyright (C) 1991-2015 Free Software Foundation, Inc.
+   Copyright (C) 1991-2019 Free Software Foundation, Inc.
 
    This file is part of the GNU C Library.
 
@@ -197,17 +197,19 @@ extern UDItype __udiv_qrnnd (UDItype *, UDItype, UDItype, UDItype);
    : "=r" ((USItype) (sh)), \
      "=&r" ((USItype) (sl)) \
    : "%r" ((USItype) (ah)), \
-     "rIJ" ((USItype) (bh)), \
+     "rICal" ((USItype) (bh)), \
      "%r" ((USItype) (al)), \
-     "rIJ" ((USItype) (bl)))
+     "rICal" ((USItype) (bl)) \
+   : "cc")
 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
   __asm__ ("sub.f %1, %4, %5\n\tsbc %0, %2, %3" \
    : "=r" ((USItype) (sh)), \
      "=&r" ((USItype) (sl)) \
    : "r" ((USItype) (ah)), \
-     "rIJ" ((USItype) (bh)), \
+     "rICal" ((USItype) (bh)), \
      "r" ((USItype) (al)), \
-     "rIJ" ((USItype) (bl)))
+     "rICal" ((USItype) (bl)) \
+   : "cc")
 #define __umulsidi3(u,v) ((UDItype)(USItype)u*(USItype)v)
 
 #ifdef __ARC_NORM__
@@ -221,8 +223,8 @@ extern UDItype __udiv_qrnnd (UDItype *, UDItype, UDItype, UDItype);
     } \
   while (0)
 #define COUNT_LEADING_ZEROS_0 32
-#endif
-#endif
+#endif /* __ARC_NORM__ */
+#endif /* __arc__ */
 
 #if defined (__arm__) && (defined (__thumb2__) || !defined (__thumb__)) \
  && W_TYPE_SIZE == 32
@@ -858,42 +860,6 @@ extern UDItype __umulsidi3 (USItype, USItype);
 #endif
 #endif /* __mips__ */
 
-#if defined (__ns32000__) && W_TYPE_SIZE == 32
-#define umul_ppmm(w1, w0, u, v) \
-  ({union {UDItype __ll; \
-     struct {USItype __l, __h;} __i; \
-    } __xx; \
-  __asm__ ("meid %2,%0" \
-     : "=g" (__xx.__ll) \
-     : "%0" ((USItype) (u)), \
-       "g" ((USItype) (v))); \
-  (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
-#define __umulsidi3(u, v) \
-  ({UDItype __w; \
-    __asm__ ("meid %2,%0" \
-      : "=g" (__w) \
-      : "%0" ((USItype) (u)), \
-        "g" ((USItype) (v))); \
-    __w; })
-#define udiv_qrnnd(q, r, n1, n0, d) \
-  ({union {UDItype __ll; \
-     struct {USItype __l, __h;} __i; \
-    } __xx; \
-  __xx.__i.__h = (n1); __xx.__i.__l = (n0); \
-  __asm__ ("deid %2,%0" \
-     : "=g" (__xx.__ll) \
-     : "0" (__xx.__ll), \
-       "g" ((USItype) (d))); \
-  (r) = __xx.__i.__l; (q) = __xx.__i.__h; })
-#define count_trailing_zeros(count,x) \
-  do { \
-    __asm__ ("ffsd %2,%0" \
-      : "=r" ((USItype) (count)) \
-      : "0" ((USItype) 0), \
-        "r" ((USItype) (x))); \
-  } while (0)
-#endif /* __ns32000__ */
-
 /* FIXME: We should test _IBMR2 here when we add assembly support for the
    system vendor compilers.
    FIXME: What's needed for gcc PowerPC VxWorks?  __vxworks__ is not good
@@ -1086,7 +1052,57 @@ extern UDItype __umulsidi3 (USItype, USItype);
     } while (0)
 #endif
 
-#if defined(__sh__) && !__SHMEDIA__ && W_TYPE_SIZE == 32
+#if defined(__riscv)
+#ifdef __riscv_mul
+#define __umulsidi3(u,v) ((UDWtype)(UWtype)(u) * (UWtype)(v))
+#define __muluw3(a, b) ((UWtype)(a) * (UWtype)(b))
+#else
+#if __riscv_xlen == 32
+  #define MULUW3 "call __mulsi3"
+#elif __riscv_xlen == 64
+  #define MULUW3 "call __muldi3"
+#else
+#error unsupport xlen
+#endif /* __riscv_xlen */
+/* We rely on the fact that MULUW3 doesn't clobber the t-registers.
+   It can get better register allocation result.  */
+#define __muluw3(a, b) \
+  ({ \
+    register UWtype __op0 asm ("a0") = a; \
+    register UWtype __op1 asm ("a1") = b; \
+    asm volatile (MULUW3 \
+                  : "+r" (__op0), "+r" (__op1) \
+                  : \
+                  : "ra", "a2", "a3"); \
+    __op0; \
+  })
+#endif /* __riscv_mul */
+#define umul_ppmm(w1, w0, u, v) \
+  do { \
+    UWtype __x0, __x1, __x2, __x3; \
+    UHWtype __ul, __vl, __uh, __vh; \
+    \
+    __ul = __ll_lowpart (u); \
+    __uh = __ll_highpart (u); \
+    __vl = __ll_lowpart (v); \
+    __vh = __ll_highpart (v); \
+    \
+    __x0 = __muluw3 (__ul, __vl); \
+    __x1 = __muluw3 (__ul, __vh); \
+    __x2 = __muluw3 (__uh, __vl); \
+    __x3 = __muluw3 (__uh, __vh); \
+    \
+    __x1 += __ll_highpart (__x0);/* this can't give carry */ \
+    __x1 += __x2; /* but this indeed can */ \
+    if (__x1 < __x2) /* did we get it? */ \
+      __x3 += __ll_B; /* yes, add it in the proper pos.  */ \
+    \
+    (w1) = __x3 + __ll_highpart (__x1); \
+    (w0) = __ll_lowpart (__x1) * __ll_B + __ll_lowpart (__x0); \
+  } while (0)
+#endif /* __riscv */
+
+#if defined(__sh__) && W_TYPE_SIZE == 32
 #ifndef __sh1__
 #define umul_ppmm(w1, w0, u, v) \
   __asm__ ( \
@@ -1159,21 +1175,6 @@ extern UDItype __umulsidi3 (USItype, USItype);
 #endif /* __sh__ */
 
-#if defined (__SH5__) && __SHMEDIA__ && W_TYPE_SIZE == 32
-#define __umulsidi3(u,v) ((UDItype)(USItype)u*(USItype)v)
-#define count_leading_zeros(count, x) \
-  do \
-    { \
-      UDItype x_ = (USItype)(x); \
-      SItype c_; \
- \
-      __asm__ ("nsb %1, %0" : "=r" (c_) : "r" (x_)); \
-      (count) = c_ - 31; \
-    } \
-  while (0)
-#define COUNT_LEADING_ZEROS_0 32
-#endif
-
 #if defined (__sparc__) && !defined (__arch64__) && !defined (__sparcv9) \
  && W_TYPE_SIZE == 32
 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
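
For readers of the new RISC-V hunk above: the added umul_ppmm builds a double-word product from four single-word multiplies of half-word operands (obtained through __muluw3), then folds the two cross terms together while propagating the one possible carry into the high word. The standalone sketch below mirrors that decomposition for a 32-bit word size so it can be compiled and checked against a native 64-bit multiply. It is not part of the diff: umul_ppmm_portable, LL_B, LL_LOWPART and LL_HIGHPART are names invented for this sketch, standing in for the header's UWtype/UHWtype types and its __ll_B/__ll_lowpart/__ll_highpart helpers.

/* Standalone illustration of the half-word umul_ppmm scheme added for
   RISC-V above.  Not part of longlong.h; assumes W_TYPE_SIZE == 32, so
   half-words are 16 bits and the half-word base LL_B is 1 << 16.  */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define LL_B           ((uint32_t) 1 << 16)          /* half-word base */
#define LL_LOWPART(t)  ((uint32_t) (t) & (LL_B - 1))
#define LL_HIGHPART(t) ((uint32_t) (t) >> 16)

/* Return the 64-bit product of u and v as a high word (*w1) and a low
   word (*w0), using only 32-bit multiplies of 16-bit operands -- the same
   four partial products the diff's umul_ppmm obtains through __muluw3.  */
static void
umul_ppmm_portable (uint32_t *w1, uint32_t *w0, uint32_t u, uint32_t v)
{
  uint32_t ul = LL_LOWPART (u), uh = LL_HIGHPART (u);
  uint32_t vl = LL_LOWPART (v), vh = LL_HIGHPART (v);

  uint32_t x0 = ul * vl;        /* low  * low  */
  uint32_t x1 = ul * vh;        /* low  * high */
  uint32_t x2 = uh * vl;        /* high * low  */
  uint32_t x3 = uh * vh;        /* high * high */

  x1 += LL_HIGHPART (x0);       /* cannot wrap: (B-1)^2 + (B-1) < B^2 */
  x1 += x2;                     /* this addition can wrap ...         */
  if (x1 < x2)                  /* ... detect the wrap ...            */
    x3 += LL_B;                 /* ... and credit it to the high word */

  *w1 = x3 + LL_HIGHPART (x1);
  *w0 = (LL_LOWPART (x1) << 16) + LL_LOWPART (x0);
}

int
main (void)
{
  uint32_t u = 0xdeadbeefu, v = 0xcafef00du, hi, lo;
  uint64_t ref = (uint64_t) u * v;

  umul_ppmm_portable (&hi, &lo, u, v);
  printf ("split:  %08" PRIx32 "%08" PRIx32 "\n", hi, lo);
  printf ("native: %016" PRIx64 "\n", ref);
  return (((uint64_t) hi << 32 | lo) == ref) ? 0 : 1;
}

The "if (__x1 < __x2)" test in the diff (and "if (x1 < x2)" here) is the usual unsigned-overflow idiom: after adding x2 into x1, the sum is smaller than an addend exactly when the addition wrapped, and that wrap is worth one half-word base in the next-higher position, hence the "__x3 += __ll_B".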