| 1 | /* This file is part of the program psim. |
| 2 | |
| 3 | Copyright (C) 1994-1995, Andrew Cagney <cagney@highland.com.au> |
| 4 | |
| 5 | This program is free software; you can redistribute it and/or modify |
| 6 | it under the terms of the GNU General Public License as published by |
| 7 | the Free Software Foundation; either version 3 of the License, or |
| 8 | (at your option) any later version. |
| 9 | |
| 10 | This program is distributed in the hope that it will be useful, |
| 11 | but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 12 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 13 | GNU General Public License for more details. |
| 14 | |
| 15 | You should have received a copy of the GNU General Public License |
| 16 | along with this program; if not, see <http://www.gnu.org/licenses/>. |
| 17 | |
| 18 | */ |
| 19 | |
| 20 | |
| 21 | #ifndef _BITS_H_ |
| 22 | #define _BITS_H_ |
| 23 | |
| 24 | |
| 25 | /* bit manipulation routines: |
| 26 | |
   Bit numbering: The bits are numbered according to the PowerPC
   convention - the left most (or most significant) is bit 0 while the
   right most (least significant) has the highest bit number.
| 30 | |
   Size convention: Each macro is in three forms - <MACRO>32 which
   operates in 32bit quantity (bits are numbered 0..31); <MACRO>64
   which operates using 64bit quantities (bits are numbered 0..63);
   and <MACRO> which operates using the bit size of the target
   architecture (bits are still numbered 0..63), with 32bit
   architectures ignoring the first 32 bits and treating bit 32 as
   the most significant.
| 38 | |
| 39 | BIT*(POS): Quantity with just 1 bit set. |
| 40 | |
| 41 | MASK*(FIRST, LAST): Create a constant bit mask of the specified |
| 42 | size with bits [FIRST .. LAST] set. |
| 43 | |
| 44 | MASKED*(VALUE, FIRST, LAST): Masks out all but bits [FIRST |
| 45 | .. LAST]. |
| 46 | |
| 47 | LSMASKED*(VALUE, FIRST, LAST): Like MASKED - LS bit is zero. |
| 48 | |
| 49 | EXTRACTED*(VALUE, FIRST, LAST): Masks out bits [FIRST .. LAST] but |
| 50 | also right shifts the masked value so that bit LAST becomes the |
| 51 | least significant (right most). |
| 52 | |
| 53 | LSEXTRACTED*(VALUE, FIRST, LAST): Same as extracted - LS bit is |
| 54 | zero. |
| 55 | |
   SHUFFLED**(VALUE, OLD, NEW): Mask then move a single bit from
   position OLD to position NEW.
| 58 | |
| 59 | MOVED**(VALUE, OLD_FIRST, OLD_LAST, NEW_FIRST, NEW_LAST): Moves |
| 60 | things around so that bits OLD_FIRST..OLD_LAST are masked then |
| 61 | moved to NEW_FIRST..NEW_LAST. |
| 62 | |
| 63 | INSERTED*(VALUE, FIRST, LAST): Takes VALUE and `inserts' the (LAST |
| 64 | - FIRST + 1) least significant bits into bit positions [ FIRST |
| 65 | .. LAST ]. This is almost the complement to EXTRACTED. |
| 66 | |
| 67 | IEA_MASKED(SHOULD_MASK, ADDR): Convert the address to the targets |
| 68 | natural size. If in 32bit mode, discard the high 32bits. |
| 69 | |
| 70 | EXTENDED(VALUE): Convert VALUE (32bits of it) to the targets |
| 71 | natural size. If in 64bit mode, sign extend the value. |
| 72 | |
| 73 | ALIGN_*(VALUE): Round upwards the value so that it is aligned. |
| 74 | |
| 75 | FLOOR_*(VALUE): Truncate the value so that it is aligned. |
| 76 | |
| 77 | ROTL*(VALUE, NR_BITS): Return the value rotated by NR_BITS |
| 78 | |
| 79 | */ |
| 80 | |
/* Left-shift count that places a bit at MSB-relative position POS
   within a WIDTH-bit quantity (bit 0 is the most significant).  */
#define _MAKE_SHIFT(WIDTH, pos) ((WIDTH) - 1 - (pos))
| 82 | |
| 83 | |
/* Convert an offset SHIFT counted from the least-significant bit into
   this file's bit-position numbering.  When the target numbers the
   MSB as bit 0 (WITH_TARGET_WORD_MSB == 0, the PowerPC convention)
   the LS bit carries the highest number, so the offset is reflected;
   otherwise the numbering already runs from the LS end.  */
#if (WITH_TARGET_WORD_MSB == 0)
#define _LSB_POS(WIDTH, SHIFT) (WIDTH - 1 - SHIFT)
#else
#define _LSB_POS(WIDTH, SHIFT) (SHIFT)
#endif
| 89 | |
| 90 | |
/* MakeBit: a WIDTH-bit quantity with only bit POS set, POS counted
   from the most-significant end.  */
#define _BITn(WIDTH, pos) (((natural##WIDTH)(1)) \
                           << _MAKE_SHIFT(WIDTH, pos))

/* int-sized variants for narrow instruction fields.  */
#define BIT4(POS) (1 << _MAKE_SHIFT(4, POS))
#define BIT5(POS) (1 << _MAKE_SHIFT(5, POS))
#define BIT8(POS) (1 << _MAKE_SHIFT(8, POS))
#define BIT10(POS) (1 << _MAKE_SHIFT(10, POS))
#define BIT32(POS) _BITn(32, POS)
#define BIT64(POS) _BITn(64, POS)

/* BIT: bit POS (numbered 0..63) in the target's natural word size.
   On a 32-bit target positions 0..31 fall in the ignored high half
   and yield zero; positions 32..63 map to 32-bit positions 0..31.  */
#if (WITH_TARGET_WORD_BITSIZE == 64)
#define BIT(POS) BIT64(POS)
#else
#define BIT(POS) (((POS) < 32) ? 0 : _BITn(32, (POS)-32))
#endif
| 107 | |
| 108 | |
/* multi bit mask */
/* _MASKn: WIDTH-bit mask with bits START..STOP set (bit 0 = MSB).
   Built by shrinking an all-ones value down to the field width, then
   shifting it up so the field occupies positions START..STOP.
   Requires START <= STOP; otherwise the first shift count reaches or
   exceeds WIDTH (undefined behaviour) -- wrap-around masks are
   handled by MASK below.  */
#define _MASKn(WIDTH, START, STOP) \
(((((unsigned##WIDTH)0) - 1) \
  >> (WIDTH - ((STOP) - (START) + 1))) \
 << (WIDTH - 1 - (STOP)))

#define MASK32(START, STOP) _MASKn(32, START, STOP)
#define MASK64(START, STOP) _MASKn(64, START, STOP)
| 117 | |
/* Multi-bit mask on least significant bits */

/* _LSMASKn: like _MASKn but FIRST/LAST count from the
   least-significant bit (LS bit is zero); _LSB_POS converts to the
   MSB-relative numbering _MASKn expects.  Note FIRST is the larger
   LS offset: LSMASK64 (7, 0) sets the low eight bits.  */
#define _LSMASKn(WIDTH, FIRST, LAST) _MASKn (WIDTH, \
                                             _LSB_POS (WIDTH, FIRST), \
                                             _LSB_POS (WIDTH, LAST))

#define LSMASK64(FIRST, LAST) _LSMASKn (64, (FIRST), (LAST))
| 125 | |
/* MASK: mask of bits START..STOP (64-bit numbering) in the target's
   natural word size.  START > STOP produces a PowerPC rotate-style
   wrap-around mask: bits 0..STOP plus bits START..63.  The 32-bit
   version drops any part of the range that falls in the ignored high
   32 bits.  */
#if (WITH_TARGET_WORD_BITSIZE == 64)
#define MASK(START, STOP) \
(((START) <= (STOP)) \
 ? _MASKn(64, START, STOP) \
 : (_MASKn(64, 0, STOP) \
    | _MASKn(64, START, 63)))
#else
#define MASK(START, STOP) \
(((START) <= (STOP)) \
 ? (((STOP) < 32) \
    ? 0 \
    : _MASKn(32, \
             (START) < 32 ? 0 : (START) - 32, \
             (STOP)-32)) \
 : (_MASKn(32, \
           (START) < 32 ? 0 : (START) - 32, \
           31) \
    | (((STOP) < 32) \
       ? 0 \
       : _MASKn(32, \
                0, \
                (STOP) - 32))))
#endif
| 149 | |
| 150 | |
/* mask the required bits, leaving them in place */

/* MASKED*: return WORD with every bit outside [start..stop] cleared,
   the surviving bits left in their original positions (bit 0 = MSB).
   Implemented in bits.c (included at the end of this header when
   inlining).  */
INLINE_BITS\
(unsigned32) MASKED32
(unsigned32 word,
 unsigned start,
 unsigned stop);

INLINE_BITS\
(unsigned64) MASKED64
(unsigned64 word,
 unsigned start,
 unsigned stop);

INLINE_BITS\
(unsigned_word) MASKED
(unsigned_word word,
 unsigned start,
 unsigned stop);

/* LSMASKED64: like MASKED64 but FIRST/LAST count from the
   least-significant bit (LS bit is zero).  */
INLINE_BITS\
(unsigned64) LSMASKED64
(unsigned64 word,
 int first,
 int last);
| 176 | |
| 177 | |
/* extract the required bits aligning them with the lsb */
/* _EXTRACTEDn: shift WORD right so that bit STOP lands on the least
   significant position, then mask down to the (STOP - START + 1)
   bit wide field.  */
#define _EXTRACTEDn(WIDTH, WORD, START, STOP) \
((((natural##WIDTH)(WORD)) >> (WIDTH - (STOP) - 1)) \
 & _MASKn(WIDTH, WIDTH-1+(START)-(STOP), WIDTH-1))

/* #define EXTRACTED10(WORD, START, STOP) _EXTRACTEDn(10, WORD, START, STOP) */
#define EXTRACTED32(WORD, START, STOP) _EXTRACTEDn(32, WORD, START, STOP)
#define EXTRACTED64(WORD, START, STOP) _EXTRACTEDn(64, WORD, START, STOP)

/* EXTRACTED: target-word-size variant, implemented in bits.c.  */
INLINE_BITS\
(unsigned_word) EXTRACTED
(unsigned_word val,
 unsigned start,
 unsigned stop);

/* LSEXTRACTED64: like EXTRACTED64 but start/stop count from the
   least-significant bit (LS bit is zero).  */
INLINE_BITS\
(unsigned64) LSEXTRACTED64
(unsigned64 val,
 int start,
 int stop);
| 198 | |
/* move a single bit around */
/* NB: the weirdness (N>O?N-O:0) is to stop a warning from GCC */
/* _SHUFFLEDn: take bit OLD of WORD, move it to position NEW (both in
   MSB-first numbering -- larger number means shift right, toward the
   LS end), and clear every other bit.
   NOTE(review): both arms mask with MASK32 even when N is 64 (or
   _word); a NEW above 31 would make MASK32 shift by a negative
   amount.  Looks safe only for positions 0..31 -- confirm against
   callers before relying on SHUFFLED64 with larger positions.  */
#define _SHUFFLEDn(N, WORD, OLD, NEW) \
((OLD) < (NEW) \
 ? (((unsigned##N)(WORD) \
     >> (((NEW) > (OLD)) ? ((NEW) - (OLD)) : 0)) \
    & MASK32((NEW), (NEW))) \
 : (((unsigned##N)(WORD) \
     << (((OLD) > (NEW)) ? ((OLD) - (NEW)) : 0)) \
    & MASK32((NEW), (NEW))))

#define SHUFFLED32(WORD, OLD, NEW) _SHUFFLEDn(32, WORD, OLD, NEW)
#define SHUFFLED64(WORD, OLD, NEW) _SHUFFLEDn(64, WORD, OLD, NEW)

#define SHUFFLED(WORD, OLD, NEW) _SHUFFLEDn(_word, WORD, OLD, NEW)
| 214 | |
| 215 | |
/* move a group of bits around */
/* _INSERTEDn: place the least significant (STOP - START + 1) bits of
   WORD into bit positions START..STOP, zeroing everything else --
   approximately the inverse of _EXTRACTEDn.  */
#define _INSERTEDn(N, WORD, START, STOP) \
(((natural##N)(WORD) << _MAKE_SHIFT(N, STOP)) & _MASKn(N, START, STOP))

#define INSERTED32(WORD, START, STOP) _INSERTEDn(32, WORD, START, STOP)
#define INSERTED64(WORD, START, STOP) _INSERTEDn(64, WORD, START, STOP)

/* INSERTED: target-word-size variant, implemented in bits.c.  */
INLINE_BITS\
(unsigned_word) INSERTED
(unsigned_word val,
 unsigned start,
 unsigned stop);
| 228 | |
| 229 | |
/* depending on MODE return a 64bit or 32bit (sign extended) value */
/* EXTENDED: widen the low 32 bits of X to the target's natural word
   size.  Only the 64-bit target needs real work (sign extension via
   the signed32 -> signed64 cast); a 32-bit target passes X through
   unchanged.  */
#if (WITH_TARGET_WORD_BITSIZE == 64)
#define EXTENDED(X) ((signed64)(signed32)(X))
#else
#define EXTENDED(X) (X)
#endif
| 236 | |
| 237 | |
/* memory alignment macros */

/* _ALIGNa: round X up to the next multiple of A (A a power of two).
   _FLOORa: truncate X down to a multiple of A.  */
#define _ALIGNa(A,X) (((X) + ((A) - 1)) & ~((A) - 1))
#define _FLOORa(A,X) ((X) & ~((A) - 1))

#define ALIGN_8(X) _ALIGNa(8, X)
#define ALIGN_16(X) _ALIGNa(16, X)

#define ALIGN_PAGE(X) _ALIGNa(0x1000, X)
/* Use the generic _FLOORa helper rather than open-coding the mask,
   keeping the page macros consistent with _ALIGNa/_FLOORa above
   (expansion is identical to the previous hand-written form).  */
#define FLOOR_PAGE(X) _FLOORa(0x1000, X)
| 247 | |
| 248 | |
/* bit blitting macros */

/* BLIT32: set (BIT non-zero) or clear (BIT zero) the single bit POS
   of V, 32-bit MSB-first numbering.  V is now parenthesized, matching
   MBLIT32 below (macro hygiene; expansion otherwise unchanged).  */
#define BLIT32(V, POS, BIT) \
do { \
  if (BIT) \
    (V) |= BIT32(POS); \
  else \
    (V) &= ~BIT32(POS); \
} while (0)

/* MBLIT32: replace bits LO..HI of V with the least significant
   (HI - LO + 1) bits of VAL.  */
#define MBLIT32(V, LO, HI, VAL) \
do { \
  (V) = (((V) & ~MASK32((LO), (HI))) \
         | INSERTED32(VAL, LO, HI)); \
} while (0)
| 262 | |
| 263 | |
/* some rotate functions to make things easier

   NOTE: These are functions not macro's as the latter tickles bugs in
   gcc-2.6.3 */

/* _ROTLn: VAL rotated left by SHIFT bits within an N-bit quantity.
   NOTE(review): SHIFT == 0 makes the right operand shift by the full
   width N (undefined behaviour in C); presumably the ROTL32/ROTL64
   functions in bits.c keep SHIFT in range -- confirm there.  */
#define _ROTLn(N, VAL, SHIFT) \
(((VAL) << (SHIFT)) | ((VAL) >> ((N)-(SHIFT))))

/* Rotate VAL left by SHIFT bits; implemented in bits.c.  */
INLINE_BITS\
(unsigned32) ROTL32
(unsigned32 val,
 long shift);

INLINE_BITS\
(unsigned64) ROTL64
(unsigned64 val,
 long shift);
| 281 | |
| 282 | |
| 283 | #if (BITS_INLINE & INCLUDE_MODULE) |
| 284 | #include "bits.c" |
| 285 | #endif |
| 286 | |
| 287 | #endif /* _BITS_H_ */ |