/***********************license start***************
 * Author: Cavium Networks
 *
 * Contact: support@caviumnetworks.com
 * This file is part of the OCTEON SDK
 *
 * Copyright (c) 2003-2008 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this file; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 * or visit http://www.gnu.org/licenses/.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium Networks for more information
 ***********************license end**************************************/

#ifndef __CVMX_H__
#define __CVMX_H__

#include <linux/kernel.h>
#include <linux/string.h>

enum cvmx_mips_space {
	CVMX_MIPS_SPACE_XKSEG = 3LL,
	CVMX_MIPS_SPACE_XKPHYS = 2LL,
	CVMX_MIPS_SPACE_XSSEG = 1LL,
	CVMX_MIPS_SPACE_XUSEG = 0LL
};

/* These macros are for use with 32 bit pointers. */
#define CVMX_MIPS32_SPACE_KSEG0 1l
#define CVMX_ADD_SEG32(segment, add) \
	(((int32_t)segment << 31) | (int32_t)(add))

#define CVMX_IO_SEG CVMX_MIPS_SPACE_XKPHYS

/* These macros simplify the process of creating common IO addresses */
#define CVMX_ADD_SEG(segment, add) \
	((((uint64_t)segment) << 62) | (add))
#ifndef CVMX_ADD_IO_SEG
#define CVMX_ADD_IO_SEG(add) CVMX_ADD_SEG(CVMX_IO_SEG, (add))
#endif
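
/*
 * Illustrative example (editorial addition, not from the original SDK
 * comments): CVMX_ADD_SEG() simply places the segment number in bits 63:62,
 * so with CVMX_IO_SEG == CVMX_MIPS_SPACE_XKPHYS (2):
 *
 *	CVMX_ADD_IO_SEG(0x1234ull) == 0x8000000000001234ull
 *
 * i.e. a physical address becomes an XKPHYS address suitable for a direct
 * 64 bit load or store.
 */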

#include <asm/octeon/cvmx-asm.h>
#include <asm/octeon/cvmx-packet.h>
#include <asm/octeon/cvmx-sysinfo.h>

#include <asm/octeon/cvmx-ciu-defs.h>
#include <asm/octeon/cvmx-gpio-defs.h>
#include <asm/octeon/cvmx-iob-defs.h>
#include <asm/octeon/cvmx-ipd-defs.h>
#include <asm/octeon/cvmx-l2c-defs.h>
#include <asm/octeon/cvmx-l2d-defs.h>
#include <asm/octeon/cvmx-l2t-defs.h>
#include <asm/octeon/cvmx-led-defs.h>
#include <asm/octeon/cvmx-mio-defs.h>
#include <asm/octeon/cvmx-pow-defs.h>

#include <asm/octeon/cvmx-bootinfo.h>
#include <asm/octeon/cvmx-bootmem.h>
#include <asm/octeon/cvmx-l2c.h>

#ifndef CVMX_ENABLE_DEBUG_PRINTS
#define CVMX_ENABLE_DEBUG_PRINTS 1
#endif

#if CVMX_ENABLE_DEBUG_PRINTS
#define cvmx_dprintf printk
#else
#define cvmx_dprintf(...) {}
#endif

#define CVMX_MAX_CORES (16)
#define CVMX_CACHE_LINE_SIZE (128)	/* In bytes */
#define CVMX_CACHE_LINE_MASK (CVMX_CACHE_LINE_SIZE - 1)	/* In bytes */
#define CVMX_CACHE_LINE_ALIGNED __attribute__ ((aligned(CVMX_CACHE_LINE_SIZE)))
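
/*
 * Note added for clarity (editorial): CAST64 casts through (long) first so
 * that, on a 32 bit ABI, a pointer is sign extended to 64 bits the way the
 * hardware expects KSEG addresses to appear; CASTPTR goes the other way,
 * truncating a 64 bit value back down to a native sized pointer.
 */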
#define CAST64(v) ((long long)(long)(v))
#define CASTPTR(type, v) ((type *)(long)(v))

/*
 * Returns processor ID, different Linux and simple exec versions
 * provided in the cvmx-app-init*.c files.
 */
static inline uint32_t cvmx_get_proc_id(void) __attribute__ ((pure));
static inline uint32_t cvmx_get_proc_id(void)
{
	uint32_t id;
	asm("mfc0 %0, $15,0" : "=r"(id));
	return id;
}

/* turn the variable name into a string */
#define CVMX_TMP_STR(x) CVMX_TMP_STR2(x)
#define CVMX_TMP_STR2(x) #x

/**
 * Builds a bit mask given the required size in bits.
 *
 * @bits:   Number of bits in the mask
 * Returns The mask
 */
static inline uint64_t cvmx_build_mask(uint64_t bits)
{
	return ~((~0x0ull) << bits);
}
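
/*
 * Examples (illustrative, editorial addition):
 *	cvmx_build_mask(8)  == 0x00000000000000ffull
 *	cvmx_build_mask(40) == 0x000000ffffffffffull
 */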

/**
 * Builds a memory address for I/O based on the Major and Sub DID.
 *
 * @major_did: 5 bit major did
 * @sub_did:   3 bit sub did
 * Returns I/O base address
 */
static inline uint64_t cvmx_build_io_address(uint64_t major_did,
					     uint64_t sub_did)
{
	return (0x1ull << 48) | (major_did << 43) | (sub_did << 40);
}
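
/*
 * Worked example (illustrative, editorial addition):
 *	cvmx_build_io_address(1, 2) == (1ull << 48) | (1ull << 43) | (2ull << 40)
 *	                            == 0x00010a0000000000ull
 * The result is typically combined with CVMX_ADD_IO_SEG() to form the full
 * XKPHYS I/O address.
 */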

/**
 * Perform mask and shift to place the supplied value into
 * the supplied bit range.
 *
 * Example: cvmx_build_bits(39,24,value)
 * <pre>
 * 6       5       4       3       3       2       1
 * 3       5       7       9       1       3       5       7      0
 * +-------+-------+-------+-------+-------+-------+-------+------+
 * 000000000000000000000000___________value000000000000000000000000
 * </pre>
 *
 * @high_bit: Highest bit value can occupy (inclusive) 0-63
 * @low_bit:  Lowest bit value can occupy inclusive 0-high_bit
 * @value:    Value to use
 * Returns Value masked and shifted
 */
static inline uint64_t cvmx_build_bits(uint64_t high_bit,
				       uint64_t low_bit, uint64_t value)
{
	return (value & cvmx_build_mask(high_bit - low_bit + 1)) << low_bit;
}
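
/*
 * Worked example (illustrative, editorial addition):
 * cvmx_build_bits(39, 24, 0x12345) keeps only the low 39 - 24 + 1 = 16 bits
 * of the value (0x2345) and shifts them up to bit 24, giving
 * 0x0000002345000000ull.
 */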

/**
 * Convert a memory pointer (void*) into a hardware compatible
 * memory address (uint64_t). Octeon hardware widgets don't
 * understand logical addresses.
 *
 * @ptr:    C style memory pointer
 * Returns Hardware physical address
 */
static inline uint64_t cvmx_ptr_to_phys(void *ptr)
{
	if (sizeof(void *) == 8) {
		/*
		 * We're running in 64 bit mode. Normally this means
		 * that we can use 40 bits of address space (the
		 * hardware limit). Unfortunately there is one case
		 * where we need to limit this to 30 bits, sign
		 * extended 32 bit. Although these are 64 bits wide,
		 * only 30 bits can be used.
		 */
		if ((CAST64(ptr) >> 62) == 3)
			return CAST64(ptr) & cvmx_build_mask(30);
		else
			return CAST64(ptr) & cvmx_build_mask(40);
	} else {
		return (long)(ptr) & 0x1fffffff;
	}
}

/**
 * Convert a hardware physical address (uint64_t) into a
 * memory pointer (void *).
 *
 * @physical_address:
 *	       Hardware physical address to memory
 * Returns Pointer to memory
 */
static inline void *cvmx_phys_to_ptr(uint64_t physical_address)
{
	if (sizeof(void *) == 8) {
		/* Just set the top bit, avoiding any TLB ugliness */
		return CASTPTR(void,
			       CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
					    physical_address));
	} else {
		return CASTPTR(void,
			       CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0,
					      physical_address));
	}
}
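
/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * header).  The two helpers above are inverses for addresses the hardware
 * can reach:
 *
 *	void *buf = some_dma_buffer;               (hypothetical buffer)
 *	uint64_t hw_addr = cvmx_ptr_to_phys(buf);  (value handed to a HW unit)
 *	void *back = cvmx_phys_to_ptr(hw_addr);    (CPU visible pointer again)
 *
 * "back" may be an XKPHYS alias of the original mapping rather than the
 * exact pointer value that was passed in, since cvmx_phys_to_ptr() always
 * builds an XKPHYS address in 64 bit mode.
 */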

/* The following defines the macro CVMX_BUILD_WRITE64. This macro is used
    to build a store operation to a full 64bit address. With a 64bit ABI,
    this can be done with a simple pointer access. 32bit ABIs would require
    more complicated assembly. */

/* We have a full 64bit ABI. Writing to a 64bit address can be done with
    a simple volatile pointer */
#define CVMX_BUILD_WRITE64(TYPE, ST) \
static inline void cvmx_write64_##TYPE(uint64_t addr, TYPE##_t val) \
{ \
	*CASTPTR(volatile TYPE##_t, addr) = val; \
}


/* The following defines the macro CVMX_BUILD_READ64. This macro is used to
    build a load operation from a full 64bit address. With a 64bit ABI, this
    can be done with a simple pointer access. 32bit ABIs would require more
    complicated assembly. */

/* We have a full 64bit ABI. Reading from a 64bit address can be done with
    a simple volatile pointer */
#define CVMX_BUILD_READ64(TYPE, LT) \
static inline TYPE##_t cvmx_read64_##TYPE(uint64_t addr) \
{ \
	return *CASTPTR(volatile TYPE##_t, addr); \
}


/* The following defines 8 functions for writing to a 64bit address. Each
    takes two arguments, the address and the value to write.
    cvmx_write64_int64      cvmx_write64_uint64
    cvmx_write64_int32      cvmx_write64_uint32
    cvmx_write64_int16      cvmx_write64_uint16
    cvmx_write64_int8       cvmx_write64_uint8 */
CVMX_BUILD_WRITE64(int64, "sd");
CVMX_BUILD_WRITE64(int32, "sw");
CVMX_BUILD_WRITE64(int16, "sh");
CVMX_BUILD_WRITE64(int8, "sb");
CVMX_BUILD_WRITE64(uint64, "sd");
CVMX_BUILD_WRITE64(uint32, "sw");
CVMX_BUILD_WRITE64(uint16, "sh");
CVMX_BUILD_WRITE64(uint8, "sb");
#define cvmx_write64 cvmx_write64_uint64

/* The following defines 8 functions for reading from a 64bit address. Each
    takes the address as the only argument
    cvmx_read64_int64       cvmx_read64_uint64
    cvmx_read64_int32       cvmx_read64_uint32
    cvmx_read64_int16       cvmx_read64_uint16
    cvmx_read64_int8        cvmx_read64_uint8 */
CVMX_BUILD_READ64(int64, "ld");
CVMX_BUILD_READ64(int32, "lw");
CVMX_BUILD_READ64(int16, "lh");
CVMX_BUILD_READ64(int8, "lb");
CVMX_BUILD_READ64(uint64, "ld");
CVMX_BUILD_READ64(uint32, "lw");
CVMX_BUILD_READ64(uint16, "lhu");
CVMX_BUILD_READ64(uint8, "lbu");
#define cvmx_read64 cvmx_read64_uint64


static inline void cvmx_write_csr(uint64_t csr_addr, uint64_t val)
{
	cvmx_write64(csr_addr, val);

	/*
	 * Perform an immediate read after every write to an RSL
	 * register to force the write to complete. It doesn't matter
	 * what RSL read we do, so we choose CVMX_MIO_BOOT_BIST_STAT
	 * because it is fast and harmless.
	 */
	if (((csr_addr >> 40) & 0x7ffff) == (0x118))
		cvmx_read64(CVMX_MIO_BOOT_BIST_STAT);
}

static inline void cvmx_writeq_csr(void __iomem *csr_addr, uint64_t val)
{
	cvmx_write_csr((__force uint64_t)csr_addr, val);
}

static inline void cvmx_write_io(uint64_t io_addr, uint64_t val)
{
	cvmx_write64(io_addr, val);
}

static inline uint64_t cvmx_read_csr(uint64_t csr_addr)
{
	uint64_t val = cvmx_read64(csr_addr);
	return val;
}

static inline uint64_t cvmx_readq_csr(void __iomem *csr_addr)
{
	return cvmx_read_csr((__force uint64_t) csr_addr);
}

static inline void cvmx_send_single(uint64_t data)
{
	const uint64_t CVMX_IOBDMA_SENDSINGLE = 0xffffffffffffa200ull;
	cvmx_write64(CVMX_IOBDMA_SENDSINGLE, data);
}

static inline void cvmx_read_csr_async(uint64_t scraddr, uint64_t csr_addr)
{
	union {
		uint64_t u64;
		struct {
			uint64_t scraddr:8;
			uint64_t len:8;
			uint64_t addr:48;
		} s;
	} addr;
	addr.u64 = csr_addr;
	addr.s.scraddr = scraddr >> 3;
	addr.s.len = 1;
	cvmx_send_single(addr.u64);
}
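
/*
 * Illustrative note (editorial addition): cvmx_read_csr_async() issues an
 * IOBDMA command, so the CSR value is returned asynchronously into the
 * core's local scratchpad at byte offset "scraddr" rather than into a CPU
 * register.  The caller is expected to execute a SYNCIOBDMA barrier
 * (CVMX_SYNCIOBDMA from cvmx-asm.h) and then read the scratchpad location
 * to pick up the result.
 */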

/* Return true if Octeon is CN38XX pass 1 */
static inline int cvmx_octeon_is_pass1(void)
{
#if OCTEON_IS_COMMON_BINARY()
	return 0;	/* Pass 1 isn't supported for common binaries */
#else
/* Now that we know we're built for a specific model, only check CN38XX */
#if OCTEON_IS_MODEL(OCTEON_CN38XX)
	return cvmx_get_proc_id() == OCTEON_CN38XX_PASS1;
#else
	return 0;	/* Built for non CN38XX chip, we're not CN38XX pass1 */
#endif
#endif
}

static inline unsigned int cvmx_get_core_num(void)
{
	unsigned int core_num;
	CVMX_RDHWRNV(core_num, 0);
	return core_num;
}

/**
 * Returns the number of bits set in the provided value.
 * Simple wrapper for POP instruction.
 *
 * @val:    32 bit value to count set bits in
 *
 * Returns Number of bits set
 */
static inline uint32_t cvmx_pop(uint32_t val)
{
	uint32_t pop;
	CVMX_POP(pop, val);
	return pop;
}

/**
 * Returns the number of bits set in the provided value.
 * Simple wrapper for DPOP instruction.
 *
 * @val:    64 bit value to count set bits in
 *
 * Returns Number of bits set
 */
static inline int cvmx_dpop(uint64_t val)
{
	int pop;
	CVMX_DPOP(pop, val);
	return pop;
}
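
/*
 * Examples (illustrative, editorial addition):
 *	cvmx_pop(0x000000f0) == 4
 *	cvmx_dpop(0xffffffffffffffffull) == 64
 */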

/**
 * Provide current cycle counter as a return value
 *
 * Returns current cycle counter
 */
static inline uint64_t cvmx_get_cycle(void)
{
	uint64_t cycle;
	CVMX_RDHWR(cycle, 31);
	return cycle;
}

/**
 * Wait for the specified number of cycles
 *
 */
static inline void cvmx_wait(uint64_t cycles)
{
	uint64_t done = cvmx_get_cycle() + cycles;

	while (cvmx_get_cycle() < done)
		; /* Spin */
}

/**
 * Reads a chip global cycle counter. This counts CPU cycles since
 * chip reset. The counter is 64 bit.
 * This register does not exist on CN38XX pass 1 silicon.
 *
 * Returns Global chip cycle count since chip reset.
 */
static inline uint64_t cvmx_get_cycle_global(void)
{
	if (cvmx_octeon_is_pass1())
		return 0;
	else
		return cvmx_read64(CVMX_IPD_CLK_COUNT);
}

/**
 * This macro spins on a field waiting for it to reach a value. It
 * is common in code to need to wait for a specific field in a CSR
 * to match a specific value. Conceptually this macro expands to:
 *
 * 1) read csr at "address" with a csr typedef of "type"
 * 2) Check if ("type".s."field" "op" "value")
 * 3) If #2 isn't true loop to #1 unless too much time has passed.
 */
#define CVMX_WAIT_FOR_FIELD64(address, type, field, op, value, timeout_usec) \
( \
{ \
	int result; \
	do { \
		uint64_t done = cvmx_get_cycle() + (uint64_t)timeout_usec * \
			cvmx_sysinfo_get()->cpu_clock_hz / 1000000; \
		type c; \
		while (1) { \
			c.u64 = cvmx_read_csr(address); \
			if ((c.s.field) op(value)) { \
				result = 0; \
				break; \
			} else if (cvmx_get_cycle() > done) { \
				result = -1; \
				break; \
			} else \
				cvmx_wait(100); \
		} \
	} while (0); \
	result; \
})
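
/*
 * Hypothetical usage sketch (editorial addition; the register and field
 * names below are placeholders, not real definitions from this SDK):
 *
 *	if (CVMX_WAIT_FOR_FIELD64(CVMX_EXAMPLE_CSR_ADDRESS,
 *				  union cvmx_example_csr, busy, ==, 0, 1000))
 *		cvmx_dprintf("timed out waiting for busy to clear\n");
 *
 * The macro evaluates to 0 when the condition was met within the timeout
 * and to -1 on timeout.
 */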

/***************************************************************************/

/* Return the number of cores available in the chip */
static inline uint32_t cvmx_octeon_num_cores(void)
{
	uint32_t ciu_fuse = (uint32_t) cvmx_read_csr(CVMX_CIU_FUSE) & 0xffff;
	return cvmx_pop(ciu_fuse);
}

#endif /* __CVMX_H__ */