[LIB]: Consolidate _atomic_dec_and_lock()
[deliverable/linux.git] / arch / x86_64 / lib / delay.c
CommitLineData
1da177e4
LT
/*
 * Precise Delay Loops for x86-64
 *
 * Copyright (C) 1993 Linus Torvalds
 * Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 *
 * The __delay function must _NOT_ be inlined as its execution time
 * depends wildly on alignment on many x86 processors.
 */
10
11#include <linux/config.h>
12#include <linux/sched.h>
13#include <linux/delay.h>
14#include <asm/delay.h>
8a9e1b0f 15#include <asm/msr.h>
1da177e4
LT
16
17#ifdef CONFIG_SMP
18#include <asm/smp.h>
19#endif
20
8a9e1b0f
VP
/*
 * Read the CPU timestamp counter into *timer_value.
 *
 * Provided for the generic timer-calibration code; always succeeds
 * on x86-64 (the TSC is architecturally guaranteed), hence the
 * unconditional zero return.
 */
int read_current_timer(unsigned long *timer_value)
{
	rdtscll(*timer_value);	/* full 64-bit TSC read */
	return 0;
}
26
1da177e4
LT
/*
 * Busy-wait for @loops TSC ticks.
 *
 * Spins on the low 32 bits of the TSC; the unsigned subtraction
 * (tsc_now - tsc_start) handles 32-bit counter wraparound correctly.
 * rep_nop() (PAUSE) is issued each iteration to be friendly to the
 * sibling hyperthread and to save power while spinning.
 */
void __delay(unsigned long loops)
{
	unsigned tsc_start, tsc_now;

	rdtscl(tsc_start);
	for (;;) {
		rep_nop();
		rdtscl(tsc_now);
		if ((tsc_now - tsc_start) >= loops)
			break;
	}
}
39
40inline void __const_udelay(unsigned long xloops)
41{
39c715b7 42 __delay(((xloops * cpu_data[raw_smp_processor_id()].loops_per_jiffy) >> 32) * HZ);
1da177e4
LT
43}
44
/*
 * Delay for @usecs microseconds.
 *
 * Converts microseconds to the 2^-32 jiffy fixed-point units that
 * __const_udelay() expects: 0x000010c6 == 2^32 / 10^6.
 */
void __udelay(unsigned long usecs)
{
	const unsigned long usecs_to_xloops = 0x000010c6;	/* 2**32 / 1000000 */

	__const_udelay(usecs * usecs_to_xloops);
}
49
/*
 * Delay for @nsecs nanoseconds.
 *
 * Converts nanoseconds to 2^-32 jiffy fixed-point units:
 * 0x00005 == 2^32 / 10^9, rounded up so short delays never undershoot.
 */
void __ndelay(unsigned long nsecs)
{
	const unsigned long nsecs_to_xloops = 0x00005;	/* 2**32 / 1000000000 (rounded up) */

	__const_udelay(nsecs * nsecs_to_xloops);
}
This page took 0.080387 seconds and 5 git commands to generate.