/* $Id: mul.S,v 1.4 1996/09/30 02:22:32 davem Exp $
 * mul.S:       This routine was taken from glibc-1.09 and is covered
 *              by the GNU Library General Public License Version 2.
 */

/*
 * Signed multiply, from Appendix E of the Sparc Version 8
 * Architecture Manual.
 */

/*
 * Returns %o0 * %o1 in %o1%o0 (i.e., %o1 holds the upper 32 bits of
 * the 64-bit product).
 *
 * This code optimizes short (less than 13-bit) multiplies.
 */
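
/*
 * A rough C-level sketch of that register contract, for illustration
 * only (the function name and out-parameters are hypothetical; the
 * real entry point is the assembly label .mul):
 *
 *      void mul(int a, int b, int *lo, int *hi)  // a = %o0, b = %o1
 *      {
 *              long long p = (long long) a * b;
 *              *lo = (int) p;          // what .mul leaves in %o0
 *              *hi = (int) (p >> 32);  // what .mul leaves in %o1
 *      }
 */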

        .globl  .mul
.mul:
        mov     %o0, %y         ! multiplier -> Y
        andncc  %o0, 0xfff, %g0 ! test bits 12..31
        be      Lmul_shortway   ! if zero, can do it the short way
         andcc  %g0, %g0, %o4   ! zero the partial product and clear N and V

        /*
         * Long multiply. 32 steps, followed by a final shift step.
         */
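        /*
         * Each mulscc performs one step of a classic shift-and-add
         * multiply. A rough C sketch of the V8 semantics (names are
         * invented; nv is the N-xor-V condition-code bit left by the
         * previous step; acc, y, multiplicand are unsigned 32-bit):
         *
         *      step = (nv << 31) | (acc >> 1);   // shift partial product
         *      if (y & 1)                        // next multiplier bit set?
         *              step += multiplicand;     // add %o1, setting icc
         *      y = ((acc & 1) << 31) | (y >> 1); // expose the next bit
         *      acc = step;                       // new %o4
         *
         * After 32 steps plus the final shift, %y holds the low half of
         * the product and %o4 the high half (up to the sign fix below).
         */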
        mulscc  %o4, %o1, %o4   ! 1
        mulscc  %o4, %o1, %o4   ! 2
        mulscc  %o4, %o1, %o4   ! 3
        mulscc  %o4, %o1, %o4   ! 4
        mulscc  %o4, %o1, %o4   ! 5
        mulscc  %o4, %o1, %o4   ! 6
        mulscc  %o4, %o1, %o4   ! 7
        mulscc  %o4, %o1, %o4   ! 8
        mulscc  %o4, %o1, %o4   ! 9
        mulscc  %o4, %o1, %o4   ! 10
        mulscc  %o4, %o1, %o4   ! 11
        mulscc  %o4, %o1, %o4   ! 12
        mulscc  %o4, %o1, %o4   ! 13
        mulscc  %o4, %o1, %o4   ! 14
        mulscc  %o4, %o1, %o4   ! 15
        mulscc  %o4, %o1, %o4   ! 16
        mulscc  %o4, %o1, %o4   ! 17
        mulscc  %o4, %o1, %o4   ! 18
        mulscc  %o4, %o1, %o4   ! 19
        mulscc  %o4, %o1, %o4   ! 20
        mulscc  %o4, %o1, %o4   ! 21
        mulscc  %o4, %o1, %o4   ! 22
        mulscc  %o4, %o1, %o4   ! 23
        mulscc  %o4, %o1, %o4   ! 24
        mulscc  %o4, %o1, %o4   ! 25
        mulscc  %o4, %o1, %o4   ! 26
        mulscc  %o4, %o1, %o4   ! 27
        mulscc  %o4, %o1, %o4   ! 28
        mulscc  %o4, %o1, %o4   ! 29
        mulscc  %o4, %o1, %o4   ! 30
        mulscc  %o4, %o1, %o4   ! 31
        mulscc  %o4, %o1, %o4   ! 32
        mulscc  %o4, %g0, %o4   ! final shift

        ! If %o0 was negative, the result is
        !       (%o0 * %o1) + (%o1 << 32)
        ! because the mulscc steps above used the zero-extended value
        ! of %o0 as the multiplier. We fix that here.

#if 0
        tst     %o0
        bge     1f
         rd     %y, %o0

        ! %o0 was indeed negative; fix upper 32 bits of result by subtracting
        ! %o1 (i.e., return %o4 - %o1 in %o1).
        retl
         sub    %o4, %o1, %o1

1:
        retl
         mov    %o4, %o1
#else
        /* Faster code adapted from tege@sics.se's code for umul.S. */
        sra     %o0, 31, %o2    ! make mask from sign bit
        and     %o1, %o2, %o2   ! %o2 = 0 or %o1, depending on sign of %o0
        rd      %y, %o0         ! get lower half of product
        retl
         sub    %o4, %o2, %o1   ! subtract compensation
                                !  and put upper half in place
#endif
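
/*
 * A C-level sketch of the compensation above (names invented; a is
 * the original %o0, b is %o1, hi is the high half left in %o4):
 *
 *      int mask = a >> 31;             // 0 if a >= 0, ~0 if a < 0
 *      hi = hi - (b & mask);           // undo the extra b << 32
 *
 * The subtraction is needed because zero-extending a negative a is
 * equivalent to multiplying by a + 2^32, which adds b << 32 to the
 * true product.
 */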

Lmul_shortway:
        /*
         * Short multiply. 12 steps, followed by a final shift step.
         * The resulting bits are off by 12 and (32-12) = 20 bit positions,
         * but there is no problem with %o0 being negative (unlike above).
         */
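        /*
         * Note that this path is reached only when the andncc above
         * found bits 12..31 of %o0 all zero, i.e. 0 <= %o0 <= 0xfff,
         * so twelve mulscc steps consume every significant multiplier
         * bit and no sign compensation is needed.
         */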
        mulscc  %o4, %o1, %o4   ! 1
        mulscc  %o4, %o1, %o4   ! 2
        mulscc  %o4, %o1, %o4   ! 3
        mulscc  %o4, %o1, %o4   ! 4
        mulscc  %o4, %o1, %o4   ! 5
        mulscc  %o4, %o1, %o4   ! 6
        mulscc  %o4, %o1, %o4   ! 7
        mulscc  %o4, %o1, %o4   ! 8
        mulscc  %o4, %o1, %o4   ! 9
        mulscc  %o4, %o1, %o4   ! 10
        mulscc  %o4, %o1, %o4   ! 11
        mulscc  %o4, %o1, %o4   ! 12
        mulscc  %o4, %g0, %o4   ! final shift

        /*
         * %o4 has 20 of the bits that should be in the low part of the
         * result; %y has the bottom 12 (as %y's top 12). That is:
         *
         *            %o4               %y
         *     +----------------+----------------+
         *     | -12- |   -20-  | -12- |   -20-  |
         *     +------(---------+------)---------+
         *      --hi-- ----low-part----
         *
         * The upper 12 bits of %o4 should be sign-extended to form the
         * high part of the product (i.e., highpart = %o4 >> 20).
         */
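
/*
 * A C-level sketch of the reassembly done below (names invented; mid
 * is %o4 and y is the %y value read into %o5, both unsigned 32-bit):
 *
 *      lo = (mid << 12) | (y >> 20);   // srl: zero fill from the left
 *      hi = (int) mid >> 20;           // sra: sign-extend the top 12 bits
 */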

        rd      %y, %o5
        sll     %o4, 12, %o0    ! shift middle bits left 12
        srl     %o5, 20, %o5    ! shift low bits right 20, zero fill at left
        or      %o5, %o0, %o0   ! construct low part of result
        retl
         sra    %o4, 20, %o1    ! ... and extract high part of result

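/*
 * Hardware-multiply variant. On CPUs that implement the V8 smul
 * instruction, the kernel can patch this three-instruction sequence
 * over .mul (hence the _patch suffix); smul leaves the low 32 bits
 * of the signed product in %o0 and the high 32 bits in %y, which
 * the delay-slot rd copies into %o1.
 */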
        .globl  .mul_patch
.mul_patch:
        smul    %o0, %o1, %o0
        retl
         rd     %y, %o1
        nop