/*
 * Utility compute operations used by translated code.
 *
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2007 Aurelien Jarno
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu/host-utils.h"

#ifndef CONFIG_INT128
/* Long integer helpers */
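/*
 * These fallback helpers are compiled only when the host compiler does not
 * provide a native 128-bit integer type (CONFIG_INT128 is not defined).
 *
 * mul64() builds the full 128-bit product of two 64-bit values out of four
 * 32x32 -> 64 partial products.  Writing a = a_hi * 2^32 + a_lo and
 * b = b_hi * 2^32 + b_lo:
 *
 *   a * b = a_hi*b_hi * 2^64 + (a_hi*b_lo + a_lo*b_hi) * 2^32 + a_lo*b_lo
 *
 * The middle terms are folded into the low result with explicit carry
 * propagation into the high result.
 */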
static inline void mul64(uint64_t *plow, uint64_t *phigh,
                         uint64_t a, uint64_t b)
{
    typedef union {
        uint64_t ll;
        struct {
#ifdef HOST_WORDS_BIGENDIAN
            uint32_t high, low;
#else
            uint32_t low, high;
#endif
        } l;
    } LL;
    LL rl, rm, rn, rh, a0, b0;
    uint64_t c;

    a0.ll = a;
    b0.ll = b;

    rl.ll = (uint64_t)a0.l.low * b0.l.low;
    rm.ll = (uint64_t)a0.l.low * b0.l.high;
    rn.ll = (uint64_t)a0.l.high * b0.l.low;
    rh.ll = (uint64_t)a0.l.high * b0.l.high;

    c = (uint64_t)rl.l.high + rm.l.low + rn.l.low;
    rl.l.high = c;
    c >>= 32;
    c = c + rm.l.high + rn.l.high + rh.l.low;
    rh.l.low = c;
    rh.l.high += (uint32_t)(c >> 32);

    *plow = rl.ll;
    *phigh = rh.ll;
}

/* Unsigned 64x64 -> 128 multiplication */
void mulu64(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    mul64(plow, phigh, a, b);
}
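/*
 * Example: mulu64(&lo, &hi, UINT64_MAX, UINT64_MAX) yields
 * hi = 0xfffffffffffffffe, lo = 0x0000000000000001, because
 * (2^64 - 1)^2 = 2^128 - 2^65 + 1.
 */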

/* Signed 64x64 -> 128 multiplication */
void muls64(uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b)
{
    uint64_t rh;

    mul64(plow, &rh, a, b);

    /* Adjust for signs. */
    if (b < 0) {
        rh -= a;
    }
    if (a < 0) {
        rh -= b;
    }
    *phigh = rh;
}
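/*
 * Why the sign adjustment works: reinterpreting a negative 64-bit value as
 * unsigned adds 2^64 to it, so when b < 0 the unsigned product computed by
 * mul64() is a * (b + 2^64) = a*b + a * 2^64, i.e. too large by exactly 'a'
 * in the high word, and likewise for a < 0.  When both operands are
 * negative, both corrections apply and the extra 2^128 term falls outside
 * the 128-bit result.
 */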

/*
 * Unsigned 128x64 division.  Returns 1 on overflow (divide by zero or
 * quotient exceeding 64 bits); otherwise returns the quotient via plow
 * and the remainder via phigh.
 */
int divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
{
    uint64_t dhi = *phigh;
    uint64_t dlo = *plow;
    unsigned i;
    uint64_t carry = 0;

    if (divisor == 0) {
        return 1;
    } else if (dhi == 0) {
        *plow = dlo / divisor;
        *phigh = dlo % divisor;
        return 0;
    } else if (dhi >= divisor) {
        /* The quotient would not fit in 64 bits. */
        return 1;
    } else {
        for (i = 0; i < 64; i++) {
            carry = dhi >> 63;
            dhi = (dhi << 1) | (dlo >> 63);
            if (carry || (dhi >= divisor)) {
                dhi -= divisor;
                carry = 1;
            } else {
                carry = 0;
            }
            dlo = (dlo << 1) | carry;
        }

        *plow = dlo;
        *phigh = dhi;
        return 0;
    }
}
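/*
 * The loop above is plain restoring (shift-and-subtract) division: each
 * iteration shifts the 128-bit value dhi:dlo left by one bit; if the
 * shifted high half is at least the divisor (a bit carried out of dhi
 * guarantees this), the divisor is subtracted and a 1 quotient bit is
 * shifted into dlo.  After 64 iterations dlo holds the quotient and dhi
 * the remainder.
 *
 * Example: dividing 2^64 (*phigh = 1, *plow = 0) by 3 leaves
 * *plow = 0x5555555555555555 and *phigh = 1.
 */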

/*
 * Signed 128x64 division.  Returns 1 on overflow; otherwise returns the
 * quotient via plow.  phigh is left holding the remainder of the operand
 * magnitudes as computed by divu128().
 */
int divs128(int64_t *plow, int64_t *phigh, int64_t divisor)
{
    int sgn_dvdnd = *phigh < 0;
    int sgn_divsr = divisor < 0;
    int overflow = 0;

    /* Negate the 128-bit dividend: two's complement across both words. */
    if (sgn_dvdnd) {
        *plow = ~(*plow);
        *phigh = ~(*phigh);
        if (*plow == (int64_t)-1) {
            *plow = 0;
            (*phigh)++;
        } else {
            (*plow)++;
        }
    }

    if (sgn_divsr) {
        divisor = 0 - divisor;
    }

    overflow = divu128((uint64_t *)plow, (uint64_t *)phigh, (uint64_t)divisor);

    if (sgn_dvdnd ^ sgn_divsr) {
        *plow = 0 - *plow;
    }

    if (!overflow) {
        if ((*plow < 0) ^ (sgn_dvdnd ^ sgn_divsr)) {
            overflow = 1;
        }
    }

    return overflow;
}
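/*
 * The final check infers overflow from the sign of the quotient: after
 * dividing the magnitudes, a quotient whose sign bit does not match the
 * expected sign (sgn_dvdnd ^ sgn_divsr) means the result cannot be
 * represented in a signed 64-bit value.  Note that a zero quotient with
 * operands of differing signs is also reported as overflow by this test.
 */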
#endif

/**
 * urshift - 128-bit Unsigned Right Shift.
 * @plow: in/out - lower 64-bit integer.
 * @phigh: in/out - higher 64-bit integer.
 * @shift: in - bits to shift, between 0 and 127.
 *
 * The result is zero-extended and stored in plow/phigh, which are
 * input/output variables.  Shift values outside this range are reduced
 * modulo 128; in other words, the caller is responsible for verifying or
 * asserting both the shift range and the plow/phigh pointers.
 */
void urshift(uint64_t *plow, uint64_t *phigh, int32_t shift)
{
    shift &= 127;
    if (shift == 0) {
        return;
    }

    uint64_t h = *phigh >> (shift & 63);
    if (shift >= 64) {
        *plow = h;
        *phigh = 0;
    } else {
        *plow = (*plow >> (shift & 63)) | (*phigh << (64 - (shift & 63)));
        *phigh = h;
    }
}
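/*
 * The shift & 63 masking matters because shifting a uint64_t by 64 or more
 * bits is undefined behaviour in C.  For shift >= 64 the high word is
 * shifted by shift - 64 (equal to shift & 63 since shift < 128) and moved
 * into the low word; for smaller shifts the two words are combined.
 */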

/**
 * ulshift - 128-bit Unsigned Left Shift.
 * @plow: in/out - lower 64-bit integer.
 * @phigh: in/out - higher 64-bit integer.
 * @shift: in - bits to shift, between 0 and 127.
 * @overflow: out - set to true if any 1-bit is shifted out.
 *
 * The result is zero-extended and stored in plow/phigh, which are
 * input/output variables.  Shift values outside this range are reduced
 * modulo 128; in other words, the caller is responsible for verifying or
 * asserting both the shift range and the plow/phigh pointers.
 */
void ulshift(uint64_t *plow, uint64_t *phigh, int32_t shift, bool *overflow)
{
    uint64_t low = *plow;
    uint64_t high = *phigh;

    shift &= 127;
    if (shift == 0) {
        return;
    }

    /* Check whether any 1-bit will be shifted out. */
    urshift(&low, &high, 128 - shift);
    if (low | high) {
        *overflow = true;
    }

    if (shift >= 64) {
        *phigh = *plow << (shift & 63);
        *plow = 0;
    } else {
        *phigh = (*plow >> (64 - (shift & 63))) | (*phigh << (shift & 63));
        *plow = *plow << shift;
    }
}
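/*
 * Overflow detection: the bits discarded by a left shift of 'shift' are
 * exactly the top 'shift' bits of the 128-bit operand, which the urshift()
 * by 128 - shift isolates.  *overflow is only ever set, never cleared, so
 * callers may accumulate the flag across several calls.
 */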