1 |
/* |
2 |
* Copyright (C) 2005-2006 Anders Gavare. All rights reserved. |
3 |
* |
4 |
* Redistribution and use in source and binary forms, with or without |
5 |
* modification, are permitted provided that the following conditions are met: |
6 |
* |
7 |
* 1. Redistributions of source code must retain the above copyright |
8 |
* notice, this list of conditions and the following disclaimer. |
9 |
* 2. Redistributions in binary form must reproduce the above copyright |
10 |
* notice, this list of conditions and the following disclaimer in the |
11 |
* documentation and/or other materials provided with the distribution. |
12 |
* 3. The name of the author may not be used to endorse or promote products |
13 |
* derived from this software without specific prior written permission. |
14 |
* |
15 |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND |
16 |
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
17 |
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
18 |
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE |
19 |
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
20 |
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
21 |
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
22 |
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
23 |
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
24 |
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
25 |
* SUCH DAMAGE. |
26 |
* |
27 |
* |
28 |
* $Id: cpu_arm_instr_loadstore.c,v 1.20 2006/02/16 19:49:04 debug Exp $ |
29 |
* |
30 |
* |
31 |
* TODO: Many things... |
32 |
* |
33 |
* o) Big-endian ARM loads/stores. |
34 |
* |
35 |
* o) Alignment checks! |
36 |
* |
37 |
* o) Native load/store if the endianness is the same as the host's |
38 |
* (only implemented for little endian, so far, and it assumes that |
39 |
* alignment is correct!) |
40 |
* |
41 |
* o) "Base Updated Abort Model", which updates the base register |
42 |
* even if the memory access failed. |
43 |
* |
44 |
* o) Some ARM implementations use pc+8, some use pc+12 for stores? |
45 |
* |
46 |
* o) All load/store variants with the PC register are not really |
47 |
* valid. (E.g. a byte load into the PC register. What should that |
48 |
* accomplish?) |
49 |
* |
50 |
* o) Perhaps an optimization for the case when offset = 0, because |
51 |
* that's quite common, and also when the Reg expression is just |
52 |
* a simple, non-rotated register (0..14). |
53 |
*/ |
54 |
|
55 |
|
56 |
/*
 *  Derive helper macros for the doubleword variants. In this template
 *  scheme, "signed, not halfword, not load" selects LDRD, and
 *  "signed, halfword, not load" selects STRD. These are #undef'd again
 *  at the bottom of the file, since this file is included many times.
 */
#if defined(A__SIGNED) && !defined(A__H) && !defined(A__L)
#define A__LDRD
#endif
#if defined(A__SIGNED) && defined(A__H) && !defined(A__L)
#define A__STRD
#endif
62 |
|
63 |
|
64 |
/* |
65 |
* General load/store, by using memory_rw(). If at all possible, memory_rw() |
66 |
* then inserts the page into the translation array, so that the fast |
67 |
* load/store routine below can be used for further accesses. |
68 |
*/ |
69 |
void A__NAME__general(struct cpu *cpu, struct arm_instr_call *ic) |
70 |
{ |
71 |
#if !defined(A__P) && defined(A__W) |
72 |
const int memory_rw_flags = CACHE_DATA | MEMORY_USER_ACCESS; |
73 |
#else |
74 |
const int memory_rw_flags = CACHE_DATA; |
75 |
#endif |
76 |
|
77 |
#ifdef A__REG |
78 |
uint32_t (*reg_func)(struct cpu *, struct arm_instr_call *) |
79 |
= (void *)(size_t)ic->arg[1]; |
80 |
#endif |
81 |
|
82 |
#if defined(A__STRD) || defined(A__LDRD) |
83 |
unsigned char data[8]; |
84 |
const int datalen = 8; |
85 |
#else |
86 |
#ifdef A__B |
87 |
unsigned char data[1]; |
88 |
const int datalen = 1; |
89 |
#else |
90 |
#ifdef A__H |
91 |
unsigned char data[2]; |
92 |
const int datalen = 2; |
93 |
#else |
94 |
const int datalen = 4; |
95 |
#ifdef HOST_LITTLE_ENDIAN |
96 |
unsigned char *data = (unsigned char *) ic->arg[2]; |
97 |
#else |
98 |
unsigned char data[4]; |
99 |
#endif |
100 |
#endif |
101 |
#endif |
102 |
#endif |
103 |
|
104 |
uint32_t addr, low_pc, offset = |
105 |
#ifndef A__U |
106 |
- |
107 |
#endif |
108 |
#ifdef A__REG |
109 |
reg_func(cpu, ic); |
110 |
#else |
111 |
ic->arg[1]; |
112 |
#endif |
113 |
|
114 |
low_pc = ((size_t)ic - (size_t)cpu->cd.arm. |
115 |
cur_ic_page) / sizeof(struct arm_instr_call); |
116 |
cpu->pc &= ~((ARM_IC_ENTRIES_PER_PAGE-1) |
117 |
<< ARM_INSTR_ALIGNMENT_SHIFT); |
118 |
cpu->pc += (low_pc << ARM_INSTR_ALIGNMENT_SHIFT); |
119 |
|
120 |
addr = reg(ic->arg[0]) |
121 |
#ifdef A__P |
122 |
+ offset |
123 |
#endif |
124 |
; |
125 |
|
126 |
|
127 |
#if defined(A__L) || defined(A__LDRD) |
128 |
/* Load: */ |
129 |
if (!cpu->memory_rw(cpu, cpu->mem, addr, data, datalen, |
130 |
MEM_READ, memory_rw_flags)) { |
131 |
/* load failed, an exception was generated */ |
132 |
return; |
133 |
} |
134 |
#if defined(A__B) && !defined(A__LDRD) |
135 |
reg(ic->arg[2]) = |
136 |
#ifdef A__SIGNED |
137 |
(int32_t)(int8_t) |
138 |
#endif |
139 |
data[0]; |
140 |
#else |
141 |
#if defined(A__H) && !defined(A__LDRD) |
142 |
reg(ic->arg[2]) = |
143 |
#ifdef A__SIGNED |
144 |
(int32_t)(int16_t) |
145 |
#endif |
146 |
(data[0] + (data[1] << 8)); |
147 |
#else |
148 |
#ifndef A__LDRD |
149 |
#ifdef HOST_LITTLE_ENDIAN |
150 |
/* Nothing. */ |
151 |
#else |
152 |
reg(ic->arg[2]) = data[0] + (data[1] << 8) + |
153 |
(data[2] << 16) + (data[3] << 24); |
154 |
#endif |
155 |
#else |
156 |
reg(ic->arg[2]) = data[0] + (data[1] << 8) + |
157 |
(data[2] << 16) + (data[3] << 24); |
158 |
reg(((uint32_t *)ic->arg[2]) + 1) = data[4] + (data[5] << 8) + |
159 |
(data[6] << 16) + (data[7] << 24); |
160 |
#endif |
161 |
#endif |
162 |
#endif |
163 |
#else |
164 |
/* Store: */ |
165 |
#if !defined(A__B) && !defined(A__H) && defined(HOST_LITTLE_ENDIAN) |
166 |
#ifdef A__STRD |
167 |
*(uint32_t *)data = reg(ic->arg[2]); |
168 |
*(uint32_t *)(data + 4) = reg(ic->arg[2] + 4); |
169 |
#endif |
170 |
#else |
171 |
data[0] = reg(ic->arg[2]); |
172 |
#ifndef A__B |
173 |
data[1] = reg(ic->arg[2]) >> 8; |
174 |
#if !defined(A__H) || defined(A__STRD) |
175 |
data[1] = reg(ic->arg[2]) >> 8; |
176 |
data[2] = reg(ic->arg[2]) >> 16; |
177 |
data[3] = reg(ic->arg[2]) >> 24; |
178 |
#ifdef A__STRD |
179 |
data[4] = reg(ic->arg[2] + 4); |
180 |
data[5] = reg(ic->arg[2] + 4) >> 8; |
181 |
data[6] = reg(ic->arg[2] + 4) >> 16; |
182 |
data[7] = reg(ic->arg[2] + 4) >> 24; |
183 |
#endif |
184 |
#endif |
185 |
#endif |
186 |
#endif |
187 |
if (!cpu->memory_rw(cpu, cpu->mem, addr, data, datalen, |
188 |
MEM_WRITE, memory_rw_flags)) { |
189 |
/* store failed, an exception was generated */ |
190 |
return; |
191 |
} |
192 |
#endif |
193 |
|
194 |
#ifdef A__P |
195 |
#ifdef A__W |
196 |
reg(ic->arg[0]) = addr; |
197 |
#endif |
198 |
#else /* post-index writeback */ |
199 |
reg(ic->arg[0]) = addr + offset; |
200 |
#endif |
201 |
} |
202 |
|
203 |
|
204 |
/*
 *  Fast load/store, if the page is in the translation array.
 *
 *  Falls back to A__NAME__general() whenever the host page is not
 *  translated (or when a T-variant userland access check fails), which
 *  also gives memory_rw() a chance to insert the page for next time.
 */
void A__NAME(struct cpu *cpu, struct arm_instr_call *ic)
{
#if defined(A__LDRD) || defined(A__STRD)
	/*  Chicken out, let's do this unoptimized for now:  */
	A__NAME__general(cpu, ic);
#else
#ifdef A__REG
	/*  arg[1] is a function computing the register-form offset:  */
	uint32_t (*reg_func)(struct cpu *, struct arm_instr_call *)
	    = (void *)(size_t)ic->arg[1];
#endif
	/*  Offset, negated for "down" (U bit clear) variants:  */
	uint32_t offset =
#ifndef A__U
	    -
#endif
#ifdef A__REG
	    reg_func(cpu, ic);
#else
	    ic->arg[1];
#endif
	/*  Pre-indexed (P bit set): apply the offset before the access:  */
	uint32_t addr = reg(ic->arg[0])
#ifdef A__P
	    + offset
#endif
	    ;
	/*  Host page backing the emulated 4 KB page, or NULL if it is
	    not in the translation array:  */
	unsigned char *page = cpu->cd.arm.
#ifdef A__L
	    host_load
#else
	    host_store
#endif
	    [addr >> 12];


#if !defined(A__P) && defined(A__W)
	/*
	 *  T-bit: userland access: check the corresponding bit in the
	 *  is_userpage array. If it is set, then we're ok. Otherwise: use the
	 *  generic function.
	 */
	uint32_t x = cpu->cd.arm.is_userpage[addr >> 17];
	if (!(x & (1 << ((addr >> 12) & 31))))
		A__NAME__general(cpu, ic);
	else
#endif

	if (page == NULL) {
		/*  Not translated; take the slow generic path:  */
		A__NAME__general(cpu, ic);
	} else {
#ifdef A__L
		/*  Load directly from the host page:  */
#ifdef A__B
		reg(ic->arg[2]) =
#ifdef A__SIGNED
		    (int32_t)(int8_t)
#endif
		    page[addr & 0xfff];
#else
#ifdef A__H
		reg(ic->arg[2]) =
#ifdef A__SIGNED
		    (int32_t)(int16_t)
#endif
		    (page[addr & 0xfff] + (page[(addr & 0xfff) + 1] << 8));
#else
#ifdef HOST_LITTLE_ENDIAN
		/*  NOTE: assumes correct alignment (see TODO at the top);
		    the low address bits are simply masked off:  */
		reg(ic->arg[2]) = *(uint32_t *)(page + (addr & 0xffc));
#else
		reg(ic->arg[2]) = page[addr & 0xfff] +
		    (page[(addr & 0xfff) + 1] << 8) +
		    (page[(addr & 0xfff) + 2] << 16) +
		    (page[(addr & 0xfff) + 3] << 24);
#endif
#endif
#endif
#else
		/*  Store directly into the host page:  */
#ifdef A__B
		page[addr & 0xfff] = reg(ic->arg[2]);
#else
#ifdef A__H
		page[addr & 0xfff] = reg(ic->arg[2]);
		page[(addr & 0xfff)+1] = reg(ic->arg[2]) >> 8;
#else
#ifdef HOST_LITTLE_ENDIAN
		/*  NOTE: assumes correct alignment (see TODO at the top):  */
		*(uint32_t *)(page + (addr & 0xffc)) = reg(ic->arg[2]);
#else
		page[addr & 0xfff] = reg(ic->arg[2]);
		page[(addr & 0xfff)+1] = reg(ic->arg[2]) >> 8;
		page[(addr & 0xfff)+2] = reg(ic->arg[2]) >> 16;
		page[(addr & 0xfff)+3] = reg(ic->arg[2]) >> 24;
#endif
#endif
#endif
#endif

		/*  Index Write-back:  */
#ifdef A__P
#ifdef A__W
		reg(ic->arg[0]) = addr;
#endif
#else
		/*  post-index writeback  */
		reg(ic->arg[0]) = addr + offset;
#endif
	}
#endif	/*  not LDRD/STRD  */
}
313 |
|
314 |
|
315 |
/*
 *  Special case when loading or storing the ARM's PC register, or when the PC
 *  register is used as the base address register.
 *
 *  o)  Loads into the PC register cause a branch. If an exception occurred
 *      during the load, then the PC register should already point to the
 *      exception handler, in which case we simply recalculate the pointers a
 *      second time (no harm is done by doing that).
 *
 *      TODO: A tiny performance optimization would be to separate the two
 *      cases: a load where arg[0] = PC, and the case where arg[2] = PC.
 *
 *  o)  Stores store "PC of the current instruction + 12". The solution I have
 *      chosen is to calculate this value and place it into a temporary
 *      variable (tmp_pc), which is then used for the store.
 */
void A__NAME_PC(struct cpu *cpu, struct arm_instr_call *ic)
{
#ifdef A__L
	/*  Load:  */
	if (ic->arg[0] == (size_t)(&cpu->cd.arm.tmp_pc)) {
		/*  PC used as the base register:
		    tmp_pc = current instruction's address + 8:  */
		uint32_t low_pc, tmp;
		low_pc = ((size_t)ic - (size_t) cpu->cd.arm.cur_ic_page) /
		    sizeof(struct arm_instr_call);
		tmp = cpu->pc & ~((ARM_IC_ENTRIES_PER_PAGE-1) <<
		    ARM_INSTR_ALIGNMENT_SHIFT);
		tmp += (low_pc << ARM_INSTR_ALIGNMENT_SHIFT);
		cpu->cd.arm.tmp_pc = tmp + 8;
	}
	A__NAME(cpu, ic);
	/*  A load into the PC register is a branch; resynchronize the
	    instruction-call pointers:  */
	if (ic->arg[2] == (size_t)(&cpu->cd.arm.r[ARM_PC])) {
		cpu->pc = cpu->cd.arm.r[ARM_PC];
		quick_pc_to_pointers(cpu);
		if (cpu->machine->show_trace_tree)
			cpu_functioncall_trace(cpu, cpu->pc);
	}
#else
	/*  Store:  */
	uint32_t low_pc, tmp;
	/*  Calculate tmp_pc from this instruction's address + 12:  */
	low_pc = ((size_t)ic - (size_t) cpu->cd.arm.cur_ic_page) /
	    sizeof(struct arm_instr_call);
	tmp = cpu->pc & ~((ARM_IC_ENTRIES_PER_PAGE-1) <<
	    ARM_INSTR_ALIGNMENT_SHIFT);
	tmp += (low_pc << ARM_INSTR_ALIGNMENT_SHIFT);
	cpu->cd.arm.tmp_pc = tmp + 12;
	A__NAME(cpu, ic);
#endif
}
365 |
|
366 |
|
367 |
#ifndef A__NOCONDITIONS |
368 |
/* Load/stores with all registers except the PC register: */ |
369 |
void A__NAME__eq(struct cpu *cpu, struct arm_instr_call *ic) |
370 |
{ if (cpu->cd.arm.flags & ARM_F_Z) A__NAME(cpu, ic); } |
371 |
void A__NAME__ne(struct cpu *cpu, struct arm_instr_call *ic) |
372 |
{ if (!(cpu->cd.arm.flags & ARM_F_Z)) A__NAME(cpu, ic); } |
373 |
void A__NAME__cs(struct cpu *cpu, struct arm_instr_call *ic) |
374 |
{ if (cpu->cd.arm.flags & ARM_F_C) A__NAME(cpu, ic); } |
375 |
void A__NAME__cc(struct cpu *cpu, struct arm_instr_call *ic) |
376 |
{ if (!(cpu->cd.arm.flags & ARM_F_C)) A__NAME(cpu, ic); } |
377 |
void A__NAME__mi(struct cpu *cpu, struct arm_instr_call *ic) |
378 |
{ if (cpu->cd.arm.flags & ARM_F_N) A__NAME(cpu, ic); } |
379 |
void A__NAME__pl(struct cpu *cpu, struct arm_instr_call *ic) |
380 |
{ if (!(cpu->cd.arm.flags & ARM_F_N)) A__NAME(cpu, ic); } |
381 |
void A__NAME__vs(struct cpu *cpu, struct arm_instr_call *ic) |
382 |
{ if (cpu->cd.arm.flags & ARM_F_V) A__NAME(cpu, ic); } |
383 |
void A__NAME__vc(struct cpu *cpu, struct arm_instr_call *ic) |
384 |
{ if (!(cpu->cd.arm.flags & ARM_F_V)) A__NAME(cpu, ic); } |
385 |
|
386 |
void A__NAME__hi(struct cpu *cpu, struct arm_instr_call *ic) |
387 |
{ if (cpu->cd.arm.flags & ARM_F_C && |
388 |
!(cpu->cd.arm.flags & ARM_F_Z)) A__NAME(cpu, ic); } |
389 |
void A__NAME__ls(struct cpu *cpu, struct arm_instr_call *ic) |
390 |
{ if (cpu->cd.arm.flags & ARM_F_Z || |
391 |
!(cpu->cd.arm.flags & ARM_F_C)) A__NAME(cpu, ic); } |
392 |
void A__NAME__ge(struct cpu *cpu, struct arm_instr_call *ic) |
393 |
{ if (((cpu->cd.arm.flags & ARM_F_N)?1:0) == |
394 |
((cpu->cd.arm.flags & ARM_F_V)?1:0)) A__NAME(cpu, ic); } |
395 |
void A__NAME__lt(struct cpu *cpu, struct arm_instr_call *ic) |
396 |
{ if (((cpu->cd.arm.flags & ARM_F_N)?1:0) != |
397 |
((cpu->cd.arm.flags & ARM_F_V)?1:0)) A__NAME(cpu, ic); } |
398 |
void A__NAME__gt(struct cpu *cpu, struct arm_instr_call *ic) |
399 |
{ if (((cpu->cd.arm.flags & ARM_F_N)?1:0) == |
400 |
((cpu->cd.arm.flags & ARM_F_V)?1:0) && |
401 |
!(cpu->cd.arm.flags & ARM_F_Z)) A__NAME(cpu, ic); } |
402 |
void A__NAME__le(struct cpu *cpu, struct arm_instr_call *ic) |
403 |
{ if (((cpu->cd.arm.flags & ARM_F_N)?1:0) != |
404 |
((cpu->cd.arm.flags & ARM_F_V)?1:0) || |
405 |
(cpu->cd.arm.flags & ARM_F_Z)) A__NAME(cpu, ic); } |
406 |
|
407 |
|
408 |
/* Load/stores with the PC register: */ |
409 |
void A__NAME_PC__eq(struct cpu *cpu, struct arm_instr_call *ic) |
410 |
{ if (cpu->cd.arm.flags & ARM_F_Z) A__NAME_PC(cpu, ic); } |
411 |
void A__NAME_PC__ne(struct cpu *cpu, struct arm_instr_call *ic) |
412 |
{ if (!(cpu->cd.arm.flags & ARM_F_Z)) A__NAME_PC(cpu, ic); } |
413 |
void A__NAME_PC__cs(struct cpu *cpu, struct arm_instr_call *ic) |
414 |
{ if (cpu->cd.arm.flags & ARM_F_C) A__NAME_PC(cpu, ic); } |
415 |
void A__NAME_PC__cc(struct cpu *cpu, struct arm_instr_call *ic) |
416 |
{ if (!(cpu->cd.arm.flags & ARM_F_C)) A__NAME_PC(cpu, ic); } |
417 |
void A__NAME_PC__mi(struct cpu *cpu, struct arm_instr_call *ic) |
418 |
{ if (cpu->cd.arm.flags & ARM_F_N) A__NAME_PC(cpu, ic); } |
419 |
void A__NAME_PC__pl(struct cpu *cpu, struct arm_instr_call *ic) |
420 |
{ if (!(cpu->cd.arm.flags & ARM_F_N)) A__NAME_PC(cpu, ic); } |
421 |
void A__NAME_PC__vs(struct cpu *cpu, struct arm_instr_call *ic) |
422 |
{ if (cpu->cd.arm.flags & ARM_F_V) A__NAME_PC(cpu, ic); } |
423 |
void A__NAME_PC__vc(struct cpu *cpu, struct arm_instr_call *ic) |
424 |
{ if (!(cpu->cd.arm.flags & ARM_F_V)) A__NAME_PC(cpu, ic); } |
425 |
|
426 |
void A__NAME_PC__hi(struct cpu *cpu, struct arm_instr_call *ic) |
427 |
{ if (cpu->cd.arm.flags & ARM_F_C && |
428 |
!(cpu->cd.arm.flags & ARM_F_Z)) A__NAME_PC(cpu, ic); } |
429 |
void A__NAME_PC__ls(struct cpu *cpu, struct arm_instr_call *ic) |
430 |
{ if (cpu->cd.arm.flags & ARM_F_Z || |
431 |
!(cpu->cd.arm.flags & ARM_F_C)) A__NAME_PC(cpu, ic); } |
432 |
void A__NAME_PC__ge(struct cpu *cpu, struct arm_instr_call *ic) |
433 |
{ if (((cpu->cd.arm.flags & ARM_F_N)?1:0) == |
434 |
((cpu->cd.arm.flags & ARM_F_V)?1:0)) A__NAME_PC(cpu, ic); } |
435 |
void A__NAME_PC__lt(struct cpu *cpu, struct arm_instr_call *ic) |
436 |
{ if (((cpu->cd.arm.flags & ARM_F_N)?1:0) != |
437 |
((cpu->cd.arm.flags & ARM_F_V)?1:0)) A__NAME_PC(cpu, ic); } |
438 |
void A__NAME_PC__gt(struct cpu *cpu, struct arm_instr_call *ic) |
439 |
{ if (((cpu->cd.arm.flags & ARM_F_N)?1:0) == |
440 |
((cpu->cd.arm.flags & ARM_F_V)?1:0) && |
441 |
!(cpu->cd.arm.flags & ARM_F_Z)) A__NAME_PC(cpu, ic); } |
442 |
void A__NAME_PC__le(struct cpu *cpu, struct arm_instr_call *ic) |
443 |
{ if (((cpu->cd.arm.flags & ARM_F_N)?1:0) != |
444 |
((cpu->cd.arm.flags & ARM_F_V)?1:0) || |
445 |
(cpu->cd.arm.flags & ARM_F_Z)) A__NAME_PC(cpu, ic); } |
446 |
#endif |
447 |
|
448 |
|
449 |
/*  Clean up the derived helper macros, so that the next inclusion of
    this file starts from a clean slate:  */
#ifdef A__LDRD
#undef A__LDRD
#endif

#ifdef A__STRD
#undef A__STRD
#endif
456 |
|