1/* libs/pixelflinger/codeflinger/MIPSAssembler.cpp
2**
3** Copyright 2012, The Android Open Source Project
4**
5** Licensed under the Apache License, Version 2.0 (the "License");
6** you may not use this file except in compliance with the License.
7** You may obtain a copy of the License at
8**
9** http://www.apache.org/licenses/LICENSE-2.0
10**
11** Unless required by applicable law or agreed to in writing, software
12** distributed under the License is distributed on an "AS IS" BASIS,
13** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14** See the License for the specific language governing permissions and
15** limitations under the License.
16*/
17
18
19/* MIPS assembler and ARM->MIPS assembly translator
20**
21** The approach is to leave the GGLAssembler and associated files largely
22** unchanged, still utilizing all Arm instruction generation. Via the
23** ArmToMipsAssembler (subclassed from ArmAssemblerInterface) each Arm
24** instruction is translated to one or more Mips instructions as necessary. This
25** is clearly less efficient than a direct implementation within the
26** GGLAssembler, but is far cleaner, more maintainable, and has yielded very
27** significant performance gains on Mips compared to the generic pixel pipeline.
28**
29**
30** GGLAssembler changes
31**
32** - The register allocator has been modified to re-map Arm registers 0-15 to mips
33** registers 2-17. Mips register 0 cannot be used as a general-purpose register,
34** and register 1 has traditional uses as a short-term temporary.
35**
36** - Added some early bailouts for OUT_OF_REGISTERS in texturing.cpp and
37** GGLAssembler.cpp, since this is not fatal, and can be retried at lower
38** optimization level.
39**
40**
41** ARMAssembler and ARMAssemblerInterface changes
42**
43** Refactored ARM address-mode static functions (imm(), reg_imm(), imm12_pre(), etc.)
44** to virtual, so they can be overridden in MIPSAssembler. The implementation of these
45** functions on ARM is moved from ARMAssemblerInterface.cpp to ARMAssembler.cpp, and
46** is unchanged from the original. (This required duplicating 2 of these as static
47** functions in ARMAssemblerInterface.cpp so they could be used as static initializers).
48*/
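/* Illustrative example of the translation (a sketch, not verbatim output of
** this translator; register numbers follow the r0-15 -> MIPS 2-17 remapping
** described above). A single ARM data-processing instruction with a wide
** immediate, e.g.
**      ADD   r0, r1, #0x12345
** is handled by dataProcessing()/dataProcAdrModes() below and expands to
**      lui   at, 0x0001            # build the 32-bit immediate in AT
**      ori   at, at, 0x2345
**      addu  $2, $3, at            # ARM r0/r1 live in MIPS $2/$3
** while a small (16-bit) immediate collapses to a single ADDIU.
*/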
49
50
51#define LOG_TAG "MIPSAssembler"
52
53#include <stdio.h>
54#include <stdlib.h>
55
56#include <android/log.h>
57#include <cutils/properties.h>
58#include <private/pixelflinger/ggl_context.h>
59
60#include "CodeCache.h"
61#include "MIPSAssembler.h"
62#include "mips_disassem.h"
63
64// Choose MIPS arch variant following gcc flags
65#if defined(__mips__) && __mips==32 && __mips_isa_rev>=2
66#define mips32r2 1
67#else
68#define mips32r2 0
69#endif
70
71
72#define NOT_IMPLEMENTED() LOG_ALWAYS_FATAL("Arm instruction %s not yet implemented\n", __func__)
73
74
75
76// ----------------------------------------------------------------------------
77
78namespace android {
79
80// ----------------------------------------------------------------------------
81#if 0
82#pragma mark -
83#pragma mark ArmToMipsAssembler...
84#endif
85
86ArmToMipsAssembler::ArmToMipsAssembler(const sp<Assembly>& assembly,
87 char *abuf, int linesz, int instr_count)
88 : ARMAssemblerInterface(),
89 mArmDisassemblyBuffer(abuf),
90 mArmLineLength(linesz),
91 mArmInstrCount(instr_count),
92 mInum(0),
93 mAssembly(assembly)
94{
95 mMips = new MIPSAssembler(assembly, this);
96 mArmPC = (uint32_t **) malloc(ARM_MAX_INSTUCTIONS * sizeof(uint32_t *));
97 init_conditional_labels();
98}
99
100ArmToMipsAssembler::~ArmToMipsAssembler()
101{
102 delete mMips;
103 free((void *) mArmPC);
104}
105
106uint32_t* ArmToMipsAssembler::pc() const
107{
108 return mMips->pc();
109}
110
111uint32_t* ArmToMipsAssembler::base() const
112{
113 return mMips->base();
114}
115
116void ArmToMipsAssembler::reset()
117{
118 cond.labelnum = 0;
119 mInum = 0;
120 mMips->reset();
121}
122
123int ArmToMipsAssembler::getCodegenArch()
124{
125 return CODEGEN_ARCH_MIPS;
126}
127
128void ArmToMipsAssembler::comment(const char* string)
129{
130 mMips->comment(string);
131}
132
133void ArmToMipsAssembler::label(const char* theLabel)
134{
135 mMips->label(theLabel);
136}
137
138void ArmToMipsAssembler::disassemble(const char* name)
139{
140 mMips->disassemble(name);
141}
142
143void ArmToMipsAssembler::init_conditional_labels()
144{
145 int i;
146 for (i=0;i<99; ++i) {
147 sprintf(cond.label[i], "cond_%d", i);
148 }
149}
150
151
152
153#if 0
154#pragma mark -
155#pragma mark Prolog/Epilog & Generate...
156#endif
157
158void ArmToMipsAssembler::prolog()
159{
160 mArmPC[mInum++] = pc(); // save starting PC for this instr
161
162 mMips->ADDIU(R_sp, R_sp, -(5 * 4));
163 mMips->SW(R_s0, R_sp, 0);
164 mMips->SW(R_s1, R_sp, 4);
165 mMips->SW(R_s2, R_sp, 8);
166 mMips->SW(R_s3, R_sp, 12);
167 mMips->SW(R_s4, R_sp, 16);
168 mMips->MOVE(R_v0, R_a0); // move context * passed in a0 to v0 (arm r0)
169}
170
171void ArmToMipsAssembler::epilog(uint32_t touched)
172{
173 mArmPC[mInum++] = pc(); // save starting PC for this instr
174
175 mMips->LW(R_s0, R_sp, 0);
176 mMips->LW(R_s1, R_sp, 4);
177 mMips->LW(R_s2, R_sp, 8);
178 mMips->LW(R_s3, R_sp, 12);
179 mMips->LW(R_s4, R_sp, 16);
180 mMips->ADDIU(R_sp, R_sp, (5 * 4));
181 mMips->JR(R_ra);
182
183}
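// Frame layout assumed above: ADDIU sp,sp,-20 reserves five words, with
// s0..s4 saved at sp+0,4,8,12,16; epilog() restores them, pops the frame and
// returns via jr ra (JR() below fills the branch-delay slot with a nop).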
184
185int ArmToMipsAssembler::generate(const char* name)
186{
187 return mMips->generate(name);
188}
189
190uint32_t* ArmToMipsAssembler::pcForLabel(const char* label)
191{
192 return mMips->pcForLabel(label);
193}
194
195
196
197//----------------------------------------------------------
198
199#if 0
200#pragma mark -
201#pragma mark Addressing modes & shifters...
202#endif
203
204
205// do not need this for MIPS, but it is in the Interface (virtual)
206int ArmToMipsAssembler::buildImmediate(
207 uint32_t immediate, uint32_t& rot, uint32_t& imm)
208{
209 // for MIPS, any 32-bit immediate is OK
210 rot = 0;
211 imm = immediate;
212 return 0;
213}
214
215// shifters...
216
217bool ArmToMipsAssembler::isValidImmediate(uint32_t immediate)
218{
219 // for MIPS, any 32-bit immediate is OK
220 return true;
221}
222
223uint32_t ArmToMipsAssembler::imm(uint32_t immediate)
224{
225 // ALOGW("immediate value %08x at pc %08x\n", immediate, (int)pc());
226 amode.value = immediate;
227 return AMODE_IMM;
228}
229
230uint32_t ArmToMipsAssembler::reg_imm(int Rm, int type, uint32_t shift)
231{
232 amode.reg = Rm;
233 amode.stype = type;
234 amode.value = shift;
235 return AMODE_REG_IMM;
236}
237
238uint32_t ArmToMipsAssembler::reg_rrx(int Rm)
239{
240    // reg_rrx mode is not used in the GGLAssembler code at this time
241 return AMODE_UNSUPPORTED;
242}
243
244uint32_t ArmToMipsAssembler::reg_reg(int Rm, int type, int Rs)
245{
246    // reg_reg mode is not used in the GGLAssembler code at this time
247 return AMODE_UNSUPPORTED;
248}
249
250
251// addressing modes...
252// LDR(B)/STR(B)/PLD (immediate and Rm can be negative, which indicate U=0)
253uint32_t ArmToMipsAssembler::immed12_pre(int32_t immed12, int W)
254{
255 LOG_ALWAYS_FATAL_IF(abs(immed12) >= 0x800,
256 "LDR(B)/STR(B)/PLD immediate too big (%08x)",
257 immed12);
258 amode.value = immed12;
259 amode.writeback = W;
260 return AMODE_IMM_12_PRE;
261}
262
263uint32_t ArmToMipsAssembler::immed12_post(int32_t immed12)
264{
265 LOG_ALWAYS_FATAL_IF(abs(immed12) >= 0x800,
266 "LDR(B)/STR(B)/PLD immediate too big (%08x)",
267 immed12);
268
269 amode.value = immed12;
270 return AMODE_IMM_12_POST;
271}
272
273uint32_t ArmToMipsAssembler::reg_scale_pre(int Rm, int type,
274 uint32_t shift, int W)
275{
276 LOG_ALWAYS_FATAL_IF(W | type | shift, "reg_scale_pre adv modes not yet implemented");
277
278 amode.reg = Rm;
279 // amode.stype = type; // more advanced modes not used in GGLAssembler yet
280 // amode.value = shift;
281 // amode.writeback = W;
282 return AMODE_REG_SCALE_PRE;
283}
284
285uint32_t ArmToMipsAssembler::reg_scale_post(int Rm, int type, uint32_t shift)
286{
287 LOG_ALWAYS_FATAL("adr mode reg_scale_post not yet implemented\n");
288 return AMODE_UNSUPPORTED;
289}
290
291// LDRH/LDRSB/LDRSH/STRH (immediate and Rm can be negative, which indicate U=0)
292uint32_t ArmToMipsAssembler::immed8_pre(int32_t immed8, int W)
293{
294 // uint32_t offset = abs(immed8);
295
296 LOG_ALWAYS_FATAL("adr mode immed8_pre not yet implemented\n");
297
298 LOG_ALWAYS_FATAL_IF(abs(immed8) >= 0x100,
299 "LDRH/LDRSB/LDRSH/STRH immediate too big (%08x)",
300 immed8);
301 return AMODE_IMM_8_PRE;
302}
303
304uint32_t ArmToMipsAssembler::immed8_post(int32_t immed8)
305{
306 // uint32_t offset = abs(immed8);
307
308 LOG_ALWAYS_FATAL_IF(abs(immed8) >= 0x100,
309 "LDRH/LDRSB/LDRSH/STRH immediate too big (%08x)",
310 immed8);
311 amode.value = immed8;
312 return AMODE_IMM_8_POST;
313}
314
315uint32_t ArmToMipsAssembler::reg_pre(int Rm, int W)
316{
317 LOG_ALWAYS_FATAL_IF(W, "reg_pre writeback not yet implemented");
318 amode.reg = Rm;
319 return AMODE_REG_PRE;
320}
321
322uint32_t ArmToMipsAssembler::reg_post(int Rm)
323{
324 LOG_ALWAYS_FATAL("adr mode reg_post not yet implemented\n");
325 return AMODE_UNSUPPORTED;
326}
327
328
329
330// ----------------------------------------------------------------------------
331
332#if 0
333#pragma mark -
334#pragma mark Data Processing...
335#endif
336
337
338static const char * const dpOpNames[] = {
339 "AND", "EOR", "SUB", "RSB", "ADD", "ADC", "SBC", "RSC",
340 "TST", "TEQ", "CMP", "CMN", "ORR", "MOV", "BIC", "MVN"
341};
342
343// check if the operand registers from a previous CMP or S-bit instruction
344// would be overwritten by this instruction. If so, move the value to a
345// safe register.
346// Note that we cannot tell at _this_ instruction time if a future (conditional)
347// instruction will _also_ use this value (a defect of the simple 1-pass, one-
348// instruction-at-a-time translation). Therefore we must be conservative and
349// save the value before it is overwritten. This costs an extra MOVE instr.
350
351void ArmToMipsAssembler::protectConditionalOperands(int Rd)
352{
353 if (Rd == cond.r1) {
354 mMips->MOVE(R_cmp, cond.r1);
355 cond.r1 = R_cmp;
356 }
357 if (cond.type == CMP_COND && Rd == cond.r2) {
358 mMips->MOVE(R_cmp2, cond.r2);
359 cond.r2 = R_cmp2;
360 }
361}
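// Example of the hazard handled above (registers and condition illustrative):
//      CMP   r2, r3
//      MOVNE r2, #0        <-- Rd overwrites one CMP operand
//      MOVNE r4, #1        <-- still needs the original r2/r3 comparison
// Before the first conditional MOV is emitted, r2 is copied into R_cmp and
// cond.r1 is redirected to the copy, so the branch generated for the second
// conditional instruction still tests the original operand value.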
362
363
364// interprets the addressing mode, and generates the common code
365// used by the majority of data-processing ops. Many MIPS instructions
366// have a register-based form and a different immediate form. See
367// opAND below for an example. (this could be inlined)
368//
369// this works with the imm(), reg_imm() methods above, which are directly
370// called by the GLLAssembler.
371// note: _signed parameter defaults to false (un-signed)
372// note: tmpReg parameter defaults to 1, MIPS register AT
373int ArmToMipsAssembler::dataProcAdrModes(int op, int& source, bool _signed, int tmpReg)
374{
375 if (op < AMODE_REG) {
376 source = op;
377 return SRC_REG;
378 } else if (op == AMODE_IMM) {
379 if ((!_signed && amode.value > 0xffff)
380 || (_signed && ((int)amode.value < -32768 || (int)amode.value > 32767) )) {
381 mMips->LUI(tmpReg, (amode.value >> 16));
382 if (amode.value & 0x0000ffff) {
383 mMips->ORI(tmpReg, tmpReg, (amode.value & 0x0000ffff));
384 }
385 source = tmpReg;
386 return SRC_REG;
387 } else {
388 source = amode.value;
389 return SRC_IMM;
390 }
391 } else if (op == AMODE_REG_IMM) {
392 switch (amode.stype) {
393 case LSL: mMips->SLL(tmpReg, amode.reg, amode.value); break;
394 case LSR: mMips->SRL(tmpReg, amode.reg, amode.value); break;
395 case ASR: mMips->SRA(tmpReg, amode.reg, amode.value); break;
396 case ROR: if (mips32r2) {
397 mMips->ROTR(tmpReg, amode.reg, amode.value);
398 } else {
399 mMips->RORIsyn(tmpReg, amode.reg, amode.value);
400 }
401 break;
402 }
403 source = tmpReg;
404 return SRC_REG;
405 } else { // adr mode RRX is not used in GGL Assembler at this time
406 // we are screwed, this should be exception, assert-fail or something
407 LOG_ALWAYS_FATAL("adr mode reg_rrx not yet implemented\n");
408 return SRC_ERROR;
409 }
410}
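// Sketch of the shifted-register path referenced above (names illustrative):
// the GGLAssembler issues something like ADD(AL, 0, Rd, Rn, reg_imm(Rm, LSR, 8));
// reg_imm() stores {Rm, LSR, 8} in 'amode', and dataProcAdrModes() then emits
//      srl   at, Rm, 8
// returning SRC_REG, so the ADD case finishes with
//      addu  Rd, Rn, at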
411
412
413void ArmToMipsAssembler::dataProcessing(int opcode, int cc,
414 int s, int Rd, int Rn, uint32_t Op2)
415{
416 int src; // src is modified by dataProcAdrModes() - passed as int&
417
418
419 if (cc != AL) {
420 protectConditionalOperands(Rd);
421 // the branch tests register(s) set by prev CMP or instr with 'S' bit set
422        // invert the condition to jump past this conditional instruction (see the worked example after this function)
423 ArmToMipsAssembler::B(cc^1, cond.label[++cond.labelnum]);
424 } else {
425 mArmPC[mInum++] = pc(); // save starting PC for this instr
426 }
427
428 switch (opcode) {
429 case opAND:
430 if (dataProcAdrModes(Op2, src) == SRC_REG) {
431 mMips->AND(Rd, Rn, src);
432 } else { // adr mode was SRC_IMM
433 mMips->ANDI(Rd, Rn, src);
434 }
435 break;
436
437 case opADD:
438 // set "signed" to true for adr modes
439 if (dataProcAdrModes(Op2, src, true) == SRC_REG) {
440 mMips->ADDU(Rd, Rn, src);
441 } else { // adr mode was SRC_IMM
442 mMips->ADDIU(Rd, Rn, src);
443 }
444 break;
445
446 case opSUB:
447 // set "signed" to true for adr modes
448 if (dataProcAdrModes(Op2, src, true) == SRC_REG) {
449 mMips->SUBU(Rd, Rn, src);
450 } else { // adr mode was SRC_IMM
451 mMips->SUBIU(Rd, Rn, src);
452 }
453 break;
454
455 case opEOR:
456 if (dataProcAdrModes(Op2, src) == SRC_REG) {
457 mMips->XOR(Rd, Rn, src);
458 } else { // adr mode was SRC_IMM
459 mMips->XORI(Rd, Rn, src);
460 }
461 break;
462
463 case opORR:
464 if (dataProcAdrModes(Op2, src) == SRC_REG) {
465 mMips->OR(Rd, Rn, src);
466 } else { // adr mode was SRC_IMM
467 mMips->ORI(Rd, Rn, src);
468 }
469 break;
470
471 case opBIC:
472 if (dataProcAdrModes(Op2, src) == SRC_IMM) {
473            // if we have a 16-bit immediate, load it into the AT reg
474 mMips->ORI(R_at, 0, src);
475 src = R_at;
476 }
477 mMips->NOT(R_at, src);
478 mMips->AND(Rd, Rn, R_at);
479 break;
480
481 case opRSB:
482 if (dataProcAdrModes(Op2, src) == SRC_IMM) {
483            // if we have a 16-bit immediate, load it into the AT reg
484 mMips->ORI(R_at, 0, src);
485 src = R_at;
486 }
487 mMips->SUBU(Rd, src, Rn); // subu with the parameters reversed
488 break;
489
490 case opMOV:
491 if (Op2 < AMODE_REG) { // op2 is reg # in this case
492 mMips->MOVE(Rd, Op2);
493 } else if (Op2 == AMODE_IMM) {
494 if (amode.value > 0xffff) {
495 mMips->LUI(Rd, (amode.value >> 16));
496 if (amode.value & 0x0000ffff) {
497 mMips->ORI(Rd, Rd, (amode.value & 0x0000ffff));
498 }
499 } else {
500 mMips->ORI(Rd, 0, amode.value);
501 }
502 } else if (Op2 == AMODE_REG_IMM) {
503 switch (amode.stype) {
504 case LSL: mMips->SLL(Rd, amode.reg, amode.value); break;
505 case LSR: mMips->SRL(Rd, amode.reg, amode.value); break;
506 case ASR: mMips->SRA(Rd, amode.reg, amode.value); break;
507 case ROR: if (mips32r2) {
508 mMips->ROTR(Rd, amode.reg, amode.value);
509 } else {
510 mMips->RORIsyn(Rd, amode.reg, amode.value);
511 }
512 break;
513 }
514 }
515 else {
516 // adr mode RRX is not used in GGL Assembler at this time
517 mMips->UNIMPL();
518 }
519 break;
520
521 case opMVN: // this is a 1's complement: NOT
522 if (Op2 < AMODE_REG) { // op2 is reg # in this case
523 mMips->NOR(Rd, Op2, 0); // NOT is NOR with 0
524 break;
525 } else if (Op2 == AMODE_IMM) {
526 if (amode.value > 0xffff) {
527 mMips->LUI(Rd, (amode.value >> 16));
528 if (amode.value & 0x0000ffff) {
529 mMips->ORI(Rd, Rd, (amode.value & 0x0000ffff));
530 }
531 } else {
532 mMips->ORI(Rd, 0, amode.value);
533 }
534 } else if (Op2 == AMODE_REG_IMM) {
535 switch (amode.stype) {
536 case LSL: mMips->SLL(Rd, amode.reg, amode.value); break;
537 case LSR: mMips->SRL(Rd, amode.reg, amode.value); break;
538 case ASR: mMips->SRA(Rd, amode.reg, amode.value); break;
539 case ROR: if (mips32r2) {
540 mMips->ROTR(Rd, amode.reg, amode.value);
541 } else {
542 mMips->RORIsyn(Rd, amode.reg, amode.value);
543 }
544 break;
545 }
546 }
547 else {
548 // adr mode RRX is not used in GGL Assembler at this time
549 mMips->UNIMPL();
550 }
551 mMips->NOR(Rd, Rd, 0); // NOT is NOR with 0
552 break;
553
554 case opCMP:
555 // Either operand of a CMP instr could get overwritten by a subsequent
556 // conditional instruction, which is ok, _UNLESS_ there is a _second_
557 // conditional instruction. Under MIPS, this requires doing the comparison
558 // again (SLT), and the original operands must be available. (and this
559 // pattern of multiple conditional instructions from same CMP _is_ used
560 // in GGL-Assembler)
561 //
562 // For now, if a conditional instr overwrites the operands, we will
563 // move them to dedicated temp regs. This is ugly, and inefficient,
564 // and should be optimized.
565 //
566 // WARNING: making an _Assumption_ that CMP operand regs will NOT be
567 // trashed by intervening NON-conditional instructions. In the general
568 // case this is legal, but it is NOT currently done in GGL-Assembler.
569
570 cond.type = CMP_COND;
571 cond.r1 = Rn;
572 if (dataProcAdrModes(Op2, src, false, R_cmp2) == SRC_REG) {
573 cond.r2 = src;
574 } else { // adr mode was SRC_IMM
575 mMips->ORI(R_cmp2, R_zero, src);
576 cond.r2 = R_cmp2;
577 }
578
579 break;
580
581
582 case opTST:
583 case opTEQ:
584 case opCMN:
585 case opADC:
586 case opSBC:
587 case opRSC:
588 mMips->UNIMPL(); // currently unused in GGL Assembler code
589 break;
590 }
591
592 if (cc != AL) {
593 mMips->label(cond.label[cond.labelnum]);
594 }
595 if (s && opcode != opCMP) {
596 cond.type = SBIT_COND;
597 cond.r1 = Rd;
598 }
599}
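// Worked example of the conditional-execution handling above (label number
// and registers are illustrative, not verbatim output):
//      CMP   Rn, Rm         -> records cond.r1/cond.r2; the register form
//                              emits no MIPS code of its own
//      SUBLO Rd, Rd, #1     -> sltu  at, Rn, Rm        # BGEU = inverted LO
//                              beq   at, zero, cond_1  # (nop in delay slot)
//                              addiu Rd, Rd, -1
//                           cond_1:
// Each conditional ARM instruction becomes a branch around its own body,
// testing the operands remembered from the most recent CMP or S-bit instr.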
600
601
602
603#if 0
604#pragma mark -
605#pragma mark Multiply...
606#endif
607
608// multiply, accumulate
609void ArmToMipsAssembler::MLA(int cc, int s,
610 int Rd, int Rm, int Rs, int Rn) {
611
612 mArmPC[mInum++] = pc(); // save starting PC for this instr
613
614 mMips->MUL(R_at, Rm, Rs);
615 mMips->ADDU(Rd, R_at, Rn);
616 if (s) {
617 cond.type = SBIT_COND;
618 cond.r1 = Rd;
619 }
620}
621
622void ArmToMipsAssembler::MUL(int cc, int s,
623 int Rd, int Rm, int Rs) {
624 mArmPC[mInum++] = pc();
625 mMips->MUL(Rd, Rm, Rs);
626 if (s) {
627 cond.type = SBIT_COND;
628 cond.r1 = Rd;
629 }
630}
631
632void ArmToMipsAssembler::UMULL(int cc, int s,
633 int RdLo, int RdHi, int Rm, int Rs) {
634 mArmPC[mInum++] = pc();
635 mMips->MULT(Rm, Rs);
636 mMips->MFHI(RdHi);
637 mMips->MFLO(RdLo);
638 if (s) {
639 cond.type = SBIT_COND;
640 cond.r1 = RdHi; // BUG...
641 LOG_ALWAYS_FATAL("Condition on UMULL must be on 64-bit result\n");
642 }
643}
644
645void ArmToMipsAssembler::UMUAL(int cc, int s,
646 int RdLo, int RdHi, int Rm, int Rs) {
647 LOG_FATAL_IF(RdLo==Rm || RdHi==Rm || RdLo==RdHi,
648 "UMUAL(r%u,r%u,r%u,r%u)", RdLo,RdHi,Rm,Rs);
649 // *mPC++ = (cc<<28) | (1<<23) | (1<<21) | (s<<20) |
650 // (RdHi<<16) | (RdLo<<12) | (Rs<<8) | 0x90 | Rm;
651 mArmPC[mInum++] = pc();
652 mMips->NOP2();
653 NOT_IMPLEMENTED();
654 if (s) {
655 cond.type = SBIT_COND;
656 cond.r1 = RdHi; // BUG...
657 LOG_ALWAYS_FATAL("Condition on UMULL must be on 64-bit result\n");
658 }
659}
660
661void ArmToMipsAssembler::SMULL(int cc, int s,
662 int RdLo, int RdHi, int Rm, int Rs) {
663 LOG_FATAL_IF(RdLo==Rm || RdHi==Rm || RdLo==RdHi,
664 "SMULL(r%u,r%u,r%u,r%u)", RdLo,RdHi,Rm,Rs);
665 // *mPC++ = (cc<<28) | (1<<23) | (1<<22) | (s<<20) |
666 // (RdHi<<16) | (RdLo<<12) | (Rs<<8) | 0x90 | Rm;
667 mArmPC[mInum++] = pc();
668 mMips->NOP2();
669 NOT_IMPLEMENTED();
670 if (s) {
671 cond.type = SBIT_COND;
672 cond.r1 = RdHi; // BUG...
673 LOG_ALWAYS_FATAL("Condition on SMULL must be on 64-bit result\n");
674 }
675}
676void ArmToMipsAssembler::SMUAL(int cc, int s,
677 int RdLo, int RdHi, int Rm, int Rs) {
678 LOG_FATAL_IF(RdLo==Rm || RdHi==Rm || RdLo==RdHi,
679 "SMUAL(r%u,r%u,r%u,r%u)", RdLo,RdHi,Rm,Rs);
680 // *mPC++ = (cc<<28) | (1<<23) | (1<<22) | (1<<21) | (s<<20) |
681 // (RdHi<<16) | (RdLo<<12) | (Rs<<8) | 0x90 | Rm;
682 mArmPC[mInum++] = pc();
683 mMips->NOP2();
684 NOT_IMPLEMENTED();
685 if (s) {
686 cond.type = SBIT_COND;
687 cond.r1 = RdHi; // BUG...
688 LOG_ALWAYS_FATAL("Condition on SMUAL must be on 64-bit result\n");
689 }
690}
691
692
693
694#if 0
695#pragma mark -
696#pragma mark Branches...
697#endif
698
699// branches...
700
701void ArmToMipsAssembler::B(int cc, const char* label)
702{
703 mArmPC[mInum++] = pc();
704 if (cond.type == SBIT_COND) { cond.r2 = R_zero; }
705
706 switch(cc) {
707 case EQ: mMips->BEQ(cond.r1, cond.r2, label); break;
708 case NE: mMips->BNE(cond.r1, cond.r2, label); break;
709 case HS: mMips->BGEU(cond.r1, cond.r2, label); break;
710 case LO: mMips->BLTU(cond.r1, cond.r2, label); break;
711 case MI: mMips->BLT(cond.r1, cond.r2, label); break;
712 case PL: mMips->BGE(cond.r1, cond.r2, label); break;
713
714 case HI: mMips->BGTU(cond.r1, cond.r2, label); break;
715 case LS: mMips->BLEU(cond.r1, cond.r2, label); break;
716 case GE: mMips->BGE(cond.r1, cond.r2, label); break;
717 case LT: mMips->BLT(cond.r1, cond.r2, label); break;
718 case GT: mMips->BGT(cond.r1, cond.r2, label); break;
719 case LE: mMips->BLE(cond.r1, cond.r2, label); break;
720 case AL: mMips->B(label); break;
721 case NV: /* B Never - no instruction */ break;
722
723 case VS:
724 case VC:
725 default:
726 LOG_ALWAYS_FATAL("Unsupported cc: %02x\n", cc);
727 break;
728 }
729}
730
731void ArmToMipsAssembler::BL(int cc, const char* label)
732{
733 LOG_ALWAYS_FATAL("branch-and-link not supported yet\n");
734 mArmPC[mInum++] = pc();
735}
736
737// no use for Branches with integer PC, but they're in the Interface class ....
738void ArmToMipsAssembler::B(int cc, uint32_t* to_pc)
739{
740 LOG_ALWAYS_FATAL("branch to absolute PC not supported, use Label\n");
741 mArmPC[mInum++] = pc();
742}
743
744void ArmToMipsAssembler::BL(int cc, uint32_t* to_pc)
745{
746 LOG_ALWAYS_FATAL("branch to absolute PC not supported, use Label\n");
747 mArmPC[mInum++] = pc();
748}
749
750void ArmToMipsAssembler::BX(int cc, int Rn)
751{
752 LOG_ALWAYS_FATAL("branch to absolute PC not supported, use Label\n");
753 mArmPC[mInum++] = pc();
754}
755
756
757
758#if 0
759#pragma mark -
760#pragma mark Data Transfer...
761#endif
762
763// data transfer...
764void ArmToMipsAssembler::LDR(int cc, int Rd, int Rn, uint32_t offset)
765{
766 mArmPC[mInum++] = pc();
767 // work-around for ARM default address mode of immed12_pre(0)
768 if (offset > AMODE_UNSUPPORTED) offset = 0;
769 switch (offset) {
770 case 0:
771 amode.value = 0;
772 amode.writeback = 0;
773 // fall thru to next case ....
774 case AMODE_IMM_12_PRE:
775 if (Rn == ARMAssemblerInterface::SP) {
776 Rn = R_sp; // convert LDR via Arm SP to LW via Mips SP
777 }
778 mMips->LW(Rd, Rn, amode.value);
779 if (amode.writeback) { // OPTIONAL writeback on pre-index mode
780 mMips->ADDIU(Rn, Rn, amode.value);
781 }
782 break;
783 case AMODE_IMM_12_POST:
784 if (Rn == ARMAssemblerInterface::SP) {
785                Rn = R_sp;  // convert LDR via Arm SP to LW via Mips SP
786 }
787 mMips->LW(Rd, Rn, 0);
788 mMips->ADDIU(Rn, Rn, amode.value);
789 break;
790 case AMODE_REG_SCALE_PRE:
791 // we only support simple base + index, no advanced modes for this one yet
792 mMips->ADDU(R_at, Rn, amode.reg);
793 mMips->LW(Rd, R_at, 0);
794 break;
795 }
796}
797
798void ArmToMipsAssembler::LDRB(int cc, int Rd, int Rn, uint32_t offset)
799{
800 mArmPC[mInum++] = pc();
801 // work-around for ARM default address mode of immed12_pre(0)
802 if (offset > AMODE_UNSUPPORTED) offset = 0;
803 switch (offset) {
804 case 0:
805 amode.value = 0;
806 amode.writeback = 0;
807 // fall thru to next case ....
808 case AMODE_IMM_12_PRE:
809 mMips->LBU(Rd, Rn, amode.value);
810 if (amode.writeback) { // OPTIONAL writeback on pre-index mode
811 mMips->ADDIU(Rn, Rn, amode.value);
812 }
813 break;
814 case AMODE_IMM_12_POST:
815 mMips->LBU(Rd, Rn, 0);
816 mMips->ADDIU(Rn, Rn, amode.value);
817 break;
818 case AMODE_REG_SCALE_PRE:
819 // we only support simple base + index, no advanced modes for this one yet
820 mMips->ADDU(R_at, Rn, amode.reg);
821 mMips->LBU(Rd, R_at, 0);
822 break;
823 }
824
825}
826
827void ArmToMipsAssembler::STR(int cc, int Rd, int Rn, uint32_t offset)
828{
829 mArmPC[mInum++] = pc();
830 // work-around for ARM default address mode of immed12_pre(0)
831 if (offset > AMODE_UNSUPPORTED) offset = 0;
832 switch (offset) {
833 case 0:
834 amode.value = 0;
835 amode.writeback = 0;
836 // fall thru to next case ....
837 case AMODE_IMM_12_PRE:
838 if (Rn == ARMAssemblerInterface::SP) {
839 Rn = R_sp; // convert STR thru Arm SP to SW thru Mips SP
840 }
841 if (amode.writeback) { // OPTIONAL writeback on pre-index mode
842 // If we will writeback, then update the index reg, then store.
843 // This correctly handles stack-push case.
844 mMips->ADDIU(Rn, Rn, amode.value);
845 mMips->SW(Rd, Rn, 0);
846 } else {
847 // No writeback so store offset by value
848 mMips->SW(Rd, Rn, amode.value);
849 }
850 break;
851 case AMODE_IMM_12_POST:
852 mMips->SW(Rd, Rn, 0);
853 mMips->ADDIU(Rn, Rn, amode.value); // post index always writes back
854 break;
855 case AMODE_REG_SCALE_PRE:
856 // we only support simple base + index, no advanced modes for this one yet
857 mMips->ADDU(R_at, Rn, amode.reg);
858 mMips->SW(Rd, R_at, 0);
859 break;
860 }
861}
862
863void ArmToMipsAssembler::STRB(int cc, int Rd, int Rn, uint32_t offset)
864{
865 mArmPC[mInum++] = pc();
866 // work-around for ARM default address mode of immed12_pre(0)
867 if (offset > AMODE_UNSUPPORTED) offset = 0;
868 switch (offset) {
869 case 0:
870 amode.value = 0;
871 amode.writeback = 0;
872 // fall thru to next case ....
873 case AMODE_IMM_12_PRE:
874 mMips->SB(Rd, Rn, amode.value);
875 if (amode.writeback) { // OPTIONAL writeback on pre-index mode
876 mMips->ADDIU(Rn, Rn, amode.value);
877 }
878 break;
879 case AMODE_IMM_12_POST:
880 mMips->SB(Rd, Rn, 0);
881 mMips->ADDIU(Rn, Rn, amode.value);
882 break;
883 case AMODE_REG_SCALE_PRE:
884 // we only support simple base + index, no advanced modes for this one yet
885 mMips->ADDU(R_at, Rn, amode.reg);
886 mMips->SB(Rd, R_at, 0);
887 break;
888 }
889}
890
891void ArmToMipsAssembler::LDRH(int cc, int Rd, int Rn, uint32_t offset)
892{
893 mArmPC[mInum++] = pc();
894 // work-around for ARM default address mode of immed8_pre(0)
895 if (offset > AMODE_UNSUPPORTED) offset = 0;
896 switch (offset) {
897 case 0:
898 amode.value = 0;
899 // fall thru to next case ....
900 case AMODE_IMM_8_PRE: // no support yet for writeback
901 mMips->LHU(Rd, Rn, amode.value);
902 break;
903 case AMODE_IMM_8_POST:
904 mMips->LHU(Rd, Rn, 0);
905 mMips->ADDIU(Rn, Rn, amode.value);
906 break;
907 case AMODE_REG_PRE:
908 // we only support simple base +/- index
909 if (amode.reg >= 0) {
910 mMips->ADDU(R_at, Rn, amode.reg);
911 } else {
912 mMips->SUBU(R_at, Rn, abs(amode.reg));
913 }
914 mMips->LHU(Rd, R_at, 0);
915 break;
916 }
917}
918
919void ArmToMipsAssembler::LDRSB(int cc, int Rd, int Rn, uint32_t offset)
920{
921 mArmPC[mInum++] = pc();
922 mMips->NOP2();
923 NOT_IMPLEMENTED();
924}
925
926void ArmToMipsAssembler::LDRSH(int cc, int Rd, int Rn, uint32_t offset)
927{
928 mArmPC[mInum++] = pc();
929 mMips->NOP2();
930 NOT_IMPLEMENTED();
931}
932
933void ArmToMipsAssembler::STRH(int cc, int Rd, int Rn, uint32_t offset)
934{
935 mArmPC[mInum++] = pc();
936 // work-around for ARM default address mode of immed8_pre(0)
937 if (offset > AMODE_UNSUPPORTED) offset = 0;
938 switch (offset) {
939 case 0:
940 amode.value = 0;
941 // fall thru to next case ....
942 case AMODE_IMM_8_PRE: // no support yet for writeback
943 mMips->SH(Rd, Rn, amode.value);
944 break;
945 case AMODE_IMM_8_POST:
946 mMips->SH(Rd, Rn, 0);
947 mMips->ADDIU(Rn, Rn, amode.value);
948 break;
949 case AMODE_REG_PRE:
950 // we only support simple base +/- index
951 if (amode.reg >= 0) {
952 mMips->ADDU(R_at, Rn, amode.reg);
953 } else {
954 mMips->SUBU(R_at, Rn, abs(amode.reg));
955 }
956 mMips->SH(Rd, R_at, 0);
957 break;
958 }
959}
960
961
962
963#if 0
964#pragma mark -
965#pragma mark Block Data Transfer...
966#endif
967
968// block data transfer...
969void ArmToMipsAssembler::LDM(int cc, int dir,
970 int Rn, int W, uint32_t reg_list)
971{ // ED FD EA FA IB IA DB DA
972 // const uint8_t P[8] = { 1, 0, 1, 0, 1, 0, 1, 0 };
973 // const uint8_t U[8] = { 1, 1, 0, 0, 1, 1, 0, 0 };
974 // *mPC++ = (cc<<28) | (4<<25) | (uint32_t(P[dir])<<24) |
975 // (uint32_t(U[dir])<<23) | (1<<20) | (W<<21) | (Rn<<16) | reg_list;
976 mArmPC[mInum++] = pc();
977 mMips->NOP2();
978 NOT_IMPLEMENTED();
979}
980
981void ArmToMipsAssembler::STM(int cc, int dir,
982 int Rn, int W, uint32_t reg_list)
983{ // FA EA FD ED IB IA DB DA
984 // const uint8_t P[8] = { 0, 1, 0, 1, 1, 0, 1, 0 };
985 // const uint8_t U[8] = { 0, 0, 1, 1, 1, 1, 0, 0 };
986 // *mPC++ = (cc<<28) | (4<<25) | (uint32_t(P[dir])<<24) |
987 // (uint32_t(U[dir])<<23) | (0<<20) | (W<<21) | (Rn<<16) | reg_list;
988 mArmPC[mInum++] = pc();
989 mMips->NOP2();
990 NOT_IMPLEMENTED();
991}
992
993
994
995#if 0
996#pragma mark -
997#pragma mark Special...
998#endif
999
1000// special...
1001void ArmToMipsAssembler::SWP(int cc, int Rn, int Rd, int Rm) {
1002 // *mPC++ = (cc<<28) | (2<<23) | (Rn<<16) | (Rd << 12) | 0x90 | Rm;
1003 mArmPC[mInum++] = pc();
1004 mMips->NOP2();
1005 NOT_IMPLEMENTED();
1006}
1007
1008void ArmToMipsAssembler::SWPB(int cc, int Rn, int Rd, int Rm) {
1009 // *mPC++ = (cc<<28) | (2<<23) | (1<<22) | (Rn<<16) | (Rd << 12) | 0x90 | Rm;
1010 mArmPC[mInum++] = pc();
1011 mMips->NOP2();
1012 NOT_IMPLEMENTED();
1013}
1014
1015void ArmToMipsAssembler::SWI(int cc, uint32_t comment) {
1016 // *mPC++ = (cc<<28) | (0xF<<24) | comment;
1017 mArmPC[mInum++] = pc();
1018 mMips->NOP2();
1019 NOT_IMPLEMENTED();
1020}
1021
1022
1023#if 0
1024#pragma mark -
1025#pragma mark DSP instructions...
1026#endif
1027
1028// DSP instructions...
1029void ArmToMipsAssembler::PLD(int Rn, uint32_t offset) {
1030 LOG_ALWAYS_FATAL_IF(!((offset&(1<<24)) && !(offset&(1<<21))),
1031 "PLD only P=1, W=0");
1032 // *mPC++ = 0xF550F000 | (Rn<<16) | offset;
1033 mArmPC[mInum++] = pc();
1034 mMips->NOP2();
1035 NOT_IMPLEMENTED();
1036}
1037
1038void ArmToMipsAssembler::CLZ(int cc, int Rd, int Rm)
1039{
1040 mArmPC[mInum++] = pc();
1041 mMips->CLZ(Rd, Rm);
1042}
1043
1044void ArmToMipsAssembler::QADD(int cc, int Rd, int Rm, int Rn)
1045{
1046 // *mPC++ = (cc<<28) | 0x1000050 | (Rn<<16) | (Rd<<12) | Rm;
1047 mArmPC[mInum++] = pc();
1048 mMips->NOP2();
1049 NOT_IMPLEMENTED();
1050}
1051
1052void ArmToMipsAssembler::QDADD(int cc, int Rd, int Rm, int Rn)
1053{
1054 // *mPC++ = (cc<<28) | 0x1400050 | (Rn<<16) | (Rd<<12) | Rm;
1055 mArmPC[mInum++] = pc();
1056 mMips->NOP2();
1057 NOT_IMPLEMENTED();
1058}
1059
1060void ArmToMipsAssembler::QSUB(int cc, int Rd, int Rm, int Rn)
1061{
1062 // *mPC++ = (cc<<28) | 0x1200050 | (Rn<<16) | (Rd<<12) | Rm;
1063 mArmPC[mInum++] = pc();
1064 mMips->NOP2();
1065 NOT_IMPLEMENTED();
1066}
1067
1068void ArmToMipsAssembler::QDSUB(int cc, int Rd, int Rm, int Rn)
1069{
1070 // *mPC++ = (cc<<28) | 0x1600050 | (Rn<<16) | (Rd<<12) | Rm;
1071 mArmPC[mInum++] = pc();
1072 mMips->NOP2();
1073 NOT_IMPLEMENTED();
1074}
1075
1076// 16 x 16 signed multiply (like SMLAxx without the accumulate)
1077void ArmToMipsAssembler::SMUL(int cc, int xy,
1078 int Rd, int Rm, int Rs)
1079{
1080 mArmPC[mInum++] = pc();
1081
1082 // the 16 bits may be in the top or bottom half of 32-bit source reg,
1083 // as defined by the codes BB, BT, TB, TT (compressed param xy)
1084 // where x corresponds to Rm and y to Rs
1085
1086 // select half-reg for Rm
1087 if (xy & xyTB) {
1088 // use top 16-bits
1089 mMips->SRA(R_at, Rm, 16);
1090 } else {
1091 // use bottom 16, but sign-extend to 32
1092 if (mips32r2) {
1093 mMips->SEH(R_at, Rm);
1094 } else {
1095 mMips->SLL(R_at, Rm, 16);
1096 mMips->SRA(R_at, R_at, 16);
1097 }
1098 }
1099 // select half-reg for Rs
1100 if (xy & xyBT) {
1101 // use top 16-bits
1102 mMips->SRA(R_at2, Rs, 16);
1103 } else {
1104 // use bottom 16, but sign-extend to 32
1105 if (mips32r2) {
1106 mMips->SEH(R_at2, Rs);
1107 } else {
1108 mMips->SLL(R_at2, Rs, 16);
1109 mMips->SRA(R_at2, R_at2, 16);
1110 }
1111 }
1112 mMips->MUL(Rd, R_at, R_at2);
1113}
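// Example expansion, assuming a pre-mips32r2 core and illustrative registers:
// SMULBT Rd, Rm, Rs (bottom half of Rm times top half of Rs) becomes
//      sll   at,  Rm, 16        # sign-extend Rm[15:0] without SEH
//      sra   at,  at, 16
//      sra   at2, Rs, 16        # Rs[31:16], already sign-extended
//      mul   Rd,  at, at2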
1114
1115// signed 32b x 16b multiply, save top 32-bits of 48-bit result
1116void ArmToMipsAssembler::SMULW(int cc, int y,
1117 int Rd, int Rm, int Rs)
1118{
1119 mArmPC[mInum++] = pc();
1120
1121 // the selector yT or yB refers to reg Rs
1122 if (y & yT) {
1123        // zero the bottom 16 bits (two shifts); leftover low bits would corrupt the result
1124 mMips->SRL(R_at, Rs, 16);
1125 mMips->SLL(R_at, R_at, 16);
1126
1127 } else {
1128 // move low 16-bit half, to high half
1129 mMips->SLL(R_at, Rs, 16);
1130 }
1131 mMips->MULT(Rm, R_at);
1132 mMips->MFHI(Rd);
1133}
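// Example: SMULWB Rd, Rm, Rs (registers illustrative) becomes
//      sll   at, Rs, 16         # Rs[15:0] promoted to the high half
//      mult  Rm, at             # full 64-bit product lands in hi:lo
//      mfhi  Rd                 # hi == (Rm * Rs[15:0]) >> 16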
1134
1135// 16 x 16 signed multiply, accumulate: Rd = Rm{16} * Rs{16} + Rn
1136void ArmToMipsAssembler::SMLA(int cc, int xy,
1137 int Rd, int Rm, int Rs, int Rn)
1138{
1139 mArmPC[mInum++] = pc();
1140
1141 // the 16 bits may be in the top or bottom half of 32-bit source reg,
1142 // as defined by the codes BB, BT, TB, TT (compressed param xy)
1143 // where x corresponds to Rm and y to Rs
1144
1145 // select half-reg for Rm
1146 if (xy & xyTB) {
1147 // use top 16-bits
1148 mMips->SRA(R_at, Rm, 16);
1149 } else {
1150 // use bottom 16, but sign-extend to 32
1151 if (mips32r2) {
1152 mMips->SEH(R_at, Rm);
1153 } else {
1154 mMips->SLL(R_at, Rm, 16);
1155 mMips->SRA(R_at, R_at, 16);
1156 }
1157 }
1158 // select half-reg for Rs
1159 if (xy & xyBT) {
1160 // use top 16-bits
1161 mMips->SRA(R_at2, Rs, 16);
1162 } else {
1163 // use bottom 16, but sign-extend to 32
1164 if (mips32r2) {
1165 mMips->SEH(R_at2, Rs);
1166 } else {
1167 mMips->SLL(R_at2, Rs, 16);
1168 mMips->SRA(R_at2, R_at2, 16);
1169 }
1170 }
1171
1172 mMips->MUL(R_at, R_at, R_at2);
1173 mMips->ADDU(Rd, R_at, Rn);
1174}
1175
1176void ArmToMipsAssembler::SMLAL(int cc, int xy,
1177 int RdHi, int RdLo, int Rs, int Rm)
1178{
1179 // *mPC++ = (cc<<28) | 0x1400080 | (RdHi<<16) | (RdLo<<12) | (Rs<<8) | (xy<<4) | Rm;
1180 mArmPC[mInum++] = pc();
1181 mMips->NOP2();
1182 NOT_IMPLEMENTED();
1183}
1184
1185void ArmToMipsAssembler::SMLAW(int cc, int y,
1186 int Rd, int Rm, int Rs, int Rn)
1187{
1188 // *mPC++ = (cc<<28) | 0x1200080 | (Rd<<16) | (Rn<<12) | (Rs<<8) | (y<<4) | Rm;
1189 mArmPC[mInum++] = pc();
1190 mMips->NOP2();
1191 NOT_IMPLEMENTED();
1192}
1193
1194// used by ARMv6 version of GGLAssembler::filter32
1195void ArmToMipsAssembler::UXTB16(int cc, int Rd, int Rm, int rotate)
1196{
1197 mArmPC[mInum++] = pc();
1198
1199 //Rd[31:16] := ZeroExtend((Rm ROR (8 * sh))[23:16]),
1200 //Rd[15:0] := ZeroExtend((Rm ROR (8 * sh))[7:0]). sh 0-3.
1201
1202    mMips->ROTR(Rm, Rm, rotate * 8);
1203    mMips->LUI(R_at, 0x00FF);           // build the 0x00FF00FF mask in AT
        mMips->ORI(R_at, R_at, 0x00FF);     // (AND takes a register operand, not an immediate)
        mMips->AND(Rd, Rm, R_at);
1204}
1205
1206void ArmToMipsAssembler::UBFX(int cc, int Rd, int Rn, int lsb, int width)
1207{
1208 /* Placeholder for UBFX */
1209 mArmPC[mInum++] = pc();
1210
1211 mMips->NOP2();
1212 NOT_IMPLEMENTED();
1213}
1214
1215
1216
1217
1218
1219#if 0
1220#pragma mark -
1221#pragma mark MIPS Assembler...
1222#endif
1223
1224
1225//**************************************************************************
1226//**************************************************************************
1227//**************************************************************************
1228
1229
1230/* mips assembler
1231** this is a subset of mips32r2, targeted specifically at ARM instruction
1232** replacement in the pixelflinger/codeflinger code.
1233**
1234** To that end, there is no need for floating point or privileged
1235** instructions. This all runs in user space, no float.
1236**
1237** The syntax makes no attempt to be as complete as the assembler, with
1238** synthetic instructions, and automatic recognition of immediate operands
1239** (use the immediate form of the instruction), etc.
1240**
1241** We start with mips32r1, and may add r2 and dsp extensions if cpu
1242** supports. Decision will be made at compile time, based on gcc
1243** options. (makes sense since android will be built for a specific
1244** device)
1245*/
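/* Field layout used by the emitters below (R-type ADDU shown as an example;
** encodings per the MIPS32 ISA):
**
**      31    26 25   21 20   16 15   11 10    6 5      0
**      [  op   |  rs    |  rt    |  rd    |  sa   | funct ]
**
** ADDU rd, rs, rt has op = SPECIAL (0), sa = 0, funct = 0x21, i.e. exactly
** (spec_op<<OP_SHF) | (Rs<<RS_SHF) | (Rt<<RT_SHF) | (Rd<<RD_SHF) |
** (addu_fn<<FUNC_SHF) as written in ADDU() below.
*/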
1246
1247MIPSAssembler::MIPSAssembler(const sp<Assembly>& assembly, ArmToMipsAssembler *parent)
1248 : mParent(parent),
1249 mAssembly(assembly)
1250{
1251 mBase = mPC = (uint32_t *)assembly->base();
1252 mDuration = ggl_system_time();
1253}
1254
1255MIPSAssembler::MIPSAssembler(void* assembly)
1256 : mParent(NULL), mAssembly(NULL)
1257{
1258 mBase = mPC = (uint32_t *)assembly;
1259}
1260
1261MIPSAssembler::~MIPSAssembler()
1262{
1263}
1264
1265
1266uint32_t* MIPSAssembler::pc() const
1267{
1268 return mPC;
1269}
1270
1271uint32_t* MIPSAssembler::base() const
1272{
1273 return mBase;
1274}
1275
1276void MIPSAssembler::reset()
1277{
1278 mBase = mPC = (uint32_t *)mAssembly->base();
1279 mBranchTargets.clear();
1280 mLabels.clear();
1281 mLabelsInverseMapping.clear();
1282 mComments.clear();
1283}
1284
1285
1286// convert tabs to spaces, and remove any newline
1287// works with strings of limited size (makes a temp copy)
1288#define TABSTOP 8
1289void MIPSAssembler::string_detab(char *s)
1290{
1291 char *os = s;
1292 char temp[100];
1293 char *t = temp;
1294 int len = 99;
1295 int i = TABSTOP;
1296
1297 while (*s && len-- > 0) {
1298 if (*s == '\n') { s++; continue; }
1299 if (*s == '\t') {
1300 s++;
1301 for ( ; i>0; i--) {*t++ = ' '; len--; }
1302 } else {
1303 *t++ = *s++;
1304 }
1305 if (i <= 0) i = TABSTOP;
1306 i--;
1307 }
1308 *t = '\0';
1309 strcpy(os, temp);
1310}
1311
1312void MIPSAssembler::string_pad(char *s, int padded_len)
1313{
1314 int len = strlen(s);
1315 s += len;
1316 for (int i = padded_len - len; i > 0; --i) {
1317 *s++ = ' ';
1318 }
1319 *s = '\0';
1320}
1321
1322// ----------------------------------------------------------------------------
1323
1324void MIPSAssembler::disassemble(const char* name)
1325{
1326 char di_buf[140];
1327
1328 if (name) {
1329 ALOGW("%s:\n", name);
1330 }
1331
1332    bool arm_disasm_fmt = (mParent != NULL && mParent->mArmDisassemblyBuffer != NULL);
1333
1334    typedef char dstr[40];
1335    dstr *lines = arm_disasm_fmt ? (dstr *)mParent->mArmDisassemblyBuffer : NULL;
1336
1337    if (arm_disasm_fmt) {
1338 for (int i=0; i<mParent->mArmInstrCount; ++i) {
1339 string_detab(lines[i]);
1340 }
1341 }
1342
1343 // iArm is an index to Arm instructions 1...n for this assembly sequence
1344 // mArmPC[iArm] holds the value of the Mips-PC for the first MIPS
1345 // instruction corresponding to that Arm instruction number
1346
1347 int iArm = 0;
1348 size_t count = pc()-base();
1349 uint32_t* mipsPC = base();
1350 while (count--) {
1351 ssize_t label = mLabelsInverseMapping.indexOfKey(mipsPC);
1352 if (label >= 0) {
1353 ALOGW("%s:\n", mLabelsInverseMapping.valueAt(label));
1354 }
1355 ssize_t comment = mComments.indexOfKey(mipsPC);
1356 if (comment >= 0) {
1357 ALOGW("; %s\n", mComments.valueAt(comment));
1358 }
1359 // ALOGW("%08x: %08x ", int(i), int(i[0]));
1360 ::mips_disassem(mipsPC, di_buf, arm_disasm_fmt);
1361 string_detab(di_buf);
1362 string_pad(di_buf, 30);
1363        ALOGW("%08x: %08x  %s", uintptr_t(mipsPC), uint32_t(*mipsPC), di_buf);
1364        mipsPC++;
1365 }
1366}
1367
1368void MIPSAssembler::comment(const char* string)
1369{
1370 mComments.add(pc(), string);
1371}
1372
1373void MIPSAssembler::label(const char* theLabel)
1374{
1375 mLabels.add(theLabel, pc());
1376 mLabelsInverseMapping.add(pc(), theLabel);
1377}
1378
1379
1380void MIPSAssembler::prolog()
1381{
1382 // empty - done in ArmToMipsAssembler
1383}
1384
1385void MIPSAssembler::epilog(uint32_t touched)
1386{
1387 // empty - done in ArmToMipsAssembler
1388}
1389
1390int MIPSAssembler::generate(const char* name)
1391{
1392 // fixup all the branches
1393 size_t count = mBranchTargets.size();
1394 while (count--) {
1395 const branch_target_t& bt = mBranchTargets[count];
1396 uint32_t* target_pc = mLabels.valueFor(bt.label);
1397 LOG_ALWAYS_FATAL_IF(!target_pc,
1398 "error resolving branch targets, target_pc is null");
1399 int32_t offset = int32_t(target_pc - (bt.pc+1));
1400 *bt.pc |= offset & 0x00FFFF;
1401 }
1402
1403 mAssembly->resize( int(pc()-base())*4 );
1404
1405 // the instruction & data caches are flushed by CodeCache
1406 const int64_t duration = ggl_system_time() - mDuration;
1407 const char * const format = "generated %s (%d ins) at [%p:%p] in %lld ns\n";
1408 ALOGI(format, name, int(pc()-base()), base(), pc(), duration);
1409
1410    char value[PROPERTY_VALUE_MAX];
1411 value[0] = '\0';
1412
1413 property_get("debug.pf.disasm", value, "0");
1414
1415 if (atoi(value) != 0) {
1416 disassemble(name);
1417 }
1418
1419 return NO_ERROR;
1420}
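// Worked example of the branch fixup in generate(): for a BEQ emitted at MIPS
// address 0x100 whose label resolves to 0x120, target_pc - (bt.pc + 1) is
// (0x120 - 0x104) / 4 = 7 words, so 0x0007 is OR'ed into the low 16 bits of
// the branch word (MIPS branch offsets are word offsets relative to the
// delay-slot instruction).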
1421
1422uint32_t* MIPSAssembler::pcForLabel(const char* label)
1423{
1424 return mLabels.valueFor(label);
1425}
1426
1427
1428
1429#if 0
1430#pragma mark -
1431#pragma mark Arithmetic...
1432#endif
1433
1434void MIPSAssembler::ADDU(int Rd, int Rs, int Rt)
1435{
1436 *mPC++ = (spec_op<<OP_SHF) | (addu_fn<<FUNC_SHF)
1437 | (Rs<<RS_SHF) | (Rt<<RT_SHF) | (Rd<<RD_SHF);
1438}
1439
1440// MD00086 pdf says this is: ADDIU rt, rs, imm -- they do not use Rd
1441void MIPSAssembler::ADDIU(int Rt, int Rs, int16_t imm)
1442{
1443 *mPC++ = (addiu_op<<OP_SHF) | (Rt<<RT_SHF) | (Rs<<RS_SHF) | (imm & MSK_16);
1444}
1445
1446
1447void MIPSAssembler::SUBU(int Rd, int Rs, int Rt)
1448{
1449 *mPC++ = (spec_op<<OP_SHF) | (subu_fn<<FUNC_SHF) |
1450 (Rs<<RS_SHF) | (Rt<<RT_SHF) | (Rd<<RD_SHF) ;
1451}
1452
1453
1454void MIPSAssembler::SUBIU(int Rt, int Rs, int16_t imm) // really addiu(d, s, -j)
1455{
1456 *mPC++ = (addiu_op<<OP_SHF) | (Rt<<RT_SHF) | (Rs<<RS_SHF) | ((-imm) & MSK_16);
1457}
1458
1459
1460void MIPSAssembler::NEGU(int Rd, int Rs) // really subu(d, zero, s)
1461{
1462 MIPSAssembler::SUBU(Rd, 0, Rs);
1463}
1464
1465void MIPSAssembler::MUL(int Rd, int Rs, int Rt)
1466{
1467 *mPC++ = (spec2_op<<OP_SHF) | (mul_fn<<FUNC_SHF) |
1468 (Rs<<RS_SHF) | (Rt<<RT_SHF) | (Rd<<RD_SHF) ;
1469}
1470
1471void MIPSAssembler::MULT(int Rs, int Rt) // dest is hi,lo
1472{
1473 *mPC++ = (spec_op<<OP_SHF) | (mult_fn<<FUNC_SHF) | (Rt<<RT_SHF) | (Rs<<RS_SHF);
1474}
1475
1476void MIPSAssembler::MULTU(int Rs, int Rt) // dest is hi,lo
1477{
1478 *mPC++ = (spec_op<<OP_SHF) | (multu_fn<<FUNC_SHF) | (Rt<<RT_SHF) | (Rs<<RS_SHF);
1479}
1480
1481void MIPSAssembler::MADD(int Rs, int Rt) // hi,lo = hi,lo + Rs * Rt
1482{
1483 *mPC++ = (spec2_op<<OP_SHF) | (madd_fn<<FUNC_SHF) | (Rt<<RT_SHF) | (Rs<<RS_SHF);
1484}
1485
1486void MIPSAssembler::MADDU(int Rs, int Rt) // hi,lo = hi,lo + Rs * Rt
1487{
1488 *mPC++ = (spec2_op<<OP_SHF) | (maddu_fn<<FUNC_SHF) | (Rt<<RT_SHF) | (Rs<<RS_SHF);
1489}
1490
1491
1492void MIPSAssembler::MSUB(int Rs, int Rt) // hi,lo = hi,lo - Rs * Rt
1493{
1494 *mPC++ = (spec2_op<<OP_SHF) | (msub_fn<<FUNC_SHF) | (Rt<<RT_SHF) | (Rs<<RS_SHF);
1495}
1496
1497void MIPSAssembler::MSUBU(int Rs, int Rt) // hi,lo = hi,lo - Rs * Rt
1498{
1499 *mPC++ = (spec2_op<<OP_SHF) | (msubu_fn<<FUNC_SHF) | (Rt<<RT_SHF) | (Rs<<RS_SHF);
1500}
1501
1502
1503void MIPSAssembler::SEB(int Rd, int Rt) // sign-extend byte (mips32r2)
1504{
1505 *mPC++ = (spec3_op<<OP_SHF) | (bshfl_fn<<FUNC_SHF) | (seb_fn << SA_SHF) |
1506 (Rt<<RT_SHF) | (Rd<<RD_SHF);
1507}
1508
1509void MIPSAssembler::SEH(int Rd, int Rt) // sign-extend half-word (mips32r2)
1510{
1511 *mPC++ = (spec3_op<<OP_SHF) | (bshfl_fn<<FUNC_SHF) | (seh_fn << SA_SHF) |
1512 (Rt<<RT_SHF) | (Rd<<RD_SHF);
1513}
1514
1515
1516
1517#if 0
1518#pragma mark -
1519#pragma mark Comparisons...
1520#endif
1521
1522void MIPSAssembler::SLT(int Rd, int Rs, int Rt)
1523{
1524 *mPC++ = (spec_op<<OP_SHF) | (slt_fn<<FUNC_SHF) |
1525 (Rd<<RD_SHF) | (Rs<<RS_SHF) | (Rt<<RT_SHF);
1526}
1527
1528void MIPSAssembler::SLTI(int Rt, int Rs, int16_t imm)
1529{
1530 *mPC++ = (slti_op<<OP_SHF) | (Rt<<RT_SHF) | (Rs<<RS_SHF) | (imm & MSK_16);
1531}
1532
1533
1534void MIPSAssembler::SLTU(int Rd, int Rs, int Rt)
1535{
1536 *mPC++ = (spec_op<<OP_SHF) | (sltu_fn<<FUNC_SHF) |
1537 (Rd<<RD_SHF) | (Rs<<RS_SHF) | (Rt<<RT_SHF);
1538}
1539
1540void MIPSAssembler::SLTIU(int Rt, int Rs, int16_t imm)
1541{
1542 *mPC++ = (sltiu_op<<OP_SHF) | (Rt<<RT_SHF) | (Rs<<RS_SHF) | (imm & MSK_16);
1543}
1544
1545
1546
1547#if 0
1548#pragma mark -
1549#pragma mark Logical...
1550#endif
1551
1552void MIPSAssembler::AND(int Rd, int Rs, int Rt)
1553{
1554 *mPC++ = (spec_op<<OP_SHF) | (and_fn<<FUNC_SHF) |
1555 (Rd<<RD_SHF) | (Rs<<RS_SHF) | (Rt<<RT_SHF);
1556}
1557
1558void MIPSAssembler::ANDI(int Rt, int Rs, uint16_t imm) // todo: support larger immediate
1559{
1560 *mPC++ = (andi_op<<OP_SHF) | (Rt<<RT_SHF) | (Rs<<RS_SHF) | (imm & MSK_16);
1561}
1562
1563
1564void MIPSAssembler::OR(int Rd, int Rs, int Rt)
1565{
1566 *mPC++ = (spec_op<<OP_SHF) | (or_fn<<FUNC_SHF) |
1567 (Rd<<RD_SHF) | (Rs<<RS_SHF) | (Rt<<RT_SHF);
1568}
1569
1570void MIPSAssembler::ORI(int Rt, int Rs, uint16_t imm)
1571{
1572 *mPC++ = (ori_op<<OP_SHF) | (Rt<<RT_SHF) | (Rs<<RS_SHF) | (imm & MSK_16);
1573}
1574
1575void MIPSAssembler::NOR(int Rd, int Rs, int Rt)
1576{
1577 *mPC++ = (spec_op<<OP_SHF) | (nor_fn<<FUNC_SHF) |
1578 (Rd<<RD_SHF) | (Rs<<RS_SHF) | (Rt<<RT_SHF);
1579}
1580
1581void MIPSAssembler::NOT(int Rd, int Rs)
1582{
1583 MIPSAssembler::NOR(Rd, Rs, 0); // NOT(d,s) = NOR(d,s,zero)
1584}
1585
1586void MIPSAssembler::XOR(int Rd, int Rs, int Rt)
1587{
1588 *mPC++ = (spec_op<<OP_SHF) | (xor_fn<<FUNC_SHF) |
1589 (Rd<<RD_SHF) | (Rs<<RS_SHF) | (Rt<<RT_SHF);
1590}
1591
1592void MIPSAssembler::XORI(int Rt, int Rs, uint16_t imm) // todo: support larger immediate
1593{
1594 *mPC++ = (xori_op<<OP_SHF) | (Rt<<RT_SHF) | (Rs<<RS_SHF) | (imm & MSK_16);
1595}
1596
1597void MIPSAssembler::SLL(int Rd, int Rt, int shft)
1598{
1599 *mPC++ = (spec_op<<OP_SHF) | (sll_fn<<FUNC_SHF) |
1600 (Rd<<RD_SHF) | (Rt<<RT_SHF) | (shft<<RE_SHF);
1601}
1602
1603void MIPSAssembler::SLLV(int Rd, int Rt, int Rs)
1604{
1605 *mPC++ = (spec_op<<OP_SHF) | (sllv_fn<<FUNC_SHF) |
1606 (Rd<<RD_SHF) | (Rs<<RS_SHF) | (Rt<<RT_SHF);
1607}
1608
1609void MIPSAssembler::SRL(int Rd, int Rt, int shft)
1610{
1611 *mPC++ = (spec_op<<OP_SHF) | (srl_fn<<FUNC_SHF) |
1612 (Rd<<RD_SHF) | (Rt<<RT_SHF) | (shft<<RE_SHF);
1613}
1614
1615void MIPSAssembler::SRLV(int Rd, int Rt, int Rs)
1616{
1617 *mPC++ = (spec_op<<OP_SHF) | (srlv_fn<<FUNC_SHF) |
1618 (Rd<<RD_SHF) | (Rs<<RS_SHF) | (Rt<<RT_SHF);
1619}
1620
1621void MIPSAssembler::SRA(int Rd, int Rt, int shft)
1622{
1623 *mPC++ = (spec_op<<OP_SHF) | (sra_fn<<FUNC_SHF) |
1624 (Rd<<RD_SHF) | (Rt<<RT_SHF) | (shft<<RE_SHF);
1625}
1626
1627void MIPSAssembler::SRAV(int Rd, int Rt, int Rs)
1628{
1629 *mPC++ = (spec_op<<OP_SHF) | (srav_fn<<FUNC_SHF) |
1630 (Rd<<RD_SHF) | (Rs<<RS_SHF) | (Rt<<RT_SHF);
1631}
1632
1633void MIPSAssembler::ROTR(int Rd, int Rt, int shft) // mips32r2
1634{
1635 // note weird encoding (SRL + 1)
1636 *mPC++ = (spec_op<<OP_SHF) | (srl_fn<<FUNC_SHF) |
1637 (1<<RS_SHF) | (Rd<<RD_SHF) | (Rt<<RT_SHF) | (shft<<RE_SHF);
1638}
1639
1640void MIPSAssembler::ROTRV(int Rd, int Rt, int Rs) // mips32r2
1641{
1642 // note weird encoding (SRLV + 1)
1643 *mPC++ = (spec_op<<OP_SHF) | (srlv_fn<<FUNC_SHF) |
1644 (Rd<<RD_SHF) | (Rs<<RS_SHF) | (Rt<<RT_SHF) | (1<<RE_SHF);
1645}
1646
1647// uses at2 register (mapped to some appropriate mips reg)
1648void MIPSAssembler::RORsyn(int Rd, int Rt, int Rs)
1649{
1650 // synthetic: d = t rotated by s
1651 MIPSAssembler::NEGU(R_at2, Rs);
1652 MIPSAssembler::SLLV(R_at2, Rt, R_at2);
1653 MIPSAssembler::SRLV(Rd, Rt, Rs);
1654 MIPSAssembler::OR(Rd, Rd, R_at2);
1655}
1656
1657// immediate version - uses at2 register (mapped to some appropriate mips reg)
1658void MIPSAssembler::RORIsyn(int Rd, int Rt, int rot)
1659{
1660 // synthetic: d = t rotated by immed rot
1661 // d = s >> rot | s << (32-rot)
1662 MIPSAssembler::SLL(R_at2, Rt, 32-rot);
1663 MIPSAssembler::SRL(Rd, Rt, rot);
1664 MIPSAssembler::OR(Rd, Rd, R_at2);
1665}
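// Worked example: RORIsyn(Rd, Rt, 8) rotates Rt right by 8 bits:
//      sll   at2, Rt, 24        # the low 8 bits move to the top
//      srl   Rd,  Rt, 8         # the high 24 bits move down
//      or    Rd,  Rd, at2       # Rd = (Rt >> 8) | (Rt << 24)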
1666
1667void MIPSAssembler::CLO(int Rd, int Rs)
1668{
1669 // Rt field must have same gpr # as Rd
1670 *mPC++ = (spec2_op<<OP_SHF) | (clo_fn<<FUNC_SHF) |
1671 (Rd<<RD_SHF) | (Rs<<RS_SHF) | (Rd<<RT_SHF);
1672}
1673
1674void MIPSAssembler::CLZ(int Rd, int Rs)
1675{
1676 // Rt field must have same gpr # as Rd
1677 *mPC++ = (spec2_op<<OP_SHF) | (clz_fn<<FUNC_SHF) |
1678 (Rd<<RD_SHF) | (Rs<<RS_SHF) | (Rd<<RT_SHF);
1679}
1680
1681void MIPSAssembler::WSBH(int Rd, int Rt) // mips32r2
1682{
1683 *mPC++ = (spec3_op<<OP_SHF) | (bshfl_fn<<FUNC_SHF) | (wsbh_fn << SA_SHF) |
1684 (Rt<<RT_SHF) | (Rd<<RD_SHF);
1685}
1686
1687
1688
1689#if 0
1690#pragma mark -
1691#pragma mark Load/store...
1692#endif
1693
1694void MIPSAssembler::LW(int Rt, int Rbase, int16_t offset)
1695{
1696 *mPC++ = (lw_op<<OP_SHF) | (Rbase<<RS_SHF) | (Rt<<RT_SHF) | (offset & MSK_16);
1697}
1698
1699void MIPSAssembler::SW(int Rt, int Rbase, int16_t offset)
1700{
1701 *mPC++ = (sw_op<<OP_SHF) | (Rbase<<RS_SHF) | (Rt<<RT_SHF) | (offset & MSK_16);
1702}
1703
1704// lb is sign-extended
1705void MIPSAssembler::LB(int Rt, int Rbase, int16_t offset)
1706{
1707 *mPC++ = (lb_op<<OP_SHF) | (Rbase<<RS_SHF) | (Rt<<RT_SHF) | (offset & MSK_16);
1708}
1709
1710void MIPSAssembler::LBU(int Rt, int Rbase, int16_t offset)
1711{
1712 *mPC++ = (lbu_op<<OP_SHF) | (Rbase<<RS_SHF) | (Rt<<RT_SHF) | (offset & MSK_16);
1713}
1714
1715void MIPSAssembler::SB(int Rt, int Rbase, int16_t offset)
1716{
1717 *mPC++ = (sb_op<<OP_SHF) | (Rbase<<RS_SHF) | (Rt<<RT_SHF) | (offset & MSK_16);
1718}
1719
1720// lh is sign-extended
1721void MIPSAssembler::LH(int Rt, int Rbase, int16_t offset)
1722{
1723 *mPC++ = (lh_op<<OP_SHF) | (Rbase<<RS_SHF) | (Rt<<RT_SHF) | (offset & MSK_16);
1724}
1725
1726void MIPSAssembler::LHU(int Rt, int Rbase, int16_t offset)
1727{
1728 *mPC++ = (lhu_op<<OP_SHF) | (Rbase<<RS_SHF) | (Rt<<RT_SHF) | (offset & MSK_16);
1729}
1730
1731void MIPSAssembler::SH(int Rt, int Rbase, int16_t offset)
1732{
1733 *mPC++ = (sh_op<<OP_SHF) | (Rbase<<RS_SHF) | (Rt<<RT_SHF) | (offset & MSK_16);
1734}
1735
1736void MIPSAssembler::LUI(int Rt, int16_t offset)
1737{
1738 *mPC++ = (lui_op<<OP_SHF) | (Rt<<RT_SHF) | (offset & MSK_16);
1739}
1740
1741
1742
1743#if 0
1744#pragma mark -
1745#pragma mark Register move...
1746#endif
1747
1748void MIPSAssembler::MOVE(int Rd, int Rs)
1749{
1750 // encoded as "or rd, rs, zero"
1751 *mPC++ = (spec_op<<OP_SHF) | (or_fn<<FUNC_SHF) |
1752 (Rd<<RD_SHF) | (Rs<<RS_SHF) | (0<<RT_SHF);
1753}
1754
1755void MIPSAssembler::MOVN(int Rd, int Rs, int Rt)
1756{
1757 *mPC++ = (spec_op<<OP_SHF) | (movn_fn<<FUNC_SHF) |
1758 (Rd<<RD_SHF) | (Rs<<RS_SHF) | (Rt<<RT_SHF);
1759}
1760
1761void MIPSAssembler::MOVZ(int Rd, int Rs, int Rt)
1762{
1763 *mPC++ = (spec_op<<OP_SHF) | (movz_fn<<FUNC_SHF) |
1764 (Rd<<RD_SHF) | (Rs<<RS_SHF) | (Rt<<RT_SHF);
1765}
1766
1767void MIPSAssembler::MFHI(int Rd)
1768{
1769 *mPC++ = (spec_op<<OP_SHF) | (mfhi_fn<<FUNC_SHF) | (Rd<<RD_SHF);
1770}
1771
1772void MIPSAssembler::MFLO(int Rd)
1773{
1774 *mPC++ = (spec_op<<OP_SHF) | (mflo_fn<<FUNC_SHF) | (Rd<<RD_SHF);
1775}
1776
1777void MIPSAssembler::MTHI(int Rs)
1778{
1779 *mPC++ = (spec_op<<OP_SHF) | (mthi_fn<<FUNC_SHF) | (Rs<<RS_SHF);
1780}
1781
1782void MIPSAssembler::MTLO(int Rs)
1783{
1784 *mPC++ = (spec_op<<OP_SHF) | (mtlo_fn<<FUNC_SHF) | (Rs<<RS_SHF);
1785}
1786
1787
1788
1789#if 0
1790#pragma mark -
1791#pragma mark Branch...
1792#endif
1793
1794// temporarily forcing a NOP into branch-delay slot, just to be safe
1795// todo: remove NOP, optimize use of delay slots
1796void MIPSAssembler::B(const char* label)
1797{
1798 mBranchTargets.add(branch_target_t(label, mPC));
1799
1800 // encoded as BEQ zero, zero, offset
1801 *mPC++ = (beq_op<<OP_SHF) | (0<<RT_SHF)
1802 | (0<<RS_SHF) | 0; // offset filled in later
1803
1804 MIPSAssembler::NOP();
1805}
1806
1807void MIPSAssembler::BEQ(int Rs, int Rt, const char* label)
1808{
1809 mBranchTargets.add(branch_target_t(label, mPC));
1810 *mPC++ = (beq_op<<OP_SHF) | (Rt<<RT_SHF) | (Rs<<RS_SHF) | 0;
1811 MIPSAssembler::NOP();
1812}
1813
1814void MIPSAssembler::BNE(int Rs, int Rt, const char* label)
1815{
1816 mBranchTargets.add(branch_target_t(label, mPC));
1817 *mPC++ = (bne_op<<OP_SHF) | (Rt<<RT_SHF) | (Rs<<RS_SHF) | 0;
1818 MIPSAssembler::NOP();
1819}
1820
1821void MIPSAssembler::BLEZ(int Rs, const char* label)
1822{
1823 mBranchTargets.add(branch_target_t(label, mPC));
1824 *mPC++ = (blez_op<<OP_SHF) | (0<<RT_SHF) | (Rs<<RS_SHF) | 0;
1825 MIPSAssembler::NOP();
1826}
1827
1828void MIPSAssembler::BLTZ(int Rs, const char* label)
1829{
1830 mBranchTargets.add(branch_target_t(label, mPC));
1831 *mPC++ = (regimm_op<<OP_SHF) | (bltz_fn<<RT_SHF) | (Rs<<RS_SHF) | 0;
1832 MIPSAssembler::NOP();
1833}
1834
1835void MIPSAssembler::BGTZ(int Rs, const char* label)
1836{
1837 mBranchTargets.add(branch_target_t(label, mPC));
1838 *mPC++ = (bgtz_op<<OP_SHF) | (0<<RT_SHF) | (Rs<<RS_SHF) | 0;
1839 MIPSAssembler::NOP();
1840}
1841
1842
1843void MIPSAssembler::BGEZ(int Rs, const char* label)
1844{
1845 mBranchTargets.add(branch_target_t(label, mPC));
1846 *mPC++ = (regimm_op<<OP_SHF) | (bgez_fn<<RT_SHF) | (Rs<<RS_SHF) | 0;
1847 MIPSAssembler::NOP();
1848}
1849
1850void MIPSAssembler::JR(int Rs)
1851{
1852 *mPC++ = (spec_op<<OP_SHF) | (Rs<<RS_SHF) | (jr_fn << FUNC_SHF);
1853 MIPSAssembler::NOP();
1854}
1855
1856
1857#if 0
1858#pragma mark -
1859#pragma mark Synthesized Branch...
1860#endif
1861
1862// synthetic variants of branches (using slt & friends)
1863void MIPSAssembler::BEQZ(int Rs, const char* label)
1864{
1865 BEQ(Rs, R_zero, label);
1866}
1867
1868void MIPSAssembler::BNEZ(int Rs, const char* label)
1869{
1870    BNE(Rs, R_zero, label);
1871}
1872
1873void MIPSAssembler::BGE(int Rs, int Rt, const char* label)
1874{
1875 SLT(R_at, Rs, Rt);
1876 BEQ(R_at, R_zero, label);
1877}
1878
1879void MIPSAssembler::BGEU(int Rs, int Rt, const char* label)
1880{
1881 SLTU(R_at, Rs, Rt);
1882 BEQ(R_at, R_zero, label);
1883}
1884
1885void MIPSAssembler::BGT(int Rs, int Rt, const char* label)
1886{
1887 SLT(R_at, Rt, Rs); // rev
1888 BNE(R_at, R_zero, label);
1889}
1890
1891void MIPSAssembler::BGTU(int Rs, int Rt, const char* label)
1892{
1893 SLTU(R_at, Rt, Rs); // rev
1894 BNE(R_at, R_zero, label);
1895}
1896
1897void MIPSAssembler::BLE(int Rs, int Rt, const char* label)
1898{
1899 SLT(R_at, Rt, Rs); // rev
1900 BEQ(R_at, R_zero, label);
1901}
1902
1903void MIPSAssembler::BLEU(int Rs, int Rt, const char* label)
1904{
1905 SLTU(R_at, Rt, Rs); // rev
1906 BEQ(R_at, R_zero, label);
1907}
1908
1909void MIPSAssembler::BLT(int Rs, int Rt, const char* label)
1910{
1911 SLT(R_at, Rs, Rt);
1912 BNE(R_at, R_zero, label);
1913}
1914
1915void MIPSAssembler::BLTU(int Rs, int Rt, const char* label)
1916{
1917 SLTU(R_at, Rs, Rt);
1918 BNE(R_at, R_zero, label);
1919}
1920
1921
1922
1923
1924#if 0
1925#pragma mark -
1926#pragma mark Misc...
1927#endif
1928
1929void MIPSAssembler::NOP(void)
1930{
1931 // encoded as "sll zero, zero, 0", which is all zero
1932 *mPC++ = (spec_op<<OP_SHF) | (sll_fn<<FUNC_SHF);
1933}
1934
1935// using this as special opcode for not-yet-implemented ARM instruction
1936void MIPSAssembler::NOP2(void)
1937{
1938 // encoded as "sll zero, zero, 2", still a nop, but a unique code
1939 *mPC++ = (spec_op<<OP_SHF) | (sll_fn<<FUNC_SHF) | (2 << RE_SHF);
1940}
1941
1942// using this as special opcode for purposefully NOT implemented ARM instruction
1943void MIPSAssembler::UNIMPL(void)
1944{
1945 // encoded as "sll zero, zero, 3", still a nop, but a unique code
1946 *mPC++ = (spec_op<<OP_SHF) | (sll_fn<<FUNC_SHF) | (3 << RE_SHF);
1947}
1948
1949
1950}; // namespace android
1951
1952