ref: 8483799d4a0f80e38edfb83f1772af840cafe637
parent: 7f8bd359548ce61cad5011e44075d21e83d56314
author: Jacob Moody <[email protected]>
date: Mon Apr 1 01:20:20 EDT 2024
9c/9l/libmach: handle 64 bit constants

* Add a handful of 64 bit constant classifications to 9l, along with
  instruction generation for each.
* 9c now avoids generating immediate instructions for 64 bit constants.
* libmach knows about 9l's generation so it can present a better disassembly.
* libmach now properly displays MOVD for moves between registers on 64 bit targets.
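
For orientation (an illustrative sketch, not part of the patch): the new classes split a 64 bit immediate by which 16 bit halfwords are nonzero, roughly mirroring the checks added to span.c below for the larger classes. The names and helper here are made up for the example.

#include <stdio.h>

/* hypothetical standalone classifier, mirroring the new span.c checks */
enum { LCON, VULCON, VUUCON, VUCON, VCON };

static int
classify(unsigned long long v)
{
	if((v & 0xffffffff00000000ULL) == 0)		/* fits in the low 32 bits */
		return LCON;
	if((v & 0xffffffff80000000ULL) == 0xffffffff80000000ULL)	/* sign-extended 32 bit negative */
		return LCON;
	if((v & 0xffffffffULL) == 0){			/* low 32 bits clear */
		if((v & 0xffff000000000000ULL) == 0)
			return VULCON;			/* only bits 32-47 may be set */
		if((v & 0x0000ffff00000000ULL) == 0)
			return VUUCON;			/* only bits 48-63 may be set */
		return VUCON;				/* anything in the high 32 bits */
	}
	return VCON;					/* general 64 bit constant */
}

int
main(void)
{
	printf("%d %d %d %d %d\n",
		classify(0x12345678ULL),		/* LCON */
		classify(0x0000123400000000ULL),	/* VULCON */
		classify(0x1234000000000000ULL),	/* VUUCON */
		classify(0x1234567800000000ULL),	/* VUCON */
		classify(0x123456789abcdef0ULL));	/* VCON */
	return 0;
}
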
--- a/sys/src/cmd/9c/cgen.c
+++ b/sys/src/cmd/9c/cgen.c
@@ -1,5 +1,15 @@
#include "gc.h"
+static int
+isim32(vlong v)
+{
+ if((v & 0xffffffff80000000) == 0xffffffff80000000) // 32-bit negative
+ return 1;
+ if((v & 0xffffffff00000000) == 0) // 32-bit positive
+ return 1;
+ return 0;
+}
+
void
cgen(Node *n, Node *nn)
{
@@ -148,7 +158,7 @@
* immediate operands
*/
if(nn != Z)
- if(r->op == OCONST)
+ if(r->op == OCONST && isim32(r->vconst))
if(!typefd[n->type->etype]) {
cgen(l, nn);
if(r->vconst == 0)
@@ -203,7 +213,7 @@
case OASOR:
if(l->op == OBIT)
goto asbitop;
- if(r->op == OCONST)
+ if(r->op == OCONST && isim32(r->vconst))
if(!typefd[r->type->etype])
if(!typefd[n->type->etype]) {
if(l->addable < INDEXED)
--- a/sys/src/cmd/9l/asmout.c
+++ b/sys/src/cmd/9l/asmout.c
@@ -138,6 +138,21 @@
return LOP_IRR(OP_ORIS, r, REGZERO, v);
return AOP_IRR(OP_ADDIS, r, REGZERO, v);
}
+
+static ulong
+rotateleft32(Prog *p, int as, int from, int to)
+{
+ ulong o;
+ uchar mask[2];
+
+ maskgen64(p, mask, 0xFFFFFFFFULL<<32);
+ if(mask[1] != (63-32))
+ diag("invalid mask for shift: %llux (shift %d)\n%P", 0xFFFFFFFFULL<<32, 32, p);
+ o = AOP_RRR(opirr(as), from, to, (32&0x1F));
+ o |= (mask[0]&31L)<<6 | 1<<1;
+ assert((mask[0] & 0x20) == 0);
+ return o;
+}
int
asmout(Prog *p, Optab *o, int aflag)
@@ -949,6 +964,35 @@
reloc(&p->from, p->pc, 1);
break;
+ /* 64 bit constant operations */
+
+ case 77: /* mov $vucon,r */
+ d = vregoff(&p->from);
+ o1 = loadu32(p->to.reg, d>>32);
+ o2 = LOP_IRR(OP_ORI, p->to.reg, p->to.reg, d>>32);
+ o3 = rotateleft32(p, ARLDMI, p->to.reg, p->to.reg);
+ break;
+
+ case 78: /* mov $vcon,r */
+ d = vregoff(&p->from);
+ o1 = loadu32(p->to.reg, d);
+ o2 = loadu32(REGTMP, d>>32);
+ o3 = LOP_IRR(OP_ORI, p->to.reg, p->to.reg, (long)d);
+ o4 = LOP_IRR(OP_ORI, REGTMP, REGTMP, d>>32);
+ o5 = rotateleft32(p, ARLDMI, REGTMP, p->to.reg);
+ break;
+
+ case 79: /* mov $vulcon,r */
+ d = vregoff(&p->from);
+ o1 = LOP_IRR(OP_ORI, p->to.reg, REGZERO, d>>32);
+ o2 = rotateleft32(p, ARLDMI, p->to.reg, p->to.reg);
+ break;
+
+ case 80: /* mov $vuucon,r */
+ d = vregoff(&p->from);
+ o1 = loadu32(p->to.reg, d>>32);
+ o2 = rotateleft32(p, ARLDMI, p->to.reg, p->to.reg);
+ break;
}
if(aflag)
return o1;
--- a/sys/src/cmd/9l/cnam.c
+++ b/sys/src/cmd/9l/cnam.c
@@ -11,6 +11,10 @@
"ADDCON",
"ANDCON",
"LCON",
+ "VULCON",
+ "VUUCON",
+ "VUCON",
+ "VCON",
"SACON",
"SECON",
"LACON",
--- a/sys/src/cmd/9l/l.h
+++ b/sys/src/cmd/9l/l.h
@@ -140,7 +140,11 @@
C_UCON, /* low 16 bits 0 */
C_ADDCON, /* -0x8000 <= v < 0 */
C_ANDCON, /* 0 < v <= 0xFFFF */
- C_LCON, /* other */
+ C_LCON, /* 32 bit */
+ C_VULCON, /* 64 bit, low 32 bits 0, top 16 bits zero */
+ C_VUUCON, /* 64 bit, low 32 bits 0, top 16 bits only */
+ C_VUCON, /* 64 bit, low 32 bits 0 */
+ C_VCON, /* 64 bit */
C_SACON,
C_SECON,
C_LACON,
--- a/sys/src/cmd/9l/optab.c
+++ b/sys/src/cmd/9l/optab.c
@@ -92,9 +92,9 @@
{ ASRAD, C_SCON, C_REG, C_NONE, C_REG, 56, 4, 0 },
{ ASRAD, C_SCON, C_NONE, C_NONE, C_REG, 56, 4, 0 },
- { ARLWMI, C_SCON, C_REG, C_LCON, C_REG, 62, 4, 0 },
- { ARLWMI, C_REG, C_REG, C_LCON, C_REG, 63, 4, 0 },
- { ARLDMI, C_SCON, C_REG, C_LCON, C_REG, 30, 4, 0 },
+ { ARLWMI, C_SCON, C_REG, C_VCON, C_REG, 62, 4, 0 },
+ { ARLWMI, C_REG, C_REG, C_VCON, C_REG, 63, 4, 0 },
+ { ARLDMI, C_SCON, C_REG, C_VCON, C_REG, 30, 4, 0 },
{ ARLDC, C_SCON, C_REG, C_LCON, C_REG, 29, 4, 0 },
{ ARLDCL, C_SCON, C_REG, C_LCON, C_REG, 29, 4, 0 },
@@ -223,9 +223,13 @@
{ AMOVWZ, C_LACON,C_NONE, C_NONE, C_REG, 26, 8, REGSP },
{ AMOVWZ, C_ADDCON,C_NONE, C_NONE, C_REG, 3, 4, REGZERO },
- /* load unsigned/long constants (TO DO: check) */
+ /* load unsigned/long/vlong constants */
{ AMOVD, C_UCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO },
{ AMOVD, C_LCON, C_NONE, C_NONE, C_REG, 19, 8, 0 },
+ { AMOVD, C_VUCON, C_NONE, C_NONE, C_REG, 77, 12, 0 },
+ { AMOVD, C_VULCON, C_NONE, C_NONE, C_REG, 79, 8, 0 },
+ { AMOVD, C_VUUCON, C_NONE, C_NONE, C_REG, 80, 8, 0 },
+ { AMOVD, C_VCON, C_NONE, C_NONE, C_REG, 78, 20, 0 },
{ AMOVW, C_UCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO },
{ AMOVW, C_LCON, C_NONE, C_NONE, C_REG, 19, 8, 0 },
{ AMOVWZ, C_UCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO },
--- a/sys/src/cmd/9l/span.c
+++ b/sys/src/cmd/9l/span.c
@@ -266,13 +266,33 @@
return C_ANDCON;
if((instoffset & 0xffff) == 0 && isuint32(instoffset)) /* && (instoffset & (1<<31)) == 0) */
return C_UCON;
- return C_LCON;
+ if((instoffset & 0xffffffff00000000) == 0) // 32-bit positive
+ return C_LCON;
+ if((instoffset & 0xffffffff) == 0){
+ if((instoffset & 0xffff000000000000ull) == 0)
+ return C_VULCON;
+ if((instoffset & 0x0000ffff00000000ull) == 0)
+ return C_VUUCON;
+ return C_VUCON;
+ }
+ return C_VCON;
}
if(instoffset >= -0x8000)
return C_ADDCON;
if((instoffset & 0xffff) == 0 && isint32(instoffset))
return C_UCON;
- return C_LCON;
+ if((instoffset & 0xffffffff80000000) == 0xffffffff80000000) // 32-bit negative
+ return C_LCON;
+ if((instoffset & 0xffffffff00000000) == 0) // 32-bit positive
+ return C_LCON;
+ if((instoffset & 0xffffffff) == 0){
+ if((instoffset & 0xffff000000000000ull) == 0)
+ return C_VULCON;
+ if((instoffset & 0x0000ffff00000000ull) == 0)
+ return C_VUUCON;
+ return C_VUCON;
+ }
+ return C_VCON;
case D_EXTERN:
case D_STATIC:
@@ -398,6 +418,10 @@
if(a == b)
return 1;
switch(a) {
+ case C_VCON:
+ if(b == C_VUCON || b == C_LCON || b == C_ZCON || b == C_SCON || b == C_UCON || b == C_ADDCON || b == C_ANDCON)
+ return 1;
+ break;
case C_LCON:
if(b == C_ZCON || b == C_SCON || b == C_UCON || b == C_ADDCON || b == C_ANDCON)
return 1;
--- a/sys/src/libmach/qdb.c
+++ b/sys/src/libmach/qdb.c
@@ -147,8 +147,7 @@
uchar imm; /* bits 16-19 */
ushort xo; /* bits 21-30, 22-30, 26-30, or 30 (beware) */
uvlong imm64;
- long w0;
- long w1;
+ long w[5]; /* full context of a combined pseudo instruction */
uchar pop; /* op of second half of prefixed instruction */
uvlong addr; /* pc of instruction */
short target;
@@ -241,7 +240,7 @@
i->imm64 <<= 16;
else if(i->op == 25 || i->op == 27 || i->op == 29)
i->imm64 = (uvlong)(i->uimm<<16);
- i->w0 = w;
+ i->w[0] = w;
i->target = -1;
i->addr = pc;
i->size = 1;
@@ -252,26 +251,62 @@
mkinstr(uvlong pc, Instr *i)
{
Instr x;
+ Instr sf[3];
ulong w;
+ int j;
if(decode(pc, i) < 0)
return -1;
/*
- * combine ADDIS/ORI (CAU/ORIL) into MOVW
- * also ORIS/ORIL for unsigned in 64-bit mode
+ * Linker has to break out larger constants into multiple instructions.
+ * Combine them back together into one MOV.
+ * 15 is addis, 25 is oris, 24 is ori.
*/
- if((i->op == 15 || i->op == 25) && i->ra==0) {
+ if((i->op == 15 && i->ra == 0) || (i->op == 25 && i->rs == 0) || (i->op == 24 && i->rs == 0)) {
if(decode(pc+4, &x) < 0)
- return -1;
- if(x.op == 24 && x.rs == x.ra && x.ra == i->rd) {
+ return 1;
+
+ /* very specific worst case 64 bit load */
+ if(x.rd == 31 && (x.op == 15 && x.ra == 0) || (x.op == 25 && x.rs == 0)){
+ for(j = 0; j < nelem(sf); j++)
+ if(decode(pc + 4*(j+2), sf+j) < 0)
+ goto Next;
+ if(sf[0].op == 24 && sf[0].rs == sf[0].ra && sf[0].ra == i->rd)
+ if(sf[1].op == 24 && sf[1].rs == sf[1].ra && sf[1].ra == 31)
+ if(sf[2].ra == (i->rs == 0 ? i->ra : i->rs))
+ if(sf[2].op == 30 && IBF(sf[2].w[0], 27, 30) == 7)
+ if(sf[2].xsh == 32 && IBF(sf[2].w[0], 21, 26) == 0){
+ i->size = 5;
+ i->imm64 = (i->imm64&0xFFFF0000) | ((sf[0].imm64&0xFFFF));
+ i->imm64 |= ((x.imm64&0xFFFF0000)<<32) | ((sf[1].imm64&0xFFFF)<<32);
+ }
+ return 1;
+ }
+
+Next:
+ if(i->op != 24 && x.op == 24 && x.rs == x.ra && x.ra == i->rd) {
i->imm64 |= (x.imm64 & 0xFFFF);
if(i->op != 15)
i->imm64 &= 0xFFFFFFFFUL;
- i->w1 = x.w0;
+ i->w[1] = x.w[0];
i->target = x.rd;
i->size++;
- return 1;
+ if(decode(pc+8, &x) < 0)
+ return 1;
}
+
+ /* 64 bit constant mov with lower 32 bits zero */
+ if(x.ra == (i->rs == 0 ? i->ra : i->rs))
+ if(x.op == 30 && IBF(x.w[0], 27, 30) == 7)
+ if(x.xsh == 32 && IBF(x.w[0], 21, 26) == 0){
+ i->imm64 <<= 32;
+ if(i->size == 2)
+ i->w[2] = x.w[0];
+ else
+ i->w[1] = x.w[0];
+ i->size++;
+ }
+ return 1;
}
/* ISA3.1+ prefixed instructions */
@@ -278,8 +313,8 @@
if(i->op == 1){
if(get4(mymap, pc+4, &w) < 0)
return -1;
- i->w1 = w;
- i->pop = IBF(i->w1, 0, 5);
+ i->w[1] = w;
+ i->pop = IBF(i->w[1], 0, 5);
i->size++;
}
return 1;
@@ -398,9 +433,9 @@
addi(Opcode *o, Instr *i)
{
if (i->op==14 && i->ra == 0)
- format("MOVW", i, "%i,R%d");
+ format("MOV%N", i, "%i,R%d");
else if (i->ra == REGSB) {
- bprint(i, "MOVW\t$");
+ format("MOV%N\t$", i, nil);
address(i);
bprint(i, ",R%d", i->rd);
} else if(i->op==14 && i->simm < 0) {
@@ -417,22 +452,23 @@
static void
addis(Opcode *o, Instr *i)
{
- long v;
+ vlong v;
v = i->imm64;
- if (i->op==15 && i->ra == 0)
- bprint(i, "MOVW\t$%lux,R%d", v, i->rd);
- else if (i->op==15 && i->ra == REGSB) {
- bprint(i, "MOVW\t$");
+ if (i->op==15 && i->ra == 0){
+ format("MOV%N\t", i, nil);
+ bprint(i, "$%llux,R%d", v, i->rd);
+ } else if (i->op==15 && i->ra == REGSB) {
+ format("MOV%N\t$", i, nil);
address(i);
bprint(i, ",R%d", i->rd);
} else if(i->op==15 && v < 0) {
- bprint(i, "SUB\t$%ld,R%d", -v, i->ra);
+ bprint(i, "SUB\t$%lld,R%d", -v, i->ra);
if(i->rd != i->ra)
bprint(i, ",R%d", i->rd);
} else {
format(o->mnemonic, i, nil);
- bprint(i, "\t$%ld,R%d", v, i->ra);
+ bprint(i, "\t$%lld,R%d", v, i->ra);
if(i->rd != i->ra)
bprint(i, ",R%d", i->rd);
}
@@ -683,10 +719,12 @@
if (i->rs == 0 && i->ra == 0 && i->rb == 0)
format("NOP", i, nil);
else if (i->rs == i->rb)
- format("MOVW", i, "R%b,R%a");
+ format("MOV%N", i, "R%b,R%a");
else
and(o, i);
- } else
+ } else if(i->op == 24 && i->rs == 0)
+ format("MOV%N", i, "$%B,R%a");
+ else
and(o, i);
}
@@ -693,8 +731,13 @@
static void
shifted(Opcode *o, Instr *i)
{
+ if (i->op == 25 && i->rs == 0){
+ format("MOV%N\t", i, nil);
+ bprint(i, "$%llux,R%d", i->imm64, i->ra);
+ return;
+ }
format(o->mnemonic, i, nil);
- bprint(i, "\t$%lux,", (ulong)i->uimm<<16);
+ bprint(i, "\t$%llux,", i->imm64);
if (i->rs == i->ra)
bprint(i, "R%d", i->ra);
else
@@ -851,7 +894,7 @@
long l;
char buf[16];
- l = ((IBF(i->w0, 11, 15)<<11) | (IBF(i->w0, 16, 25)<<1) | (i->rc))<<16;
+ l = ((IBF(i->w[0], 11, 15)<<11) | (IBF(i->w[0], 16, 25)<<1) | (i->rc))<<16;
snprint(buf, sizeof buf, "$%ld,PC,R%%d", l);
format(o->mnemonic, i, buf);
}
@@ -859,7 +902,7 @@
static void
vsldbi(Opcode *o, Instr *i)
{
- switch(IBF(i->w0, 21, 22)){
+ switch(IBF(i->w[0], 21, 22)){
case 1:
format("vsrdbi", i, o->ken);
break;
@@ -1014,16 +1057,16 @@
for(p = popcodes; p->mnemonic != nil; p++){
if(i->pop != p->op2)
continue;
- if((i->w0 & p->xomask1) != p->xo1)
+ if((i->w[0] & p->xomask1) != p->xo1)
continue;
- if((i->w1 & p->xomask2) != p->xo2)
+ if((i->w[1] & p->xomask2) != p->xo2)
continue;
format(p->mnemonic, i, nil);
return;
}
- if((i->w0 & XXM1) == 3<<24)
+ if((i->w[0] & XXM1) == 3<<24)
format("NOP", i, nil);
- else if((i->w0 & XXM4) == 0){
+ else if((i->w[0] & XXM4) == 0){
if((i->pop & ~1) == 25<<1)
format("plxv", i, nil);
else if((i->pop & ~1) == 27<<1)
@@ -1178,18 +1221,21 @@
{19, 0, ALL, "MOVFL", gen, "%S,%D"},
{63, 64, ALL, "MOVCRFS", gen, "%S,%D"},
- {31, 512, ALL, "MOVW", gen, "XER,%D"},
- {31, 19, ALL, "MOVW", gen, "CR,R%d"},
+ {31, 19, ALL, "MOV%N", gen, "CR,R%d"},
- {63, 583, ALL, "MOVW%C", gen, "FPSCR, F%d"}, /* mffs */
- {31, 83, ALL, "MOVW", gen, "MSR,R%d"},
- {31, 339, ALL, "MOVW", gen, "%P,R%d"},
- {31, 595, ALL, "MOVW", gen, "SEG(%a),R%d"},
- {31, 659, ALL, "MOVW", gen, "SEG(R%b),R%d"},
- {31, 323, ALL, "MOVW", gen, "DCR(%Q),R%d"},
- {31, 451, ALL, "MOVW", gen, "R%s,DCR(%Q)"},
- {31, 259, ALL, "MOVW", gen, "DCR(R%a),R%d"},
- {31, 387, ALL, "MOVW", gen, "R%s,DCR(R%a)"},
+ {31, 512, ALL, "MOVW", gen, "XER,%D"}, /* deprecated */
+ {31, 595, ALL, "MOVW", gen, "SEG(%a),R%d"}, /* deprecated */
+ {31, 659, ALL, "MOVW", gen, "SEG(R%b),R%d"},/* deprecated */
+ {31, 323, ALL, "MOVW", gen, "DCR(%Q),R%d"}, /* deprecated */
+ {31, 451, ALL, "MOVW", gen, "R%s,DCR(%Q)"}, /* deprecated */
+ {31, 259, ALL, "MOVW", gen, "DCR(R%a),R%d"},/* deprecated */
+ {31, 387, ALL, "MOVW", gen, "R%s,DCR(R%a)"},/* deprecated */
+ {31, 210, ALL, "MOVW", gen, "R%s,SEG(%a)"}, /* deprecated */
+ {31, 242, ALL, "MOVW", gen, "R%s,SEG(R%b)"},/* deprecated */
+
+ {63, 583, ALL, "MOV%N%C", gen, "FPSCR, F%d"}, /* mffs */
+ {31, 83, ALL, "MOV%N", gen, "MSR,R%d"},
+ {31, 339, ALL, "MOV%N", gen, "%P,R%d"},
{31, 144, ALL, "MOVFL", gen, "R%s,%m,CR"},
{63, 70, ALL, "MTFSB0%C", gencc, "%D"},
{63, 38, ALL, "MTFSB1%C", gencc, "%D"},
@@ -1197,9 +1243,7 @@
{63, 134, ALL, "MOVFL%C", gencc, "%K,%D"},
{31, 146, ALL, "MOVW", gen, "R%s,MSR"},
{31, 178, ALL, "MOVD", gen, "R%s,MSR"},
- {31, 467, ALL, "MOVW", gen, "R%s,%P"},
- {31, 210, ALL, "MOVW", gen, "R%s,SEG(%a)"},
- {31, 242, ALL, "MOVW", gen, "R%s,SEG(R%b)"},
+ {31, 467, ALL, "MOV%N", gen, "R%s,%P"},
{31, 7, ALL, "MOVBE", vldx, 0},
{31, 39, ALL, "MOVHE", vldx, 0},
@@ -1263,7 +1307,7 @@
{31, 124, ALL, "NOR%C", gencc, il3},
{31, 444, ALL, "OR%C", or, il3},
{31, 412, ALL, "ORN%C", or, il3},
- {24, 0, 0, "OR", and, "%I,R%d,R%a"},
+ {24, 0, 0, "OR", or, "%I,R%d,R%a"},
{25, 0, 0, "OR", shifted, 0},
{19, 50, ALL, "RFI", gen, 0},
@@ -2301,6 +2345,10 @@
bprint(i, "%d", i->frc);
break;
+ case 'B':
+ bprint(i, "%llx", i->imm64);
+ break;
+
case 'd':
case 's':
bprint(i, "%d", i->rd);
@@ -2325,7 +2373,7 @@
break;
case 'E':
- switch(IBF(i->w0,27,30)){ /* low bit is top bit of shift in rldiX cases */
+ switch(IBF(i->w[0],27,30)){ /* low bit is top bit of shift in rldiX cases */
case 8: i->mb = i->xmbe; i->me = 63; break; /* rldcl */
case 9: i->mb = 0; i->me = i->xmbe; break; /* rldcr */
case 4: case 5:
@@ -2399,6 +2447,10 @@
bprint(i, "%d", i->nb==0? 32: i->nb); /* eg, pg 10-103 */
break;
+ case 'N':
+ bprint(i, "%c", asstype==APOWER64 ? 'D' : 'W');
+ break;
+
case 'P':
n = ((i->spr&0x1f)<<5)|((i->spr>>5)&0x1f);
for(s=0; sprname[s].name; s++)
@@ -2438,7 +2490,7 @@
break;
case 'w':
- bprint(i, "[%lux]", i->w0);
+ bprint(i, "[%lux]", i->w[0]);
break;
case 'W':
@@ -2490,7 +2542,7 @@
format(o->mnemonic, &i, o->ken);
return i.size*4;
}
- bprint(&i, "unknown %lux", i.w0);
+ bprint(&i, "unknown %lux", i.w[0]);
return i.size*4;
}
@@ -2505,6 +2557,7 @@
powerdas(Map *map, uvlong pc, char *buf, int n)
{
Instr instr;
+ int i;
mymap = map;
memset(&instr, 0, sizeof(instr));
@@ -2512,11 +2565,10 @@
instr.end = buf+n-1;
if (mkinstr(pc, &instr) < 0)
return -1;
- if (instr.end-instr.curr > 8)
- instr.curr = _hexify(instr.curr, instr.w0, 7);
- if (instr.end-instr.curr > 9 && instr.size == 2) {
- *instr.curr++ = ' ';
- instr.curr = _hexify(instr.curr, instr.w1, 7);
+ for(i = 0; instr.end-instr.curr > 8+1 && i < instr.size; i++){
+ if(i != 0)
+ *instr.curr++ = ' ';
+ instr.curr = _hexify(instr.curr, instr.w[i], 7);
}
*instr.curr = 0;
return instr.size*4;
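
As a rough illustration (not part of the patch; the helper is hypothetical and the exact assembler mnemonics are only assumed), optab case 78 above materializes a full 64 bit constant as five instructions: two loadu32 halves, two oris, and a final rldimi that merges REGTMP's (R31's) low 32 bits into the top half of the destination. The sketch below just splits a sample constant into the four 16 bit halfwords those instructions carry.

#include <stdio.h>

/*
 * Conceptual expansion for MOVD $d, Rd when d needs all 64 bits (case 78):
 *
 *	addis/oris  Rd,  R0, high16(low 32 of d)	loadu32 of the low word
 *	addis/oris  R31, R0, high16(high 32 of d)	loadu32 of the high word into REGTMP
 *	ori         Rd,  Rd,  low16(low 32 of d)
 *	ori         R31, R31, low16(high 32 of d)
 *	rldimi      Rd,  R31, 32, 0			insert R31's low 32 bits into Rd's top half
 */
int
main(void)
{
	unsigned long long d = 0x123456789abcdef0ULL;	/* sample constant */

	printf("high16(high32) %04llx\n", (d>>48) & 0xffff);
	printf("low16(high32)  %04llx\n", (d>>32) & 0xffff);
	printf("high16(low32)  %04llx\n", (d>>16) & 0xffff);
	printf("low16(low32)   %04llx\n", d & 0xffff);
	return 0;
}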