Author: glycerin, Project: zygomy
// generate one instruction:
// as f, t
func rawgins(as obj.As, f *gc.Node, t *gc.Node) *obj.Prog {
// self move check
// TODO(mundaym): use sized math and extend to MOVB, MOVWZ etc.
switch as {
case s390x.AMOVD, s390x.AFMOVS, s390x.AFMOVD:
if f != nil && t != nil &&
f.Op == gc.OREGISTER && t.Op == gc.OREGISTER &&
f.Reg == t.Reg {
return nil
}
}
p := gc.Prog(as)
gc.Naddr(&p.From, f)
gc.Naddr(&p.To, t)
switch as {
// Bad things the front end has done to us. Crash to find call stack.
case s390x.ACMP, s390x.ACMPU:
if p.From.Type == obj.TYPE_MEM || p.To.Type == obj.TYPE_MEM {
gc.Debug['h'] = 1
gc.Fatalf("bad inst: %v", p)
}
}
if gc.Debug['g'] != 0 {
fmt.Printf("%v\n", p)
}
w := int32(0)
switch as {
case s390x.AMOVB, s390x.AMOVBZ:
w = 1
case s390x.AMOVH, s390x.AMOVHZ:
w = 2
case s390x.AMOVW, s390x.AMOVWZ:
w = 4
case s390x.AMOVD:
if p.From.Type == obj.TYPE_CONST || p.From.Type == obj.TYPE_ADDR {
break
}
w = 8
}
if w != 0 && ((f != nil && p.From.Width < int64(w)) || (t != nil && p.To.Type != obj.TYPE_REG && p.To.Width > int64(w))) {
gc.Dump("f", f)
gc.Dump("t", t)
gc.Fatalf("bad width: %v (%d, %d)\n", p, p.From.Width, p.To.Width)
}
return p
}
Author: glycerin, Project: zygomy
func defframe(ptxt *obj.Prog) {
// fill in argument size, stack size
ptxt.To.Type = obj.TYPE_TEXTSIZE
ptxt.To.Val = int32(gc.Rnd(gc.Curfn.Type.ArgWidth(), int64(gc.Widthptr)))
frame := uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))
// arm64 requires that the frame size (not counting saved LR)
// be empty or be 8 mod 16. If not, pad it.
if frame != 0 && frame%16 != 8 {
frame += 8
}
ptxt.To.Offset = int64(frame)
// insert code to zero ambiguously live variables
// so that the garbage collector only sees initialized values
// when it looks for pointers.
p := ptxt
hi := int64(0)
lo := hi
// iterate through declarations - they are sorted in decreasing xoffset order.
for _, n := range gc.Curfn.Func.Dcl {
if !n.Name.Needzero {
continue
}
if n.Class != gc.PAUTO {
gc.Fatalf("needzero class %d", n.Class)
}
if n.Type.Width%int64(gc.Widthptr) != 0 || n.Xoffset%int64(gc.Widthptr) != 0 || n.Type.Width == 0 {
gc.Fatalf("var %v has size %d offset %d", gc.Nconv(n, gc.FmtLong), int(n.Type.Width), int(n.Xoffset))
}
if lo != hi && n.Xoffset+n.Type.Width >= lo-int64(2*gc.Widthreg) {
// merge with range we already have
lo = n.Xoffset
continue
}
// zero old range
p = zerorange(p, int64(frame), lo, hi)
// set new range
hi = n.Xoffset + n.Type.Width
lo = n.Xoffset
}
// zero final range
zerorange(p, int64(frame), lo, hi)
}
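The two numeric rules in defframe are easy to exercise outside the compiler: the arm64 padding rule that the frame size must be empty or 8 mod 16, and the merging of needzero ranges for declarations visited in decreasing Xoffset order. A minimal standalone sketch, assuming made-up names (padFrame, mergeZeroRanges, varInfo) and a fixed widthreg of 8:

package main

import "fmt"

const widthreg = 8

// padFrame mimics the arm64 rule: a non-empty frame must be 8 mod 16.
func padFrame(frame uint32) uint32 {
	if frame != 0 && frame%16 != 8 {
		frame += 8
	}
	return frame
}

type varInfo struct{ xoffset, width int64 }

// mergeZeroRanges walks variables sorted by decreasing xoffset and merges
// ranges that are within 2*widthreg of each other, as defframe does.
func mergeZeroRanges(vars []varInfo) [][2]int64 {
	var out [][2]int64
	lo, hi := int64(0), int64(0)
	for _, n := range vars {
		if lo != hi && n.xoffset+n.width >= lo-int64(2*widthreg) {
			lo = n.xoffset // merge with the range we already have
			continue
		}
		if lo != hi {
			out = append(out, [2]int64{lo, hi}) // emit the old range
		}
		hi = n.xoffset + n.width
		lo = n.xoffset
	}
	if lo != hi {
		out = append(out, [2]int64{lo, hi}) // final range
	}
	return out
}

func main() {
	fmt.Println(padFrame(48), padFrame(40))     // 56 40
	vars := []varInfo{{64, 16}, {40, 8}, {0, 8}}
	fmt.Println(mergeZeroRanges(vars))          // [[40 80] [0 8]]
}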
Author: glycerin, Project: zygomy
/*
* insert n into reg slot of p
*/
func raddr(n *gc.Node, p *obj.Prog) {
var a obj.Addr
gc.Naddr(&a, n)
if a.Type != obj.TYPE_REG {
if n != nil {
gc.Fatalf("bad in raddr: %v", n.Op)
} else {
gc.Fatalf("bad in raddr: <null>")
}
p.Reg = 0
} else {
p.Reg = a.Reg
}
}
Author: glycerin, Project: zygomy
/*
* n is a 64-bit value. fill in lo and hi to refer to its 32-bit halves.
*/
func split64(n *gc.Node, lo *gc.Node, hi *gc.Node) {
if !gc.Is64(n.Type) {
gc.Fatalf("split64 %v", n.Type)
}
if nsclean >= len(sclean) {
gc.Fatalf("split64 clean")
}
sclean[nsclean].Op = gc.OEMPTY
nsclean++
switch n.Op {
default:
switch n.Op {
default:
var n1 gc.Node
if !dotaddable(n, &n1) {
gc.Igen(n, &n1, nil)
sclean[nsclean-1] = n1
}
n = &n1
case gc.ONAME, gc.OINDREG:
// nothing
}
*lo = *n
*hi = *n
lo.Type = gc.Types[gc.TUINT32]
if n.Type.Etype == gc.TINT64 {
hi.Type = gc.Types[gc.TINT32]
} else {
hi.Type = gc.Types[gc.TUINT32]
}
hi.Xoffset += 4
case gc.OLITERAL:
var n1 gc.Node
n.Convconst(&n1, n.Type)
i := n1.Int64()
gc.Nodconst(lo, gc.Types[gc.TUINT32], int64(uint32(i)))
i >>= 32
if n.Type.Etype == gc.TINT64 {
gc.Nodconst(hi, gc.Types[gc.TINT32], int64(int32(i)))
} else {
gc.Nodconst(hi, gc.Types[gc.TUINT32], int64(uint32(i)))
}
}
}
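The OLITERAL arm of split64 amounts to plain integer arithmetic: the low half is always an unsigned 32-bit value, and the high half keeps the sign only for TINT64. A small self-contained illustration; the split helper is hypothetical, not compiler API:

package main

import "fmt"

func split(i int64) (lo uint32, hi int32) {
	lo = uint32(i)      // low 32 bits, always unsigned
	hi = int32(i >> 32) // high 32 bits keep the sign for a signed source
	return
}

func main() {
	lo, hi := split(-5000000000)
	fmt.Println(lo, hi)                    // 3589672448 -2
	fmt.Println(int64(hi)<<32 | int64(lo)) // -5000000000 again
}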
Author: glycerin, Project: zygomy
/*
* generate
* as n, $c (CMP/CMPU)
*/
func ginscon2(as obj.As, n2 *gc.Node, c int64) {
var n1 gc.Node
gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
switch as {
default:
gc.Fatalf("ginscon2")
case ppc64.ACMP:
if -ppc64.BIG <= c && c <= ppc64.BIG {
rawgins(as, n2, &n1)
return
}
case ppc64.ACMPU:
if 0 <= c && c <= 2*ppc64.BIG {
rawgins(as, n2, &n1)
return
}
}
// MOV n1 into register first
var ntmp gc.Node
gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil)
rawgins(ppc64.AMOVD, &n1, &ntmp)
rawgins(as, n2, &ntmp)
gc.Regfree(&ntmp)
}
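The decision ginscon2 makes is purely about immediate ranges: a signed compare can encode constants in [-BIG, BIG], an unsigned compare in [0, 2*BIG], and anything else is loaded into a temporary register first. A hedged sketch of that range check; the constant 32760 merely stands in for ppc64.BIG and fitsImmediate is a made-up name:

package main

import "fmt"

const big = 32768 - 8 // stand-in for ppc64.BIG, illustrative only

func fitsImmediate(unsigned bool, c int64) bool {
	if unsigned {
		return 0 <= c && c <= 2*big // CMPU range
	}
	return -big <= c && c <= big // CMP range
}

func main() {
	fmt.Println(fitsImmediate(false, 1000))  // true: CMP $1000 encodes directly
	fmt.Println(fitsImmediate(false, 60000)) // false: materialize in a register first
	fmt.Println(fitsImmediate(true, 60000))  // true: CMPU accepts up to 2*BIG
}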
Author: glycerin, Project: zygomy
// RightShiftWithCarry generates a constant unsigned
// right shift with carry.
//
// res = n >> shift // with carry
func RightShiftWithCarry(n *gc.Node, shift uint, res *gc.Node) {
// Extra 1 is for carry bit.
maxshift := uint(n.Type.Width*8 + 1)
if shift == 0 {
gmove(n, res)
} else if shift < maxshift {
// 1. clear rightmost bit of target
var n1 gc.Node
gc.Nodconst(&n1, n.Type, 1)
gins(optoas(gc.ORSH, n.Type), &n1, n)
gins(optoas(gc.OLSH, n.Type), &n1, n)
// 2. add carry flag to target
var n2 gc.Node
gc.Nodconst(&n1, n.Type, 0)
gc.Regalloc(&n2, n.Type, nil)
gins(optoas(gc.OAS, n.Type), &n1, &n2)
gins(arm64.AADC, &n2, n)
// 3. right rotate 1 bit
gc.Nodconst(&n1, n.Type, 1)
gins(arm64.AROR, &n1, n)
// ARM64 backend doesn't eliminate shifts by 0. It is manually checked here.
if shift > 1 {
var n3 gc.Node
gc.Nodconst(&n3, n.Type, int64(shift-1))
cgen_shift(gc.ORSH, true, n, &n3, res)
} else {
gmove(n, res)
}
gc.Regfree(&n2)
} else {
gc.Fatalf("RightShiftWithCarry: shift(%v) is bigger than max size(%v)", shift, maxshift)
}
}
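The three instructions above implement res = (carry:n) >> shift, i.e. the carry flag acts as a 65th bit above n. The same sequence can be replayed on an ordinary uint64 and checked against a direct formula; rshiftWithCarry is a hypothetical helper, not part of the backend:

package main

import (
	"fmt"
	"math/bits"
)

// rshiftWithCarry returns (carry:n) >> shift for 1 <= shift <= 64.
func rshiftWithCarry(n uint64, carry uint64, shift uint) uint64 {
	v := n &^ 1                  // 1. clear rightmost bit (shifted out anyway)
	v += carry                   // 2. add the carry flag (ADC with a zero operand)
	v = bits.RotateLeft64(v, -1) // 3. rotate right 1: carry is now the top bit
	return v >> (shift - 1)      // remaining shift
}

func main() {
	n, carry := uint64(0x8000000000000001), uint64(1)
	for _, s := range []uint{1, 4, 64} {
		got := rshiftWithCarry(n, carry, s)
		want := carry<<(64-s) | n>>s // carry enters from the top
		fmt.Printf("shift %2d: got %#x want %#x\n", s, got, want)
	}
}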
Author: glycerin, Project: zygomy
func proginfo(p *obj.Prog) {
info := &p.Info
*info = progtable[p.As&obj.AMask]
if info.Flags == 0 {
gc.Fatalf("unknown instruction %v", p)
}
if p.From.Type == obj.TYPE_ADDR && p.From.Sym != nil && (info.Flags&gc.LeftRead != 0) {
info.Flags &^= gc.LeftRead
info.Flags |= gc.LeftAddr
}
if (info.Flags&gc.RegRead != 0) && p.Reg == 0 {
info.Flags &^= gc.RegRead
info.Flags |= gc.CanRegRead | gc.RightRead
}
if (p.Scond&arm.C_SCOND != arm.C_SCOND_NONE) && (info.Flags&gc.RightWrite != 0) {
info.Flags |= gc.RightRead
}
switch p.As {
case arm.ADIV,
arm.ADIVU,
arm.AMOD,
arm.AMODU:
info.Regset |= RtoB(arm.REG_R12)
}
}
Author: glycerin, Project: zygomy
func gcmp(as obj.As, lhs *gc.Node, rhs *gc.Node) *obj.Prog {
if lhs.Op != gc.OREGISTER {
gc.Fatalf("bad operands to gcmp: %v %v", lhs.Op, rhs.Op)
}
p := rawgins(as, rhs, nil)
raddr(lhs, p)
return p
}
Author: glycerin, Project: zygomy
// as2variant returns the variant (V_*) flags of instruction as.
func as2variant(as obj.As) int {
for i, v := range varianttable[as&obj.AMask] {
if v&obj.AMask == as&obj.AMask {
return i
}
}
gc.Fatalf("as2variant: instruction %v is not a variant of itself", obj.Aconv(as&obj.AMask))
return 0
}
Author: glycerin, Project: zygomy
func splitclean() {
if nsclean <= 0 {
gc.Fatalf("splitclean")
}
nsclean--
if sclean[nsclean].Op != gc.OEMPTY {
gc.Regfree(&sclean[nsclean])
}
}
Author: glycerin, Project: zygomy
/*
* generate high multiply:
* res = (nl*nr) >> width
*/
func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
// largest ullman on left.
if nl.Ullman < nr.Ullman {
nl, nr = nr, nl
}
t := nl.Type
w := t.Width * 8
var n1 gc.Node
gc.Cgenr(nl, &n1, res)
var n2 gc.Node
gc.Cgenr(nr, &n2, nil)
switch gc.Simtype[t.Etype] {
case gc.TINT8,
gc.TINT16,
gc.TINT32:
gins3(optoas(gc.OMUL, t), &n2, &n1, nil)
var lo gc.Node
gc.Nodreg(&lo, gc.Types[gc.TUINT64], mips.REG_LO)
gins(mips.AMOVV, &lo, &n1)
p := gins(mips.ASRAV, nil, &n1)
p.From.Type = obj.TYPE_CONST
p.From.Offset = w
case gc.TUINT8,
gc.TUINT16,
gc.TUINT32:
gins3(optoas(gc.OMUL, t), &n2, &n1, nil)
var lo gc.Node
gc.Nodreg(&lo, gc.Types[gc.TUINT64], mips.REG_LO)
gins(mips.AMOVV, &lo, &n1)
p := gins(mips.ASRLV, nil, &n1)
p.From.Type = obj.TYPE_CONST
p.From.Offset = w
case gc.TINT64,
gc.TUINT64:
if t.IsSigned() {
gins3(mips.AMULV, &n2, &n1, nil)
} else {
gins3(mips.AMULVU, &n2, &n1, nil)
}
var hi gc.Node
gc.Nodreg(&hi, gc.Types[gc.TUINT64], mips.REG_HI)
gins(mips.AMOVV, &hi, &n1)
default:
gc.Fatalf("cgen_hmul %v", t)
}
gc.Cgen(&n1, res)
gc.Regfree(&n1)
gc.Regfree(&n2)
}
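cgen_hmul keeps only the high word of the double-width product. On ordinary Go integers the same quantity can be computed with math/bits for the 64-bit cases and a widening multiply for the 32-bit ones; this is an illustrative sketch, not the backend's code path:

package main

import (
	"fmt"
	"math/bits"
)

// hmulU64 returns the high 64 bits of x*y, i.e. (x*y) >> 64.
func hmulU64(x, y uint64) uint64 {
	hi, _ := bits.Mul64(x, y)
	return hi
}

// hmulS64 derives the signed high word from the unsigned one.
func hmulS64(x, y int64) int64 {
	hi := int64(hmulU64(uint64(x), uint64(y)))
	if x < 0 {
		hi -= y // correct for the sign weight of x
	}
	if y < 0 {
		hi -= x // correct for the sign weight of y
	}
	return hi
}

// The 32-bit cases simply shift the 64-bit product.
func hmul32(x, y int32) int32    { return int32((int64(x) * int64(y)) >> 32) }
func hmulU32(x, y uint32) uint32 { return uint32((uint64(x) * uint64(y)) >> 32) }

func main() {
	fmt.Println(hmulU64(1<<63, 6)) // 3
	fmt.Println(hmulS64(-1, 1))    // -1: the 128-bit product -1 has an all-ones high word
	fmt.Println(hmul32(1<<30, 8))  // 2
	fmt.Println(hmulU32(1<<31, 8)) // 4
}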
Author: glycerin, Project: zygomy
/*
* direct reference,
* could be set/use depending on
* semantics
*/
func copyas(a *obj.Addr, v *obj.Addr) bool {
if x86.REG_AL <= a.Reg && a.Reg <= x86.REG_BL {
gc.Fatalf("use of byte register")
}
if x86.REG_AL <= v.Reg && v.Reg <= x86.REG_BL {
gc.Fatalf("use of byte register")
}
if a.Type != v.Type || a.Name != v.Name || a.Reg != v.Reg {
return false
}
if regtyp(v) {
return true
}
if (v.Type == obj.TYPE_MEM || v.Type == obj.TYPE_ADDR) && (v.Name == obj.NAME_AUTO || v.Name == obj.NAME_PARAM) {
if v.Offset == a.Offset {
return true
}
}
return false
}
Author: glycerin, Project: zygomy
/* generate a constant shift
* arm encodes a shift by 32 as 0, thus asking for 0 shift is illegal.
*/
func gshift(as obj.As, lhs *gc.Node, stype int32, sval int32, rhs *gc.Node) *obj.Prog {
if sval <= 0 || sval > 32 {
gc.Fatalf("bad shift value: %d", sval)
}
sval = sval & 0x1f
p := gins(as, nil, rhs)
p.From.Type = obj.TYPE_SHIFT
p.From.Offset = int64(stype) | int64(sval)<<7 | int64(lhs.Reg)&15
return p
}
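The offset packs three fields: the shift-type value or-ed in unchanged, a 5-bit shift amount at bit 7, and the source register number in the low 4 bits. Because the amount field holds only 5 bits, a shift by 32 wraps to 0, which is why gshift rejects sval == 0. A tiny sketch with a made-up encodeShift helper:

package main

import "fmt"

// encodeShift mirrors the offset computed in gshift.
func encodeShift(stype int64, sval int32, reg int64) int64 {
	return stype | int64(sval&0x1f)<<7 | reg&15
}

func main() {
	// A shift by 32 wraps to 0 in the 5-bit field, so the amount alone
	// cannot distinguish "shift by 32" from "shift by 0".
	fmt.Printf("%#x\n", encodeShift(0, 32, 3)) // 0x3: amount field is 0
	fmt.Printf("%#x\n", encodeShift(0, 5, 3))  // 0x283: amount field is 5
}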
Author: glycerin, Project: zygomy
// Called after regopt and peep have run.
// Expand CHECKNIL pseudo-op into actual nil pointer check.
func expandchecks(firstp *obj.Prog) {
for p := firstp; p != nil; p = p.Link {
if gc.Debug_checknil != 0 && gc.Ctxt.Debugvlog != 0 {
fmt.Printf("expandchecks: %v\n", p)
}
if p.As != obj.ACHECKNIL {
continue
}
if gc.Debug_checknil != 0 && p.Lineno > 1 { // p->lineno==1 in generated wrappers
gc.Warnl(p.Lineno, "generated nil check")
}
if p.From.Type != obj.TYPE_REG {
gc.Fatalf("invalid nil check %v\n", p)
}
// check is
// CMPBNE arg, $0, 2(PC) [likely]
// MOVD R0, 0(R0)
p1 := gc.Ctxt.NewProg()
gc.Clearp(p1)
p1.Link = p.Link
p.Link = p1
p1.Lineno = p.Lineno
p1.Pc = 9999
p.As = s390x.ACMPBNE
p.From3 = new(obj.Addr)
p.From3.Type = obj.TYPE_CONST
p.From3.Offset = 0
p.To.Type = obj.TYPE_BRANCH
p.To.Val = p1.Link
// crash by write to memory address 0.
p1.As = s390x.AMOVD
p1.From.Type = obj.TYPE_REG
p1.From.Reg = s390x.REGZERO
p1.To.Type = obj.TYPE_MEM
p1.To.Reg = s390x.REGZERO
p1.To.Offset = 0
}
}
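The rewritten sequence is: compare the argument against zero, branch over the next instruction when it is non-nil, and otherwise fault by storing through address zero. A Go-level model of that control flow, with a made-up checknil helper standing in for the generated code:

package main

func checknil(p *int64) {
	if p != nil { // CMPBNE arg, $0, 2(PC): skip the faulting store
		return
	}
	// MOVD R0, 0(R0): a store through address zero, guaranteed to fault.
	// Dereferencing the nil pointer stands in for that store here.
	*p = 0
}

func main() {
	x := int64(7)
	checknil(&x) // fine
	// checknil(nil) // would fault with a nil-pointer panic, as intended
}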
Author: glycerin, Project: zygomy
/*
* generate division according to op, one of:
* res = nl / nr
* res = nl % nr
*/
func cgen_div(op gc.Op, nl *gc.Node, nr *gc.Node, res *gc.Node) {
if gc.Is64(nl.Type) {
gc.Fatalf("cgen_div %v", nl.Type)
}
var t *gc.Type
if nl.Type.IsSigned() {
t = gc.Types[gc.TINT32]
} else {
t = gc.Types[gc.TUINT32]
}
var ax gc.Node
var oldax gc.Node
savex(x86.REG_AX, &ax, &oldax, res, t)
var olddx gc.Node
var dx gc.Node
savex(x86.REG_DX, &dx, &olddx, res, t)
dodiv(op, nl, nr, res, &ax, &dx)
restx(&dx, &olddx)
restx(&ax, &oldax)
}
Author: glycerin, Project: zygomy
// jmptoset returns ASETxx for AJxx.
func jmptoset(jmp obj.As) obj.As {
switch jmp {
case x86.AJEQ:
return x86.ASETEQ
case x86.AJNE:
return x86.ASETNE
case x86.AJLT:
return x86.ASETLT
case x86.AJCS:
return x86.ASETCS
case x86.AJLE:
return x86.ASETLE
case x86.AJLS:
return x86.ASETLS
case x86.AJGT:
return x86.ASETGT
case x86.AJHI:
return x86.ASETHI
case x86.AJGE:
return x86.ASETGE
case x86.AJCC:
return x86.ASETCC
case x86.AJMI:
return x86.ASETMI
case x86.AJOC:
return x86.ASETOC
case x86.AJOS:
return x86.ASETOS
case x86.AJPC:
return x86.ASETPC
case x86.AJPL:
return x86.ASETPL
case x86.AJPS:
return x86.ASETPS
}
gc.Fatalf("jmptoset: no entry for %v", jmp)
panic("unreachable")
}
Author: glycerin, Project: zygomy
/*
* return Axxx for Oxxx on type t.
*/
func optoas(op gc.Op, t *gc.Type) obj.As {
if t == nil {
gc.Fatalf("optoas: t is nil")
}
// avoid constant conversions in switches below
const (
OMINUS_ = uint32(gc.OMINUS) << 16
OLSH_ = uint32(gc.OLSH) << 16
ORSH_ = uint32(gc.ORSH) << 16
OADD_ = uint32(gc.OADD) << 16
OSUB_ = uint32(gc.OSUB) << 16
OMUL_ = uint32(gc.OMUL) << 16
ODIV_ = uint32(gc.ODIV) << 16
OMOD_ = uint32(gc.OMOD) << 16
OOR_ = uint32(gc.OOR) << 16
OAND_ = uint32(gc.OAND) << 16
OXOR_ = uint32(gc.OXOR) << 16
OEQ_ = uint32(gc.OEQ) << 16
ONE_ = uint32(gc.ONE) << 16
OLT_ = uint32(gc.OLT) << 16
OLE_ = uint32(gc.OLE) << 16
OGE_ = uint32(gc.OGE) << 16
OGT_ = uint32(gc.OGT) << 16
OCMP_ = uint32(gc.OCMP) << 16
OPS_ = uint32(gc.OPS) << 16
OAS_ = uint32(gc.OAS) << 16
OSQRT_ = uint32(gc.OSQRT) << 16
)
a := obj.AXXX
switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
default:
gc.Fatalf("optoas: no entry %v-%v etype %v simtype %v", op, t, gc.Types[t.Etype], gc.Types[gc.Simtype[t.Etype]])
/* case CASE(OADDR, TPTR32):
a = ALEAL;
break;
case CASE(OADDR, TPTR64):
a = ALEAQ;
break;
*/
// TODO(kaib): make sure the conditional branches work on all edge cases
case OEQ_ | gc.TBOOL,
OEQ_ | gc.TINT8,
OEQ_ | gc.TUINT8,
OEQ_ | gc.TINT16,
OEQ_ | gc.TUINT16,
OEQ_ | gc.TINT32,
OEQ_ | gc.TUINT32,
OEQ_ | gc.TINT64,
OEQ_ | gc.TUINT64,
OEQ_ | gc.TPTR32,
OEQ_ | gc.TPTR64,
OEQ_ | gc.TFLOAT32,
OEQ_ | gc.TFLOAT64:
a = arm.ABEQ
case ONE_ | gc.TBOOL,
ONE_ | gc.TINT8,
ONE_ | gc.TUINT8,
ONE_ | gc.TINT16,
ONE_ | gc.TUINT16,
ONE_ | gc.TINT32,
ONE_ | gc.TUINT32,
ONE_ | gc.TINT64,
ONE_ | gc.TUINT64,
ONE_ | gc.TPTR32,
ONE_ | gc.TPTR64,
ONE_ | gc.TFLOAT32,
ONE_ | gc.TFLOAT64:
a = arm.ABNE
case OLT_ | gc.TINT8,
OLT_ | gc.TINT16,
OLT_ | gc.TINT32,
OLT_ | gc.TINT64,
OLT_ | gc.TFLOAT32,
OLT_ | gc.TFLOAT64:
a = arm.ABLT
case OLT_ | gc.TUINT8,
OLT_ | gc.TUINT16,
OLT_ | gc.TUINT32,
OLT_ | gc.TUINT64:
a = arm.ABLO
case OLE_ | gc.TINT8,
OLE_ | gc.TINT16,
OLE_ | gc.TINT32,
OLE_ | gc.TINT64,
OLE_ | gc.TFLOAT32,
OLE_ | gc.TFLOAT64:
a = arm.ABLE
case OLE_ | gc.TUINT8,
//......... rest of this example omitted .........
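optoas keys its big switch on uint32(op)<<16 | uint32(simtype), so a pair of small enums is matched in one flat switch rather than nested ones; the pre-shifted constants above exist only to keep the case expressions constant. A generic sketch of the same trick with invented enums:

package main

import "fmt"

type op uint32
type typ uint32

const (
	opAdd op = iota + 1
	opSub
)

const (
	tInt32 typ = iota + 1
	tFloat64
)

// pre-shifted constants, as optoas does with OMINUS_, OLSH_, ...
const (
	opAdd_ = uint32(opAdd) << 16
	opSub_ = uint32(opSub) << 16
)

func pick(o op, t typ) string {
	switch uint32(o)<<16 | uint32(t) {
	case opAdd_ | uint32(tInt32):
		return "ADDW"
	case opAdd_ | uint32(tFloat64):
		return "FADDD"
	case opSub_ | uint32(tInt32):
		return "SUBW"
	}
	return "unknown"
}

func main() {
	fmt.Println(pick(opAdd, tFloat64)) // FADDD
	fmt.Println(pick(opSub, tFloat64)) // unknown
}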
Author: glycerin, Project: zygomy
/*
* generate one instruction:
* as f, t
*/
func gins(as obj.As, f *gc.Node, t *gc.Node) *obj.Prog {
// Node nod;
// int32 v;
if f != nil && f.Op == gc.OINDEX {
gc.Fatalf("gins OINDEX not implemented")
}
// gc.Regalloc(&nod, ®node, Z);
// v = constnode.vconst;
// gc.Cgen(f->right, &nod);
// constnode.vconst = v;
// idx.reg = nod.reg;
// gc.Regfree(&nod);
if t != nil && t.Op == gc.OINDEX {
gc.Fatalf("gins OINDEX not implemented")
}
// gc.Regalloc(&nod, ®node, Z);
// v = constnode.vconst;
// gc.Cgen(t->right, &nod);
// constnode.vconst = v;
// idx.reg = nod.reg;
// gc.Regfree(&nod);
p := gc.Prog(as)
gc.Naddr(&p.From, f)
gc.Naddr(&p.To, t)
switch as {
case arm.ABL:
if p.To.Type == obj.TYPE_REG {
p.To.Type = obj.TYPE_MEM
}
case arm.ACMP, arm.ACMPF, arm.ACMPD:
if t != nil {
if f.Op != gc.OREGISTER {
/* generate a comparison
TODO(kaib): one of the args can actually be a small constant. relax the constraint and fix call sites.
*/
gc.Fatalf("bad operands to gcmp")
}
p.From = p.To
p.To = obj.Addr{}
raddr(f, p)
}
case arm.AMULU:
if f != nil && f.Op != gc.OREGISTER {
gc.Fatalf("bad operands to mul")
}
case arm.AMOVW:
if (p.From.Type == obj.TYPE_MEM || p.From.Type == obj.TYPE_ADDR || p.From.Type == obj.TYPE_CONST) && (p.To.Type == obj.TYPE_MEM || p.To.Type == obj.TYPE_ADDR) {
gc.Fatalf("gins double memory")
}
case arm.AADD:
if p.To.Type == obj.TYPE_MEM {
gc.Fatalf("gins arith to mem")
}
case arm.ARSB:
if p.From.Type == obj.TYPE_NONE {
gc.Fatalf("rsb with no from")
}
}
if gc.Debug['g'] != 0 {
fmt.Printf("%v\n", p)
}
return p
}
Author: glycerin, Project: zygomy
func gmove(f *gc.Node, t *gc.Node) {
if gc.Debug['M'] != 0 {
fmt.Printf("gmove %v -> %v\n", f, t)
}
ft := gc.Simsimtype(f.Type)
tt := gc.Simsimtype(t.Type)
cvt := t.Type
if gc.Iscomplex[ft] || gc.Iscomplex[tt] {
gc.Complexmove(f, t)
return
}
// cannot have two memory operands;
// except 64-bit, which always copies via registers anyway.
var a obj.As
var r1 gc.Node
if !gc.Is64(f.Type) && !gc.Is64(t.Type) && gc.Ismem(f) && gc.Ismem(t) {
goto hard
}
// convert constant to desired type
if f.Op == gc.OLITERAL {
var con gc.Node
switch tt {
default:
f.Convconst(&con, t.Type)
case gc.TINT16,
gc.TINT8:
var con gc.Node
f.Convconst(&con, gc.Types[gc.TINT32])
var r1 gc.Node
gc.Regalloc(&r1, con.Type, t)
gins(arm.AMOVW, &con, &r1)
gmove(&r1, t)
gc.Regfree(&r1)
return
case gc.TUINT16,
gc.TUINT8:
var con gc.Node
f.Convconst(&con, gc.Types[gc.TUINT32])
var r1 gc.Node
gc.Regalloc(&r1, con.Type, t)
gins(arm.AMOVW, &con, &r1)
gmove(&r1, t)
gc.Regfree(&r1)
return
}
f = &con
ft = gc.Simsimtype(con.Type)
// constants can't move directly to memory
if gc.Ismem(t) && !gc.Is64(t.Type) {
goto hard
}
}
// value -> value copy, only one memory operand.
// figure out the instruction to use.
// break out of switch for one-instruction gins.
// goto rdst for "destination must be register".
// goto hard for "convert to cvt type first".
// otherwise handle and return.
switch uint32(ft)<<16 | uint32(tt) {
default:
// should not happen
gc.Fatalf("gmove %v -> %v", f, t)
return
/*
* integer copy and truncate
*/
case gc.TINT8<<16 | gc.TINT8: // same size
if !gc.Ismem(f) {
a = arm.AMOVB
break
}
fallthrough
case gc.TUINT8<<16 | gc.TINT8,
gc.TINT16<<16 | gc.TINT8, // truncate
gc.TUINT16<<16 | gc.TINT8,
gc.TINT32<<16 | gc.TINT8,
gc.TUINT32<<16 | gc.TINT8:
a = arm.AMOVBS
case gc.TUINT8<<16 | gc.TUINT8:
if !gc.Ismem(f) {
a = arm.AMOVB
break
}
fallthrough
case gc.TINT8<<16 | gc.TUINT8,
gc.TINT16<<16 | gc.TUINT8,
//......... rest of this example omitted .........
Author: glycerin, Project: zygomy
/*
* generate code to compute address of n,
* a reference to a (perhaps nested) field inside
* an array or struct.
* return 0 on failure, 1 on success.
* on success, leaves usable address in a.
*
* caller is responsible for calling sudoclean
* after successful sudoaddable,
* to release the register used for a.
*/
func sudoaddable(as obj.As, n *gc.Node, a *obj.Addr) bool {
if n.Type == nil {
return false
}
*a = obj.Addr{}
switch n.Op {
case gc.OLITERAL:
if !gc.Isconst(n, gc.CTINT) {
break
}
v := n.Int64()
if v >= 32000 || v <= -32000 {
break
}
switch as {
default:
return false
case arm.AADD,
arm.ASUB,
arm.AAND,
arm.AORR,
arm.AEOR,
arm.AMOVB,
arm.AMOVBS,
arm.AMOVBU,
arm.AMOVH,
arm.AMOVHS,
arm.AMOVHU,
arm.AMOVW:
break
}
cleani += 2
reg := &clean[cleani-1]
reg1 := &clean[cleani-2]
reg.Op = gc.OEMPTY
reg1.Op = gc.OEMPTY
gc.Naddr(a, n)
return true
case gc.ODOT,
gc.ODOTPTR:
cleani += 2
reg := &clean[cleani-1]
reg1 := &clean[cleani-2]
reg.Op = gc.OEMPTY
reg1.Op = gc.OEMPTY
var nn *gc.Node
var oary [10]int64
o := gc.Dotoffset(n, oary[:], &nn)
if nn == nil {
sudoclean()
return false
}
if nn.Addable && o == 1 && oary[0] >= 0 {
// directly addressable set of DOTs
n1 := *nn
n1.Type = n.Type
n1.Xoffset += oary[0]
gc.Naddr(a, &n1)
return true
}
gc.Regalloc(reg, gc.Types[gc.Tptr], nil)
n1 := *reg
n1.Op = gc.OINDREG
if oary[0] >= 0 {
gc.Agen(nn, reg)
n1.Xoffset = oary[0]
} else {
gc.Cgen(nn, reg)
gc.Cgen_checknil(reg)
n1.Xoffset = -(oary[0] + 1)
}
for i := 1; i < o; i++ {
if oary[i] >= 0 {
gc.Fatalf("can't happen")
}
gins(arm.AMOVW, &n1, reg)
gc.Cgen_checknil(reg)
n1.Xoffset = -(oary[i] + 1)
}
//......... rest of this example omitted .........
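The oary array filled by Dotoffset follows a small convention, visible in the loop above: a non-negative entry (only allowed as the first one) is a direct offset from the base, while a negative entry means "indirect through the pointer just computed, then add -(entry+1)". A sketch that simply pretty-prints such an array; describe and the sample offsets are invented:

package main

import "fmt"

func describe(oary []int64) []string {
	var steps []string
	for i, o := range oary {
		switch {
		case i == 0 && o >= 0:
			steps = append(steps, fmt.Sprintf("addr = base + %d", o))
		case o >= 0:
			// only the first entry may be a direct offset; later ones must indirect
			steps = append(steps, "can't happen")
		default:
			steps = append(steps, fmt.Sprintf("addr = *addr + %d (nil-checked)", -(o + 1)))
		}
	}
	return steps
}

func main() {
	// e.g. a field at offset 8 holding a pointer, then offset 16 inside the pointee
	for _, s := range describe([]int64{8, -(16 + 1)}) {
		fmt.Println(s)
	}
}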