Author: glycerin
Project: zygomy
/*
* generate
* as $c, n
*/
func ginscon(as obj.As, c int64, n2 *gc.Node) {
var n1 gc.Node
switch as {
case x86.AADDL,
x86.AMOVL,
x86.ALEAL:
gc.Nodconst(&n1, gc.Types[gc.TINT32], c)
default:
gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
}
if as != x86.AMOVQ && (c < -(1<<31) || c >= 1<<31) {
// cannot have 64-bit immediate in ADD, etc.
// instead, MOV into register first.
var ntmp gc.Node
gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil)
gins(x86.AMOVQ, &n1, &ntmp)
gins(as, &ntmp, n2)
gc.Regfree(&ntmp)
return
}
gins(as, &n1, n2)
}
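For reference, the range test above is the usual "fits in a sign-extended 32-bit immediate" check; a minimal stand-alone sketch (the helper name is mine, not compiler code):

package main

import "fmt"

// fitsInSignedInt32 mirrors ginscon's test `c < -(1<<31) || c >= 1<<31`,
// which decides whether the constant must first be loaded with MOVQ.
func fitsInSignedInt32(c int64) bool {
	return c >= -(1<<31) && c < 1<<31
}

func main() {
	fmt.Println(fitsInSignedInt32(1<<31 - 1)) // true: usable directly as an immediate
	fmt.Println(fitsInSignedInt32(1 << 31))   // false: MOVQ into a register first
}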
Author: glycerin
Project: zygomy
// RightShiftWithCarry generates a constant unsigned
// right shift with carry.
//
// res = n >> shift // with carry
func RightShiftWithCarry(n *gc.Node, shift uint, res *gc.Node) {
// Extra 1 is for carry bit.
maxshift := uint(n.Type.Width*8 + 1)
if shift == 0 {
gmove(n, res)
} else if shift < maxshift {
// 1. clear rightmost bit of target
var n1 gc.Node
gc.Nodconst(&n1, n.Type, 1)
gins(optoas(gc.ORSH, n.Type), &n1, n)
gins(optoas(gc.OLSH, n.Type), &n1, n)
// 2. add carry flag to target
var n2 gc.Node
gc.Nodconst(&n1, n.Type, 0)
gc.Regalloc(&n2, n.Type, nil)
gins(optoas(gc.OAS, n.Type), &n1, &n2)
gins(arm64.AADC, &n2, n)
// 3. right rotate 1 bit
gc.Nodconst(&n1, n.Type, 1)
gins(arm64.AROR, &n1, n)
// ARM64 backend doesn't eliminate shifts by 0. It is manually checked here.
if shift > 1 {
var n3 gc.Node
gc.Nodconst(&n3, n.Type, int64(shift-1))
cgen_shift(gc.ORSH, true, n, &n3, res)
} else {
gmove(n, res)
}
gc.Regfree(&n2)
} else {
gc.Fatalf("RightShiftWithCarry: shift(%v) is bigger than max size(%v)", shift, maxshift)
}
}
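What this sequence computes, restated as ordinary Go (a sketch that models the pending carry flag as an explicit argument; names are mine):

package main

import "fmt"

// rshWithCarry treats carry as a 65th (top) bit of a 64-bit n and shifts the
// whole 65-bit value right, which is what the clear-bit/ADC/ROR sequence plus
// the final shift achieve. Valid for 1 <= shift <= 64, the range handled by
// the `shift < maxshift` branch.
func rshWithCarry(n, carry uint64, shift uint) uint64 {
	return carry<<(64-shift) | n>>shift
}

func main() {
	fmt.Printf("%#x\n", rshWithCarry(0x8000000000000001, 1, 1)) // 0xc000000000000000
}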
Author: glycerin
Project: zygomy
/*
* n is a 64-bit value. fill in lo and hi to refer to its 32-bit halves.
*/
func split64(n *gc.Node, lo *gc.Node, hi *gc.Node) {
if !gc.Is64(n.Type) {
gc.Fatalf("split64 %v", n.Type)
}
if nsclean >= len(sclean) {
gc.Fatalf("split64 clean")
}
sclean[nsclean].Op = gc.OEMPTY
nsclean++
switch n.Op {
default:
switch n.Op {
default:
var n1 gc.Node
if !dotaddable(n, &n1) {
gc.Igen(n, &n1, nil)
sclean[nsclean-1] = n1
}
n = &n1
case gc.ONAME, gc.OINDREG:
// nothing
}
*lo = *n
*hi = *n
lo.Type = gc.Types[gc.TUINT32]
if n.Type.Etype == gc.TINT64 {
hi.Type = gc.Types[gc.TINT32]
} else {
hi.Type = gc.Types[gc.TUINT32]
}
hi.Xoffset += 4
case gc.OLITERAL:
var n1 gc.Node
n.Convconst(&n1, n.Type)
i := n1.Int64()
gc.Nodconst(lo, gc.Types[gc.TUINT32], int64(uint32(i)))
i >>= 32
if n.Type.Etype == gc.TINT64 {
gc.Nodconst(hi, gc.Types[gc.TINT32], int64(int32(i)))
} else {
gc.Nodconst(hi, gc.Types[gc.TUINT32], int64(uint32(i)))
}
}
}
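For the constant case this boils down to the split below; in the non-constant case the function instead points hi at Xoffset+4, the high word of a little-endian 64-bit value. A sketch (names are mine):

package main

import "fmt"

// splitConst64 mirrors split64's OLITERAL case: the low half is always an
// unsigned 32-bit constant, and the high half keeps the signedness of the
// original 64-bit type.
func splitConst64(v int64) (lo uint32, hi int32) {
	return uint32(v), int32(v >> 32)
}

func main() {
	lo, hi := splitConst64(-5)
	fmt.Printf("lo=%#x hi=%d\n", lo, hi) // lo=0xfffffffb hi=-1
}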
Author: glycerin
Project: zygomy
/*
* generate
* as n, $c (CMP/CMPU)
*/
func ginscon2(as obj.As, n2 *gc.Node, c int64) {
var n1 gc.Node
gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
switch as {
default:
gc.Fatalf("ginscon2")
case ppc64.ACMP:
if -ppc64.BIG <= c && c <= ppc64.BIG {
rawgins(as, n2, &n1)
return
}
case ppc64.ACMPU:
if 0 <= c && c <= 2*ppc64.BIG {
rawgins(as, n2, &n1)
return
}
}
// MOV n1 into register first
var ntmp gc.Node
gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil)
rawgins(ppc64.AMOVD, &n1, &ntmp)
rawgins(as, n2, &ntmp)
gc.Regfree(&ntmp)
}
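The switch's decision can be restated as a small predicate. Since ppc64.BIG's value is not shown here, it is passed in as a parameter; the helper is illustrative only:

package main

import "fmt"

// fitsCmpImmediate reports whether c can be used directly as a compare
// immediate, mirroring ginscon2: signed CMP accepts [-big, big], unsigned
// CMPU accepts [0, 2*big], and anything else is first loaded with MOVD.
func fitsCmpImmediate(c, big int64, unsigned bool) bool {
	if unsigned {
		return 0 <= c && c <= 2*big
	}
	return -big <= c && c <= big
}

func main() {
	const big = 32767 // placeholder value for illustration, not taken from the compiler
	fmt.Println(fitsCmpImmediate(-40000, big, false)) // false: load into a register first
	fmt.Println(fitsCmpImmediate(50000, big, true))   // true: fits an unsigned compare
}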
Author: glycerin
Project: zygomy
/*
* generate an addressable node in res, containing the value of n.
* n is an array index, and might be any size; res width is <= 32-bit.
* returns Prog* to patch to panic call.
*/
func igenindex(n *gc.Node, res *gc.Node, bounded bool) *obj.Prog {
if !gc.Is64(n.Type) {
if n.Addable && (gc.Simtype[n.Etype] == gc.TUINT32 || gc.Simtype[n.Etype] == gc.TINT32) {
// nothing to do.
*res = *n
} else {
gc.Tempname(res, gc.Types[gc.TUINT32])
gc.Cgen(n, res)
}
return nil
}
var tmp gc.Node
gc.Tempname(&tmp, gc.Types[gc.TINT64])
gc.Cgen(n, &tmp)
var lo gc.Node
var hi gc.Node
split64(&tmp, &lo, &hi)
gc.Tempname(res, gc.Types[gc.TUINT32])
gmove(&lo, res)
if bounded {
splitclean()
return nil
}
var zero gc.Node
gc.Nodconst(&zero, gc.Types[gc.TINT32], 0)
gins(x86.ACMPL, &hi, &zero)
splitclean()
return gc.Gbranch(x86.AJNE, nil, +1)
}
Author: glycerin
Project: zygomy
/*
* generate array index into res.
* n might be any size; res is 32-bit.
* returns Prog* to patch to panic call.
*/
func cgenindex(n *gc.Node, res *gc.Node, bounded bool) *obj.Prog {
if !gc.Is64(n.Type) {
gc.Cgen(n, res)
return nil
}
var tmp gc.Node
gc.Tempname(&tmp, gc.Types[gc.TINT64])
gc.Cgen(n, &tmp)
var lo gc.Node
var hi gc.Node
split64(&tmp, &lo, &hi)
gmove(&lo, res)
if bounded {
splitclean()
return nil
}
var n1 gc.Node
gc.Regalloc(&n1, gc.Types[gc.TINT32], nil)
var n2 gc.Node
gc.Regalloc(&n2, gc.Types[gc.TINT32], nil)
var zero gc.Node
gc.Nodconst(&zero, gc.Types[gc.TINT32], 0)
gmove(&hi, &n1)
gmove(&zero, &n2)
gins(arm.ACMP, &n1, &n2)
gc.Regfree(&n2)
gc.Regfree(&n1)
splitclean()
return gc.Gbranch(arm.ABNE, nil, -1)
}
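Both igenindex and cgenindex reduce a 64-bit index the same way: keep the low 32 bits and branch to the panic path when the high word is nonzero (which also catches negative indexes). The equivalent check in plain Go (names are mine):

package main

import "fmt"

// index32 mirrors the generated check: idx is the low half of the index and
// ok is false exactly when the emitted CMP/BNE would take the panic branch.
func index32(i int64) (idx uint32, ok bool) {
	return uint32(i), i>>32 == 0
}

func main() {
	fmt.Println(index32(7))       // 7 true
	fmt.Println(index32(1 << 40)) // 0 false
	fmt.Println(index32(-1))      // 4294967295 false
}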
Author: glycerin
Project: zygomy
func ncon(i uint32) *gc.Node {
if ncon_n.Type == nil {
gc.Nodconst(&ncon_n, gc.Types[gc.TUINT32], 0)
}
ncon_n.SetInt(int64(i))
return &ncon_n
}
Author: glycerin
Project: zygomy
/*
* generate
* as $c, reg
*/
func gconreg(as obj.As, c int64, reg int) {
var n1 gc.Node
var n2 gc.Node
gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
gc.Nodreg(&n2, gc.Types[gc.TINT64], reg)
gins(as, &n1, &n2)
}
Author: glycerin
Project: zygomy
/*
* generate
* as $c, n
*/
func ginscon(as obj.As, c int64, n *gc.Node) {
var n1 gc.Node
gc.Nodconst(&n1, gc.Types[gc.TINT32], c)
var n2 gc.Node
gc.Regalloc(&n2, gc.Types[gc.TINT32], nil)
gmove(&n1, &n2)
gins(as, &n2, n)
gc.Regfree(&n2)
}
Author: glycerin
Project: zygomy
func bignodes() {
if bignodes_did {
return
}
bignodes_did = true
gc.Nodconst(&zerof, gc.Types[gc.TINT64], 0)
zerof.Convconst(&zerof, gc.Types[gc.TFLOAT64])
var i big.Int
i.SetInt64(1)
i.Lsh(&i, 63)
var bigi gc.Node
gc.Nodconst(&bigi, gc.Types[gc.TUINT64], 0)
bigi.SetBigInt(&i)
bigi.Convconst(&two63f, gc.Types[gc.TFLOAT64])
gc.Nodconst(&bigi, gc.Types[gc.TUINT64], 0)
i.Lsh(&i, 1)
bigi.SetBigInt(&i)
bigi.Convconst(&two64f, gc.Types[gc.TFLOAT64])
}
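The two constants prepared here are 2^63 and 2^64 as float64 values; the function goes through math/big because 2^64 does not fit in a uint64. A quick stand-alone check of the values (not compiler code):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	one := big.NewInt(1)
	two63, _ := new(big.Float).SetInt(new(big.Int).Lsh(one, 63)).Float64()
	two64, _ := new(big.Float).SetInt(new(big.Int).Lsh(one, 64)).Float64()
	fmt.Println(two63, two64) // 9.223372036854776e+18 1.8446744073709552e+19
}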
Author: glycerin
Project: zygomy
func ginsnop() {
var con gc.Node
gc.Nodconst(&con, gc.Types[gc.TINT], 0)
gins(arm64.AHINT, &con, nil)
}
Author: glycerin
Project: zygomy
//......... part of the code omitted here .........
goto olsh_break
}
gc.Regalloc(&s, gc.Types[gc.TUINT32], nil)
gc.Regalloc(&creg, gc.Types[gc.TUINT32], nil)
if gc.Is64(r.Type) {
// shift is >= 1<<32
var cl gc.Node
var ch gc.Node
split64(r, &cl, &ch)
gmove(&ch, &s)
gins(arm.ATST, &s, nil)
p6 = gc.Gbranch(arm.ABNE, nil, 0)
gmove(&cl, &s)
splitclean()
} else {
gmove(r, &s)
p6 = nil
}
gins(arm.ATST, &s, nil)
// shift == 0
p1 = gins(arm.AMOVW, &bl, &al)
p1.Scond = arm.C_SCOND_EQ
p1 = gins(arm.AMOVW, &bh, &ah)
p1.Scond = arm.C_SCOND_EQ
p2 = gc.Gbranch(arm.ABEQ, nil, 0)
// shift is < 32
gc.Nodconst(&n1, gc.Types[gc.TUINT32], 32)
gmove(&n1, &creg)
gins(arm.ACMP, &s, &creg)
// MOVW.LO bl<<s, al
p1 = gregshift(arm.AMOVW, &bl, arm.SHIFT_LL, &s, &al)
p1.Scond = arm.C_SCOND_LO
// MOVW.LO bh<<s, ah
p1 = gregshift(arm.AMOVW, &bh, arm.SHIFT_LL, &s, &ah)
p1.Scond = arm.C_SCOND_LO
// SUB.LO s, creg
p1 = gins(arm.ASUB, &s, &creg)
p1.Scond = arm.C_SCOND_LO
// OR.LO bl>>creg, ah
p1 = gregshift(arm.AORR, &bl, arm.SHIFT_LR, &creg, &ah)
p1.Scond = arm.C_SCOND_LO
// BLO end
p3 = gc.Gbranch(arm.ABLO, nil, 0)
// shift == 32
p1 = gins(arm.AEOR, &al, &al)
p1.Scond = arm.C_SCOND_EQ
p1 = gins(arm.AMOVW, &bl, &ah)
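The conditional instructions above distinguish the cases shift == 0, shift < 32, shift == 32 and larger; the same case split for a 64-bit left shift built from two 32-bit halves looks like this in plain Go (a sketch, names are mine):

package main

import "fmt"

// shl64 shifts the 64-bit value hi<<32|lo left by s, returning the new halves,
// following the same cases the generated ARM code handles with condition codes.
func shl64(lo, hi uint32, s uint) (uint32, uint32) {
	switch {
	case s == 0:
		return lo, hi
	case s < 32:
		return lo << s, hi<<s | lo>>(32-s)
	case s < 64:
		return 0, lo << (s - 32)
	default:
		return 0, 0
	}
}

func main() {
	lo, hi := shl64(0x80000001, 0, 1)
	fmt.Printf("lo=%#x hi=%#x\n", lo, hi) // lo=0x2 hi=0x1
}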
Author: glycerin
Project: zygomy
func clearfat_tail(n1 *gc.Node, b int64) {
if b >= 16 && isPlan9 {
var z gc.Node
gc.Nodconst(&z, gc.Types[gc.TUINT64], 0)
q := b / 8
for ; q > 0; q-- {
n1.Type = z.Type
gins(x86.AMOVQ, &z, n1)
n1.Xoffset += 8
b -= 8
}
if b != 0 {
n1.Xoffset -= 8 - b
gins(x86.AMOVQ, &z, n1)
}
return
}
if b >= 16 {
var vec_zero gc.Node
gc.Regalloc(&vec_zero, gc.Types[gc.TFLOAT64], nil)
gins(x86.AXORPS, &vec_zero, &vec_zero)
for b >= 16 {
gins(x86.AMOVUPS, &vec_zero, n1)
n1.Xoffset += 16
b -= 16
}
// MOVUPS X0, off(base) is a few bytes shorter than MOV 0, off(base)
if b != 0 {
n1.Xoffset -= 16 - b
gins(x86.AMOVUPS, &vec_zero, n1)
}
gc.Regfree(&vec_zero)
return
}
// Write sequence of MOV 0, off(base) instead of using STOSQ.
// The hope is that although the code will be slightly longer,
// the MOVs will have no dependencies and pipeline better
// than the unrolled STOSQ loop.
var z gc.Node
gc.Nodconst(&z, gc.Types[gc.TUINT64], 0)
if b >= 8 {
n1.Type = z.Type
gins(x86.AMOVQ, &z, n1)
n1.Xoffset += 8
b -= 8
if b != 0 {
n1.Xoffset -= 8 - b
gins(x86.AMOVQ, &z, n1)
}
return
}
if b >= 4 {
gc.Nodconst(&z, gc.Types[gc.TUINT32], 0)
n1.Type = z.Type
gins(x86.AMOVL, &z, n1)
n1.Xoffset += 4
b -= 4
if b != 0 {
n1.Xoffset -= 4 - b
gins(x86.AMOVL, &z, n1)
}
return
}
if b >= 2 {
gc.Nodconst(&z, gc.Types[gc.TUINT16], 0)
n1.Type = z.Type
gins(x86.AMOVW, &z, n1)
n1.Xoffset += 2
b -= 2
}
gc.Nodconst(&z, gc.Types[gc.TUINT8], 0)
for b > 0 {
n1.Type = z.Type
gins(x86.AMOVB, &z, n1)
n1.Xoffset++
b--
}
}
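The recurring pattern in clearfat_tail is to step through the block with the widest store available and, if a remainder is left, back the offset up so one final store overlaps already-cleared bytes. A sketch of the offsets that idea produces with 8-byte stores (helper name is mine):

package main

import "fmt"

// movqOffsets lists the byte offsets at which zeroing MOVQ stores would be
// emitted for a b-byte block (b >= 8), with one overlapping store for the tail.
func movqOffsets(b int64) []int64 {
	var offs []int64
	var off int64
	for ; b >= 8; b -= 8 {
		offs = append(offs, off)
		off += 8
	}
	if b != 0 {
		offs = append(offs, off-(8-b)) // overlap the tail with the previous store
	}
	return offs
}

func main() {
	fmt.Println(movqOffsets(21)) // [0 8 13]: the final store covers bytes 13..20
}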
Author: glycerin
Project: zygomy
//......... part of the code omitted here .........
goto rdst
// convert via int32
case gc.TINT16<<16 | gc.TFLOAT32,
gc.TINT16<<16 | gc.TFLOAT64,
gc.TINT8<<16 | gc.TFLOAT32,
gc.TINT8<<16 | gc.TFLOAT64,
gc.TUINT16<<16 | gc.TFLOAT32,
gc.TUINT16<<16 | gc.TFLOAT64,
gc.TUINT8<<16 | gc.TFLOAT32,
gc.TUINT8<<16 | gc.TFLOAT64:
cvt = gc.Types[gc.TINT32]
goto hard
// convert via int64.
case gc.TUINT32<<16 | gc.TFLOAT32,
gc.TUINT32<<16 | gc.TFLOAT64:
cvt = gc.Types[gc.TINT64]
goto hard
// algorithm is:
//	if the value fits in an int64 (top bit clear), use the native
//	signed int64 -> float conversion.
//	otherwise, halve (rounding to odd, so the final doubling still
//	rounds correctly), convert, and double.
case gc.TUINT64<<16 | gc.TFLOAT32,
gc.TUINT64<<16 | gc.TFLOAT64:
a := x86.ACVTSQ2SS
if tt == gc.TFLOAT64 {
a = x86.ACVTSQ2SD
}
var zero gc.Node
gc.Nodconst(&zero, gc.Types[gc.TUINT64], 0)
var one gc.Node
gc.Nodconst(&one, gc.Types[gc.TUINT64], 1)
var r1 gc.Node
gc.Regalloc(&r1, f.Type, f)
var r2 gc.Node
gc.Regalloc(&r2, t.Type, t)
var r3 gc.Node
gc.Regalloc(&r3, f.Type, nil)
var r4 gc.Node
gc.Regalloc(&r4, f.Type, nil)
gmove(f, &r1)
gins(x86.ACMPQ, &r1, &zero)
p1 := gc.Gbranch(x86.AJLT, nil, +1)
gins(a, &r1, &r2)
p2 := gc.Gbranch(obj.AJMP, nil, 0)
gc.Patch(p1, gc.Pc)
gmove(&r1, &r3)
gins(x86.ASHRQ, &one, &r3)
gmove(&r1, &r4)
gins(x86.AANDL, &one, &r4)
gins(x86.AORQ, &r4, &r3)
gins(a, &r3, &r2)
gins(optoas(gc.OADD, t.Type), &r2, &r2)
gc.Patch(p2, gc.Pc)
gmove(&r2, t)
gc.Regfree(&r4)
gc.Regfree(&r3)
gc.Regfree(&r2)
gc.Regfree(&r1)
return
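In ordinary Go the two paths selected by the sign test above look like this (a sketch of the technique, not compiler output): with the top bit clear the signed conversion is used directly; otherwise the value is halved with the low bit folded back in (round to odd), converted, and doubled.

package main

import "fmt"

// uint64ToFloat64 mirrors the generated code: CVTSQ2SD/CVTSQ2SS are signed
// conversions, so values with the top bit set are halved (rounding to odd so
// the final doubling still rounds correctly), converted, and doubled.
func uint64ToFloat64(u uint64) float64 {
	if int64(u) >= 0 { // top bit clear
		return float64(int64(u))
	}
	half := u>>1 | u&1 // halve, rounding to odd
	return 2 * float64(int64(half))
}

func main() {
	fmt.Println(uint64ToFloat64(1<<63 + 1)) // 9.223372036854776e+18
	fmt.Println(uint64ToFloat64(12345))     // 12345
}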
Author: glycerin
Project: zygomy
func clearfat(nl *gc.Node) {
/* clear a fat object */
if gc.Debug['g'] != 0 {
gc.Dump("\nclearfat", nl)
}
w := uint32(nl.Type.Width)
// Avoid taking the address for simple enough types.
if gc.Componentgen(nil, nl) {
return
}
c := w % 4 // bytes
q := w / 4 // quads
if nl.Type.Align < 4 {
q = 0
c = w
}
var r0 gc.Node
r0.Op = gc.OREGISTER
r0.Reg = arm.REG_R0
var r1 gc.Node
r1.Op = gc.OREGISTER
r1.Reg = arm.REG_R1
var dst gc.Node
gc.Regalloc(&dst, gc.Types[gc.Tptr], &r1)
gc.Agen(nl, &dst)
var nc gc.Node
gc.Nodconst(&nc, gc.Types[gc.TUINT32], 0)
var nz gc.Node
gc.Regalloc(&nz, gc.Types[gc.TUINT32], &r0)
gc.Cgen(&nc, &nz)
if q > 128 {
var end gc.Node
gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
p := gins(arm.AMOVW, &dst, &end)
p.From.Type = obj.TYPE_ADDR
p.From.Offset = int64(q) * 4
p = gins(arm.AMOVW, &nz, &dst)
p.To.Type = obj.TYPE_MEM
p.To.Offset = 4
p.Scond |= arm.C_PBIT
pl := p
p = gins(arm.ACMP, &dst, nil)
raddr(&end, p)
gc.Patch(gc.Gbranch(arm.ABNE, nil, 0), pl)
gc.Regfree(&end)
} else if q >= 4 && !gc.Nacl {
f := gc.Sysfunc("duffzero")
p := gins(obj.ADUFFZERO, nil, f)
gc.Afunclit(&p.To, f)
// 4 and 128 = magic constants: see ../../runtime/asm_arm.s
p.To.Offset = 4 * (128 - int64(q))
} else {
var p *obj.Prog
for q > 0 {
p = gins(arm.AMOVW, &nz, &dst)
p.To.Type = obj.TYPE_MEM
p.To.Offset = 4
p.Scond |= arm.C_PBIT
//print("1. %v\n", p);
q--
}
}
if c > 4 {
// Loop to zero unaligned memory.
var end gc.Node
gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
p := gins(arm.AMOVW, &dst, &end)
p.From.Type = obj.TYPE_ADDR
p.From.Offset = int64(c)
p = gins(arm.AMOVB, &nz, &dst)
p.To.Type = obj.TYPE_MEM
p.To.Offset = 1
p.Scond |= arm.C_PBIT
pl := p
p = gins(arm.ACMP, &dst, nil)
raddr(&end, p)
gc.Patch(gc.Gbranch(arm.ABNE, nil, 0), pl)
gc.Regfree(&end)
c = 0
}
var p *obj.Prog
for c > 0 {
p = gins(arm.AMOVB, &nz, &dst)
p.To.Type = obj.TYPE_MEM
//......... part of the code omitted here .........
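The size bookkeeping at the top of clearfat and the duffzero entry offset can be summarized as below (a sketch; 4 and 128 are the constants the comment points to in runtime/asm_arm.s):

package main

import "fmt"

// clearfatPlan mirrors clearfat's setup: w bytes split into q 4-byte words and
// c trailing bytes (all bytes if the type is under-aligned), plus the
// ADUFFZERO entry offset used on the 4 <= q <= 128 path.
func clearfatPlan(w, align uint32) (q, c uint32, duffOffset int64) {
	c = w % 4
	q = w / 4
	if align < 4 {
		q = 0
		c = w
	}
	if q >= 4 && q <= 128 {
		duffOffset = 4 * (128 - int64(q))
	}
	return
}

func main() {
	fmt.Println(clearfatPlan(100, 4)) // 25 0 412
}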
Author: glycerin
Project: zygomy
/*
* generate division.
* generates one of:
* res = nl / nr
* res = nl % nr
* according to op.
*/
func dodiv(op gc.Op, nl *gc.Node, nr *gc.Node, res *gc.Node) {
// Have to be careful about handling
// most negative int divided by -1 correctly.
// The hardware will trap.
// Also the byte divide instruction needs AH,
// which we otherwise don't have to deal with.
// Easiest way to avoid for int8, int16: use int32.
// For int32 and int64, use explicit test.
// Could use int64 hw for int32.
t := nl.Type
t0 := t
check := false
if t.IsSigned() {
check = true
if gc.Isconst(nl, gc.CTINT) && nl.Int64() != -(1<<uint64(t.Width*8-1)) {
check = false
} else if gc.Isconst(nr, gc.CTINT) && nr.Int64() != -1 {
check = false
}
}
if t.Width < 4 {
if t.IsSigned() {
t = gc.Types[gc.TINT32]
} else {
t = gc.Types[gc.TUINT32]
}
check = false
}
a := optoas(op, t)
var n3 gc.Node
gc.Regalloc(&n3, t0, nil)
var ax gc.Node
var oldax gc.Node
if nl.Ullman >= nr.Ullman {
savex(x86.REG_AX, &ax, &oldax, res, t0)
gc.Cgen(nl, &ax)
gc.Regalloc(&ax, t0, &ax) // mark ax live during cgen
gc.Cgen(nr, &n3)
gc.Regfree(&ax)
} else {
gc.Cgen(nr, &n3)
savex(x86.REG_AX, &ax, &oldax, res, t0)
gc.Cgen(nl, &ax)
}
if t != t0 {
// Convert
ax1 := ax
n31 := n3
ax.Type = t
n3.Type = t
gmove(&ax1, &ax)
gmove(&n31, &n3)
}
var n4 gc.Node
if gc.Nacl {
// Native Client does not relay the divide-by-zero trap
// to the executing program, so we must insert a check
// for ourselves.
gc.Nodconst(&n4, t, 0)
gins(optoas(gc.OCMP, t), &n3, &n4)
p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
if panicdiv == nil {
panicdiv = gc.Sysfunc("panicdivide")
}
gc.Ginscall(panicdiv, -1)
gc.Patch(p1, gc.Pc)
}
var p2 *obj.Prog
if check {
gc.Nodconst(&n4, t, -1)
gins(optoas(gc.OCMP, t), &n3, &n4)
p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
if op == gc.ODIV {
// a / (-1) is -a.
gins(optoas(gc.OMINUS, t), nil, &ax)
gmove(&ax, res)
} else {
// a % (-1) is 0.
gc.Nodconst(&n4, t, 0)
gmove(&n4, res)
}
//......... part of the code omitted here .........
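The case the `check` flag guards is exactly the one the opening comment describes: the most negative value divided by -1 would fault in hardware, so the generated code substitutes the results directly. The intended semantics in plain Go (a sketch, not compiler code):

package main

import (
	"fmt"
	"math"
)

// divmodChecked shows the results dodiv arranges for the operands a hardware
// IDIV would fault on: a / -1 is -a (wrapping for MinInt64) and a % -1 is 0.
func divmodChecked(a, b int64) (q, r int64) {
	if b == -1 {
		return -a, 0 // -a wraps back to MinInt64 when a == MinInt64
	}
	return a / b, a % b
}

func main() {
	fmt.Println(divmodChecked(math.MinInt64, -1)) // -9223372036854775808 0
	fmt.Println(divmodChecked(7, -2))             // -3 1
}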
Author: glycerin
Project: zygomy
/*
* generate shift according to op, one of:
* res = nl << nr
* res = nl >> nr
*/
func cgen_shift(op gc.Op, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
if nl.Type.Width > 4 {
gc.Fatalf("cgen_shift %v", nl.Type)
}
w := int(nl.Type.Width * 8)
if op == gc.OLROT {
v := nr.Int64()
var n1 gc.Node
gc.Regalloc(&n1, nl.Type, res)
if w == 32 {
gc.Cgen(nl, &n1)
gshift(arm.AMOVW, &n1, arm.SHIFT_RR, int32(w)-int32(v), &n1)
} else {
var n2 gc.Node
gc.Regalloc(&n2, nl.Type, nil)
gc.Cgen(nl, &n2)
gshift(arm.AMOVW, &n2, arm.SHIFT_LL, int32(v), &n1)
gshift(arm.AORR, &n2, arm.SHIFT_LR, int32(w)-int32(v), &n1)
gc.Regfree(&n2)
// Ensure sign/zero-extended result.
gins(optoas(gc.OAS, nl.Type), &n1, &n1)
}
gmove(&n1, res)
gc.Regfree(&n1)
return
}
if nr.Op == gc.OLITERAL {
var n1 gc.Node
gc.Regalloc(&n1, nl.Type, res)
gc.Cgen(nl, &n1)
sc := uint64(nr.Int64())
if sc == 0 {
	// nothing to do
} else if sc >= uint64(nl.Type.Width*8) {
if op == gc.ORSH && nl.Type.IsSigned() {
gshift(arm.AMOVW, &n1, arm.SHIFT_AR, int32(w), &n1)
} else {
gins(arm.AEOR, &n1, &n1)
}
} else {
if op == gc.ORSH && nl.Type.IsSigned() {
gshift(arm.AMOVW, &n1, arm.SHIFT_AR, int32(sc), &n1)
} else if op == gc.ORSH {
gshift(arm.AMOVW, &n1, arm.SHIFT_LR, int32(sc), &n1)
} else { // OLSH
gshift(arm.AMOVW, &n1, arm.SHIFT_LL, int32(sc), &n1)
}
}
if w < 32 && op == gc.OLSH {
gins(optoas(gc.OAS, nl.Type), &n1, &n1)
}
gmove(&n1, res)
gc.Regfree(&n1)
return
}
tr := nr.Type
var t gc.Node
var n1 gc.Node
var n2 gc.Node
var n3 gc.Node
if tr.Width > 4 {
var nt gc.Node
gc.Tempname(&nt, nr.Type)
if nl.Ullman >= nr.Ullman {
gc.Regalloc(&n2, nl.Type, res)
gc.Cgen(nl, &n2)
gc.Cgen(nr, &nt)
n1 = nt
} else {
gc.Cgen(nr, &nt)
gc.Regalloc(&n2, nl.Type, res)
gc.Cgen(nl, &n2)
}
var hi gc.Node
var lo gc.Node
split64(&nt, &lo, &hi)
gc.Regalloc(&n1, gc.Types[gc.TUINT32], nil)
gc.Regalloc(&n3, gc.Types[gc.TUINT32], nil)
gmove(&lo, &n1)
gmove(&hi, &n3)
splitclean()
gins(arm.ATST, &n3, nil)
gc.Nodconst(&t, gc.Types[gc.TUINT32], int64(w))
p1 := gins(arm.AMOVW, &t, &n1)
p1.Scond = arm.C_SCOND_NE
tr = gc.Types[gc.TUINT32]
gc.Regfree(&n3)
//......... part of the code omitted here .........
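For the OLROT case near the top of cgen_shift, a left rotate by v is produced with a single right-rotate-by-(w-v) shifted operand when w is 32; in plain Go that operation is (a sketch, name is mine):

package main

import "fmt"

// rotl32 is the operation the w == 32 OLROT branch emits as one MOVW with a
// ROR-by-(32-v) shifted operand.
func rotl32(x uint32, v uint) uint32 {
	v &= 31
	return x<<v | x>>(32-v)
}

func main() {
	fmt.Printf("%#x\n", rotl32(0x80000001, 4)) // 0x18
}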