Author: bibbyflyawa
Project: g
/*
* generate
* as $c, n
*/
func ginscon(as int, c int64, n2 *gc.Node) {
var n1 gc.Node
switch as {
case x86.AADDL,
x86.AMOVL,
x86.ALEAL:
gc.Nodconst(&n1, gc.Types[gc.TINT32], c)
default:
gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
}
if as != x86.AMOVQ && (c < -(1<<31) || c >= 1<<31) {
// cannot have 64-bit immediate in ADD, etc.
// instead, MOV into register first.
var ntmp gc.Node
gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil)
gins(x86.AMOVQ, &n1, &ntmp)
gins(as, &ntmp, n2)
gc.Regfree(&ntmp)
return
}
gins(as, &n1, n2)
}
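The range test above is the crux: amd64 ALU instructions encode at most a sign-extended 32-bit immediate, so only constants in [-2^31, 2^31) can be used directly; anything wider must be staged through a register with MOVQ. A minimal sketch of that predicate (the helper name is ours, not the compiler's):

// fitsImm32 reports whether c can be encoded as a sign-extended
// 32-bit immediate, mirroring the check in ginscon above.
func fitsImm32(c int64) bool {
	return c >= -(1 << 31) && c < 1<<31
}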
Author: xslonepiec
Project: goio
/*
* n is a 64-bit value. fill in lo and hi to refer to its 32-bit halves.
*/
func split64(n *gc.Node, lo *gc.Node, hi *gc.Node) {
if !gc.Is64(n.Type) {
gc.Fatal("split64 %v", gc.Tconv(n.Type, 0))
}
if nsclean >= len(sclean) {
gc.Fatal("split64 clean")
}
sclean[nsclean].Op = gc.OEMPTY
nsclean++
switch n.Op {
default:
switch n.Op {
default:
var n1 gc.Node
if !dotaddable(n, &n1) {
gc.Igen(n, &n1, nil)
sclean[nsclean-1] = n1
}
n = &n1
case gc.ONAME:
if n.Class == gc.PPARAMREF {
var n1 gc.Node
gc.Cgen(n.Heapaddr, &n1)
sclean[nsclean-1] = n1
n = &n1
}
case gc.OINDREG:
// nothing to do
}
*lo = *n
*hi = *n
lo.Type = gc.Types[gc.TUINT32]
if n.Type.Etype == gc.TINT64 {
hi.Type = gc.Types[gc.TINT32]
} else {
hi.Type = gc.Types[gc.TUINT32]
}
hi.Xoffset += 4
case gc.OLITERAL:
var n1 gc.Node
gc.Convconst(&n1, n.Type, &n.Val)
i := gc.Mpgetfix(n1.Val.U.Xval)
gc.Nodconst(lo, gc.Types[gc.TUINT32], int64(uint32(i)))
i >>= 32
if n.Type.Etype == gc.TINT64 {
gc.Nodconst(hi, gc.Types[gc.TINT32], int64(int32(i)))
} else {
gc.Nodconst(hi, gc.Types[gc.TUINT32], int64(uint32(i)))
}
}
}
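For the OLITERAL case the split is pure arithmetic: the low half is the bottom 32 bits as unsigned, and the high half is the top 32 bits, keeping the signedness of the original type. The same computation as a standalone sketch (helper name is ours):

// splitConst64 mirrors split64's OLITERAL arithmetic for a TINT64
// input: lo is the low 32 bits, hi the (signed) high 32 bits.
func splitConst64(i int64) (lo uint32, hi int32) {
	return uint32(i), int32(i >> 32)
}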
Author: tidatid
Project: g
/*
* generate an addressable node in res, containing the value of n.
* n is an array index, and might be any size; res width is <= 32-bit.
* returns Prog* to patch to panic call.
*/
func igenindex(n *gc.Node, res *gc.Node, bounded bool) *obj.Prog {
if !gc.Is64(n.Type) {
if n.Addable != 0 {
// nothing to do.
*res = *n
} else {
gc.Tempname(res, gc.Types[gc.TUINT32])
gc.Cgen(n, res)
}
return nil
}
var tmp gc.Node
gc.Tempname(&tmp, gc.Types[gc.TINT64])
gc.Cgen(n, &tmp)
var lo gc.Node
var hi gc.Node
split64(&tmp, &lo, &hi)
gc.Tempname(res, gc.Types[gc.TUINT32])
gmove(&lo, res)
if bounded {
splitclean()
return nil
}
var zero gc.Node
gc.Nodconst(&zero, gc.Types[gc.TINT32], 0)
gins(x86.ACMPL, &hi, &zero)
splitclean()
return gc.Gbranch(x86.AJNE, nil, +1)
}
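The ACMPL against zero encodes the rule for 64-bit indexes on a 32-bit target: after truncating to the low word, the index is valid only if the high word is zero; otherwise the AJNE branch is patched to the panic call. As plain arithmetic (a sketch, hypothetical helper):

// index64OK mirrors igenindex's check: a 64-bit index survives
// truncation to its low 32 bits only if the high 32 bits are zero.
func index64OK(idx int64) (lo uint32, ok bool) {
	return uint32(idx), uint32(idx>>32) == 0
}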
Author: bibbyflyawa
Project: g
func ncon(i uint32) *gc.Node {
if ncon_n.Type == nil {
gc.Nodconst(&ncon_n, gc.Types[gc.TUINT32], 0)
}
ncon_n.SetInt(int64(i))
return &ncon_n
}
Author: bibbyflyawa
Project: g
/*
* generate array index into res.
* n might be any size; res is 32-bit.
* returns Prog* to patch to panic call.
*/
func cgenindex(n *gc.Node, res *gc.Node, bounded bool) *obj.Prog {
if !gc.Is64(n.Type) {
gc.Cgen(n, res)
return nil
}
var tmp gc.Node
gc.Tempname(&tmp, gc.Types[gc.TINT64])
gc.Cgen(n, &tmp)
var lo gc.Node
var hi gc.Node
split64(&tmp, &lo, &hi)
gmove(&lo, res)
if bounded {
splitclean()
return nil
}
var n1 gc.Node
gc.Regalloc(&n1, gc.Types[gc.TINT32], nil)
var n2 gc.Node
gc.Regalloc(&n2, gc.Types[gc.TINT32], nil)
var zero gc.Node
gc.Nodconst(&zero, gc.Types[gc.TINT32], 0)
gmove(&hi, &n1)
gmove(&zero, &n2)
gins(arm.ACMP, &n1, &n2)
gc.Regfree(&n2)
gc.Regfree(&n1)
splitclean()
return gc.Gbranch(arm.ABNE, nil, -1)
}
Author: xslonepiec
Project: goio
func ncon(i uint32) *gc.Node {
if ncon_n.Type == nil {
gc.Nodconst(&ncon_n, gc.Types[gc.TUINT32], 0)
}
gc.Mpmovecfix(ncon_n.Val.U.Xval, int64(i))
return &ncon_n
}
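Both ncon variants return a pointer to the single package-level node ncon_n, so a second call clobbers the value seen through an earlier return; callers must consume the node before calling ncon again. A tiny standalone demonstration of that aliasing pattern (not compiler code):

package main

import "fmt"

var buf int64

// intp mimics ncon's pattern: write into shared storage, return its address.
func intp(v int64) *int64 { buf = v; return &buf }

func main() {
	a, b := intp(1), intp(2)
	fmt.Println(*a, *b) // prints "2 2": both point at the same buffer
}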
Author: tidatid
Project: g
/*
* generate
* as n, $c (CMP/CMPU)
*/
func ginscon2(as int, n2 *gc.Node, c int64) {
var n1 gc.Node
gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
switch as {
default:
gc.Fatal("ginscon2")
case ppc64.ACMP:
if -ppc64.BIG <= c && c <= ppc64.BIG {
rawgins(as, n2, &n1)
return
}
case ppc64.ACMPU:
if 0 <= c && c <= 2*ppc64.BIG {
rawgins(as, n2, &n1)
return
}
}
// MOV n1 into register first
var ntmp gc.Node
gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil)
rawgins(ppc64.AMOVD, &n1, &ntmp)
rawgins(as, n2, &ntmp)
gc.Regfree(&ntmp)
}
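The asymmetry between the two cases follows from the encodings: CMP takes a sign-extended 16-bit immediate while CMPU takes a zero-extended one, so the accepted windows differ, roughly [-2^15, 2^15) versus [0, 2^16); ppc64.BIG approximates those bounds. A sketch of the two predicates under that assumption (helper names are ours):

// Immediate windows for the PPC64 compare instructions (a sketch; the
// compiler tests against ppc64.BIG rather than the exact limits).
func fitsCMP(c int64) bool  { return c >= -(1 << 15) && c < 1<<15 } // sign-extended imm16
func fitsCMPU(c int64) bool { return c >= 0 && c < 1<<16 }          // zero-extended imm16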
Author: xslonepiec
Project: goio
/*
* generate
* as $c, reg
*/
func gconreg(as int, c int64, reg int) {
var n1 gc.Node
var n2 gc.Node
gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
gc.Nodreg(&n2, gc.Types[gc.TINT64], reg)
gins(as, &n1, &n2)
}
Author: bibbyflyawa
Project: g
/*
* generate
* as $c, n
*/
func ginscon(as int, c int64, n *gc.Node) {
var n1 gc.Node
gc.Nodconst(&n1, gc.Types[gc.TINT32], c)
var n2 gc.Node
gc.Regalloc(&n2, gc.Types[gc.TINT32], nil)
gmove(&n1, &n2)
gins(as, &n2, n)
gc.Regfree(&n2)
}
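Unlike the amd64 version, this ARM ginscon never tries the constant directly: ARM data-processing immediates are only an 8-bit value rotated right by an even amount, so most 32-bit constants cannot be encoded, and gmove is left to materialize them. A sketch of the encodability test (our helper, not a compiler API):

import "math/bits"

// armImmediate reports whether c fits an ARM data-processing
// immediate: an 8-bit value rotated right by an even amount.
func armImmediate(c uint32) bool {
	for rot := 0; rot < 32; rot += 2 {
		if bits.RotateLeft32(c, rot) <= 0xFF {
			return true
		}
	}
	return false
}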
Author: bibbyflyawa
Project: g
func bignodes() {
if bignodes_did {
return
}
bignodes_did = true
var i big.Int
i.SetInt64(1)
i.Lsh(&i, 63)
gc.Nodconst(&bigi, gc.Types[gc.TUINT64], 0)
bigi.SetBigInt(&i)
bigi.Convconst(&bigf, gc.Types[gc.TFLOAT64])
}
Author: tidatid
Project: g
func bignodes() {
if bignodes_did != 0 {
return
}
bignodes_did = 1
gc.Nodconst(&bigi, gc.Types[gc.TUINT64], 1)
gc.Mpshiftfix(bigi.Val.U.Xval, 63)
bigf = bigi
bigf.Type = gc.Types[gc.TFLOAT64]
bigf.Val.Ctype = gc.CTFLT
bigf.Val.U.Fval = new(gc.Mpflt)
gc.Mpmovefixflt(bigf.Val.U.Fval, bigi.Val.U.Xval)
}
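Both versions build the same pair of constants: bigi holds 2^63 as an integer and bigf holds 2^63 as a float64. That value is the threshold at which float-to-uint64 conversions must be split, since anything at or above 2^63 does not survive a signed 64-bit convert. A sketch of the conversion the generated code performs (assuming the input is in range):

// f2u64 converts a float64 in [0, 2^64) to uint64 the way the
// generated code does: below 2^63 convert directly; otherwise
// subtract 2^63 first and add the top bit back afterwards.
func f2u64(f float64) uint64 {
	const big = 1 << 63 // bigi; exactly representable as float64 (bigf)
	if f < big {
		return uint64(int64(f))
	}
	return uint64(int64(f-big)) + big
}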
Author: tidatid
Project: g
/*
* generate
* as $c, n
*/
func ginscon(as int, c int64, n2 *gc.Node) {
var n1 gc.Node
gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
if as != ppc64.AMOVD && (c < -ppc64.BIG || c > ppc64.BIG) || n2.Op != gc.OREGISTER || as == ppc64.AMULLD {
// cannot have more than 16-bit of immediate in ADD, etc.
// instead, MOV into register first.
var ntmp gc.Node
gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil)
rawgins(ppc64.AMOVD, &n1, &ntmp)
rawgins(as, &ntmp, n2)
gc.Regfree(&ntmp)
return
}
rawgins(as, &n1, n2)
}
Author: xslonepiec
Project: goio
func gencmp0(n *gc.Node, t *gc.Type, o int, likely int, to *obj.Prog) {
var n1 gc.Node
gc.Regalloc(&n1, t, nil)
gc.Cgen(n, &n1)
a := optoas(gc.OCMP, t)
if a != arm.ACMP {
var n2 gc.Node
gc.Nodconst(&n2, t, 0)
var n3 gc.Node
gc.Regalloc(&n3, t, nil)
gmove(&n2, &n3)
gins(a, &n1, &n3)
gc.Regfree(&n3)
} else {
gins(arm.ATST, &n1, nil)
}
a = optoas(o, t)
gc.Patch(gc.Gbranch(a, t, likely), to)
gc.Regfree(&n1)
}
Author: kluesk
Project: go-akaro
/*
* generate shift according to op, one of:
* res = nl << nr
* res = nl >> nr
*/
func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
a := optoas(op, nl.Type)
if nr.Op == gc.OLITERAL {
var n1 gc.Node
regalloc(&n1, nl.Type, res)
cgen(nl, &n1)
sc := uint64(gc.Mpgetfix(nr.Val.U.Xval))
if sc >= uint64(nl.Type.Width*8) {
// large shift gets 2 shifts by width-1
var n3 gc.Node
gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
gins(a, &n3, &n1)
gins(a, &n3, &n1)
} else {
gins(a, nr, &n1)
}
gmove(&n1, res)
regfree(&n1)
return
}
if nl.Ullman >= gc.UINF {
var n4 gc.Node
gc.Tempname(&n4, nl.Type)
cgen(nl, &n4)
nl = &n4
}
if nr.Ullman >= gc.UINF {
var n5 gc.Node
gc.Tempname(&n5, nr.Type)
cgen(nr, &n5)
nr = &n5
}
// Allow either uint32 or uint64 as shift type,
// to avoid unnecessary conversion from uint32 to uint64
// just to do the comparison.
tcount := gc.Types[gc.Simtype[nr.Type.Etype]]
if tcount.Etype < gc.TUINT32 {
tcount = gc.Types[gc.TUINT32]
}
var n1 gc.Node
regalloc(&n1, nr.Type, nil) // to hold the shift type in CX
var n3 gc.Node
regalloc(&n3, tcount, &n1) // to clear high bits of CX
var n2 gc.Node
regalloc(&n2, nl.Type, res)
if nl.Ullman >= nr.Ullman {
cgen(nl, &n2)
cgen(nr, &n1)
gmove(&n1, &n3)
} else {
cgen(nr, &n1)
gmove(&n1, &n3)
cgen(nl, &n2)
}
regfree(&n3)
// test and fix up large shifts
if !bounded {
gc.Nodconst(&n3, tcount, nl.Type.Width*8)
gins(optoas(gc.OCMP, tcount), &n1, &n3)
p1 := gc.Gbranch(optoas(gc.OLT, tcount), nil, +1)
if op == gc.ORSH && gc.Issigned[nl.Type.Etype] {
gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
gins(a, &n3, &n2)
} else {
gc.Nodconst(&n3, nl.Type, 0)
gmove(&n3, &n2)
}
gc.Patch(p1, gc.Pc)
}
gins(a, &n1, &n2)
gmove(&n2, res)
regfree(&n1)
regfree(&n2)
}
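The !bounded fixup exists because hardware and language disagree: x86 shift instructions mask the CL count to the operand width, while Go defines shifts by width or more to produce 0 (or all sign bits, for a signed right shift). A sketch of the semantics the emitted compare-and-branch restores (hypothetical helper):

// goShl32 layers Go's shift semantics over hardware that masks the
// count, which is what cgen_shift's compare against width achieves.
func goShl32(x uint32, s uint64) uint32 {
	if s >= 32 {
		return 0 // Go semantics; raw x86 would shift by s & 31
	}
	return x << s
}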
Author: kluesk
Project: go-akaro
//......... portion of code omitted .........
gc.Nodreg(n, t, i)
}
func regfree(n *gc.Node) {
if n.Op == gc.ONAME {
return
}
if n.Op != gc.OREGISTER && n.Op != gc.OINDREG {
gc.Fatal("regfree: not a register")
}
i := int(n.Val.U.Reg) - arm64.REG_R0
if i == arm64.REGSP-arm64.REG_R0 {
return
}
if i < 0 || i >= len(reg) {
gc.Fatal("regfree: reg out of range")
}
if reg[i] <= 0 {
gc.Fatal("regfree: reg not allocated")
}
reg[i]--
if reg[i] == 0 {
regpc[i] = 0
}
}
/*
* generate
* as $c, n
*/
func ginscon(as int, c int64, n2 *gc.Node) {
var n1 gc.Node
gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
if as != arm64.AMOVD && (c < -arm64.BIG || c > arm64.BIG) {
// cannot have more than 16-bit of immediate in ADD, etc.
// instead, MOV into register first.
var ntmp gc.Node
regalloc(&ntmp, gc.Types[gc.TINT64], nil)
gins(arm64.AMOVD, &n1, &ntmp)
gins(as, &ntmp, n2)
regfree(&ntmp)
return
}
gins(as, &n1, n2)
}
/*
* generate
* as n, $c (CMP)
*/
func ginscon2(as int, n2 *gc.Node, c int64) {
var n1 gc.Node
gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
switch as {
default:
gc.Fatal("ginscon2")
case arm64.ACMP:
if -arm64.BIG <= c && c <= arm64.BIG {
gcmp(as, n2, &n1)
//......... portion of code omitted .........
Author: bibbyflyawa
Project: g
func clearfat(nl *gc.Node) {
/* clear a fat object */
if gc.Debug['g'] != 0 {
gc.Dump("\nclearfat", nl)
}
w := uint32(nl.Type.Width)
// Avoid taking the address for simple enough types.
if gc.Componentgen(nil, nl) {
return
}
c := w % 4 // bytes
q := w / 4 // quads
var r0 gc.Node
r0.Op = gc.OREGISTER
r0.Reg = arm.REG_R0
var r1 gc.Node
r1.Op = gc.OREGISTER
r1.Reg = arm.REG_R1
var dst gc.Node
gc.Regalloc(&dst, gc.Types[gc.Tptr], &r1)
gc.Agen(nl, &dst)
var nc gc.Node
gc.Nodconst(&nc, gc.Types[gc.TUINT32], 0)
var nz gc.Node
gc.Regalloc(&nz, gc.Types[gc.TUINT32], &r0)
gc.Cgen(&nc, &nz)
if q > 128 {
var end gc.Node
gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
p := gins(arm.AMOVW, &dst, &end)
p.From.Type = obj.TYPE_ADDR
p.From.Offset = int64(q) * 4
p = gins(arm.AMOVW, &nz, &dst)
p.To.Type = obj.TYPE_MEM
p.To.Offset = 4
p.Scond |= arm.C_PBIT
pl := p
p = gins(arm.ACMP, &dst, nil)
raddr(&end, p)
gc.Patch(gc.Gbranch(arm.ABNE, nil, 0), pl)
gc.Regfree(&end)
} else if q >= 4 && !gc.Nacl {
f := gc.Sysfunc("duffzero")
p := gins(obj.ADUFFZERO, nil, f)
gc.Afunclit(&p.To, f)
// 4 and 128 = magic constants: see ../../runtime/asm_arm.s
p.To.Offset = 4 * (128 - int64(q))
} else {
var p *obj.Prog
for q > 0 {
p = gins(arm.AMOVW, &nz, &dst)
p.To.Type = obj.TYPE_MEM
p.To.Offset = 4
p.Scond |= arm.C_PBIT
//print("1. %P\n", p);
q--
}
}
var p *obj.Prog
for c > 0 {
p = gins(arm.AMOVB, &nz, &dst)
p.To.Type = obj.TYPE_MEM
p.To.Offset = 1
p.Scond |= arm.C_PBIT
//print("2. %P\n", p);
c--
}
gc.Regfree(&dst)
gc.Regfree(&nz)
}
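clearfat chooses one of three strategies from the word count q = w/4: a runtime loop for large objects, a jump into duffzero for mid-sized ones, and a fully unrolled store sequence for small ones, always followed by c = w%4 single-byte stores. A sketch of the dispatch, with the thresholds copied from the code above:

import "fmt"

// zeroPlan mirrors clearfat's strategy choice for an object of w bytes.
func zeroPlan(w uint32) string {
	q, c := w/4, w%4 // 4-byte stores, then trailing byte stores
	switch {
	case q > 128:
		return fmt.Sprintf("loop: %d word stores, %d byte stores", q, c)
	case q >= 4: // and not NaCl
		return fmt.Sprintf("duffzero entry at offset %d, %d byte stores", 4*(128-int64(q)), c)
	default:
		return fmt.Sprintf("unrolled: %d word stores, %d byte stores", q, c)
	}
}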
Author: bibbyflyawa
Project: g
/*
* generate shift according to op, one of:
* res = nl << nr
* res = nl >> nr
*/
func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
if nl.Type.Width > 4 {
gc.Fatal("cgen_shift %v", nl.Type)
}
w := int(nl.Type.Width * 8)
if op == gc.OLROT {
v := nr.Int()
var n1 gc.Node
gc.Regalloc(&n1, nl.Type, res)
if w == 32 {
gc.Cgen(nl, &n1)
gshift(arm.AMOVW, &n1, arm.SHIFT_RR, int32(w)-int32(v), &n1)
} else {
var n2 gc.Node
gc.Regalloc(&n2, nl.Type, nil)
gc.Cgen(nl, &n2)
gshift(arm.AMOVW, &n2, arm.SHIFT_LL, int32(v), &n1)
gshift(arm.AORR, &n2, arm.SHIFT_LR, int32(w)-int32(v), &n1)
gc.Regfree(&n2)
// Ensure sign/zero-extended result.
gins(optoas(gc.OAS, nl.Type), &n1, &n1)
}
gmove(&n1, res)
gc.Regfree(&n1)
return
}
if nr.Op == gc.OLITERAL {
var n1 gc.Node
gc.Regalloc(&n1, nl.Type, res)
gc.Cgen(nl, &n1)
sc := uint64(nr.Int())
if sc == 0 {
// nothing to do
} else if sc >= uint64(nl.Type.Width*8) {
if op == gc.ORSH && gc.Issigned[nl.Type.Etype] {
gshift(arm.AMOVW, &n1, arm.SHIFT_AR, int32(w), &n1)
} else {
gins(arm.AEOR, &n1, &n1)
}
} else {
if op == gc.ORSH && gc.Issigned[nl.Type.Etype] {
gshift(arm.AMOVW, &n1, arm.SHIFT_AR, int32(sc), &n1)
} else if op == gc.ORSH {
gshift(arm.AMOVW, &n1, arm.SHIFT_LR, int32(sc), &n1)
} else { // OLSH
gshift(arm.AMOVW, &n1, arm.SHIFT_LL, int32(sc), &n1)
}
}
if w < 32 && op == gc.OLSH {
gins(optoas(gc.OAS, nl.Type), &n1, &n1)
}
gmove(&n1, res)
gc.Regfree(&n1)
return
}
tr := nr.Type
var t gc.Node
var n1 gc.Node
var n2 gc.Node
var n3 gc.Node
if tr.Width > 4 {
var nt gc.Node
gc.Tempname(&nt, nr.Type)
if nl.Ullman >= nr.Ullman {
gc.Regalloc(&n2, nl.Type, res)
gc.Cgen(nl, &n2)
gc.Cgen(nr, &nt)
n1 = nt
} else {
gc.Cgen(nr, &nt)
gc.Regalloc(&n2, nl.Type, res)
gc.Cgen(nl, &n2)
}
var hi gc.Node
var lo gc.Node
split64(&nt, &lo, &hi)
gc.Regalloc(&n1, gc.Types[gc.TUINT32], nil)
gc.Regalloc(&n3, gc.Types[gc.TUINT32], nil)
gmove(&lo, &n1)
gmove(&hi, &n3)
splitclean()
gins(arm.ATST, &n3, nil)
gc.Nodconst(&t, gc.Types[gc.TUINT32], int64(w))
p1 := gins(arm.AMOVW, &t, &n1)
p1.Scond = arm.C_SCOND_NE
tr = gc.Types[gc.TUINT32]
gc.Regfree(&n3)
//......... portion of code omitted .........
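The OLROT path above builds a rotate out of two shifted-operand instructions: a left shift by v ORed with a logical right shift by w-v. In plain Go the identity is (a sketch for the 32-bit case):

// rotl32 is what the MOVW/ORR gshift pair computes:
// x ROL v == (x << v) | (x >> (32-v)).
func rotl32(x uint32, v uint) uint32 {
	return x<<v | x>>(32-v)
}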
Author: kluesk
Project: go-akaro
/*
* generate division according to op, one of:
* res = nl / nr
* res = nl % nr
*/
func cgen_div(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
var w int
if nr.Op != gc.OLITERAL {
goto longdiv
}
w = int(nl.Type.Width * 8)
// Front end handled 32-bit division. We only need to handle 64-bit.
// try to do division by multiply by (2^w)/d
// see hacker's delight chapter 10
switch gc.Simtype[nl.Type.Etype] {
default:
goto longdiv
case gc.TUINT64:
var m gc.Magic
m.W = w
m.Ud = uint64(gc.Mpgetfix(nr.Val.U.Xval))
gc.Umagic(&m)
if m.Bad != 0 {
break
}
if op == gc.OMOD {
goto longmod
}
var n1 gc.Node
cgenr(nl, &n1, nil)
var n2 gc.Node
gc.Nodconst(&n2, nl.Type, int64(m.Um))
var n3 gc.Node
regalloc(&n3, nl.Type, res)
cgen_hmul(&n1, &n2, &n3)
if m.Ua != 0 {
// need to add numerator accounting for overflow
gins(optoas(gc.OADD, nl.Type), &n1, &n3)
gc.Nodconst(&n2, nl.Type, 1)
gins(optoas(gc.ORROTC, nl.Type), &n2, &n3)
gc.Nodconst(&n2, nl.Type, int64(m.S)-1)
gins(optoas(gc.ORSH, nl.Type), &n2, &n3)
} else {
gc.Nodconst(&n2, nl.Type, int64(m.S))
gins(optoas(gc.ORSH, nl.Type), &n2, &n3) // shift dx
}
gmove(&n3, res)
regfree(&n1)
regfree(&n3)
return
case gc.TINT64:
var m gc.Magic
m.W = w
m.Sd = gc.Mpgetfix(nr.Val.U.Xval)
gc.Smagic(&m)
if m.Bad != 0 {
break
}
if op == gc.OMOD {
goto longmod
}
var n1 gc.Node
cgenr(nl, &n1, res)
var n2 gc.Node
gc.Nodconst(&n2, nl.Type, m.Sm)
var n3 gc.Node
regalloc(&n3, nl.Type, nil)
cgen_hmul(&n1, &n2, &n3)
if m.Sm < 0 {
// need to add numerator
gins(optoas(gc.OADD, nl.Type), &n1, &n3)
}
gc.Nodconst(&n2, nl.Type, int64(m.S))
gins(optoas(gc.ORSH, nl.Type), &n2, &n3) // shift n3
gc.Nodconst(&n2, nl.Type, int64(w)-1)
gins(optoas(gc.ORSH, nl.Type), &n2, &n1) // -1 iff num is neg
gins(optoas(gc.OSUB, nl.Type), &n1, &n3) // added
if m.Sd < 0 {
// this could probably be removed
// by factoring it into the multiplier
gins(optoas(gc.OMINUS, nl.Type), nil, &n3)
}
gmove(&n3, res)
regfree(&n1)
regfree(&n3)
//......... portion of code omitted .........
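The two magic-number cases are the Hacker's Delight chapter 10 transformation: replace division by a constant d with a multiply by a precomputed magic m, keeping the high word, followed by a shift s (plus the add-and-rotate fixup in the m.Ua branch when m overflows 64 bits). For d = 3 the pair is m = 0xAAAAAAAAAAAAAAAB and s = 1; a runnable sketch:

import "math/bits"

// div3 divides by 3 the way cgen_div's TUINT64 case does:
// high half of x*m, then shift right by s.
func div3(x uint64) uint64 {
	const m = 0xAAAAAAAAAAAAAAAB // ceil(2^65 / 3)
	hi, _ := bits.Mul64(x, m)
	return hi >> 1 // s = 1
}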
Author: xslonepiec
Project: goio
/*
* generate
* as $c, n
*/
func ginscon(as int, c int64, n2 *gc.Node) {
var n1 gc.Node
gc.Nodconst(&n1, gc.Types[gc.TINT32], c)
gins(as, &n1, n2)
}
Author: kluesk
Project: go-akaro
//......... portion of code omitted .........
regalloc(&n4, n1.Type, nil)
gmove(&n1, &n4)
ginscon2(optoas(gc.OCMP, gc.Types[gc.TUINT64]), &n4, int64(v))
regfree(&n4)
p1 := gc.Gbranch(optoas(gc.OGT, gc.Types[gc.TUINT64]), nil, +1)
ginscall(gc.Panicindex, 0)
gc.Patch(p1, gc.Pc)
}
n1 = n3
n1.Op = gc.OINDREG
n1.Type = gc.Types[gc.Tptr]
n1.Xoffset = int64(gc.Array_array)
gmove(&n1, &n3)
}
if v*uint64(w) != 0 {
ginscon(optoas(gc.OADD, gc.Types[gc.Tptr]), int64(v*uint64(w)), &n3)
}
*a = n3
break
}
var n2 gc.Node
regalloc(&n2, gc.Types[gc.TINT64], &n1) // i
gmove(&n1, &n2)
regfree(&n1)
var n4 gc.Node
if gc.Debug['B'] == 0 && !n.Bounded {
// check bounds
if gc.Isconst(nl, gc.CTSTR) {
gc.Nodconst(&n4, gc.Types[gc.TUINT64], int64(len(nl.Val.U.Sval)))
} else if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
n1 = n3
n1.Op = gc.OINDREG
n1.Type = gc.Types[gc.Tptr]
n1.Xoffset = int64(gc.Array_nel)
regalloc(&n4, gc.Types[gc.TUINT64], nil)
gmove(&n1, &n4)
} else {
if nl.Type.Bound < (1<<15)-1 {
gc.Nodconst(&n4, gc.Types[gc.TUINT64], nl.Type.Bound)
} else {
regalloc(&n4, gc.Types[gc.TUINT64], nil)
p1 := gins(ppc64.AMOVD, nil, &n4)
p1.From.Type = obj.TYPE_CONST
p1.From.Offset = nl.Type.Bound
}
}
gins(optoas(gc.OCMP, gc.Types[gc.TUINT64]), &n2, &n4)
if n4.Op == gc.OREGISTER {
regfree(&n4)
}
p1 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT64]), nil, +1)
if p2 != nil {
gc.Patch(p2, gc.Pc)
}
ginscall(gc.Panicindex, 0)
gc.Patch(p1, gc.Pc)
}
if gc.Isconst(nl, gc.CTSTR) {
regalloc(&n3, gc.Types[gc.Tptr], res)
//......... portion of code omitted .........