Author: wheelcomple
Project: go-
func ginscmp(op int, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
	if gc.Isint[t.Etype] && n1.Op == gc.OLITERAL && gc.Smallintconst(n1) && n2.Op != gc.OLITERAL {
		// Reverse comparison to place constant last.
		op = gc.Brrev(op)
		n1, n2 = n2, n1
	}

	// General case.
	var r1, r2, g1, g2 gc.Node
	if n1.Op == gc.ONAME && n1.Class&gc.PHEAP == 0 || n1.Op == gc.OINDREG {
		r1 = *n1
	} else {
		gc.Regalloc(&r1, t, n1)
		gc.Regalloc(&g1, n1.Type, &r1)
		gc.Cgen(n1, &g1)
		gmove(&g1, &r1)
	}
	if n2.Op == gc.OLITERAL && gc.Isint[t.Etype] && gc.Smallintconst(n2) {
		r2 = *n2
	} else {
		gc.Regalloc(&r2, t, n2)
		gc.Regalloc(&g2, n1.Type, &r2)
		gc.Cgen(n2, &g2)
		gmove(&g2, &r2)
	}
	gins(optoas(gc.OCMP, t), &r1, &r2)
	if r1.Op == gc.OREGISTER {
		gc.Regfree(&g1)
		gc.Regfree(&r1)
	}
	if r2.Op == gc.OREGISTER {
		gc.Regfree(&g2)
		gc.Regfree(&r2)
	}
	return gc.Gbranch(optoas(op, t), nil, likely)
}
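The swap above works because reversing the operand order of a comparison requires reversing the operator, which is what gc.Brrev computes (reversal, not negation: a < b is the same predicate as b > a). A minimal standalone sketch of that mapping, with illustrative names:

package main

import "fmt"

// brrev mirrors the idea behind gc.Brrev: the operator that makes
// "a OP b" equivalent to "b OP' a". Note this is reversal, not
// negation: a < b  <=>  b > a.
func brrev(op string) string {
	switch op {
	case "<":
		return ">"
	case ">":
		return "<"
	case "<=":
		return ">="
	case ">=":
		return "<="
	}
	return op // == and != are symmetric
}

func main() {
	a, b := 3, 7
	fmt.Println(a < b, b > a, brrev("<")) // true true >
}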
Author: rentongzhan
Project: g
/*
 * generate array index into res.
 * n might be any size; res is 32-bit.
 * returns Prog* to patch to panic call.
 */
func cgenindex(n *gc.Node, res *gc.Node, bounded bool) *obj.Prog {
	if !gc.Is64(n.Type) {
		gc.Cgen(n, res)
		return nil
	}

	var tmp gc.Node
	gc.Tempname(&tmp, gc.Types[gc.TINT64])
	gc.Cgen(n, &tmp)
	var lo gc.Node
	var hi gc.Node
	split64(&tmp, &lo, &hi)
	gmove(&lo, res)
	if bounded {
		splitclean()
		return nil
	}

	var n1 gc.Node
	gc.Regalloc(&n1, gc.Types[gc.TINT32], nil)
	var n2 gc.Node
	gc.Regalloc(&n2, gc.Types[gc.TINT32], nil)
	var zero gc.Node
	gc.Nodconst(&zero, gc.Types[gc.TINT32], 0)
	gmove(&hi, &n1)
	gmove(&zero, &n2)
	gins(arm.ACMP, &n1, &n2)
	gc.Regfree(&n2)
	gc.Regfree(&n1)
	splitclean()
	return gc.Gbranch(arm.ABNE, nil, -1)
}
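On 32-bit ARM a 64-bit index occupies two words, so cgenindex moves the low word into res and, unless the bounds check was elided, compares the high word against zero; the returned ABNE branch is later patched to the panic call. A plain-Go sketch of the invariant being checked (the helper name is hypothetical):

// fitsIndex32 mirrors the check cgenindex emits: a 64-bit index can be
// used as a 32-bit index only when its high word is zero (this also
// rejects negative indexes, whose high word is all ones).
func fitsIndex32(i int64) (lo uint32, ok bool) {
	lo = uint32(uint64(i))
	hi := uint32(uint64(i) >> 32)
	return lo, hi == 0 // hi != 0 takes the ABNE branch to the panic call
}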
Author: arnold
Project: g
func ginscmp(op int, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
	if gc.Isint[t.Etype] && n1.Op == gc.OLITERAL && n2.Op != gc.OLITERAL {
		// Reverse comparison to place constant last.
		op = gc.Brrev(op)
		n1, n2 = n2, n1
	}

	var r1, r2, g1, g2 gc.Node
	gc.Regalloc(&r1, t, n1)
	gc.Regalloc(&g1, n1.Type, &r1)
	gc.Cgen(n1, &g1)
	gmove(&g1, &r1)
	if gc.Isint[t.Etype] && gc.Isconst(n2, gc.CTINT) {
		ginscon2(optoas(gc.OCMP, t), &r1, n2.Int())
	} else {
		gc.Regalloc(&r2, t, n2)
		gc.Regalloc(&g2, n1.Type, &r2)
		gc.Cgen(n2, &g2)
		gmove(&g2, &r2)
		gcmp(optoas(gc.OCMP, t), &r1, &r2)
		gc.Regfree(&g2)
		gc.Regfree(&r2)
	}
	gc.Regfree(&g1)
	gc.Regfree(&r1)
	return gc.Gbranch(optoas(op, t), nil, likely)
}
Author: arnold
Project: g
func ginscmp(op int, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
	if gc.Isint[t.Etype] && n1.Op == gc.OLITERAL && n1.Int() == 0 && n2.Op != gc.OLITERAL {
		op = gc.Brrev(op)
		n1, n2 = n2, n1
	}

	var r1, r2, g1, g2 gc.Node
	gc.Regalloc(&r1, t, n1)
	gc.Regalloc(&g1, n1.Type, &r1)
	gc.Cgen(n1, &g1)
	gmove(&g1, &r1)
	if gc.Isint[t.Etype] && n2.Op == gc.OLITERAL && n2.Int() == 0 {
		gins(arm.ACMP, &r1, n2)
	} else {
		gc.Regalloc(&r2, t, n2)
		gc.Regalloc(&g2, n1.Type, &r2)
		gc.Cgen(n2, &g2)
		gmove(&g2, &r2)
		gins(optoas(gc.OCMP, t), &r1, &r2)
		gc.Regfree(&g2)
		gc.Regfree(&r2)
	}
	gc.Regfree(&g1)
	gc.Regfree(&r1)
	return gc.Gbranch(optoas(op, t), nil, likely)
}
Author: Greento
Project: g
/*
 * generate an addressable node in res, containing the value of n.
 * n is an array index, and might be any size; res width is <= 32-bit.
 * returns Prog* to patch to panic call.
 */
func igenindex(n *gc.Node, res *gc.Node, bounded bool) *obj.Prog {
	if !gc.Is64(n.Type) {
		if n.Addable && (gc.Simtype[n.Etype] == gc.TUINT32 || gc.Simtype[n.Etype] == gc.TINT32) {
			// nothing to do.
			*res = *n
		} else {
			gc.Tempname(res, gc.Types[gc.TUINT32])
			gc.Cgen(n, res)
		}
		return nil
	}

	var tmp gc.Node
	gc.Tempname(&tmp, gc.Types[gc.TINT64])
	gc.Cgen(n, &tmp)
	var lo gc.Node
	var hi gc.Node
	split64(&tmp, &lo, &hi)
	gc.Tempname(res, gc.Types[gc.TUINT32])
	gmove(&lo, res)
	if bounded {
		splitclean()
		return nil
	}

	var zero gc.Node
	gc.Nodconst(&zero, gc.Types[gc.TINT32], 0)
	gins(x86.ACMPL, &hi, &zero)
	splitclean()
	return gc.Gbranch(x86.AJNE, nil, +1)
}
Author: 4a
Project: g
// generate branch
// n1, n2 are registers
func ginsbranch(as int, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
	p := gc.Gbranch(as, t, likely)
	gc.Naddr(&p.From, n1)
	if n2 != nil {
		p.Reg = n2.Reg
	}
	return p
}
Author: Samurai
Project: g
/*
 * generate floating-point operation.
 */
func cgen_float(n *gc.Node, res *gc.Node) {
	nl := n.Left
	switch n.Op {
	case gc.OEQ,
		gc.ONE,
		gc.OLT,
		gc.OLE,
		gc.OGE:
		p1 := gc.Gbranch(obj.AJMP, nil, 0)
		p2 := gc.Pc
		gmove(gc.Nodbool(true), res)
		p3 := gc.Gbranch(obj.AJMP, nil, 0)
		gc.Patch(p1, gc.Pc)
		gc.Bgen(n, true, 0, p2)
		gmove(gc.Nodbool(false), res)
		gc.Patch(p3, gc.Pc)
		return

	case gc.OPLUS:
		gc.Cgen(nl, res)
		return

	case gc.OCONV:
		if gc.Eqtype(n.Type, nl.Type) || gc.Noconv(n.Type, nl.Type) {
			gc.Cgen(nl, res)
			return
		}

		var n2 gc.Node
		gc.Tempname(&n2, n.Type)
		var n1 gc.Node
		gc.Mgen(nl, &n1, res)
		gmove(&n1, &n2)
		gmove(&n2, res)
		gc.Mfree(&n1)
		return
	}

	if gc.Thearch.Use387 {
		cgen_float387(n, res)
	} else {
		cgen_floatsse(n, res)
	}
}
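For the comparison cases, the boolean result is materialized with explicit branches around two stores rather than a set-on-condition instruction: p1 skips the "true" store, Bgen branches back to it when the condition holds, and p3 skips the "false" store. The same control-flow shape, written as runnable Go with gotos (purely illustrative):

package main

import "fmt"

// cmpViaBranches mirrors the branch shape cgen_float emits for float
// comparisons: the boolean is produced by jumping around two stores.
func cmpViaBranches(x, y float64) (res bool) {
	goto cond // p1: jump past the "true" store
setTrue: // p2: target of Bgen's taken branch
	res = true
	goto done // p3: jump past the "false" store
cond:
	if x < y { // Bgen(n, true, ...): branch when the condition holds
		goto setTrue
	}
	res = false
done:
	return
}

func main() {
	fmt.Println(cmpViaBranches(1, 2), cmpViaBranches(2, 1)) // true false
}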
Author: srei
Project: g
func ginscmp(op gc.Op, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
	if gc.Isint[t.Etype] || t.Etype == gc.Tptr {
		if (n1.Op == gc.OLITERAL || n1.Op == gc.OADDR && n1.Left.Op == gc.ONAME) && n2.Op != gc.OLITERAL {
			// Reverse comparison to place constant (including address constant) last.
			op = gc.Brrev(op)
			n1, n2 = n2, n1
		}
	}

	// General case.
	var r1, r2, g1, g2 gc.Node

	// A special case to make write barriers more efficient.
	// Comparing the first field of a named struct can be done directly.
	base := n1

	if n1.Op == gc.ODOT && n1.Left.Type.Etype == gc.TSTRUCT && n1.Left.Type.Type.Sym == n1.Right.Sym {
		base = n1.Left
	}

	if base.Op == gc.ONAME && base.Class&gc.PHEAP == 0 || n1.Op == gc.OINDREG {
		r1 = *n1
	} else {
		gc.Regalloc(&r1, t, n1)
		gc.Regalloc(&g1, n1.Type, &r1)
		gc.Cgen(n1, &g1)
		gmove(&g1, &r1)
	}
	if n2.Op == gc.OLITERAL && gc.Isint[t.Etype] || n2.Op == gc.OADDR && n2.Left.Op == gc.ONAME && n2.Left.Class == gc.PEXTERN {
		r2 = *n2
	} else {
		gc.Regalloc(&r2, t, n2)
		gc.Regalloc(&g2, n1.Type, &r2)
		gc.Cgen(n2, &g2)
		gmove(&g2, &r2)
	}
	gins(optoas(gc.OCMP, t), &r1, &r2)
	if r1.Op == gc.OREGISTER {
		gc.Regfree(&g1)
		gc.Regfree(&r1)
	}
	if r2.Op == gc.OREGISTER {
		gc.Regfree(&g2)
		gc.Regfree(&r2)
	}
	return gc.Gbranch(optoas(op, t), nil, likely)
}
Author: Greento
Project: g
func ginscmp(op gc.Op, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
	if t.IsInteger() && n1.Op == gc.OLITERAL && gc.Smallintconst(n1) && n2.Op != gc.OLITERAL {
		// Reverse comparison to place constant last.
		op = gc.Brrev(op)
		n1, n2 = n2, n1
	}

	// General case.
	var r1, r2, g1, g2 gc.Node

	// A special case to make write barriers more efficient.
	// Comparing the first field of a named struct can be done directly.
	base := n1

	if n1.Op == gc.ODOT && n1.Left.Type.IsStruct() && n1.Left.Type.Field(0).Sym == n1.Sym {
		base = n1.Left
	}

	if base.Op == gc.ONAME && base.Class != gc.PAUTOHEAP || n1.Op == gc.OINDREG {
		r1 = *n1
	} else {
		gc.Regalloc(&r1, t, n1)
		gc.Regalloc(&g1, n1.Type, &r1)
		gc.Cgen(n1, &g1)
		gmove(&g1, &r1)
	}
	if n2.Op == gc.OLITERAL && t.IsInteger() && gc.Smallintconst(n2) {
		r2 = *n2
	} else {
		gc.Regalloc(&r2, t, n2)
		gc.Regalloc(&g2, n1.Type, &r2)
		gc.Cgen(n2, &g2)
		gmove(&g2, &r2)
	}
	gins(optoas(gc.OCMP, t), &r1, &r2)
	if r1.Op == gc.OREGISTER {
		gc.Regfree(&g1)
		gc.Regfree(&r1)
	}
	if r2.Op == gc.OREGISTER {
		gc.Regfree(&g2)
		gc.Regfree(&r2)
	}
	return gc.Gbranch(optoas(op, t), nil, likely)
}
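The base/ODOT special case leans on a simple layout fact: a struct's first field sits at offset 0, so the field and the struct share an address and no separate address computation is needed. A standalone check of that fact (the type here is made up for illustration):

package main

import (
	"fmt"
	"unsafe"
)

// header is a hypothetical named struct; only its layout matters here.
type header struct {
	flags uint32 // first field, therefore at offset 0
	size  uint32
}

func main() {
	var h header
	fmt.Println(unsafe.Offsetof(h.flags)) // 0
	// Same address: comparing h.flags can address h directly,
	// which is what the base = n1.Left shortcut exploits.
	fmt.Println(unsafe.Pointer(&h) == unsafe.Pointer(&h.flags)) // true
}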
Author: rentongzhan
Project: g
//......... part of the code omitted here .........
		gc.Afunclit(&p.To, f)

		// 8 and 128 = magic constants: see ../../runtime/asm_arm.s
		p.To.Offset = 8 * (128 - int64(c))
		gc.Regfree(&tmp)
		gc.Regfree(&src)
		gc.Regfree(&dst)
		return
	}

	var dst gc.Node
	var src gc.Node
	if n.Ullman >= res.Ullman {
		gc.Agenr(n, &dst, res) // temporarily use dst
		gc.Regalloc(&src, gc.Types[gc.Tptr], nil)
		gins(arm.AMOVW, &dst, &src)
		if res.Op == gc.ONAME {
			gc.Gvardef(res)
		}
		gc.Agen(res, &dst)
	} else {
		if res.Op == gc.ONAME {
			gc.Gvardef(res)
		}
		gc.Agenr(res, &dst, res)
		gc.Agenr(n, &src, nil)
	}

	var tmp gc.Node
	gc.Regalloc(&tmp, gc.Types[gc.TUINT32], nil)

	// set up end marker
	var nend gc.Node
	if c >= 4 {
		gc.Regalloc(&nend, gc.Types[gc.TUINT32], nil)
		p := gins(arm.AMOVW, &src, &nend)
		p.From.Type = obj.TYPE_ADDR
		if dir < 0 {
			p.From.Offset = int64(dir)
		} else {
			p.From.Offset = w
		}
	}

	// move src and dest to the end of block if necessary
	if dir < 0 {
		p := gins(arm.AMOVW, &src, &src)
		p.From.Type = obj.TYPE_ADDR
		p.From.Offset = w + int64(dir)

		p = gins(arm.AMOVW, &dst, &dst)
		p.From.Type = obj.TYPE_ADDR
		p.From.Offset = w + int64(dir)
	}

	// move
	if c >= 4 {
		p := gins(op, &src, &tmp)
		p.From.Type = obj.TYPE_MEM
		p.From.Offset = int64(dir)
		p.Scond |= arm.C_PBIT
		ploop := p

		p = gins(op, &tmp, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = int64(dir)
		p.Scond |= arm.C_PBIT

		p = gins(arm.ACMP, &src, nil)
		raddr(&nend, p)
		gc.Patch(gc.Gbranch(arm.ABNE, nil, 0), ploop)
		gc.Regfree(&nend)
	} else {
		var p *obj.Prog
		for {
			tmp14 := c
			c--
			if tmp14 <= 0 {
				break
			}
			p = gins(op, &src, &tmp)
			p.From.Type = obj.TYPE_MEM
			p.From.Offset = int64(dir)
			p.Scond |= arm.C_PBIT

			p = gins(op, &tmp, &dst)
			p.To.Type = obj.TYPE_MEM
			p.To.Offset = int64(dir)
			p.Scond |= arm.C_PBIT
		}
	}

	gc.Regfree(&dst)
	gc.Regfree(&src)
	gc.Regfree(&tmp)
}
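The dir < 0 adjustment above is the standard overlapping-copy rule: when the destination could overlap beyond the source, move both pointers to the end of the block and walk backwards. The rule itself, sketched in plain Go with a hypothetical helper:

// moveWithin copies n bytes inside buf from srcOff to dstOff, choosing
// the copy direction the way the block-move code chooses dir.
func moveWithin(buf []byte, dstOff, srcOff, n int) {
	if dstOff > srcOff && dstOff < srcOff+n {
		// destination overlaps past the source: copy backwards
		for i := n - 1; i >= 0; i-- {
			buf[dstOff+i] = buf[srcOff+i]
		}
		return
	}
	for i := 0; i < n; i++ {
		buf[dstOff+i] = buf[srcOff+i]
	}
}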
Author: wheelcomple
Project: go-
//......... part of the code omitted here .........
			// MOVW bl<<(v-32), ah
			gshift(arm.AMOVW, &bl, arm.SHIFT_LL, int32(v-32), &ah)
		} else if v == 32 {
			gins(arm.AEOR, &al, &al)
			gins(arm.AMOVW, &bl, &ah)
		} else if v > 0 {
			// MOVW bl<<v, al
			gshift(arm.AMOVW, &bl, arm.SHIFT_LL, int32(v), &al)

			// MOVW bh<<v, ah
			gshift(arm.AMOVW, &bh, arm.SHIFT_LL, int32(v), &ah)

			// OR bl>>(32-v), ah
			gshift(arm.AORR, &bl, arm.SHIFT_LR, int32(32-v), &ah)
		} else {
			gins(arm.AMOVW, &bl, &al)
			gins(arm.AMOVW, &bh, &ah)
		}
		goto olsh_break
	}

	gc.Regalloc(&s, gc.Types[gc.TUINT32], nil)
	gc.Regalloc(&creg, gc.Types[gc.TUINT32], nil)
	if gc.Is64(r.Type) {
		// shift is >= 1<<32
		var cl gc.Node
		var ch gc.Node
		split64(r, &cl, &ch)
		gmove(&ch, &s)
		gins(arm.ATST, &s, nil)
		p6 = gc.Gbranch(arm.ABNE, nil, 0)
		gmove(&cl, &s)
		splitclean()
	} else {
		gmove(r, &s)
		p6 = nil
	}

	gins(arm.ATST, &s, nil)

	// shift == 0
	p1 = gins(arm.AMOVW, &bl, &al)
	p1.Scond = arm.C_SCOND_EQ
	p1 = gins(arm.AMOVW, &bh, &ah)
	p1.Scond = arm.C_SCOND_EQ
	p2 = gc.Gbranch(arm.ABEQ, nil, 0)

	// shift is < 32
	gc.Nodconst(&n1, gc.Types[gc.TUINT32], 32)
	gmove(&n1, &creg)
	gins(arm.ACMP, &s, &creg)

	// MOVW.LO bl<<s, al
	p1 = gregshift(arm.AMOVW, &bl, arm.SHIFT_LL, &s, &al)
	p1.Scond = arm.C_SCOND_LO

	// MOVW.LO bh<<s, ah
	p1 = gregshift(arm.AMOVW, &bh, arm.SHIFT_LL, &s, &ah)
	p1.Scond = arm.C_SCOND_LO
Author: srei
Project: g
func floatmove_387(f *gc.Node, t *gc.Node) {
	var r1 gc.Node
	var a int

	ft := gc.Simsimtype(f.Type)
	tt := gc.Simsimtype(t.Type)
	cvt := t.Type

	switch uint32(ft)<<16 | uint32(tt) {
	default:
		goto fatal

	/*
	 * float to integer
	 */
	case gc.TFLOAT32<<16 | gc.TINT16,
		gc.TFLOAT32<<16 | gc.TINT32,
		gc.TFLOAT32<<16 | gc.TINT64,
		gc.TFLOAT64<<16 | gc.TINT16,
		gc.TFLOAT64<<16 | gc.TINT32,
		gc.TFLOAT64<<16 | gc.TINT64:
		if t.Op == gc.OREGISTER {
			goto hardmem
		}

		var r1 gc.Node
		gc.Nodreg(&r1, gc.Types[ft], x86.REG_F0)
		if f.Op != gc.OREGISTER {
			if ft == gc.TFLOAT32 {
				gins(x86.AFMOVF, f, &r1)
			} else {
				gins(x86.AFMOVD, f, &r1)
			}
		}

		// set round to zero mode during conversion
		var t1 gc.Node
		memname(&t1, gc.Types[gc.TUINT16])

		var t2 gc.Node
		memname(&t2, gc.Types[gc.TUINT16])
		gins(x86.AFSTCW, nil, &t1)
		gins(x86.AMOVW, ncon(0xf7f), &t2)
		gins(x86.AFLDCW, &t2, nil)
		if tt == gc.TINT16 {
			gins(x86.AFMOVWP, &r1, t)
		} else if tt == gc.TINT32 {
			gins(x86.AFMOVLP, &r1, t)
		} else {
			gins(x86.AFMOVVP, &r1, t)
		}
		gins(x86.AFLDCW, &t1, nil)
		return

	// convert via int32.
	case gc.TFLOAT32<<16 | gc.TINT8,
		gc.TFLOAT32<<16 | gc.TUINT16,
		gc.TFLOAT32<<16 | gc.TUINT8,
		gc.TFLOAT64<<16 | gc.TINT8,
		gc.TFLOAT64<<16 | gc.TUINT16,
		gc.TFLOAT64<<16 | gc.TUINT8:
		var t1 gc.Node
		gc.Tempname(&t1, gc.Types[gc.TINT32])

		gmove(f, &t1)
		switch tt {
		default:
			gc.Fatalf("gmove %v", t)

		case gc.TINT8:
			gins(x86.ACMPL, &t1, ncon(-0x80&(1<<32-1)))
			p1 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TINT32]), nil, -1)
			gins(x86.ACMPL, &t1, ncon(0x7f))
			p2 := gc.Gbranch(optoas(gc.OGT, gc.Types[gc.TINT32]), nil, -1)
			p3 := gc.Gbranch(obj.AJMP, nil, 0)
			gc.Patch(p1, gc.Pc)
			gc.Patch(p2, gc.Pc)
			gmove(ncon(-0x80&(1<<32-1)), &t1)
			gc.Patch(p3, gc.Pc)
			gmove(&t1, t)

		case gc.TUINT8:
			gins(x86.ATESTL, ncon(0xffffff00), &t1)
			p1 := gc.Gbranch(x86.AJEQ, nil, +1)
			gins(x86.AMOVL, ncon(0), &t1)
			gc.Patch(p1, gc.Pc)
			gmove(&t1, t)

		case gc.TUINT16:
			gins(x86.ATESTL, ncon(0xffff0000), &t1)
			p1 := gc.Gbranch(x86.AJEQ, nil, +1)
			gins(x86.AMOVL, ncon(0), &t1)
			gc.Patch(p1, gc.Pc)
			gmove(&t1, t)
		}

		return

	// convert via int64.
	case gc.TFLOAT32<<16 | gc.TUINT32,
		gc.TFLOAT64<<16 | gc.TUINT32:
//......... part of the code omitted here .........
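The 0xf7f store into the x87 control word forces round-to-zero during the float-to-int stores, because Go's float-to-integer conversion truncates toward zero rather than using the FPU's default round-to-nearest. A quick standalone check of the semantics the code must implement:

package main

import "fmt"

func main() {
	// Go float→int conversion truncates toward zero, so floatmove_387
	// temporarily loads 0xf7f (round-to-zero) into the x87 control word
	// before its FMOV*P stores, then restores the old mode.
	fmt.Println(int32(1.9), int32(-1.9)) // 1 -1
}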
Author: Greento
Project: g
//......... part of the code omitted here .........
		dir = -dir
	}

	var dst gc.Node
	var src gc.Node
	if n.Ullman >= res.Ullman {
		gc.Agenr(n, &dst, res) // temporarily use dst
		gc.Regalloc(&src, gc.Types[gc.Tptr], nil)
		gins(arm64.AMOVD, &dst, &src)
		if res.Op == gc.ONAME {
			gc.Gvardef(res)
		}
		gc.Agen(res, &dst)
	} else {
		if res.Op == gc.ONAME {
			gc.Gvardef(res)
		}
		gc.Agenr(res, &dst, res)
		gc.Agenr(n, &src, nil)
	}

	var tmp gc.Node
	gc.Regalloc(&tmp, gc.Types[gc.Tptr], nil)

	// set up end marker
	var nend gc.Node

	// move src and dest to the end of block if necessary
	if dir < 0 {
		if c >= 4 {
			gc.Regalloc(&nend, gc.Types[gc.Tptr], nil)
			gins(arm64.AMOVD, &src, &nend)
		}

		p := gins(arm64.AADD, nil, &src)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = w

		p = gins(arm64.AADD, nil, &dst)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = w
	} else {
		p := gins(arm64.AADD, nil, &src)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(-dir)

		p = gins(arm64.AADD, nil, &dst)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(-dir)

		if c >= 4 {
			gc.Regalloc(&nend, gc.Types[gc.Tptr], nil)
			p := gins(arm64.AMOVD, &src, &nend)
			p.From.Type = obj.TYPE_ADDR
			p.From.Offset = w
		}
	}

	// move
	// TODO: enable duffcopy for larger copies.
	if c >= 4 {
		p := gins(op, &src, &tmp)
		p.From.Type = obj.TYPE_MEM
		p.From.Offset = int64(dir)
		p.Scond = arm64.C_XPRE
		ploop := p

		p = gins(op, &tmp, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = int64(dir)
		p.Scond = arm64.C_XPRE

		p = gcmp(arm64.ACMP, &src, &nend)
		gc.Patch(gc.Gbranch(arm64.ABNE, nil, 0), ploop)
		gc.Regfree(&nend)
	} else {
		// TODO(austin): Instead of generating ADD $-8,R8; ADD
		// $-8,R7; n*(MOVDU 8(R8),R9; MOVDU R9,8(R7);) just
		// generate the offsets directly and eliminate the
		// ADDs. That will produce shorter, more
		// pipeline-able code.
		var p *obj.Prog
		for ; c > 0; c-- {
			p = gins(op, &src, &tmp)
			p.From.Type = obj.TYPE_MEM
			p.From.Offset = int64(dir)
			p.Scond = arm64.C_XPRE

			p = gins(op, &tmp, &dst)
			p.To.Type = obj.TYPE_MEM
			p.To.Offset = int64(dir)
			p.Scond = arm64.C_XPRE
		}
	}

	gc.Regfree(&dst)
	gc.Regfree(&src)
	gc.Regfree(&tmp)
}
Author: Ericea
Project: g
/*
 * generate shift according to op, one of:
 *	res = nl << nr
 *	res = nl >> nr
 */
func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
	a := int(optoas(op, nl.Type))

	if nr.Op == gc.OLITERAL {
		var n1 gc.Node
		gc.Regalloc(&n1, nl.Type, res)
		gc.Cgen(nl, &n1)
		sc := uint64(nr.Int())
		if sc >= uint64(nl.Type.Width*8) {
			// large shift gets 2 shifts by width-1
			var n3 gc.Node
			gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)

			gins(a, &n3, &n1)
			gins(a, &n3, &n1)
		} else {
			gins(a, nr, &n1)
		}
		gmove(&n1, res)
		gc.Regfree(&n1)
		return
	}

	if nl.Ullman >= gc.UINF {
		var n4 gc.Node
		gc.Tempname(&n4, nl.Type)
		gc.Cgen(nl, &n4)
		nl = &n4
	}

	if nr.Ullman >= gc.UINF {
		var n5 gc.Node
		gc.Tempname(&n5, nr.Type)
		gc.Cgen(nr, &n5)
		nr = &n5
	}

	// Allow either uint32 or uint64 as shift type,
	// to avoid unnecessary conversion from uint32 to uint64
	// just to do the comparison.
	tcount := gc.Types[gc.Simtype[nr.Type.Etype]]

	if tcount.Etype < gc.TUINT32 {
		tcount = gc.Types[gc.TUINT32]
	}

	var n1 gc.Node
	gc.Regalloc(&n1, nr.Type, nil) // to hold the shift type in CX
	var n3 gc.Node
	gc.Regalloc(&n3, tcount, &n1) // to clear high bits of CX

	var n2 gc.Node
	gc.Regalloc(&n2, nl.Type, res)

	if nl.Ullman >= nr.Ullman {
		gc.Cgen(nl, &n2)
		gc.Cgen(nr, &n1)
		gmove(&n1, &n3)
	} else {
		gc.Cgen(nr, &n1)
		gmove(&n1, &n3)
		gc.Cgen(nl, &n2)
	}

	gc.Regfree(&n3)

	// test and fix up large shifts
	if !bounded {
		gc.Nodconst(&n3, tcount, nl.Type.Width*8)
		gins(optoas(gc.OCMP, tcount), &n1, &n3)
		p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, tcount), nil, +1))
		if op == gc.ORSH && gc.Issigned[nl.Type.Etype] {
			gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
			gins(a, &n3, &n2)
		} else {
			gc.Nodconst(&n3, nl.Type, 0)
			gmove(&n3, &n2)
		}

		gc.Patch(p1, gc.Pc)
	}

	gins(a, &n1, &n2)
	gmove(&n2, res)
	gc.Regfree(&n1)
	gc.Regfree(&n2)
}
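The bounded/fixup logic exists because Go, unlike the underlying hardware, defines shifts by counts greater than or equal to the operand width: left shifts and unsigned right shifts produce 0, and signed right shifts produce all sign bits (hence the "two shifts by width-1" trick for constant counts). At the language level:

package main

import "fmt"

func main() {
	var s uint = 40 // shift count >= the 32-bit operand width
	var x int32 = -8
	var y uint32 = 123
	fmt.Println(x >> s) // -1: signed right shift fills with the sign bit
	fmt.Println(y << s) // 0: left shift by >= width is defined as 0
	fmt.Println(y >> s) // 0
}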
Author: srei
Project: g
/*
 * generate comparison of nl, nr, both 64-bit.
 * nl is memory; nr is constant or memory.
 */
func cmp64(nl *gc.Node, nr *gc.Node, op gc.Op, likely int, to *obj.Prog) {
	var lo1 gc.Node
	var hi1 gc.Node
	var lo2 gc.Node
	var hi2 gc.Node
	var rr gc.Node
	split64(nl, &lo1, &hi1)
	split64(nr, &lo2, &hi2)

	// compare most significant word;
	// if they differ, we're done.
	t := hi1.Type

	if nl.Op == gc.OLITERAL || nr.Op == gc.OLITERAL {
		gins(x86.ACMPL, &hi1, &hi2)
	} else {
		gc.Regalloc(&rr, gc.Types[gc.TINT32], nil)
		gins(x86.AMOVL, &hi1, &rr)
		gins(x86.ACMPL, &rr, &hi2)
		gc.Regfree(&rr)
	}

	var br *obj.Prog
	switch op {
	default:
		gc.Fatalf("cmp64 %v %v", gc.Oconv(int(op), 0), t)

	// cmp hi
	// jne L
	// cmp lo
	// jeq to
	// L:
	case gc.OEQ:
		br = gc.Gbranch(x86.AJNE, nil, -likely)

	// cmp hi
	// jne to
	// cmp lo
	// jne to
	case gc.ONE:
		gc.Patch(gc.Gbranch(x86.AJNE, nil, likely), to)

	// cmp hi
	// jgt to
	// jlt L
	// cmp lo
	// jge to (or jgt to)
	// L:
	case gc.OGE,
		gc.OGT:
		gc.Patch(gc.Gbranch(optoas(gc.OGT, t), nil, likely), to)
		br = gc.Gbranch(optoas(gc.OLT, t), nil, -likely)

	// cmp hi
	// jlt to
	// jgt L
	// cmp lo
	// jle to (or jlt to)
	// L:
	case gc.OLE,
		gc.OLT:
		gc.Patch(gc.Gbranch(optoas(gc.OLT, t), nil, likely), to)
		br = gc.Gbranch(optoas(gc.OGT, t), nil, -likely)
	}

	// compare least significant word
	t = lo1.Type

	if nl.Op == gc.OLITERAL || nr.Op == gc.OLITERAL {
		gins(x86.ACMPL, &lo1, &lo2)
	} else {
		gc.Regalloc(&rr, gc.Types[gc.TINT32], nil)
		gins(x86.AMOVL, &lo1, &rr)
		gins(x86.ACMPL, &rr, &lo2)
		gc.Regfree(&rr)
	}

	// jump again
	gc.Patch(gc.Gbranch(optoas(op, t), nil, likely), to)

	// point first branch down here if appropriate
	if br != nil {
		gc.Patch(br, gc.Pc)
	}

	splitclean()
	splitclean()
}
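cmp64 builds a 64-bit compare out of 32-bit instructions: the high words are compared first (signed, deciding the result whenever they differ), and only on equality does control fall through to an unsigned compare of the low words. The same decision procedure in ordinary Go (a sketch, not compiler code):

// lessInt64via32 compares two int64 values with 32-bit operations only,
// the way cmp64 sequences its CMPL instructions: a signed compare of
// the high words decides unless they are equal, then the low words
// compare unsigned.
func lessInt64via32(a, b int64) bool {
	ahi, bhi := int32(a>>32), int32(b>>32)
	if ahi != bhi {
		return ahi < bhi
	}
	return uint32(a) < uint32(b)
}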
Author: Samurai
Project: g
/*
 * generate division.
 * caller must set:
 *	ax = allocated AX register
 *	dx = allocated DX register
 * generates one of:
 *	res = nl / nr
 *	res = nl % nr
 * according to op.
 */
func dodiv(op gc.Op, nl *gc.Node, nr *gc.Node, res *gc.Node, ax *gc.Node, dx *gc.Node) {
	// Have to be careful about handling
	// most negative int divided by -1 correctly.
	// The hardware will trap.
	// Also the byte divide instruction needs AH,
	// which we otherwise don't have to deal with.
	// Easiest way to avoid for int8, int16: use int32.
	// For int32 and int64, use explicit test.
	// Could use int64 hw for int32.
	t := nl.Type

	t0 := t
	check := false
	if gc.Issigned[t.Etype] {
		check = true
		if gc.Isconst(nl, gc.CTINT) && nl.Int() != -1<<uint64(t.Width*8-1) {
			check = false
		} else if gc.Isconst(nr, gc.CTINT) && nr.Int() != -1 {
			check = false
		}
	}

	if t.Width < 4 {
		if gc.Issigned[t.Etype] {
			t = gc.Types[gc.TINT32]
		} else {
			t = gc.Types[gc.TUINT32]
		}
		check = false
	}

	var t1 gc.Node
	gc.Tempname(&t1, t)
	var t2 gc.Node
	gc.Tempname(&t2, t)
	if t0 != t {
		var t3 gc.Node
		gc.Tempname(&t3, t0)
		var t4 gc.Node
		gc.Tempname(&t4, t0)
		gc.Cgen(nl, &t3)
		gc.Cgen(nr, &t4)

		// Convert.
		gmove(&t3, &t1)
		gmove(&t4, &t2)
	} else {
		gc.Cgen(nl, &t1)
		gc.Cgen(nr, &t2)
	}

	var n1 gc.Node
	if !gc.Samereg(ax, res) && !gc.Samereg(dx, res) {
		gc.Regalloc(&n1, t, res)
	} else {
		gc.Regalloc(&n1, t, nil)
	}
	gmove(&t2, &n1)
	gmove(&t1, ax)
	var p2 *obj.Prog
	var n4 gc.Node
	if gc.Nacl {
		// Native Client does not relay the divide-by-zero trap
		// to the executing program, so we must insert a check
		// for ourselves.
		gc.Nodconst(&n4, t, 0)

		gins(optoas(gc.OCMP, t), &n1, &n4)
		p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
		if panicdiv == nil {
			panicdiv = gc.Sysfunc("panicdivide")
		}
		gc.Ginscall(panicdiv, -1)
		gc.Patch(p1, gc.Pc)
	}

	if check {
		gc.Nodconst(&n4, t, -1)
		gins(optoas(gc.OCMP, t), &n1, &n4)
		p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
		if op == gc.ODIV {
			// a / (-1) is -a.
			gins(optoas(gc.OMINUS, t), nil, ax)
			gmove(ax, res)
		} else {
			// a % (-1) is 0.
			gc.Nodconst(&n4, t, 0)
//......... part of the code omitted here .........
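The check path above exists because the hardware divide instruction traps on the one overflowing case, while the Go spec defines it: dividing the most negative value by -1 yields the dividend back (the quotient wraps) with a remainder of 0. That is precisely what the OMINUS and zero-constant fixups emit. The required semantics at the language level:

package main

import "fmt"

func main() {
	x := int32(-1 << 31) // most negative int32
	d := int32(-1)
	// A raw IDIV would trap here; the compiled code detects d == -1 and
	// substitutes x / -1 = -x (which wraps back to x) and x % -1 = 0.
	fmt.Println(x/d, x%d) // -2147483648 0
}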
Author: Samurai
Project: g
/*
 * generate shift according to op, one of:
 *	res = nl << nr
 *	res = nl >> nr
 */
func cgen_shift(op gc.Op, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
	if nl.Type.Width > 4 {
		gc.Fatalf("cgen_shift %v", nl.Type)
	}

	w := int(nl.Type.Width * 8)

	a := optoas(op, nl.Type)

	if nr.Op == gc.OLITERAL {
		var n2 gc.Node
		gc.Tempname(&n2, nl.Type)
		gc.Cgen(nl, &n2)
		var n1 gc.Node
		gc.Regalloc(&n1, nl.Type, res)
		gmove(&n2, &n1)
		sc := uint64(nr.Int())
		if sc >= uint64(nl.Type.Width*8) {
			// large shift gets 2 shifts by width-1
			gins(a, ncon(uint32(w)-1), &n1)
			gins(a, ncon(uint32(w)-1), &n1)
		} else {
			gins(a, nr, &n1)
		}
		gmove(&n1, res)
		gc.Regfree(&n1)
		return
	}

	var oldcx gc.Node
	var cx gc.Node
	gc.Nodreg(&cx, gc.Types[gc.TUINT32], x86.REG_CX)
	if gc.GetReg(x86.REG_CX) > 1 && !gc.Samereg(&cx, res) {
		gc.Tempname(&oldcx, gc.Types[gc.TUINT32])
		gmove(&cx, &oldcx)
	}

	var n1 gc.Node
	var nt gc.Node
	if nr.Type.Width > 4 {
		gc.Tempname(&nt, nr.Type)
		n1 = nt
	} else {
		gc.Nodreg(&n1, gc.Types[gc.TUINT32], x86.REG_CX)
		gc.Regalloc(&n1, nr.Type, &n1) // to hold the shift type in CX
	}

	var n2 gc.Node
	if gc.Samereg(&cx, res) {
		gc.Regalloc(&n2, nl.Type, nil)
	} else {
		gc.Regalloc(&n2, nl.Type, res)
	}
	if nl.Ullman >= nr.Ullman {
		gc.Cgen(nl, &n2)
		gc.Cgen(nr, &n1)
	} else {
		gc.Cgen(nr, &n1)
		gc.Cgen(nl, &n2)
	}

	// test and fix up large shifts
	if bounded {
		if nr.Type.Width > 4 {
			// delayed reg alloc
			gc.Nodreg(&n1, gc.Types[gc.TUINT32], x86.REG_CX)

			gc.Regalloc(&n1, gc.Types[gc.TUINT32], &n1) // to hold the shift type in CX
			var lo gc.Node
			var hi gc.Node
			split64(&nt, &lo, &hi)
			gmove(&lo, &n1)
			splitclean()
		}
	} else {
		var p1 *obj.Prog
		if nr.Type.Width > 4 {
			// delayed reg alloc
			gc.Nodreg(&n1, gc.Types[gc.TUINT32], x86.REG_CX)

			gc.Regalloc(&n1, gc.Types[gc.TUINT32], &n1) // to hold the shift type in CX
			var lo gc.Node
			var hi gc.Node
			split64(&nt, &lo, &hi)
			gmove(&lo, &n1)
			gins(optoas(gc.OCMP, gc.Types[gc.TUINT32]), &hi, ncon(0))
			p2 := gc.Gbranch(optoas(gc.ONE, gc.Types[gc.TUINT32]), nil, +1)
			gins(optoas(gc.OCMP, gc.Types[gc.TUINT32]), &n1, ncon(uint32(w)))
			p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
			splitclean()
			gc.Patch(p2, gc.Pc)
		} else {
			gins(optoas(gc.OCMP, nr.Type), &n1, ncon(uint32(w)))
			p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
//......... part of the code omitted here .........
Author: wheelcomple
Project: go-
//......... part of the code omitted here .........
		gc.TFLOAT64<<16 | gc.TUINT8:
		cvt = gc.Types[gc.TINT32]
		goto hard

	// convert via int64.
	case gc.TFLOAT32<<16 | gc.TUINT32,
		gc.TFLOAT64<<16 | gc.TUINT32:
		cvt = gc.Types[gc.TINT64]
		goto hard

	// algorithm is:
	//	if small enough, use native float64 -> int64 conversion.
	//	otherwise, subtract 2^63, convert, and add it back.
	case gc.TFLOAT32<<16 | gc.TUINT64,
		gc.TFLOAT64<<16 | gc.TUINT64:
		a := x86.ACVTTSS2SQ
		if ft == gc.TFLOAT64 {
			a = x86.ACVTTSD2SQ
		}
		bignodes()
		var r1 gc.Node
		gc.Regalloc(&r1, gc.Types[ft], nil)
		var r2 gc.Node
		gc.Regalloc(&r2, gc.Types[tt], t)
		var r3 gc.Node
		gc.Regalloc(&r3, gc.Types[ft], nil)
		var r4 gc.Node
		gc.Regalloc(&r4, gc.Types[tt], nil)
		gins(optoas(gc.OAS, f.Type), f, &r1)
		gins(optoas(gc.OCMP, f.Type), &bigf, &r1)
		p1 := gc.Gbranch(optoas(gc.OLE, f.Type), nil, +1)
		gins(a, &r1, &r2)
		p2 := gc.Gbranch(obj.AJMP, nil, 0)
		gc.Patch(p1, gc.Pc)
		gins(optoas(gc.OAS, f.Type), &bigf, &r3)
		gins(optoas(gc.OSUB, f.Type), &r3, &r1)
		gins(a, &r1, &r2)
		gins(x86.AMOVQ, &bigi, &r4)
		gins(x86.AXORQ, &r4, &r2)
		gc.Patch(p2, gc.Pc)
		gmove(&r2, t)
		gc.Regfree(&r4)
		gc.Regfree(&r3)
		gc.Regfree(&r2)
		gc.Regfree(&r1)
		return

	/*
	 * integer to float
	 */
	case gc.TINT32<<16 | gc.TFLOAT32:
		a = x86.ACVTSL2SS
		goto rdst

	case gc.TINT32<<16 | gc.TFLOAT64:
		a = x86.ACVTSL2SD
		goto rdst

	case gc.TINT64<<16 | gc.TFLOAT32:
		a = x86.ACVTSQ2SS
		goto rdst
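The TUINT64 cases implement the algorithm described in their comment: values below 2^63 go straight through the signed CVTTS*2SQ conversion, while larger ones are biased down by 2^63 first and have the top bit restored afterwards (the XORQ with bigi). The same algorithm in plain Go; the helper name is invented for illustration:

// f64toU64 mirrors the two-path conversion above: CVTTSD2SQ only covers
// the signed int64 range, so values >= 2^63 are biased down and patched up.
func f64toU64(f float64) uint64 {
	const big = float64(1 << 63) // bigf: 2^63, exactly representable
	if f < big {
		return uint64(int64(f)) // small enough: direct signed conversion
	}
	// subtract 2^63, convert, then restore the high bit; since that bit
	// is clear after the biased conversion, XOR acts as "add it back"
	return uint64(int64(f-big)) ^ (1 << 63)
}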
Author: arnold
Project: g
func clearfat(nl *gc.Node) {
	/* clear a fat object */
	if gc.Debug['g'] != 0 {
		gc.Dump("\nclearfat", nl)
	}

	w := uint32(nl.Type.Width)

	// Avoid taking the address for simple enough types.
	if gc.Componentgen(nil, nl) {
		return
	}

	c := w % 4 // bytes
	q := w / 4 // quads

	var r0 gc.Node
	r0.Op = gc.OREGISTER
	r0.Reg = arm.REG_R0
	var r1 gc.Node
	r1.Op = gc.OREGISTER
	r1.Reg = arm.REG_R1
	var dst gc.Node
	gc.Regalloc(&dst, gc.Types[gc.Tptr], &r1)
	gc.Agen(nl, &dst)
	var nc gc.Node
	gc.Nodconst(&nc, gc.Types[gc.TUINT32], 0)
	var nz gc.Node
	gc.Regalloc(&nz, gc.Types[gc.TUINT32], &r0)
	gc.Cgen(&nc, &nz)
	if q > 128 {
		var end gc.Node
		gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
		p := gins(arm.AMOVW, &dst, &end)
		p.From.Type = obj.TYPE_ADDR
		p.From.Offset = int64(q) * 4

		p = gins(arm.AMOVW, &nz, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = 4
		p.Scond |= arm.C_PBIT
		pl := p

		p = gins(arm.ACMP, &dst, nil)
		raddr(&end, p)
		gc.Patch(gc.Gbranch(arm.ABNE, nil, 0), pl)
		gc.Regfree(&end)
	} else if q >= 4 && !gc.Nacl {
		f := gc.Sysfunc("duffzero")
		p := gins(obj.ADUFFZERO, nil, f)
		gc.Afunclit(&p.To, f)

		// 4 and 128 = magic constants: see ../../runtime/asm_arm.s
		p.To.Offset = 4 * (128 - int64(q))
	} else {
		var p *obj.Prog
		for q > 0 {
			p = gins(arm.AMOVW, &nz, &dst)
			p.To.Type = obj.TYPE_MEM
			p.To.Offset = 4
			p.Scond |= arm.C_PBIT

			//print("1. %v\n", p);
			q--
		}
	}

	var p *obj.Prog
	for c > 0 {
		p = gins(arm.AMOVB, &nz, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = 1
		p.Scond |= arm.C_PBIT

		//print("2. %v\n", p);
		c--
	}

	gc.Regfree(&dst)
	gc.Regfree(&nz)
}
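clearfat splits the object's width into 4-byte quads plus leftover bytes, then picks a strategy by size: a compare-and-branch loop above 128 quads, a jump into duffzero for 4 or more (the 4 * (128 - q) offset enters Duff's device so exactly q stores execute), and an unrolled store loop otherwise. The decomposition itself, as a plain-Go sketch with a hypothetical helper:

// zeroFat mirrors clearfat's split of w bytes into q quad stores (MOVW)
// followed by c byte stores (MOVB).
func zeroFat(b []byte) {
	q, c := len(b)/4, len(b)%4
	for i := 0; i < 4*q; i += 4 {
		b[i], b[i+1], b[i+2], b[i+3] = 0, 0, 0, 0 // one MOVW per quad
	}
	for i := 4 * q; i < 4*q+c; i++ {
		b[i] = 0 // one MOVB per trailing byte
	}
}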
Author: arnold
Project: g
//......... part of the code omitted here .........
		if w < 32 && op == gc.OLSH {
			gins(optoas(gc.OAS, nl.Type), &n1, &n1)
		}
		gmove(&n1, res)
		gc.Regfree(&n1)
		return
	}

	tr := nr.Type
	var t gc.Node
	var n1 gc.Node
	var n2 gc.Node
	var n3 gc.Node
	if tr.Width > 4 {
		var nt gc.Node
		gc.Tempname(&nt, nr.Type)
		if nl.Ullman >= nr.Ullman {
			gc.Regalloc(&n2, nl.Type, res)
			gc.Cgen(nl, &n2)
			gc.Cgen(nr, &nt)
			n1 = nt
		} else {
			gc.Cgen(nr, &nt)
			gc.Regalloc(&n2, nl.Type, res)
			gc.Cgen(nl, &n2)
		}

		var hi gc.Node
		var lo gc.Node
		split64(&nt, &lo, &hi)
		gc.Regalloc(&n1, gc.Types[gc.TUINT32], nil)
		gc.Regalloc(&n3, gc.Types[gc.TUINT32], nil)
		gmove(&lo, &n1)
		gmove(&hi, &n3)
		splitclean()
		gins(arm.ATST, &n3, nil)
		gc.Nodconst(&t, gc.Types[gc.TUINT32], int64(w))
		p1 := gins(arm.AMOVW, &t, &n1)
		p1.Scond = arm.C_SCOND_NE
		tr = gc.Types[gc.TUINT32]
		gc.Regfree(&n3)
	} else {
		if nl.Ullman >= nr.Ullman {
			gc.Regalloc(&n2, nl.Type, res)
			gc.Cgen(nl, &n2)
			gc.Regalloc(&n1, nr.Type, nil)
			gc.Cgen(nr, &n1)
		} else {
			gc.Regalloc(&n1, nr.Type, nil)
			gc.Cgen(nr, &n1)
			gc.Regalloc(&n2, nl.Type, res)
			gc.Cgen(nl, &n2)
		}
	}

	// test for shift being 0
	gins(arm.ATST, &n1, nil)

	p3 := gc.Gbranch(arm.ABEQ, nil, -1)

	// test and fix up large shifts
	// TODO: if(!bounded), don't emit some of this.
	gc.Regalloc(&n3, tr, nil)

	gc.Nodconst(&t, gc.Types[gc.TUINT32], int64(w))
	gmove(&t, &n3)
	gins(arm.ACMP, &n1, &n3)
	if op == gc.ORSH {
		var p1 *obj.Prog
		var p2 *obj.Prog
		if gc.Issigned[nl.Type.Etype] {
			p1 = gshift(arm.AMOVW, &n2, arm.SHIFT_AR, int32(w)-1, &n2)
			p2 = gregshift(arm.AMOVW, &n2, arm.SHIFT_AR, &n1, &n2)
		} else {
			p1 = gins(arm.AEOR, &n2, &n2)
			p2 = gregshift(arm.AMOVW, &n2, arm.SHIFT_LR, &n1, &n2)
		}

		p1.Scond = arm.C_SCOND_HS
		p2.Scond = arm.C_SCOND_LO
	} else {
		p1 := gins(arm.AEOR, &n2, &n2)
		p2 := gregshift(arm.AMOVW, &n2, arm.SHIFT_LL, &n1, &n2)
		p1.Scond = arm.C_SCOND_HS
		p2.Scond = arm.C_SCOND_LO
	}

	gc.Regfree(&n3)

	gc.Patch(p3, gc.Pc)

	// Left-shift of smaller word must be sign/zero-extended.
	if w < 32 && op == gc.OLSH {
		gins(optoas(gc.OAS, nl.Type), &n2, &n2)
	}
	gmove(&n2, res)
	gc.Regfree(&n1)
	gc.Regfree(&n2)
}