Author: bibbyflyawa
Project: g
func ginscmp(op int, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
    if gc.Isint[t.Etype] && n1.Op == gc.OLITERAL && n2.Op != gc.OLITERAL {
        // Reverse comparison to place constant last.
        op = gc.Brrev(op)
        n1, n2 = n2, n1
    }
    var r1, r2, g1, g2 gc.Node
    gc.Regalloc(&r1, t, n1)
    gc.Regalloc(&g1, n1.Type, &r1)
    gc.Cgen(n1, &g1)
    gmove(&g1, &r1)
    if gc.Isint[t.Etype] && gc.Isconst(n2, gc.CTINT) {
        ginscon2(optoas(gc.OCMP, t), &r1, n2.Int())
    } else {
        gc.Regalloc(&r2, t, n2)
        gc.Regalloc(&g2, n1.Type, &r2)
        gc.Cgen(n2, &g2)
        gmove(&g2, &r2)
        rawgins(optoas(gc.OCMP, t), &r1, &r2)
        gc.Regfree(&g2)
        gc.Regfree(&r2)
    }
    gc.Regfree(&g1)
    gc.Regfree(&r1)
    return gc.Gbranch(optoas(op, t), nil, likely)
}
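The operand swap above is only sound because the operator is reversed at the same time: a < b and b > a are the same test. A minimal standalone sketch of that invariant (plain Go, not the gc package; brrev is a hypothetical stand-in for gc.Brrev):

package main

import "fmt"

// brrev reverses a comparison operator so that swapping the operands
// leaves the result unchanged: a < b is equivalent to b > a.
func brrev(op string) string {
    switch op {
    case "<":
        return ">"
    case ">":
        return "<"
    case "<=":
        return ">="
    case ">=":
        return "<="
    }
    return op // == and != are symmetric
}

func main() {
    a, b := 3, 7
    fmt.Println(a < b, b > a, brrev("<")) // true true >
}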
Author: kluesk
Project: go-akaro
/*
 * call to n has already been generated.
 * generate:
 *    res = &return value from call.
 */
func cgen_aret(n *gc.Node, res *gc.Node) {
    t := n.Left.Type
    if gc.Isptr[t.Etype] {
        t = t.Type
    }
    var flist gc.Iter
    fp := gc.Structfirst(&flist, gc.Getoutarg(t))
    if fp == nil {
        gc.Fatal("cgen_aret: nil")
    }
    var nod1 gc.Node
    nod1.Op = gc.OINDREG
    nod1.Val.U.Reg = x86.REG_SP
    nod1.Addable = 1
    nod1.Xoffset = fp.Width
    nod1.Type = fp.Type
    if res.Op != gc.OREGISTER {
        var nod2 gc.Node
        regalloc(&nod2, gc.Types[gc.Tptr], res)
        gins(x86.ALEAL, &nod1, &nod2)
        gins(x86.AMOVL, &nod2, res)
        regfree(&nod2)
    } else {
        gins(x86.ALEAL, &nod1, res)
    }
}
Author: bibbyflyawa
Project: g
func ginscmp(op int, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
    if gc.Isint[t.Etype] && n1.Op == gc.OLITERAL && n1.Int() == 0 && n2.Op != gc.OLITERAL {
        // Reverse comparison to place constant 0 last.
        op = gc.Brrev(op)
        n1, n2 = n2, n1
    }
    var r1, r2, g1, g2 gc.Node
    gc.Regalloc(&r1, t, n1)
    gc.Regalloc(&g1, n1.Type, &r1)
    gc.Cgen(n1, &g1)
    gmove(&g1, &r1)
    if gc.Isint[t.Etype] && n2.Op == gc.OLITERAL && n2.Int() == 0 {
        gins(arm.ACMP, &r1, n2)
    } else {
        gc.Regalloc(&r2, t, n2)
        gc.Regalloc(&g2, n1.Type, &r2)
        gc.Cgen(n2, &g2)
        gmove(&g2, &r2)
        gins(optoas(gc.OCMP, t), &r1, &r2)
        gc.Regfree(&g2)
        gc.Regfree(&r2)
    }
    gc.Regfree(&g1)
    gc.Regfree(&r1)
    return gc.Gbranch(optoas(op, t), nil, likely)
}
Author: xslonepiec
Project: goio
/*
 * n is a 64-bit value. fill in lo and hi to refer to its 32-bit halves.
 */
func split64(n *gc.Node, lo *gc.Node, hi *gc.Node) {
    if !gc.Is64(n.Type) {
        gc.Fatal("split64 %v", gc.Tconv(n.Type, 0))
    }
    if nsclean >= len(sclean) {
        gc.Fatal("split64 clean")
    }
    sclean[nsclean].Op = gc.OEMPTY
    nsclean++
    switch n.Op {
    default:
        switch n.Op {
        default:
            var n1 gc.Node
            if !dotaddable(n, &n1) {
                gc.Igen(n, &n1, nil)
                sclean[nsclean-1] = n1
            }
            n = &n1
        case gc.ONAME:
            if n.Class == gc.PPARAMREF {
                var n1 gc.Node
                gc.Cgen(n.Heapaddr, &n1)
                sclean[nsclean-1] = n1
                n = &n1
            }
        // nothing
        case gc.OINDREG:
            break
        }
        *lo = *n
        *hi = *n
        lo.Type = gc.Types[gc.TUINT32]
        if n.Type.Etype == gc.TINT64 {
            hi.Type = gc.Types[gc.TINT32]
        } else {
            hi.Type = gc.Types[gc.TUINT32]
        }
        hi.Xoffset += 4
    case gc.OLITERAL:
        var n1 gc.Node
        gc.Convconst(&n1, n.Type, &n.Val)
        i := gc.Mpgetfix(n1.Val.U.Xval)
        gc.Nodconst(lo, gc.Types[gc.TUINT32], int64(uint32(i)))
        i >>= 32
        if n.Type.Etype == gc.TINT64 {
            gc.Nodconst(hi, gc.Types[gc.TINT32], int64(int32(i)))
        } else {
            gc.Nodconst(hi, gc.Types[gc.TUINT32], int64(uint32(i)))
        }
    }
}
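In the OLITERAL case above the split is pure arithmetic: the low half is the constant truncated to 32 unsigned bits, and the high half is the constant shifted right by 32, signed for TINT64 and unsigned otherwise. A standalone sketch of that arithmetic for the signed case (splitConst64 is a hypothetical helper, no compiler internals):

package main

import "fmt"

// splitConst64 mirrors the OLITERAL arm of split64: lo is always the
// unsigned low 32 bits; hi keeps the sign because the source is int64.
func splitConst64(v int64) (lo uint32, hi int32) {
    lo = uint32(v)      // low word, as TUINT32
    hi = int32(v >> 32) // high word, as TINT32 for a signed source
    return
}

func main() {
    lo, hi := splitConst64(-2)
    fmt.Printf("lo=%#x hi=%d\n", lo, hi) // lo=0xfffffffe hi=-1
}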
Author: bibbyflyawa
Project: g
func intLiteral(n *gc.Node) (x int64, ok bool) {
    switch {
    case n == nil:
        return
    case gc.Isconst(n, gc.CTINT):
        return n.Int(), true
    case gc.Isconst(n, gc.CTBOOL):
        return int64(obj.Bool2int(n.Bool())), true
    }
    return
}
Author: xslonepiec
Project: goio
func dotaddable(n *gc.Node, n1 *gc.Node) bool {
    if n.Op != gc.ODOT {
        return false
    }
    var oary [10]int64
    var nn *gc.Node
    o := gc.Dotoffset(n, oary[:], &nn)
    if nn != nil && nn.Addable && o == 1 && oary[0] >= 0 {
        *n1 = *nn
        n1.Type = n.Type
        n1.Xoffset += oary[0]
        return true
    }
    return false
}
Author: xslonepiec
Project: goio
/*
 * register dr is one of the special ones (AX, CX, DI, SI, etc.).
 * we need to use it. if it is already allocated as a temporary
 * (r > 1; can only happen if a routine like sgen passed a
 * special as cgen's res and then cgen used regalloc to reuse
 * it as its own temporary), then move it for now to another
 * register. caller must call restx to move it back.
 * the move is not necessary if dr == res, because res is
 * known to be dead.
 */
func savex(dr int, x *gc.Node, oldx *gc.Node, res *gc.Node, t *gc.Type) {
    r := int(reg[dr])
    // save current ax and dx if they are live
    // and not the destination
    *oldx = gc.Node{}
    gc.Nodreg(x, t, dr)
    if r > 1 && !gc.Samereg(x, res) {
        gc.Regalloc(oldx, gc.Types[gc.TINT64], nil)
        x.Type = gc.Types[gc.TINT64]
        gmove(x, oldx)
        x.Type = t
        oldx.Ostk = int32(r) // squirrel away old r value
        reg[dr] = 1
    }
}
Author: xslonepiec
Project: goio
func restx(x *gc.Node, oldx *gc.Node) {
    if oldx.Op != 0 {
        x.Type = gc.Types[gc.TINT64]
        reg[x.Reg] = uint8(oldx.Ostk)
        gmove(oldx, x)
        gc.Regfree(oldx)
    }
}
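Taken together, savex and restx implement a stash-and-restore protocol around a fixed register. A toy standalone analogue of that pairing (regfile, savex, restx here are illustrative names only, not compiler API):

package main

import "fmt"

type regfile struct {
    val [8]int64
    ref [8]uint8 // allocation counts, as in the reg[] array above
}

// savex: if slot dr is already someone's temporary (ref > 1), stash its
// value and mark the slot free for our use; restx undoes this.
func (r *regfile) savex(dr int) (saved int64, wasLive bool) {
    if r.ref[dr] > 1 {
        saved, wasLive = r.val[dr], true
        r.ref[dr] = 1
    }
    return
}

func (r *regfile) restx(dr int, saved int64, wasLive bool) {
    if wasLive {
        r.val[dr] = saved
    }
}

func main() {
    rf := &regfile{}
    rf.ref[0], rf.val[0] = 2, 42 // slot 0 is live as a temporary
    s, live := rf.savex(0)
    rf.val[0] = 7 // clobber it for our own work
    rf.restx(0, s, live)
    fmt.Println(rf.val[0]) // 42
}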
Author: tidatid
Project: g
func restx(x *gc.Node, oldx *gc.Node) {
    gc.Regfree(x)
    if oldx.Op != 0 {
        x.Type = gc.Types[gc.TINT32]
        gmove(oldx, x)
    }
}
Author: kluesk
Project: go-akaro
/*
 * generate function call;
 *    proc=0    normal call
 *    proc=1    goroutine run in new proc
 *    proc=2    defer call, save away stack
 */
func cgen_call(n *gc.Node, proc int) {
    if n == nil {
        return
    }
    var afun gc.Node
    if n.Left.Ullman >= gc.UINF {
        // if name involves a fn call
        // precompute the address of the fn
        gc.Tempname(&afun, gc.Types[gc.Tptr])
        cgen(n.Left, &afun)
    }
    gc.Genlist(n.List) // assign the args
    t := n.Left.Type
    // call tempname pointer
    if n.Left.Ullman >= gc.UINF {
        var nod gc.Node
        regalloc(&nod, gc.Types[gc.Tptr], nil)
        gc.Cgen_as(&nod, &afun)
        nod.Type = t
        ginscall(&nod, proc)
        regfree(&nod)
        return
    }
    // call pointer
    if n.Left.Op != gc.ONAME || n.Left.Class != gc.PFUNC {
        var nod gc.Node
        regalloc(&nod, gc.Types[gc.Tptr], nil)
        gc.Cgen_as(&nod, n.Left)
        nod.Type = t
        ginscall(&nod, proc)
        regfree(&nod)
        return
    }
    // call direct
    n.Left.Method = 1
    ginscall(n.Left, proc)
}
Author: kluesk
Project: go-akaro
/*
 * call to n has already been generated.
 * generate:
 *    res = return value from call.
 */
func cgen_callret(n *gc.Node, res *gc.Node) {
    t := n.Left.Type
    if t.Etype == gc.TPTR32 || t.Etype == gc.TPTR64 {
        t = t.Type
    }
    var flist gc.Iter
    fp := gc.Structfirst(&flist, gc.Getoutarg(t))
    if fp == nil {
        gc.Fatal("cgen_callret: nil")
    }
    var nod gc.Node
    nod.Op = gc.OINDREG
    nod.Val.U.Reg = x86.REG_SP
    nod.Addable = 1
    nod.Xoffset = fp.Width
    nod.Type = fp.Type
    gc.Cgen_as(res, &nod)
}
Author: tidatid
Project: g
func fixlargeoffset(n *gc.Node) {
    if n == nil {
        return
    }
    if n.Op != gc.OINDREG {
        return
    }
    if -4096 <= n.Xoffset && n.Xoffset < 4096 {
        return
    }
    // Offset is too large for the addressing mode:
    // add it into the base register and clear it.
    a := gc.Node(*n)
    a.Op = gc.OREGISTER
    a.Type = gc.Types[gc.Tptr]
    a.Xoffset = 0
    gc.Cgen_checknil(&a)
    ginscon(optoas(gc.OADD, gc.Types[gc.Tptr]), n.Xoffset, &a)
    n.Xoffset = 0
}
Author: kluesk
Project: go-akaro
/*
 * peep.c
 */
func mgen(n *gc.Node, n1 *gc.Node, rg *gc.Node) {
    n1.Op = gc.OEMPTY
    if n.Addable != 0 {
        *n1 = *n
        if n1.Op == gc.OREGISTER || n1.Op == gc.OINDREG {
            reg[n.Val.U.Reg]++
        }
        return
    }
    gc.Tempname(n1, n.Type)
    cgen(n, n1)
    if n.Type.Width <= int64(gc.Widthptr) || gc.Isfloat[n.Type.Etype] {
        n2 := *n1
        regalloc(n1, n.Type, rg)
        gmove(&n2, n1)
    }
}
Author: kluesk
Project: go-akaro
/*
 * generate:
 *    res = n;
 * simplifies and calls gmove.
 */
func cgen(n *gc.Node, res *gc.Node) {
    //print("cgen %N(%d) -> %N(%d)\n", n, n->addable, res, res->addable);
    if gc.Debug['g'] != 0 {
        gc.Dump("\ncgen-n", n)
        gc.Dump("cgen-res", res)
    }
    if n == nil || n.Type == nil {
        return
    }
    if res == nil || res.Type == nil {
        gc.Fatal("cgen: res nil")
    }
    for n.Op == gc.OCONVNOP {
        n = n.Left
    }
    switch n.Op {
    case gc.OSLICE,
        gc.OSLICEARR,
        gc.OSLICESTR,
        gc.OSLICE3,
        gc.OSLICE3ARR:
        if res.Op != gc.ONAME || res.Addable == 0 {
            var n1 gc.Node
            gc.Tempname(&n1, n.Type)
            gc.Cgen_slice(n, &n1)
            cgen(&n1, res)
        } else {
            gc.Cgen_slice(n, res)
        }
        return
    case gc.OEFACE:
        if res.Op != gc.ONAME || res.Addable == 0 {
            var n1 gc.Node
            gc.Tempname(&n1, n.Type)
            gc.Cgen_eface(n, &n1)
            cgen(&n1, res)
        } else {
            gc.Cgen_eface(n, res)
        }
        return
    }
    if n.Ullman >= gc.UINF {
        if n.Op == gc.OINDREG {
            gc.Fatal("cgen: this is going to misscompile")
        }
        if res.Ullman >= gc.UINF {
            var n1 gc.Node
            gc.Tempname(&n1, n.Type)
            cgen(n, &n1)
            cgen(&n1, res)
            return
        }
    }
    if gc.Isfat(n.Type) {
        if n.Type.Width < 0 {
            gc.Fatal("forgot to compute width for %v", gc.Tconv(n.Type, 0))
        }
        sgen(n, res, n.Type.Width)
        return
    }
    if res.Addable == 0 {
        if n.Ullman > res.Ullman {
            var n1 gc.Node
            regalloc(&n1, n.Type, res)
            cgen(n, &n1)
            if n1.Ullman > res.Ullman {
                gc.Dump("n1", &n1)
                gc.Dump("res", res)
                gc.Fatal("loop in cgen")
            }
            cgen(&n1, res)
            regfree(&n1)
            return
        }
        var f int
        if res.Ullman >= gc.UINF {
            goto gen
        }
        if gc.Complexop(n, res) {
            gc.Complexgen(n, res)
            return
        }
        f = 1 // gen thru register
// ......... (remainder of this function omitted) .........
Author: tidatid
Project: g
func clearfat(nl *gc.Node) {
    /* clear a fat object */
    if gc.Debug['g'] != 0 {
        gc.Dump("\nclearfat", nl)
    }
    w := uint32(nl.Type.Width)
    // Avoid taking the address for simple enough types.
    if gc.Componentgen(nil, nl) {
        return
    }
    c := w % 4 // bytes
    q := w / 4 // quads
    if q < 4 {
        // Write sequence of MOV 0, off(base) instead of using STOSL.
        // The hope is that although the code will be slightly longer,
        // the MOVs will have no dependencies and pipeline better
        // than the unrolled STOSL loop.
        // NOTE: Must use agen, not igen, so that optimizer sees address
        // being taken. We are not writing on field boundaries.
        var n1 gc.Node
        gc.Regalloc(&n1, gc.Types[gc.Tptr], nil)
        gc.Agen(nl, &n1)
        n1.Op = gc.OINDREG
        var z gc.Node
        gc.Nodconst(&z, gc.Types[gc.TUINT64], 0)
        for ; q > 0; q-- {
            n1.Type = z.Type
            gins(x86.AMOVL, &z, &n1)
            n1.Xoffset += 4
        }
        gc.Nodconst(&z, gc.Types[gc.TUINT8], 0)
        for ; c > 0; c-- {
            n1.Type = z.Type
            gins(x86.AMOVB, &z, &n1)
            n1.Xoffset++
        }
        gc.Regfree(&n1)
        return
    }
    var n1 gc.Node
    gc.Nodreg(&n1, gc.Types[gc.Tptr], x86.REG_DI)
    gc.Agen(nl, &n1)
    gconreg(x86.AMOVL, 0, x86.REG_AX)
    if q > 128 || (q >= 4 && gc.Nacl) {
        gconreg(x86.AMOVL, int64(q), x86.REG_CX)
        gins(x86.AREP, nil, nil)   // repeat
        gins(x86.ASTOSL, nil, nil) // STOL AL,*(DI)+
    } else if q >= 4 {
        p := gins(obj.ADUFFZERO, nil, nil)
        p.To.Type = obj.TYPE_ADDR
        p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
        // 1 and 128 = magic constants: see ../../runtime/asm_386.s
        p.To.Offset = 1 * (128 - int64(q))
    } else {
        for q > 0 {
            gins(x86.ASTOSL, nil, nil) // STOL AL,*(DI)+
            q--
        }
    }
    for c > 0 {
        gins(x86.ASTOSB, nil, nil) // STOB AL,*(DI)+
        c--
    }
}
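The magic `1 * (128 - q)` above selects an entry point into Duff's device: per the comment's reference to asm_386.s, runtime·duffzero on 386 is a run of 128 one-byte STOSL instructions, so jumping q entries before the end executes exactly q stores. A standalone sketch of that arithmetic (hypothetical helper name):

package main

import "fmt"

// duffzeroOffset386 computes the jump offset used above: entering the
// 128-instruction duffzero body q entries before its end executes
// exactly q one-byte STOSL instructions.
func duffzeroOffset386(q int64) int64 {
    return 1 * (128 - q)
}

func main() {
    fmt.Println(duffzeroOffset386(4), duffzeroOffset386(128)) // 124 0
}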
Author: xslonepiec
Project: goio
func clearfat(nl *gc.Node) {
    /* clear a fat object */
    if gc.Debug['g'] != 0 {
        gc.Dump("\nclearfat", nl)
    }
    w := nl.Type.Width
    // Avoid taking the address for simple enough types.
    if gc.Componentgen(nil, nl) {
        return
    }
    c := w % 8 // bytes
    q := w / 8 // quads
    if q < 4 {
        // Write sequence of MOV 0, off(base) instead of using STOSQ.
        // The hope is that although the code will be slightly longer,
        // the MOVs will have no dependencies and pipeline better
        // than the unrolled STOSQ loop.
        // NOTE: Must use agen, not igen, so that optimizer sees address
        // being taken. We are not writing on field boundaries.
        var n1 gc.Node
        gc.Agenr(nl, &n1, nil)
        n1.Op = gc.OINDREG
        var z gc.Node
        gc.Nodconst(&z, gc.Types[gc.TUINT64], 0)
        for ; q > 0; q-- {
            n1.Type = z.Type
            gins(x86.AMOVQ, &z, &n1)
            n1.Xoffset += 8
        }
        if c >= 4 {
            gc.Nodconst(&z, gc.Types[gc.TUINT32], 0)
            n1.Type = z.Type
            gins(x86.AMOVL, &z, &n1)
            n1.Xoffset += 4
            c -= 4
        }
        gc.Nodconst(&z, gc.Types[gc.TUINT8], 0)
        for ; c > 0; c-- {
            n1.Type = z.Type
            gins(x86.AMOVB, &z, &n1)
            n1.Xoffset++
        }
        gc.Regfree(&n1)
        return
    }
    var oldn1 gc.Node
    var n1 gc.Node
    savex(x86.REG_DI, &n1, &oldn1, nil, gc.Types[gc.Tptr])
    gc.Agen(nl, &n1)
    var ax gc.Node
    var oldax gc.Node
    savex(x86.REG_AX, &ax, &oldax, nil, gc.Types[gc.Tptr])
    gconreg(x86.AMOVL, 0, x86.REG_AX)
    if q > 128 || gc.Nacl {
        gconreg(movptr, q, x86.REG_CX)
        gins(x86.AREP, nil, nil)   // repeat
        gins(x86.ASTOSQ, nil, nil) // STOQ AL,*(DI)+
    } else {
        if di := dzDI(q); di != 0 {
            gconreg(addptr, di, x86.REG_DI)
        }
        p := gins(obj.ADUFFZERO, nil, nil)
        p.To.Type = obj.TYPE_ADDR
        p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
        p.To.Offset = dzOff(q)
    }
    z := ax
    di := n1
    if w >= 8 && c >= 4 {
        di.Op = gc.OINDREG
        z.Type = gc.Types[gc.TINT64]
        di.Type = z.Type
        p := gins(x86.AMOVQ, &z, &di)
        p.To.Scale = 1
        p.To.Offset = c - 8
    } else if c >= 4 {
        di.Op = gc.OINDREG
        z.Type = gc.Types[gc.TINT32]
// ......... (remainder of this function omitted) .........
Author: xslonepiec
Project: goio
/*
 * generate shift according to op, one of:
 *    res = nl << nr
 *    res = nl >> nr
 */
func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
    a := optoas(op, nl.Type)
    if nr.Op == gc.OLITERAL {
        var n1 gc.Node
        gc.Regalloc(&n1, nl.Type, res)
        gc.Cgen(nl, &n1)
        sc := uint64(gc.Mpgetfix(nr.Val.U.Xval))
        if sc >= uint64(nl.Type.Width*8) {
            // large shift gets 2 shifts by width-1
            var n3 gc.Node
            gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
            gins(a, &n3, &n1)
            gins(a, &n3, &n1)
        } else {
            gins(a, nr, &n1)
        }
        gmove(&n1, res)
        gc.Regfree(&n1)
        return
    }
    if nl.Ullman >= gc.UINF {
        var n4 gc.Node
        gc.Tempname(&n4, nl.Type)
        gc.Cgen(nl, &n4)
        nl = &n4
    }
    if nr.Ullman >= gc.UINF {
        var n5 gc.Node
        gc.Tempname(&n5, nr.Type)
        gc.Cgen(nr, &n5)
        nr = &n5
    }
    rcx := int(reg[x86.REG_CX])
    var n1 gc.Node
    gc.Nodreg(&n1, gc.Types[gc.TUINT32], x86.REG_CX)
    // Allow either uint32 or uint64 as shift type,
    // to avoid unnecessary conversion from uint32 to uint64
    // just to do the comparison.
    tcount := gc.Types[gc.Simtype[nr.Type.Etype]]
    if tcount.Etype < gc.TUINT32 {
        tcount = gc.Types[gc.TUINT32]
    }
    gc.Regalloc(&n1, nr.Type, &n1) // to hold the shift type in CX
    var n3 gc.Node
    gc.Regalloc(&n3, tcount, &n1) // to clear high bits of CX
    var cx gc.Node
    gc.Nodreg(&cx, gc.Types[gc.TUINT64], x86.REG_CX)
    var oldcx gc.Node
    if rcx > 0 && !gc.Samereg(&cx, res) {
        gc.Regalloc(&oldcx, gc.Types[gc.TUINT64], nil)
        gmove(&cx, &oldcx)
    }
    cx.Type = tcount
    var n2 gc.Node
    if gc.Samereg(&cx, res) {
        gc.Regalloc(&n2, nl.Type, nil)
    } else {
        gc.Regalloc(&n2, nl.Type, res)
    }
    if nl.Ullman >= nr.Ullman {
        gc.Cgen(nl, &n2)
        gc.Cgen(nr, &n1)
        gmove(&n1, &n3)
    } else {
        gc.Cgen(nr, &n1)
        gmove(&n1, &n3)
        gc.Cgen(nl, &n2)
    }
    gc.Regfree(&n3)
    // test and fix up large shifts
    if !bounded {
        gc.Nodconst(&n3, tcount, nl.Type.Width*8)
        gins(optoas(gc.OCMP, tcount), &n1, &n3)
        p1 := gc.Gbranch(optoas(gc.OLT, tcount), nil, +1)
        if op == gc.ORSH && gc.Issigned[nl.Type.Etype] {
            gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
            gins(a, &n3, &n2)
        } else {
            gc.Nodconst(&n3, nl.Type, 0)
            gmove(&n3, &n2)
        }
// ......... (remainder of this function omitted) .........
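The !bounded fix-up above exists because hardware and language disagree on oversized shift counts: the x86 shift instructions mask the count to the operand width, while Go defines a shift by width-or-more bits as 0 (or all sign bits for a signed right shift, hence the two-shifts-by-width-1 trick). The language-level behavior is observable from plain Go:

package main

import "fmt"

func main() {
    var x uint32 = 1
    var s uint = 32
    // Per the Go spec, shifting a 32-bit value by 32 or more yields 0;
    // the compiler emits the CMP/branch above to enforce this.
    fmt.Println(x << s) // 0
    // What a raw x86 SHL would compute: the count is masked to 5 bits,
    // so a shift by 32 behaves like a shift by 0.
    fmt.Println(x << (s & 31)) // 1
}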
Author: kluesk
Project: go-akaro
/*
 * n is call to interface method.
 * generate res = n.
 */
func cgen_callinter(n *gc.Node, res *gc.Node, proc int) {
    i := n.Left
    if i.Op != gc.ODOTINTER {
        gc.Fatal("cgen_callinter: not ODOTINTER %v", gc.Oconv(int(i.Op), 0))
    }
    f := i.Right // field
    if f.Op != gc.ONAME {
        gc.Fatal("cgen_callinter: not ONAME %v", gc.Oconv(int(f.Op), 0))
    }
    i = i.Left // interface
    if i.Addable == 0 {
        var tmpi gc.Node
        gc.Tempname(&tmpi, i.Type)
        cgen(i, &tmpi)
        i = &tmpi
    }
    gc.Genlist(n.List) // assign the args
    // i is now addable, prepare an indirected
    // register to hold its address.
    var nodi gc.Node
    igen(i, &nodi, res) // REG = &inter
    var nodsp gc.Node
    gc.Nodindreg(&nodsp, gc.Types[gc.Tptr], x86.REG_SP)
    nodsp.Xoffset = 0
    if proc != 0 {
        nodsp.Xoffset += 2 * int64(gc.Widthptr) // leave room for size & fn
    }
    nodi.Type = gc.Types[gc.Tptr]
    nodi.Xoffset += int64(gc.Widthptr)
    cgen(&nodi, &nodsp) // {0 or 8}(SP) = 4(REG) -- i.data
    var nodo gc.Node
    regalloc(&nodo, gc.Types[gc.Tptr], res)
    nodi.Type = gc.Types[gc.Tptr]
    nodi.Xoffset -= int64(gc.Widthptr)
    cgen(&nodi, &nodo) // REG = 0(REG) -- i.tab
    regfree(&nodi)
    var nodr gc.Node
    regalloc(&nodr, gc.Types[gc.Tptr], &nodo)
    if n.Left.Xoffset == gc.BADWIDTH {
        gc.Fatal("cgen_callinter: badwidth")
    }
    gc.Cgen_checknil(&nodo)
    nodo.Op = gc.OINDREG
    nodo.Xoffset = n.Left.Xoffset + 3*int64(gc.Widthptr) + 8
    if proc == 0 {
        // plain call: use direct c function pointer - more efficient
        cgen(&nodo, &nodr) // REG = 20+offset(REG) -- i.tab->fun[f]
        proc = 3
    } else {
        // go/defer. generate go func value.
        gins(x86.ALEAL, &nodo, &nodr) // REG = &(20+offset(REG)) -- i.tab->fun[f]
    }
    nodr.Type = n.Left.Type
    ginscall(&nodr, proc)
    regfree(&nodr)
    regfree(&nodo)
}
Author: tidatid
Project: g
func stackcopy(n, res *gc.Node, osrc, odst, w int64) {
    var dst gc.Node
    gc.Nodreg(&dst, gc.Types[gc.Tptr], x86.REG_DI)
    var src gc.Node
    gc.Nodreg(&src, gc.Types[gc.Tptr], x86.REG_SI)
    var tsrc gc.Node
    gc.Tempname(&tsrc, gc.Types[gc.Tptr])
    var tdst gc.Node
    gc.Tempname(&tdst, gc.Types[gc.Tptr])
    if n.Addable == 0 {
        gc.Agen(n, &tsrc)
    }
    if res.Addable == 0 {
        gc.Agen(res, &tdst)
    }
    if n.Addable != 0 {
        gc.Agen(n, &src)
    } else {
        gmove(&tsrc, &src)
    }
    if res.Op == gc.ONAME {
        gc.Gvardef(res)
    }
    if res.Addable != 0 {
        gc.Agen(res, &dst)
    } else {
        gmove(&tdst, &dst)
    }
    c := int32(w % 4) // bytes
    q := int32(w / 4) // doublewords
    // if we are copying forward on the stack and
    // the src and dst overlap, then reverse direction
    if osrc < odst && odst < osrc+w {
        // reverse direction
        gins(x86.ASTD, nil, nil) // set direction flag
        if c > 0 {
            gconreg(x86.AADDL, w-1, x86.REG_SI)
            gconreg(x86.AADDL, w-1, x86.REG_DI)
            gconreg(x86.AMOVL, int64(c), x86.REG_CX)
            gins(x86.AREP, nil, nil)   // repeat
            gins(x86.AMOVSB, nil, nil) // MOVB *(SI)-,*(DI)-
        }
        if q > 0 {
            if c > 0 {
                gconreg(x86.AADDL, -3, x86.REG_SI)
                gconreg(x86.AADDL, -3, x86.REG_DI)
            } else {
                gconreg(x86.AADDL, w-4, x86.REG_SI)
                gconreg(x86.AADDL, w-4, x86.REG_DI)
            }
            gconreg(x86.AMOVL, int64(q), x86.REG_CX)
            gins(x86.AREP, nil, nil)   // repeat
            gins(x86.AMOVSL, nil, nil) // MOVL *(SI)-,*(DI)-
        }
        // we leave with the flag clear
        gins(x86.ACLD, nil, nil)
    } else {
        gins(x86.ACLD, nil, nil) // paranoia. TODO(rsc): remove?
        // normal direction
        if q > 128 || (q >= 4 && gc.Nacl) {
            gconreg(x86.AMOVL, int64(q), x86.REG_CX)
            gins(x86.AREP, nil, nil)   // repeat
            gins(x86.AMOVSL, nil, nil) // MOVL *(SI)+,*(DI)+
        } else if q >= 4 {
            p := gins(obj.ADUFFCOPY, nil, nil)
            p.To.Type = obj.TYPE_ADDR
            p.To.Sym = gc.Linksym(gc.Pkglookup("duffcopy", gc.Runtimepkg))
            // 10 and 128 = magic constants: see ../../runtime/asm_386.s
            p.To.Offset = 10 * (128 - int64(q))
        } else if !gc.Nacl && c == 0 {
            var cx gc.Node
            gc.Nodreg(&cx, gc.Types[gc.TINT32], x86.REG_CX)
            // We don't need the MOVSL side-effect of updating SI and DI,
            // and issuing a sequence of MOVLs directly is faster.
            src.Op = gc.OINDREG
            dst.Op = gc.OINDREG
            for q > 0 {
                gmove(&src, &cx) // MOVL x+(SI),CX
                gmove(&cx, &dst) // MOVL CX,x+(DI)
                src.Xoffset += 4
                dst.Xoffset += 4
                q--
            }
        } else {
            for q > 0 {
                gins(x86.AMOVSL, nil, nil) // MOVL *(SI)+,*(DI)+
                q--
// ......... (remainder of this function omitted) .........
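The direction test near the top (osrc < odst && odst < osrc+w) is the classic memmove overlap check: copying forward while the destination starts inside the source range would overwrite bytes before they are read, so the code sets the direction flag and copies backward. A standalone illustration (mustCopyBackward is a hypothetical name):

package main

import "fmt"

// mustCopyBackward reports whether a forward copy of w bytes from
// offset osrc to offset odst would overwrite source bytes before
// they are read, requiring the reversed (STD) copy direction.
func mustCopyBackward(osrc, odst, w int64) bool {
    return osrc < odst && odst < osrc+w
}

func main() {
    fmt.Println(mustCopyBackward(0, 4, 16))  // true: overlap, dst above src
    fmt.Println(mustCopyBackward(4, 0, 16))  // false: dst below src, forward is safe
    fmt.Println(mustCopyBackward(0, 16, 16)) // false: ranges do not overlap
}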
Author: xslonepiec
Project: goio
/*
 * generate division.
 * generates one of:
 *    res = nl / nr
 *    res = nl % nr
 * according to op.
 */
func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
    // Have to be careful about handling
    // most negative int divided by -1 correctly.
    // The hardware will trap.
    // Also the byte divide instruction needs AH,
    // which we otherwise don't have to deal with.
    // Easiest way to avoid for int8, int16: use int32.
    // For int32 and int64, use explicit test.
    // Could use int64 hw for int32.
    t := nl.Type
    t0 := t
    check := 0
    if gc.Issigned[t.Etype] {
        check = 1
        if gc.Isconst(nl, gc.CTINT) && gc.Mpgetfix(nl.Val.U.Xval) != -(1<<uint64(t.Width*8-1)) {
            check = 0
        } else if gc.Isconst(nr, gc.CTINT) && gc.Mpgetfix(nr.Val.U.Xval) != -1 {
            check = 0
        }
    }
    if t.Width < 4 {
        if gc.Issigned[t.Etype] {
            t = gc.Types[gc.TINT32]
        } else {
            t = gc.Types[gc.TUINT32]
        }
        check = 0
    }
    a := optoas(op, t)
    var n3 gc.Node
    gc.Regalloc(&n3, t0, nil)
    var ax gc.Node
    var oldax gc.Node
    if nl.Ullman >= nr.Ullman {
        savex(x86.REG_AX, &ax, &oldax, res, t0)
        gc.Cgen(nl, &ax)
        gc.Regalloc(&ax, t0, &ax) // mark ax live during cgen
        gc.Cgen(nr, &n3)
        gc.Regfree(&ax)
    } else {
        gc.Cgen(nr, &n3)
        savex(x86.REG_AX, &ax, &oldax, res, t0)
        gc.Cgen(nl, &ax)
    }
    if t != t0 {
        // Convert
        ax1 := ax
        n31 := n3
        ax.Type = t
        n3.Type = t
        gmove(&ax1, &ax)
        gmove(&n31, &n3)
    }
    var n4 gc.Node
    if gc.Nacl {
        // Native Client does not relay the divide-by-zero trap
        // to the executing program, so we must insert a check
        // for ourselves.
        gc.Nodconst(&n4, t, 0)
        gins(optoas(gc.OCMP, t), &n3, &n4)
        p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
        if panicdiv == nil {
            panicdiv = gc.Sysfunc("panicdivide")
        }
        gc.Ginscall(panicdiv, -1)
        gc.Patch(p1, gc.Pc)
    }
    var p2 *obj.Prog
    if check != 0 {
        gc.Nodconst(&n4, t, -1)
        gins(optoas(gc.OCMP, t), &n3, &n4)
        p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
        if op == gc.ODIV {
            // a / (-1) is -a.
            gins(optoas(gc.OMINUS, t), nil, &ax)
            gmove(&ax, res)
        } else {
            // a % (-1) is 0.
            gc.Nodconst(&n4, t, 0)
            gmove(&n4, res)
        }
// ......... (remainder of this function omitted) .........
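The check path above guards the one case where signed division traps on x86: the most negative value divided by -1 overflows the quotient and IDIV raises an exception, while the Go spec defines x / -1 == x and x % -1 == 0 for that case; the generated fix-up produces exactly those values. The defined behavior is observable from plain Go:

package main

import (
    "fmt"
    "math"
)

func main() {
    a, b := int32(math.MinInt32), int32(-1)
    // Per the Go spec, overflow wraps: the quotient equals the
    // dividend and the remainder is 0. No trap is visible because
    // the compiler emits a guard like the one in dodiv above.
    fmt.Println(a/b, a%b) // -2147483648 0
}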