Author: xslonepiec
Project: goio
/*
* n is a 64-bit value. fill in lo and hi to refer to its 32-bit halves.
*/
func split64(n *gc.Node, lo *gc.Node, hi *gc.Node) {
if !gc.Is64(n.Type) {
gc.Fatal("split64 %v", gc.Tconv(n.Type, 0))
}
if nsclean >= len(sclean) {
gc.Fatal("split64 clean")
}
sclean[nsclean].Op = gc.OEMPTY
nsclean++
switch n.Op {
default:
switch n.Op {
default:
var n1 gc.Node
if !dotaddable(n, &n1) {
gc.Igen(n, &n1, nil)
sclean[nsclean-1] = n1
}
n = &n1
case gc.ONAME:
if n.Class == gc.PPARAMREF {
var n1 gc.Node
gc.Cgen(n.Heapaddr, &n1)
sclean[nsclean-1] = n1
n = &n1
}
case gc.OINDREG:
// nothing to do
break
}
*lo = *n
*hi = *n
lo.Type = gc.Types[gc.TUINT32]
if n.Type.Etype == gc.TINT64 {
hi.Type = gc.Types[gc.TINT32]
} else {
hi.Type = gc.Types[gc.TUINT32]
}
hi.Xoffset += 4
case gc.OLITERAL:
var n1 gc.Node
gc.Convconst(&n1, n.Type, &n.Val)
i := gc.Mpgetfix(n1.Val.U.Xval)
gc.Nodconst(lo, gc.Types[gc.TUINT32], int64(uint32(i)))
i >>= 32
if n.Type.Etype == gc.TINT64 {
gc.Nodconst(hi, gc.Types[gc.TINT32], int64(int32(i)))
} else {
gc.Nodconst(hi, gc.Types[gc.TUINT32], int64(uint32(i)))
}
}
}
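The OLITERAL arm above is plain two's-complement arithmetic: the low word is always unsigned, and only the high word keeps the source type's sign. A minimal standalone sketch of that arithmetic (splitConst64 is an illustrative name, not a compiler function):

package main

import "fmt"

// splitConst64 mirrors split64's OLITERAL case: the low half is always a
// uint32; the high half is signed only when the source type is int64.
func splitConst64(i int64, signed bool) (lo uint32, hi int64) {
	lo = uint32(i) // low 32 bits, always unsigned
	i >>= 32       // arithmetic shift keeps the sign in the high word
	if signed {
		hi = int64(int32(i)) // TINT64: high half is a signed int32
	} else {
		hi = int64(uint32(i)) // TUINT64: high half is unsigned
	}
	return
}

func main() {
	lo, hi := splitConst64(-5, true)
	fmt.Printf("lo=%#x hi=%d\n", lo, hi) // lo=0xfffffffb hi=-1
}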
Author: tidatid
Project: g
func intLiteral(n *gc.Node) (x int64, ok bool) {
if n == nil || n.Op != gc.OLITERAL {
return
}
switch n.Val.Ctype {
case gc.CTINT, gc.CTRUNE:
return gc.Mpgetfix(n.Val.U.Xval), true
case gc.CTBOOL:
return int64(n.Val.U.Bval), true
}
return
}
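Callers use the comma-ok result to gate constant fast paths and fall back to the general code otherwise. A hypothetical caller, shown only to illustrate the pattern (foldShift is not a real compiler function):

package main

import "fmt"

// foldShift sketches how intLiteral's result is typically consumed:
// a known constant count lets the caller pick a cheaper code path.
func foldShift(count int64, ok bool) string {
	switch {
	case !ok:
		return "general case: count is not a literal"
	case count == 0:
		return "no-op: shift by zero"
	case count >= 64:
		return "large shift: result is zero or sign fill"
	default:
		return fmt.Sprintf("single shift by %d", count)
	}
}

func main() {
	fmt.Println(foldShift(3, true))
	fmt.Println(foldShift(70, true))
	fmt.Println(foldShift(0, false))
}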
Author: kluesk
Project: go-akaro
/*
* n is on stack, either local variable
* or return value from function call.
* return n's offset from SP.
*/
func stkof(n *gc.Node) int64 {
switch n.Op {
case gc.OINDREG:
return n.Xoffset
case gc.ODOT:
t := n.Left.Type
if gc.Isptr[t.Etype] {
break
}
off := stkof(n.Left)
if off == -1000 || off == 1000 {
return off
}
return off + n.Xoffset
case gc.OINDEX:
t := n.Left.Type
if !gc.Isfixedarray(t) {
break
}
off := stkof(n.Left)
if off == -1000 || off == 1000 {
return off
}
if gc.Isconst(n.Right, gc.CTINT) {
return off + t.Type.Width*gc.Mpgetfix(n.Right.Val.U.Xval)
}
return 1000
case gc.OCALLMETH,
gc.OCALLINTER,
gc.OCALLFUNC:
t := n.Left.Type
if gc.Isptr[t.Etype] {
t = t.Type
}
var flist gc.Iter
t = gc.Structfirst(&flist, gc.Getoutarg(t))
if t != nil {
return t.Width + int64(gc.Widthptr) // +widthptr: correct for saved LR
}
}
// botch - probably failing to recognize address
// arithmetic on the above, e.g. INDEX and DOT
return -1000
}
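The two magic values are sentinels, not offsets: -1000 means "not stack-resident", +1000 means "on the stack but at an offset that isn't a compile-time constant" (e.g. a variable index), and both must be passed through unchanged by the recursive cases. A standalone restatement of that convention (constant names are ours; the values come from the code above):

package main

import "fmt"

const (
	stkNotOnStack    = -1000 // operand is not on the stack at all
	stkUnknownOffset = 1000  // on the stack, offset unknown at compile time
)

// fieldOff mirrors stkof's ODOT arithmetic: add the field offset to the
// base offset, but let either sentinel propagate untouched.
func fieldOff(base, xoffset int64) int64 {
	if base == stkNotOnStack || base == stkUnknownOffset {
		return base
	}
	return base + xoffset
}

func main() {
	fmt.Println(fieldOff(24, 8))               // 32: field lives at base+8
	fmt.Println(fieldOff(stkUnknownOffset, 8)) // 1000: sentinel propagates
}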
Author: tidatid
Project: g
/*
* generate division.
* generates one of:
* res = nl / nr
* res = nl % nr
* according to op.
*/
func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
// Have to be careful about handling
// most negative int divided by -1 correctly.
// The hardware will generate an undefined result.
// Also need to explicitly trap on division by zero;
// the hardware will silently generate an undefined result.
// DIVW would leave an unpredictable result in the upper 32 bits,
// so always use DIVD/DIVDU.
t := nl.Type
t0 := t
check := 0
if gc.Issigned[t.Etype] {
check = 1
if gc.Isconst(nl, gc.CTINT) && gc.Mpgetfix(nl.Val.U.Xval) != -(1<<uint64(t.Width*8-1)) {
check = 0
} else if gc.Isconst(nr, gc.CTINT) && gc.Mpgetfix(nr.Val.U.Xval) != -1 {
check = 0
}
}
if t.Width < 8 {
if gc.Issigned[t.Etype] {
t = gc.Types[gc.TINT64]
} else {
t = gc.Types[gc.TUINT64]
}
check = 0
}
a := optoas(gc.ODIV, t)
var tl gc.Node
gc.Regalloc(&tl, t0, nil)
var tr gc.Node
gc.Regalloc(&tr, t0, nil)
if nl.Ullman >= nr.Ullman {
gc.Cgen(nl, &tl)
gc.Cgen(nr, &tr)
} else {
gc.Cgen(nr, &tr)
gc.Cgen(nl, &tl)
}
if t != t0 {
// Convert
tl2 := tl
tr2 := tr
tl.Type = t
tr.Type = t
gmove(&tl2, &tl)
gmove(&tr2, &tr)
}
// Handle divide-by-zero panic.
p1 := gins(optoas(gc.OCMP, t), &tr, nil)
p1.To.Type = obj.TYPE_REG
p1.To.Reg = ppc64.REGZERO
p1 = gc.Gbranch(optoas(gc.ONE, t), nil, +1)
if panicdiv == nil {
panicdiv = gc.Sysfunc("panicdivide")
}
gc.Ginscall(panicdiv, -1)
gc.Patch(p1, gc.Pc)
var p2 *obj.Prog
if check != 0 {
var nm1 gc.Node
gc.Nodconst(&nm1, t, -1)
gins(optoas(gc.OCMP, t), &tr, &nm1)
p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
if op == gc.ODIV {
// a / (-1) is -a.
gins(optoas(gc.OMINUS, t), nil, &tl)
gmove(&tl, res)
} else {
// a % (-1) is 0.
var nz gc.Node
gc.Nodconst(&nz, t, 0)
gmove(&nz, res)
}
p2 = gc.Gbranch(obj.AJMP, nil, 0)
gc.Patch(p1, gc.Pc)
}
p1 = gins(a, &tr, &tl)
if op == gc.ODIV {
gc.Regfree(&tr)
//......... code omitted here .........
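The check/fixup path above is what gives the one overflowing case its defined answer: per the Go spec, the most negative value divided by -1 wraps back to itself and the remainder is 0, which is exactly what the OMINUS and constant-zero branches materialize. A quick check of the identities the generated code must preserve:

package main

import (
	"fmt"
	"math"
)

func main() {
	// Non-constant operands, so the division happens at run time, on the
	// code path that dodiv guards with the -1 comparison.
	n, d := int64(math.MinInt64), int64(-1)
	fmt.Println(n/d == n) // true: -MinInt64 wraps back to MinInt64
	fmt.Println(n % d)    // 0
}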
Author: kluesk
Project: go-akaro
/*
* generate division according to op, one of:
* res = nl / nr
* res = nl % nr
*/
func cgen_div(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
var w int
if nr.Op != gc.OLITERAL {
goto longdiv
}
w = int(nl.Type.Width * 8)
// Front end handled 32-bit division. We only need to handle 64-bit.
// try to do division by multiplying by (2^w)/d
// see Hacker's Delight, chapter 10
switch gc.Simtype[nl.Type.Etype] {
default:
goto longdiv
case gc.TUINT64:
var m gc.Magic
m.W = w
m.Ud = uint64(gc.Mpgetfix(nr.Val.U.Xval))
gc.Umagic(&m)
if m.Bad != 0 {
break
}
if op == gc.OMOD {
goto longmod
}
var n1 gc.Node
cgenr(nl, &n1, nil)
var n2 gc.Node
gc.Nodconst(&n2, nl.Type, int64(m.Um))
var n3 gc.Node
regalloc(&n3, nl.Type, res)
cgen_hmul(&n1, &n2, &n3)
if m.Ua != 0 {
// need to add numerator accounting for overflow
gins(optoas(gc.OADD, nl.Type), &n1, &n3)
gc.Nodconst(&n2, nl.Type, 1)
gins(optoas(gc.ORROTC, nl.Type), &n2, &n3)
gc.Nodconst(&n2, nl.Type, int64(m.S)-1)
gins(optoas(gc.ORSH, nl.Type), &n2, &n3)
} else {
gc.Nodconst(&n2, nl.Type, int64(m.S))
gins(optoas(gc.ORSH, nl.Type), &n2, &n3) // shift dx
}
gmove(&n3, res)
regfree(&n1)
regfree(&n3)
return
case gc.TINT64:
var m gc.Magic
m.W = w
m.Sd = gc.Mpgetfix(nr.Val.U.Xval)
gc.Smagic(&m)
if m.Bad != 0 {
break
}
if op == gc.OMOD {
goto longmod
}
var n1 gc.Node
cgenr(nl, &n1, res)
var n2 gc.Node
gc.Nodconst(&n2, nl.Type, m.Sm)
var n3 gc.Node
regalloc(&n3, nl.Type, nil)
cgen_hmul(&n1, &n2, &n3)
if m.Sm < 0 {
// need to add numerator
gins(optoas(gc.OADD, nl.Type), &n1, &n3)
}
gc.Nodconst(&n2, nl.Type, int64(m.S))
gins(optoas(gc.ORSH, nl.Type), &n2, &n3) // shift n3
gc.Nodconst(&n2, nl.Type, int64(w)-1)
gins(optoas(gc.ORSH, nl.Type), &n2, &n1) // -1 iff num is neg
gins(optoas(gc.OSUB, nl.Type), &n1, &n3) // added
if m.Sd < 0 {
// this could probably be removed
// by factoring it into the multiplier
gins(optoas(gc.OMINUS, nl.Type), nil, &n3)
}
gmove(&n3, res)
regfree(&n1)
regfree(&n3)
//......... code omitted here .........
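The unsigned branch computes a magic constant once and replaces the divide with a high multiply and shift. For divisors where no overflow fixup is needed (the m.Ua == 0 shape above), the identity is easy to verify directly; here is a self-contained check for d = 5 at w = 32, where the magic constant is 0xCCCCCCCD = ceil(2^34/5) and the shift is 2:

package main

import "fmt"

// divu5 divides a uint32 by 5 with a multiply and a shift:
// 5 * 0xCCCCCCCD = 2^34 + 1, so (n*M)>>34 == n/5 for every uint32 n.
// Divisors such as 7 would need the add-and-rotate fixup shown above.
func divu5(n uint32) uint32 {
	return uint32((uint64(n) * 0xCCCCCCCD) >> 34)
}

func main() {
	for _, n := range []uint32{0, 1, 4, 5, 6, 12345, 1<<32 - 1} {
		if divu5(n) != n/5 {
			fmt.Println("mismatch at", n)
			return
		}
	}
	fmt.Println("multiply-and-shift matches n/5 for all samples")
}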
Author: kluesk
Project: go-akaro
//......... code omitted here .........
if !gc.Isconst(nl, gc.CTSTR) {
igen(nl, &n3, res)
}
} else {
p2 = igenindex(nr, &tmp, bool2int(bounded))
nr = &tmp
if !gc.Isconst(nl, gc.CTSTR) {
igen(nl, &n3, res)
}
regalloc(&n1, tmp.Type, nil)
gins(optoas(gc.OAS, tmp.Type), &tmp, &n1)
}
// For fixed array we really want the pointer in n3.
var n2 gc.Node
if gc.Isfixedarray(nl.Type) {
regalloc(&n2, gc.Types[gc.Tptr], &n3)
agen(&n3, &n2)
regfree(&n3)
n3 = n2
}
// &a[0] is in n3 (allocated in res)
// i is in n1 (if not constant)
// len(a) is in nlen (if needed)
// w is width
// constant index
if gc.Isconst(nr, gc.CTINT) {
if gc.Isconst(nl, gc.CTSTR) {
gc.Fatal("constant string constant index") // front end should handle
}
v := uint64(gc.Mpgetfix(nr.Val.U.Xval))
if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
if gc.Debug['B'] == 0 && !n.Bounded {
nlen := n3
nlen.Type = gc.Types[gc.TUINT32]
nlen.Xoffset += int64(gc.Array_nel)
gc.Nodconst(&n2, gc.Types[gc.TUINT32], int64(v))
gins(optoas(gc.OCMP, gc.Types[gc.TUINT32]), &nlen, &n2)
p1 := gc.Gbranch(optoas(gc.OGT, gc.Types[gc.TUINT32]), nil, +1)
ginscall(gc.Panicindex, -1)
gc.Patch(p1, gc.Pc)
}
}
// Load base pointer in n2 = n3.
regalloc(&n2, gc.Types[gc.Tptr], &n3)
n3.Type = gc.Types[gc.Tptr]
n3.Xoffset += int64(gc.Array_array)
gmove(&n3, &n2)
regfree(&n3)
if v*uint64(w) != 0 {
gc.Nodconst(&n1, gc.Types[gc.Tptr], int64(v*uint64(w)))
gins(optoas(gc.OADD, gc.Types[gc.Tptr]), &n1, &n2)
}
gmove(&n2, res)
regfree(&n2)
break
}
// i is in register n1, extend to 32 bits.
t := gc.Types[gc.TUINT32]
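The compare-and-branch emitted above is the standard unsigned bounds check: one unsigned comparison of the index against the length covers both the negative and the too-large cases. In source-level terms the generated test is equivalent to this sketch (not the compiler's literal output):

package main

import "fmt"

// boundsCheckedLoad shows the shape of the emitted check: a single
// unsigned compare, then a branch to the panic path on failure.
func boundsCheckedLoad(a []int, i int) int {
	if uint(i) >= uint(len(a)) { // one unsigned compare catches i < 0 too
		panic("index out of range") // stands in for gc.Panicindex
	}
	return a[i] // past the check, the access needs no further test
}

func main() {
	fmt.Println(boundsCheckedLoad([]int{10, 20, 30}, 2))
}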
Author: xslonepiec
Project: goio
/*
* generate division.
* generates one of:
* res = nl / nr
* res = nl % nr
* according to op.
*/
func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
// Have to be careful about handling
// most negative int divided by -1 correctly.
// The hardware will trap.
// Also the byte divide instruction needs AH,
// which we otherwise don't have to deal with.
// Easiest way to avoid for int8, int16: use int32.
// For int32 and int64, use explicit test.
// Could use int64 hw for int32.
t := nl.Type
t0 := t
check := 0
if gc.Issigned[t.Etype] {
check = 1
if gc.Isconst(nl, gc.CTINT) && gc.Mpgetfix(nl.Val.U.Xval) != -(1<<uint64(t.Width*8-1)) {
check = 0
} else if gc.Isconst(nr, gc.CTINT) && gc.Mpgetfix(nr.Val.U.Xval) != -1 {
check = 0
}
}
if t.Width < 4 {
if gc.Issigned[t.Etype] {
t = gc.Types[gc.TINT32]
} else {
t = gc.Types[gc.TUINT32]
}
check = 0
}
a := optoas(op, t)
var n3 gc.Node
gc.Regalloc(&n3, t0, nil)
var ax gc.Node
var oldax gc.Node
if nl.Ullman >= nr.Ullman {
savex(x86.REG_AX, &ax, &oldax, res, t0)
gc.Cgen(nl, &ax)
gc.Regalloc(&ax, t0, &ax) // mark ax live during cgen
gc.Cgen(nr, &n3)
gc.Regfree(&ax)
} else {
gc.Cgen(nr, &n3)
savex(x86.REG_AX, &ax, &oldax, res, t0)
gc.Cgen(nl, &ax)
}
if t != t0 {
// Convert
ax1 := ax
n31 := n3
ax.Type = t
n3.Type = t
gmove(&ax1, &ax)
gmove(&n31, &n3)
}
var n4 gc.Node
if gc.Nacl {
// Native Client does not relay the divide-by-zero trap
// to the executing program, so we must insert a check
// for ourselves.
gc.Nodconst(&n4, t, 0)
gins(optoas(gc.OCMP, t), &n3, &n4)
p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
if panicdiv == nil {
panicdiv = gc.Sysfunc("panicdivide")
}
gc.Ginscall(panicdiv, -1)
gc.Patch(p1, gc.Pc)
}
var p2 *obj.Prog
if check != 0 {
gc.Nodconst(&n4, t, -1)
gins(optoas(gc.OCMP, t), &n3, &n4)
p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
if op == gc.ODIV {
// a / (-1) is -a.
gins(optoas(gc.OMINUS, t), nil, &ax)
gmove(&ax, res)
} else {
// a % (-1) is 0.
gc.Nodconst(&n4, t, 0)
gmove(&n4, res)
}
//......... code omitted here .........
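Outside NaCl the hardware trap is mapped to the same panic that the explicit panicdivide call raises, so the user-visible behavior is identical on both paths:

package main

import "fmt"

func main() {
	defer func() {
		// Both the hardware trap and the inserted panicdivide call
		// surface to the program as this runtime error.
		fmt.Println("recovered:", recover())
	}()
	a, b := 1, 0 // non-constant divisor, checked at run time
	fmt.Println(a / b)
}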
Author: kluesk
Project: go-akaro
//......... code omitted here .........
if !dotaddable(n, &n1) {
igen(n, &n1, nil)
sclean[nsclean-1] = n1
}
n = &n1
case gc.ONAME:
if n.Class == gc.PPARAMREF {
var n1 gc.Node
cgen(n.Heapaddr, &n1)
sclean[nsclean-1] = n1
n = &n1
}
case gc.OINDREG:
// nothing to do
break
}
*lo = *n
*hi = *n
lo.Type = gc.Types[gc.TUINT32]
if n.Type.Etype == gc.TINT64 {
hi.Type = gc.Types[gc.TINT32]
} else {
hi.Type = gc.Types[gc.TUINT32]
}
hi.Xoffset += 4
case gc.OLITERAL:
var n1 gc.Node
gc.Convconst(&n1, n.Type, &n.Val)
i := gc.Mpgetfix(n1.Val.U.Xval)
gc.Nodconst(lo, gc.Types[gc.TUINT32], int64(uint32(i)))
i >>= 32
if n.Type.Etype == gc.TINT64 {
gc.Nodconst(hi, gc.Types[gc.TINT32], int64(int32(i)))
} else {
gc.Nodconst(hi, gc.Types[gc.TUINT32], int64(uint32(i)))
}
}
}
func splitclean() {
if nsclean <= 0 {
gc.Fatal("splitclean")
}
nsclean--
if sclean[nsclean].Op != gc.OEMPTY {
regfree(&sclean[nsclean])
}
}
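split64 and splitclean form a LIFO pair: every split64 pushes one sclean slot (OEMPTY when no temporary was needed) and splitclean pops and frees it, so nested splits must be cleaned up innermost-first. A sketch of the expected discipline in a hypothetical 64-bit operation (not runnable on its own; it assumes the surrounding gc context):

var lo1, hi1, lo2, hi2 gc.Node
split64(nl, &lo1, &hi1) // pushes a sclean slot for nl
split64(nr, &lo2, &hi2) // pushes a sclean slot for nr
// ... operate on the 32-bit halves with gins/gmove ...
splitclean() // pops nr's slot, freeing its temp register if one was taken
splitclean() // pops nl's slot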
/*
* set up nodes representing fp constants
*/
var zerof gc.Node
var two64f gc.Node
var two63f gc.Node
var bignodes_did int
func bignodes() {
Author: kluesk
Project: go-akaro
//......... code omitted here .........
cgen(nl, &tmp2)
nl = &tmp2
}
igen(nl, &nlen, res)
freelen = 1
nlen.Type = gc.Types[gc.Tptr]
nlen.Xoffset += int64(gc.Array_array)
regalloc(&n3, gc.Types[gc.Tptr], res)
gmove(&nlen, &n3)
nlen.Type = gc.Types[gc.Simtype[gc.TUINT]]
nlen.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array)
}
}
if !gc.Isconst(nr, gc.CTINT) {
cgenr(nr, &n1, nil)
}
goto index
// &a is in &n3 (allocated in res)
// i is in &n1 (if not constant)
// len(a) is in nlen (if needed)
// w is width
// constant index
index:
if gc.Isconst(nr, gc.CTINT) {
if gc.Isconst(nl, gc.CTSTR) {
gc.Fatal("constant string constant index") // front end should handle
}
v := uint64(gc.Mpgetfix(nr.Val.U.Xval))
if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
if gc.Debug['B'] == 0 && !n.Bounded {
var n2 gc.Node
gc.Nodconst(&n2, gc.Types[gc.Simtype[gc.TUINT]], int64(v))
if gc.Smallintconst(nr) {
gins(optoas(gc.OCMP, gc.Types[gc.Simtype[gc.TUINT]]), &nlen, &n2)
} else {
regalloc(&tmp, gc.Types[gc.Simtype[gc.TUINT]], nil)
gmove(&n2, &tmp)
gins(optoas(gc.OCMP, gc.Types[gc.Simtype[gc.TUINT]]), &nlen, &tmp)
regfree(&tmp)
}
p1 := gc.Gbranch(optoas(gc.OGT, gc.Types[gc.Simtype[gc.TUINT]]), nil, +1)
ginscall(gc.Panicindex, -1)
gc.Patch(p1, gc.Pc)
}
regfree(&nlen)
}
if v*w != 0 {
ginscon(optoas(gc.OADD, gc.Types[gc.Tptr]), int64(v*w), &n3)
}
*a = n3
break
}
// type of the index
t := gc.Types[gc.TUINT64]
if gc.Issigned[n1.Type.Etype] {
Author: kluesk
Project: go-akaro
//......... code omitted here .........
igen(nl, &n1, nil)
n1.Type = gc.Types[gc.Tptr]
n1.Xoffset += 0
gencmp0(&n1, gc.Types[gc.Tptr], a, likely, to)
regfree(&n1)
break
}
if gc.Iscomplex[nl.Type.Etype] {
gc.Complexbool(a, nl, nr, true_, likely, to)
break
}
if gc.Is64(nr.Type) {
if nl.Addable == 0 {
var n1 gc.Node
gc.Tempname(&n1, nl.Type)
cgen(nl, &n1)
nl = &n1
}
if nr.Addable == 0 {
var n2 gc.Node
gc.Tempname(&n2, nr.Type)
cgen(nr, &n2)
nr = &n2
}
cmp64(nl, nr, a, likely, to)
break
}
if nr.Op == gc.OLITERAL {
if gc.Isconst(nr, gc.CTINT) && gc.Mpgetfix(nr.Val.U.Xval) == 0 {
gencmp0(nl, nl.Type, a, likely, to)
break
}
if nr.Val.Ctype == gc.CTNIL {
gencmp0(nl, nl.Type, a, likely, to)
break
}
}
a = optoas(a, nr.Type)
if nr.Ullman >= gc.UINF {
var n1 gc.Node
regalloc(&n1, nl.Type, nil)
cgen(nl, &n1)
var tmp gc.Node
gc.Tempname(&tmp, nl.Type)
gmove(&n1, &tmp)
regfree(&n1)
var n2 gc.Node
regalloc(&n2, nr.Type, nil)
cgen(nr, &n2)
regalloc(&n1, nl.Type, nil)
cgen(&tmp, &n1)
gcmp(optoas(gc.OCMP, nr.Type), &n1, &n2)
gc.Patch(gc.Gbranch(a, nr.Type, likely), to)
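The Ullman field consulted here (and in the other snippets) is a Sethi–Ullman number: an estimate of how many registers a subtree needs, with gc.UINF marking subtrees that contain function calls, which therefore get spilled to a temporary as above instead of being held in registers across the other operand. A toy version of the computation, purely illustrative of the idea (the real one in gc handles many more node kinds):

package main

import "fmt"

const uinf = 100 // stand-in for gc.UINF: "needs all the registers"

type expr struct {
	isCall      bool
	left, right *expr
}

// ullman: a leaf needs one register; a call clobbers everything; a binary
// node needs one extra register only when both children tie.
func ullman(e *expr) int {
	if e == nil {
		return 0
	}
	if e.isCall {
		return uinf
	}
	if e.left == nil && e.right == nil {
		return 1
	}
	l, r := ullman(e.left), ullman(e.right)
	switch {
	case l == r:
		l++
	case r > l:
		l = r
	}
	if l > uinf {
		l = uinf
	}
	return l
}

func main() {
	leaf := &expr{}
	call := &expr{isCall: true}
	fmt.Println(ullman(&expr{left: leaf, right: leaf})) // 2
	fmt.Println(ullman(&expr{left: call, right: leaf})) // 100: spill first
}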
Author: kluesk
Project: go-akaro
//......... code omitted here .........
igen(n.Left, a, res)
a.Xoffset += n.Xoffset
a.Type = n.Type
return
case gc.ODOTPTR:
switch n.Left.Op {
// igen-able nodes.
case gc.ODOT,
gc.ODOTPTR,
gc.OCALLFUNC,
gc.OCALLMETH,
gc.OCALLINTER:
var n1 gc.Node
igen(n.Left, &n1, res)
regalloc(a, gc.Types[gc.Tptr], &n1)
gmove(&n1, a)
regfree(&n1)
default:
regalloc(a, gc.Types[gc.Tptr], res)
cgen(n.Left, a)
}
gc.Cgen_checknil(a)
a.Op = gc.OINDREG
a.Xoffset += n.Xoffset
a.Type = n.Type
return
case gc.OCALLFUNC,
gc.OCALLMETH,
gc.OCALLINTER:
switch n.Op {
case gc.OCALLFUNC:
cgen_call(n, 0)
case gc.OCALLMETH:
gc.Cgen_callmeth(n, 0)
case gc.OCALLINTER:
cgen_callinter(n, nil, 0)
}
var flist gc.Iter
fp := gc.Structfirst(&flist, gc.Getoutarg(n.Left.Type))
*a = gc.Node{}
a.Op = gc.OINDREG
a.Val.U.Reg = x86.REG_SP
a.Addable = 1
a.Xoffset = fp.Width
a.Type = n.Type
return
// Index of fixed-size array by constant can
// put the offset in the addressing.
// Could do the same for slice except that we need
// to use the real index for the bounds checking.
case gc.OINDEX:
if gc.Isfixedarray(n.Left.Type) || (gc.Isptr[n.Left.Type.Etype] && gc.Isfixedarray(n.Left.Left.Type)) {
if gc.Isconst(n.Right, gc.CTINT) {
// Compute &a.
if !gc.Isptr[n.Left.Type.Etype] {
igen(n.Left, a, res)
} else {
var n1 gc.Node
igen(n.Left, &n1, res)
gc.Cgen_checknil(&n1)
regalloc(a, gc.Types[gc.Tptr], res)
gmove(&n1, a)
regfree(&n1)
a.Op = gc.OINDREG
}
// Compute &a[i] as &a + i*width.
a.Type = n.Type
a.Xoffset += gc.Mpgetfix(n.Right.Val.U.Xval) * n.Type.Width
return
}
}
}
// release register for now, to avoid
// confusing tempname.
if res != nil && res.Op == gc.OREGISTER {
reg[res.Val.U.Reg]--
}
var n1 gc.Node
gc.Tempname(&n1, gc.Types[gc.Tptr])
agen(n, &n1)
if res != nil && res.Op == gc.OREGISTER {
reg[res.Val.U.Reg]++
}
regalloc(a, gc.Types[gc.Tptr], res)
gmove(&n1, a)
a.Op = gc.OINDREG
a.Type = n.Type
}
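The OINDEX fast path above folds a constant index straight into the addressing: &a[i] is &a plus i*width, with no run-time index arithmetic. The identity it relies on can be observed directly:

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	var a [4]int32
	base := uintptr(unsafe.Pointer(&a[0]))
	elem := uintptr(unsafe.Pointer(&a[3]))
	// For a fixed array, &a[3] is &a[0] plus 3 * sizeof(element), so a
	// constant index becomes a constant offset in the instruction.
	fmt.Println(elem-base == 3*unsafe.Sizeof(a[0])) // true
}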
Author: xslonepiec
Project: goio
/*
* generate code to compute address of n,
* a reference to a (perhaps nested) field inside
* an array or struct.
* return false on failure, true on success.
* on success, leaves usable address in a.
*
* caller is responsible for calling sudoclean
* after successful sudoaddable,
* to release the register used for a.
*/
func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
if n.Type == nil {
return false
}
*a = obj.Addr{}
switch n.Op {
case gc.OLITERAL:
if !gc.Isconst(n, gc.CTINT) {
break
}
v := gc.Mpgetfix(n.Val.U.Xval)
if v >= 32000 || v <= -32000 {
break
}
switch as {
default:
return false
case arm.AADD,
arm.ASUB,
arm.AAND,
arm.AORR,
arm.AEOR,
arm.AMOVB,
arm.AMOVBS,
arm.AMOVBU,
arm.AMOVH,
arm.AMOVHS,
arm.AMOVHU,
arm.AMOVW:
break
}
cleani += 2
reg := &clean[cleani-1]
reg1 := &clean[cleani-2]
reg.Op = gc.OEMPTY
reg1.Op = gc.OEMPTY
gc.Naddr(a, n)
return true
case gc.ODOT,
gc.ODOTPTR:
cleani += 2
reg := &clean[cleani-1]
reg1 := &clean[cleani-2]
reg.Op = gc.OEMPTY
reg1.Op = gc.OEMPTY
var nn *gc.Node
var oary [10]int64
o := gc.Dotoffset(n, oary[:], &nn)
if nn == nil {
sudoclean()
return false
}
if nn.Addable && o == 1 && oary[0] >= 0 {
// directly addressable set of DOTs
n1 := *nn
n1.Type = n.Type
n1.Xoffset += oary[0]
gc.Naddr(a, &n1)
return true
}
gc.Regalloc(reg, gc.Types[gc.Tptr], nil)
n1 := *reg
n1.Op = gc.OINDREG
if oary[0] >= 0 {
gc.Agen(nn, reg)
n1.Xoffset = oary[0]
} else {
gc.Cgen(nn, reg)
gc.Cgen_checknil(reg)
n1.Xoffset = -(oary[0] + 1)
}
for i := 1; i < o; i++ {
if oary[i] >= 0 {
gc.Fatal("can't happen")
}
gins(arm.AMOVW, &n1, reg)
gc.Cgen_checknil(reg)
n1.Xoffset = -(oary[i] + 1)
}
//......... code omitted here .........
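The oary encoding decoded by the loop above is visible in the code: a non-negative entry is a direct field offset, while a negative entry stores -(offset+1) and means "load the pointer, then apply the offset" (hence the nil check). A standalone decoder for that convention (decodeDotOffset is our name for it):

package main

import "fmt"

// decodeDotOffset interprets one Dotoffset entry per the convention used
// in sudoaddable: entry >= 0 is a direct offset; entry < 0 encodes
// -(offset+1) and requires a pointer load before offsetting.
func decodeDotOffset(entry int64) (offset int64, indirect bool) {
	if entry >= 0 {
		return entry, false
	}
	return -(entry + 1), true
}

func main() {
	fmt.Println(decodeDotOffset(16)) // 16 false: field at offset 16
	fmt.Println(decodeDotOffset(-9)) // 8 true: deref pointer, then +8
}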
Author: tidatid
Project: g
//......... code omitted here .........
p1.To.Offset = int64(ah.Val.U.Reg)
//print("%P\n", p1);
// bh * cl + ah -> ah
p1 = gins(arm.AMULA, nil, nil)
p1.From.Type = obj.TYPE_REG
p1.From.Reg = bh.Val.U.Reg
p1.Reg = cl.Val.U.Reg
p1.To.Type = obj.TYPE_REGREG2
p1.To.Reg = ah.Val.U.Reg
p1.To.Offset = int64(ah.Val.U.Reg)
//print("%P\n", p1);
gc.Regfree(&bh)
gc.Regfree(&bl)
gc.Regfree(&ch)
gc.Regfree(&cl)
// We only rotate by a constant c in [0,64).
// if c >= 32:
// lo, hi = hi, lo
// c -= 32
// if c == 0:
// no-op
// else:
// t = hi
// shld hi:lo, c
// shld lo:t, c
case gc.OLROT:
v := uint64(gc.Mpgetfix(r.Val.U.Xval))
var bl gc.Node
gc.Regalloc(&bl, lo1.Type, nil)
var bh gc.Node
gc.Regalloc(&bh, hi1.Type, nil)
if v >= 32 {
// reverse during load to do the first 32 bits of rotate
v -= 32
gins(arm.AMOVW, &hi1, &bl)
gins(arm.AMOVW, &lo1, &bh)
} else {
gins(arm.AMOVW, &hi1, &bh)
gins(arm.AMOVW, &lo1, &bl)
}
if v == 0 {
gins(arm.AMOVW, &bh, &ah)
gins(arm.AMOVW, &bl, &al)
} else {
// rotate by 1 <= v <= 31
// MOVW bl<<v, al
// MOVW bh<<v, ah
// OR bl>>(32-v), ah
// OR bh>>(32-v), al
gshift(arm.AMOVW, &bl, arm.SHIFT_LL, int32(v), &al)
gshift(arm.AMOVW, &bh, arm.SHIFT_LL, int32(v), &ah)
gshift(arm.AORR, &bl, arm.SHIFT_LR, int32(32-v), &ah)
gshift(arm.AORR, &bh, arm.SHIFT_LR, int32(32-v), &al)
}
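The constant-rotate sequence is easy to check against math/bits: swap the halves when c >= 32, then build each output word from a left shift of one half OR'd with the complementary right shift of the other, exactly as the gshift calls do. A direct translation:

package main

import (
	"fmt"
	"math/bits"
)

// rotl64 rotates a 64-bit value held as two 32-bit halves left by
// c in [0,64), mirroring the bl/bh -> al/ah sequence above.
func rotl64(lo, hi uint32, c uint) (uint32, uint32) {
	if c >= 32 { // the "reverse during load" case
		lo, hi = hi, lo
		c -= 32
	}
	if c == 0 {
		return lo, hi
	}
	return lo<<c | hi>>(32-c), hi<<c | lo>>(32-c)
}

func main() {
	x := uint64(0x0123456789abcdef)
	lo, hi := rotl64(uint32(x), uint32(x>>32), 12)
	fmt.Println(uint64(hi)<<32|uint64(lo) == bits.RotateLeft64(x, 12)) // true
}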
Author: tidatid
Project: g
/*
* generate code to compute address of n,
* a reference to a (perhaps nested) field inside
* an array or struct.
* return false on failure, true on success.
* on success, leaves usable address in a.
*
* caller is responsible for calling sudoclean
* after successful sudoaddable,
* to release the register used for a.
*/
func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
if n.Type == nil {
return false
}
*a = obj.Addr{}
switch n.Op {
case gc.OLITERAL:
if !gc.Isconst(n, gc.CTINT) {
break
}
v := gc.Mpgetfix(n.Val.U.Xval)
if v >= 32000 || v <= -32000 {
break
}
switch as {
default:
return false
case x86.AADDB,
x86.AADDW,
x86.AADDL,
x86.AADDQ,
x86.ASUBB,
x86.ASUBW,
x86.ASUBL,
x86.ASUBQ,
x86.AANDB,
x86.AANDW,
x86.AANDL,
x86.AANDQ,
x86.AORB,
x86.AORW,
x86.AORL,
x86.AORQ,
x86.AXORB,
x86.AXORW,
x86.AXORL,
x86.AXORQ,
x86.AINCB,
x86.AINCW,
x86.AINCL,
x86.AINCQ,
x86.ADECB,
x86.ADECW,
x86.ADECL,
x86.ADECQ,
x86.AMOVB,
x86.AMOVW,
x86.AMOVL,
x86.AMOVQ:
break
}
cleani += 2
reg := &clean[cleani-1]
reg1 := &clean[cleani-2]
reg.Op = gc.OEMPTY
reg1.Op = gc.OEMPTY
gc.Naddr(a, n)
return true
case gc.ODOT,
gc.ODOTPTR:
cleani += 2
reg := &clean[cleani-1]
reg1 := &clean[cleani-2]
reg.Op = gc.OEMPTY
reg1.Op = gc.OEMPTY
var nn *gc.Node
var oary [10]int64
o := gc.Dotoffset(n, oary[:], &nn)
if nn == nil {
sudoclean()
return false
}
if nn.Addable != 0 && o == 1 && oary[0] >= 0 {
// directly addressable set of DOTs
n1 := *nn
n1.Type = n.Type
n1.Xoffset += oary[0]
gc.Naddr(a, &n1)
return true
}
gc.Regalloc(reg, gc.Types[gc.Tptr], nil)
//......... code omitted here .........
Author: kluesk
Project: go-akaro
/*
* generate shift according to op, one of:
* res = nl << nr
* res = nl >> nr
*/
func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
if nl.Type.Width > 4 {
gc.Fatal("cgen_shift %v", gc.Tconv(nl.Type, 0))
}
w := int(nl.Type.Width * 8)
if op == gc.OLROT {
v := int(gc.Mpgetfix(nr.Val.U.Xval))
var n1 gc.Node
regalloc(&n1, nl.Type, res)
if w == 32 {
cgen(nl, &n1)
gshift(arm.AMOVW, &n1, arm.SHIFT_RR, int32(w)-int32(v), &n1)
} else {
var n2 gc.Node
regalloc(&n2, nl.Type, nil)
cgen(nl, &n2)
gshift(arm.AMOVW, &n2, arm.SHIFT_LL, int32(v), &n1)
gshift(arm.AORR, &n2, arm.SHIFT_LR, int32(w)-int32(v), &n1)
regfree(&n2)
// Ensure sign/zero-extended result.
gins(optoas(gc.OAS, nl.Type), &n1, &n1)
}
gmove(&n1, res)
regfree(&n1)
return
}
if nr.Op == gc.OLITERAL {
var n1 gc.Node
regalloc(&n1, nl.Type, res)
cgen(nl, &n1)
sc := uint64(gc.Mpgetfix(nr.Val.U.Xval))
if sc == 0 {
// nothing to do
} else if sc >= uint64(nl.Type.Width*8) {
if op == gc.ORSH && gc.Issigned[nl.Type.Etype] {
gshift(arm.AMOVW, &n1, arm.SHIFT_AR, int32(w), &n1)
} else {
gins(arm.AEOR, &n1, &n1)
}
} else {
if op == gc.ORSH && gc.Issigned[nl.Type.Etype] {
gshift(arm.AMOVW, &n1, arm.SHIFT_AR, int32(sc), &n1)
} else if op == gc.ORSH {
gshift(arm.AMOVW, &n1, arm.SHIFT_LR, int32(sc), &n1)
} else {
gshift(arm.AMOVW, &n1, arm.SHIFT_LL, int32(sc), &n1) // OLSH
}
}
if w < 32 && op == gc.OLSH {
gins(optoas(gc.OAS, nl.Type), &n1, &n1)
}
gmove(&n1, res)
regfree(&n1)
return
}
tr := nr.Type
var t gc.Node
var n1 gc.Node
var n2 gc.Node
var n3 gc.Node
if tr.Width > 4 {
var nt gc.Node
gc.Tempname(&nt, nr.Type)
if nl.Ullman >= nr.Ullman {
regalloc(&n2, nl.Type, res)
cgen(nl, &n2)
cgen(nr, &nt)
n1 = nt
} else {
cgen(nr, &nt)
regalloc(&n2, nl.Type, res)
cgen(nl, &n2)
}
var hi gc.Node
var lo gc.Node
split64(&nt, &lo, &hi)
regalloc(&n1, gc.Types[gc.TUINT32], nil)
regalloc(&n3, gc.Types[gc.TUINT32], nil)
gmove(&lo, &n1)
gmove(&hi, &n3)
splitclean()
gins(arm.ATST, &n3, nil)
gc.Nodconst(&t, gc.Types[gc.TUINT32], int64(w))
p1 := gins(arm.AMOVW, &t, &n1)
p1.Scond = arm.C_SCOND_NE
tr = gc.Types[gc.TUINT32]
regfree(&n3)
//......... code omitted here .........
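Two details above are worth spelling out. The w == 32 rotate case uses a single right-rotate addressing mode because rotating left by v equals rotating right by 32-v; and a 64-bit shift count is reduced to 32 bits by first testing the high word (any nonzero high word already means the count exceeds the width, so the count is replaced with w). The rotate identity, checked with math/bits:

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	x, v := uint32(0xdeadbeef), 7
	// SHIFT_RR by 32-v implements a left rotate by v: a negative count
	// to RotateLeft32 rotates right.
	fmt.Println(bits.RotateLeft32(x, v) == bits.RotateLeft32(x, -(32-v))) // true
}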
Author: xslonepiec
Project: goio
/*
* generate shift according to op, one of:
* res = nl << nr
* res = nl >> nr
*/
func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
a := optoas(op, nl.Type)
if nr.Op == gc.OLITERAL {
var n1 gc.Node
gc.Regalloc(&n1, nl.Type, res)
gc.Cgen(nl, &n1)
sc := uint64(gc.Mpgetfix(nr.Val.U.Xval))
if sc >= uint64(nl.Type.Width*8) {
// large shift gets 2 shifts by width-1
var n3 gc.Node
gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
gins(a, &n3, &n1)
gins(a, &n3, &n1)
} else {
gins(a, nr, &n1)
}
gmove(&n1, res)
gc.Regfree(&n1)
return
}
if nl.Ullman >= gc.UINF {
var n4 gc.Node
gc.Tempname(&n4, nl.Type)
gc.Cgen(nl, &n4)
nl = &n4
}
if nr.Ullman >= gc.UINF {
var n5 gc.Node
gc.Tempname(&n5, nr.Type)
gc.Cgen(nr, &n5)
nr = &n5
}
rcx := int(reg[x86.REG_CX])
var n1 gc.Node
gc.Nodreg(&n1, gc.Types[gc.TUINT32], x86.REG_CX)
// Allow either uint32 or uint64 as shift type,
// to avoid unnecessary conversion from uint32 to uint64
// just to do the comparison.
tcount := gc.Types[gc.Simtype[nr.Type.Etype]]
if tcount.Etype < gc.TUINT32 {
tcount = gc.Types[gc.TUINT32]
}
gc.Regalloc(&n1, nr.Type, &n1) // to hold the shift type in CX
var n3 gc.Node
gc.Regalloc(&n3, tcount, &n1) // to clear high bits of CX
var cx gc.Node
gc.Nodreg(&cx, gc.Types[gc.TUINT64], x86.REG_CX)
var oldcx gc.Node
if rcx > 0 && !gc.Samereg(&cx, res) {
gc.Regalloc(&oldcx, gc.Types[gc.TUINT64], nil)
gmove(&cx, &oldcx)
}
cx.Type = tcount
var n2 gc.Node
if gc.Samereg(&cx, res) {
gc.Regalloc(&n2, nl.Type, nil)
} else {
gc.Regalloc(&n2, nl.Type, res)
}
if nl.Ullman >= nr.Ullman {
gc.Cgen(nl, &n2)
gc.Cgen(nr, &n1)
gmove(&n1, &n3)
} else {
gc.Cgen(nr, &n1)
gmove(&n1, &n3)
gc.Cgen(nl, &n2)
}
gc.Regfree(&n3)
// test and fix up large shifts
if !bounded {
gc.Nodconst(&n3, tcount, nl.Type.Width*8)
gins(optoas(gc.OCMP, tcount), &n1, &n3)
p1 := gc.Gbranch(optoas(gc.OLT, tcount), nil, +1)
if op == gc.ORSH && gc.Issigned[nl.Type.Etype] {
gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
gins(a, &n3, &n2)
} else {
gc.Nodconst(&n3, nl.Type, 0)
gmove(&n3, &n2)
}
//......... code omitted here .........
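The large-shift fixup exists because Go defines shifts by any count while x86 masks the CL count to the operand width: a count >= width must produce 0, or all sign bits for a signed right shift. The language-level behavior the fixup preserves:

package main

import "fmt"

func main() {
	var s uint = 40 // variable count, so nothing folds at compile time
	u := uint32(5)
	x := int32(-8)
	fmt.Println(u << s) // 0: Go does not mask the count the way SHL does
	fmt.Println(x >> s) // -1: signed right shift fills with the sign bit
}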
Author: xslonepiec
Project: goio
//......... code omitted here .........
gins(x86.AMOVL, &dx, &fx)
gins(x86.AORL, &ex, &fx)
p1 := gc.Gbranch(x86.AJNE, nil, 0)
gins(x86.AMULL, &cx, nil) // implicit &ax
p2 := gc.Gbranch(obj.AJMP, nil, 0)
gc.Patch(p1, gc.Pc)
// full 64x64 -> 64, from 32x32 -> 64.
gins(x86.AIMULL, &cx, &dx)
gins(x86.AMOVL, &ax, &fx)
gins(x86.AIMULL, &ex, &fx)
gins(x86.AADDL, &dx, &fx)
gins(x86.AMOVL, &cx, &dx)
gins(x86.AMULL, &dx, nil) // implicit &ax
gins(x86.AADDL, &fx, &dx)
gc.Patch(p2, gc.Pc)
gc.Regfree(&ex)
gc.Regfree(&fx)
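The sequence above synthesizes a 64x64->64 multiply from 32-bit pieces: one widening low multiply plus the two cross products added into the high word (the high*high product only affects bits at 2^64 and above, so it is dropped). The same decomposition in Go, verified against the native multiply:

package main

import "fmt"

// mul64 computes a*b mod 2^64 from 32-bit halves, mirroring the
// MULL/IMULL/ADDL sequence above:
// (ahi<<32 + alo) * (bhi<<32 + blo) = alo*blo + ((alo*bhi + ahi*blo) << 32)
func mul64(a, b uint64) uint64 {
	alo, ahi := uint32(a), uint32(a>>32)
	blo, bhi := uint32(b), uint32(b>>32)
	full := uint64(alo) * uint64(blo)          // the one widening multiply
	hi := uint32(full>>32) + alo*bhi + ahi*blo // cross terms, mod 2^32
	return uint64(hi)<<32 | uint64(uint32(full))
}

func main() {
	a, b := uint64(0x123456789abcdef0), uint64(0xfedcba9876543210)
	fmt.Println(mul64(a, b) == a*b) // true
}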
// We only rotate by a constant c in [0,64).
// if c >= 32:
// lo, hi = hi, lo
// c -= 32
// if c == 0:
// no-op
// else:
// t = hi
// shld hi:lo, c
// shld lo:t, c
case gc.OLROT:
v := uint64(gc.Mpgetfix(r.Val.U.Xval))
if v >= 32 {
// reverse during load to do the first 32 bits of rotate
v -= 32
gins(x86.AMOVL, &lo1, &dx)
gins(x86.AMOVL, &hi1, &ax)
} else {
gins(x86.AMOVL, &lo1, &ax)
gins(x86.AMOVL, &hi1, &dx)
}
if v == 0 {
// done
} else {
gins(x86.AMOVL, &dx, &cx)
p1 := gins(x86.ASHLL, ncon(uint32(v)), &dx)
p1.From.Index = x86.REG_AX // double-width shift
p1.From.Scale = 0
p1 = gins(x86.ASHLL, ncon(uint32(v)), &ax)
p1.From.Index = x86.REG_CX // double-width shift
p1.From.Scale = 0
}
case gc.OLSH:
if r.Op == gc.OLITERAL {
v := uint64(gc.Mpgetfix(r.Val.U.Xval))
if v >= 64 {
if gc.Is64(r.Type) {
splitclean()
}
splitclean()
Author: tidatid
Project: g
/*
* generate shift according to op, one of:
* res = nl << nr
* res = nl >> nr
*/
func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
if nl.Type.Width > 4 {
gc.Fatal("cgen_shift %v", gc.Tconv(nl.Type, 0))
}
w := int(nl.Type.Width * 8)
a := optoas(op, nl.Type)
if nr.Op == gc.OLITERAL {
var n2 gc.Node
gc.Tempname(&n2, nl.Type)
gc.Cgen(nl, &n2)
var n1 gc.Node
gc.Regalloc(&n1, nl.Type, res)
gmove(&n2, &n1)
sc := uint64(gc.Mpgetfix(nr.Val.U.Xval))
if sc >= uint64(nl.Type.Width*8) {
// large shift gets 2 shifts by width-1
gins(a, ncon(uint32(w)-1), &n1)
gins(a, ncon(uint32(w)-1), &n1)
} else {
gins(a, nr, &n1)
}
gmove(&n1, res)
gc.Regfree(&n1)
return
}
var oldcx gc.Node
var cx gc.Node
gc.Nodreg(&cx, gc.Types[gc.TUINT32], x86.REG_CX)
if reg[x86.REG_CX] > 1 && !gc.Samereg(&cx, res) {
gc.Tempname(&oldcx, gc.Types[gc.TUINT32])
gmove(&cx, &oldcx)
}
var n1 gc.Node
var nt gc.Node
if nr.Type.Width > 4 {
gc.Tempname(&nt, nr.Type)
n1 = nt
} else {
gc.Nodreg(&n1, gc.Types[gc.TUINT32], x86.REG_CX)
gc.Regalloc(&n1, nr.Type, &n1) // to hold the shift type in CX
}
var n2 gc.Node
if gc.Samereg(&cx, res) {
gc.Regalloc(&n2, nl.Type, nil)
} else {
gc.Regalloc(&n2, nl.Type, res)
}
if nl.Ullman >= nr.Ullman {
gc.Cgen(nl, &n2)
gc.Cgen(nr, &n1)
} else {
gc.Cgen(nr, &n1)
gc.Cgen(nl, &n2)
}
// test and fix up large shifts
if bounded {
if nr.Type.Width > 4 {
// delayed reg alloc
gc.Nodreg(&n1, gc.Types[gc.TUINT32], x86.REG_CX)
gc.Regalloc(&n1, gc.Types[gc.TUINT32], &n1) // to hold the shift type in CX
var lo gc.Node
var hi gc.Node
split64(&nt, &lo, &hi)
gmove(&lo, &n1)
splitclean()
}
} else {
var p1 *obj.Prog
if nr.Type.Width > 4 {
// delayed reg alloc
gc.Nodreg(&n1, gc.Types[gc.TUINT32], x86.REG_CX)
gc.Regalloc(&n1, gc.Types[gc.TUINT32], &n1) // to hold the shift type in CX
var lo gc.Node
var hi gc.Node
split64(&nt, &lo, &hi)
gmove(&lo, &n1)
gins(optoas(gc.OCMP, gc.Types[gc.TUINT32]), &hi, ncon(0))
p2 := gc.Gbranch(optoas(gc.ONE, gc.Types[gc.TUINT32]), nil, +1)
gins(optoas(gc.OCMP, gc.Types[gc.TUINT32]), &n1, ncon(uint32(w)))
p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
splitclean()
gc.Patch(p2, gc.Pc)
} else {
gins(optoas(gc.OCMP, nr.Type), &n1, ncon(uint32(w)))
p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
//......... code omitted here .........
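When the shift count itself is 64 bits wide on a 32-bit target, the code above tests it in two steps: any nonzero high word already means the count is at least 2^32 (hence "large"), and only then is the low word compared against the width. The predicate being computed:

package main

import "fmt"

// shiftCountIsLarge mirrors the two-step test above: compare the high
// word against zero first, then the low word against the operand width.
func shiftCountIsLarge(lo, hi, width uint32) bool {
	return hi != 0 || lo >= width
}

func main() {
	fmt.Println(shiftCountIsLarge(5, 0, 32)) // false: an ordinary shift by 5
	fmt.Println(shiftCountIsLarge(1, 1, 32)) // true: count is 2^32 + 1
}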