Author: glycerin, Project: zygomy
func zerorange(p *obj.Prog, frame int64, lo int64, hi int64) *obj.Prog {
cnt := hi - lo
if cnt == 0 {
return p
}
if cnt < int64(4*gc.Widthptr) {
for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
p = appendpp(p, ppc64.AMOVD, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGSP, gc.Ctxt.FixedFrameSize()+frame+lo+i)
}
} else if cnt <= int64(128*gc.Widthptr) {
p = appendpp(p, ppc64.AADD, obj.TYPE_CONST, 0, gc.Ctxt.FixedFrameSize()+frame+lo-8, obj.TYPE_REG, ppc64.REGRT1, 0)
p.Reg = ppc64.REGSP
p = appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
f := gc.Sysfunc("duffzero")
gc.Naddr(&p.To, f)
gc.Afunclit(&p.To, f)
p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr))
} else {
p = appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, gc.Ctxt.FixedFrameSize()+frame+lo-8, obj.TYPE_REG, ppc64.REGTMP, 0)
p = appendpp(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT1, 0)
p.Reg = ppc64.REGSP
p = appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, ppc64.REGTMP, 0)
p = appendpp(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT2, 0)
p.Reg = ppc64.REGRT1
p = appendpp(p, ppc64.AMOVDU, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGRT1, int64(gc.Widthptr))
p1 := p
p = appendpp(p, ppc64.ACMP, obj.TYPE_REG, ppc64.REGRT1, 0, obj.TYPE_REG, ppc64.REGRT2, 0)
p = appendpp(p, ppc64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
gc.Patch(p, p1)
}
return p
}
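For reference, zerorange above picks among three zeroing strategies purely by size: a few unrolled MOVD stores, a jump into duffzero, or an explicit MOVDU/CMP/BNE loop. Below is a minimal standalone sketch of just that threshold logic (a hypothetical helper, assuming an 8-byte pointer width); it is not part of the compiler.

package main

import "fmt"

// zeroStrategy mirrors the size thresholds used by zerorange above
// (a hypothetical helper, assuming an 8-byte pointer width).
func zeroStrategy(cnt int64) string {
	const widthptr = 8
	switch {
	case cnt == 0:
		return "nothing to do"
	case cnt < 4*widthptr:
		return "unrolled MOVD stores" // one store per pointer-sized word
	case cnt <= 128*widthptr:
		return "DUFFZERO" // enter duffzero at offset 4*(128-cnt/widthptr)
	default:
		return "MOVDU loop" // store-and-update loop closed by CMP/BNE
	}
}

func main() {
	for _, n := range []int64{0, 16, 256, 8192} {
		fmt.Printf("%5d bytes: %s\n", n, zeroStrategy(n))
	}
}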
Author: glycerin, Project: zygomy
func Prog(as obj.As) *obj.Prog {
var p *obj.Prog
if as == obj.AGLOBL {
if ddumped {
Fatalf("already dumped data")
}
if dpc == nil {
dpc = Ctxt.NewProg()
dfirst = dpc
}
p = dpc
dpc = Ctxt.NewProg()
p.Link = dpc
} else {
p = Pc
Pc = Ctxt.NewProg()
Clearp(Pc)
p.Link = Pc
}
if lineno == 0 && Debug['K'] != 0 {
Warn("prog: line 0")
}
p.As = as
p.Lineno = lineno
return p
}
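Prog above keeps one pre-allocated Prog at the tail of each list (Pc for code, dpc for data), returns it, and immediately allocates the next tail. A minimal sketch of that allocate-ahead pattern, with hypothetical names, looks like this:

package main

import "fmt"

// node mimics the allocate-ahead pattern used by Prog above: the list always
// ends in one pre-allocated tail (like Pc or dpc), so an append returns the
// current tail and immediately allocates the next one. Names are hypothetical.
type node struct {
	val  string
	link *node
}

var tail = &node{} // pre-allocated sentinel

func appendNode(v string) *node {
	n := tail
	tail = &node{}
	n.link = tail
	n.val = v
	return n
}

func main() {
	head := appendNode("first")
	appendNode("second")
	for n := head; n.link != nil; n = n.link {
		fmt.Println(n.val)
	}
}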
Author: glycerin, Project: zygomy
// Called after regopt and peep have run.
// Expand CHECKNIL pseudo-op into actual nil pointer check.
func expandchecks(firstp *obj.Prog) {
var p1 *obj.Prog
var p2 *obj.Prog
for p := firstp; p != nil; p = p.Link {
if p.As != obj.ACHECKNIL {
continue
}
if gc.Debug_checknil != 0 && p.Lineno > 1 { // p->lineno==1 in generated wrappers
gc.Warnl(p.Lineno, "generated nil check")
}
// check is
// CMP arg, $0
// JNE 2(PC) (likely)
// MOV AX, 0
p1 = gc.Ctxt.NewProg()
p2 = gc.Ctxt.NewProg()
gc.Clearp(p1)
gc.Clearp(p2)
p1.Link = p2
p2.Link = p.Link
p.Link = p1
p1.Lineno = p.Lineno
p2.Lineno = p.Lineno
p1.Pc = 9999
p2.Pc = 9999
p.As = cmpptr
p.To.Type = obj.TYPE_CONST
p.To.Offset = 0
p1.As = x86.AJNE
p1.From.Type = obj.TYPE_CONST
p1.From.Offset = 1 // likely
p1.To.Type = obj.TYPE_BRANCH
p1.To.Val = p2.Link
// crash by write to memory address 0.
// if possible, since we know arg is 0, use 0(arg),
// which will be shorter to encode than plain 0.
p2.As = x86.AMOVL
p2.From.Type = obj.TYPE_REG
p2.From.Reg = x86.REG_AX
if regtyp(&p.From) {
p2.To.Type = obj.TYPE_MEM
p2.To.Reg = p.From.Reg
} else {
p2.To.Type = obj.TYPE_MEM
p2.To.Reg = x86.REG_NONE
}
p2.To.Offset = 0
}
}
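In Go terms, the CMP/JNE/MOV sequence that expandchecks emits amounts to the following (a semantic sketch only, not the emitted code):

package main

// nilcheck is a Go-level picture of the sequence expandchecks emits:
// compare the pointer against 0 and, on the nil path, fault by writing
// through address 0 (the generated code uses a MOVL to 0 or 0(reg)).
func nilcheck(p *int) {
	if p == nil { // CMP reg, $0; JNE past the store
		*(*int)(nil) = 0 // deliberate fault, like the store to address 0
	}
}

func main() {
	x := 42
	nilcheck(&x) // non-nil: falls through without faulting
}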
Author: glycerin, Project: zygomy
/*
* insert n into reg slot of p
*/
func raddr(n *gc.Node, p *obj.Prog) {
var a obj.Addr
gc.Naddr(&a, n)
if a.Type != obj.TYPE_REG {
if n != nil {
gc.Fatalf("bad in raddr: %v", n.Op)
} else {
gc.Fatalf("bad in raddr: <null>")
}
p.Reg = 0
} else {
p.Reg = a.Reg
}
}
Author: glycerin, Project: zygomy
/*
* generate high multiply
* res = (nl * nr) >> wordsize
*/
func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
if nl.Ullman < nr.Ullman {
nl, nr = nr, nl
}
t := nl.Type
w := t.Width * 8
var n1 gc.Node
gc.Regalloc(&n1, t, res)
gc.Cgen(nl, &n1)
var n2 gc.Node
gc.Regalloc(&n2, t, nil)
gc.Cgen(nr, &n2)
switch gc.Simtype[t.Etype] {
case gc.TINT8,
gc.TINT16:
gins(optoas(gc.OMUL, t), &n2, &n1)
gshift(arm.AMOVW, &n1, arm.SHIFT_AR, int32(w), &n1)
case gc.TUINT8,
gc.TUINT16:
gins(optoas(gc.OMUL, t), &n2, &n1)
gshift(arm.AMOVW, &n1, arm.SHIFT_LR, int32(w), &n1)
// perform a long multiplication.
case gc.TINT32,
gc.TUINT32:
var p *obj.Prog
if t.IsSigned() {
p = gins(arm.AMULL, &n2, nil)
} else {
p = gins(arm.AMULLU, &n2, nil)
}
// n2 * n1 -> (n1 n2)
p.Reg = n1.Reg
p.To.Type = obj.TYPE_REGREG
p.To.Reg = n1.Reg
p.To.Offset = int64(n2.Reg)
default:
gc.Fatalf("cgen_hmul %v", t)
}
gc.Cgen(&n1, res)
gc.Regfree(&n1)
gc.Regfree(&n2)
}
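The value cgen_hmul computes is just the upper half of the double-width product; for the 32-bit cases above, a plain-Go equivalent (illustrative only) is:

package main

import "fmt"

// hmul32 and hmulu32 show, in plain Go, the value the TINT32/TUINT32 cases
// above compute: the high 32 bits of the full 64-bit product.
func hmul32(a, b int32) int32 {
	return int32((int64(a) * int64(b)) >> 32)
}

func hmulu32(a, b uint32) uint32 {
	return uint32((uint64(a) * uint64(b)) >> 32)
}

func main() {
	fmt.Println(hmul32(1<<30, 6))  // 1
	fmt.Println(hmulu32(1<<31, 4)) // 2
}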
Author: glycerin, Project: zygomy
func rewriteToPcrel(ctxt *obj.Link, p *obj.Prog) {
// RegTo2 is set on the instructions we insert here so they don't get
// processed twice.
if p.RegTo2 != 0 {
return
}
if p.As == obj.ATEXT || p.As == obj.AFUNCDATA || p.As == obj.ACALL || p.As == obj.ARET || p.As == obj.AJMP {
return
}
// Any Prog (aside from the above special cases) with an Addr with Name ==
// NAME_EXTERN, NAME_STATIC or NAME_GOTREF has a CALL __x86.get_pc_thunk.cx
// inserted before it.
isName := func(a *obj.Addr) bool {
if a.Sym == nil || (a.Type != obj.TYPE_MEM && a.Type != obj.TYPE_ADDR) || a.Reg != 0 {
return false
}
if a.Sym.Type == obj.STLSBSS {
return false
}
return a.Name == obj.NAME_EXTERN || a.Name == obj.NAME_STATIC || a.Name == obj.NAME_GOTREF
}
if isName(&p.From) && p.From.Type == obj.TYPE_ADDR {
// Handle things like "MOVL $sym, (SP)" or "PUSHL $sym" by rewriting
// to "MOVL $sym, CX; MOVL CX, (SP)" or "MOVL $sym, CX; PUSHL CX"
// respectively.
if p.To.Type != obj.TYPE_REG {
q := obj.Appendp(ctxt, p)
q.As = p.As
q.From.Type = obj.TYPE_REG
q.From.Reg = REG_CX
q.To = p.To
p.As = AMOVL
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_CX
p.To.Sym = nil
p.To.Name = obj.NAME_NONE
}
}
if !isName(&p.From) && !isName(&p.To) && (p.From3 == nil || !isName(p.From3)) {
return
}
q := obj.Appendp(ctxt, p)
q.RegTo2 = 1
r := obj.Appendp(ctxt, q)
r.RegTo2 = 1
q.As = obj.ACALL
q.To.Sym = obj.Linklookup(ctxt, "__x86.get_pc_thunk.cx", 0)
q.To.Type = obj.TYPE_MEM
q.To.Name = obj.NAME_EXTERN
q.To.Sym.Local = true
r.As = p.As
r.Scond = p.Scond
r.From = p.From
r.From3 = p.From3
r.Reg = p.Reg
r.To = p.To
obj.Nopout(p)
}
Author: glycerin, Project: zygomy
func stacksplitPost(ctxt *obj.Link, p *obj.Prog, pPre *obj.Prog, pPreempt *obj.Prog) *obj.Prog {
// MOVD LR, R5
p = obj.Appendp(ctxt, p)
pPre.Pcond = p
p.As = AMOVD
p.From.Type = obj.TYPE_REG
p.From.Reg = REG_LR
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_R5
if pPreempt != nil {
pPreempt.Pcond = p
}
// BL runtime.morestack(SB)
p = obj.Appendp(ctxt, p)
p.As = ABL
p.To.Type = obj.TYPE_BRANCH
if ctxt.Cursym.Cfunc {
p.To.Sym = obj.Linklookup(ctxt, "runtime.morestackc", 0)
} else if ctxt.Cursym.Text.From3.Offset&obj.NEEDCTXT == 0 {
p.To.Sym = obj.Linklookup(ctxt, "runtime.morestack_noctxt", 0)
} else {
p.To.Sym = obj.Linklookup(ctxt, "runtime.morestack", 0)
}
// BR start
p = obj.Appendp(ctxt, p)
p.As = ABR
p.To.Type = obj.TYPE_BRANCH
p.Pcond = ctxt.Cursym.Text.Link
return p
}
Author: glycerin, Project: zygomy
/*
* The idea is to remove redundant constants.
* $c1->v1
* ($c1->v2 s/$c1/v1)*
* set v1 return
* The v1->v2 should be eliminated by copy propagation.
*/
func constprop(c1 *obj.Addr, v1 *obj.Addr, r *gc.Flow) {
if gc.Debug['P'] != 0 {
fmt.Printf("constprop %v->%v\n", gc.Ctxt.Dconv(c1), gc.Ctxt.Dconv(v1))
}
var p *obj.Prog
for ; r != nil; r = r.S1 {
p = r.Prog
if gc.Debug['P'] != 0 {
fmt.Printf("%v", p)
}
if gc.Uniqp(r) == nil {
if gc.Debug['P'] != 0 {
fmt.Printf("; merge; return\n")
}
return
}
if p.As == arm.AMOVW && copyas(&p.From, c1) {
if gc.Debug['P'] != 0 {
fmt.Printf("; sub%v/%v", gc.Ctxt.Dconv(&p.From), gc.Ctxt.Dconv(v1))
}
p.From = *v1
} else if copyu(p, v1, nil) > 1 {
if gc.Debug['P'] != 0 {
fmt.Printf("; %vset; return\n", gc.Ctxt.Dconv(v1))
}
return
}
if gc.Debug['P'] != 0 {
fmt.Printf("\n")
}
if r.S2 != nil {
constprop(c1, v1, r.S2)
}
}
}
Author: glycerin, Project: zygomy
func appendpp(p *obj.Prog, as obj.As, ftype obj.AddrType, freg int, foffset int32, ttype obj.AddrType, treg int, toffset int32) *obj.Prog {
q := gc.Ctxt.NewProg()
gc.Clearp(q)
q.As = as
q.Lineno = p.Lineno
q.From.Type = ftype
q.From.Reg = int16(freg)
q.From.Offset = int64(foffset)
q.To.Type = ttype
q.To.Reg = int16(treg)
q.To.Offset = int64(toffset)
q.Link = p.Link
p.Link = q
return q
}
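appendpp is a splice-after on the Prog list: allocate a fresh instruction and link it immediately behind p. A generic sketch of the same splice, using a hypothetical ins type in place of obj.Prog:

package main

import "fmt"

type ins struct {
	op   string
	link *ins
}

// spliceAfter does what appendpp does to the Prog list: allocate a new
// instruction and link it immediately after p, keeping the rest of the list.
func spliceAfter(p *ins, op string) *ins {
	q := &ins{op: op, link: p.link}
	p.link = q
	return q
}

func main() {
	head := &ins{op: "TEXT"}
	q := spliceAfter(head, "MOVD")
	spliceAfter(q, "ADD") // chain forward, as zerorange does with appendpp
	for n := head; n != nil; n = n.link {
		fmt.Println(n.op)
	}
}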
Author: glycerin, Project: zygomy
func addnop(ctxt *obj.Link, p *obj.Prog) {
q := ctxt.NewProg()
// we want to use the canonical NOP (SLL $0,R0,R0) here,
// however, since the assembler always replaces $0
// with R0, we have to manually encode the SLL
// instruction as WORD $0.
q.As = AWORD
q.Lineno = p.Lineno
q.From.Type = obj.TYPE_CONST
q.From.Name = obj.NAME_NONE
q.From.Offset = 0
q.Link = p.Link
p.Link = q
}
Author: glycerin, Project: zygomy
func oplook(ctxt *obj.Link, p *obj.Prog) *Optab {
if oprange[AOR&obj.AMask] == nil {
buildop(ctxt)
}
a1 := int(p.Optab)
if a1 != 0 {
return &optab[a1-1]
}
a1 = int(p.From.Class)
if a1 == 0 {
a1 = aclass(ctxt, &p.From) + 1
p.From.Class = int8(a1)
}
a1--
a3 := int(p.To.Class)
if a3 == 0 {
a3 = aclass(ctxt, &p.To) + 1
p.To.Class = int8(a3)
}
a3--
a2 := C_NONE
if p.Reg != 0 {
a2 = C_REG
}
//print("oplook %P %d %d %d\n", p, a1, a2, a3);
ops := oprange[p.As&obj.AMask]
c1 := &xcmp[a1]
c3 := &xcmp[a3]
for i := range ops {
op := &ops[i]
if int(op.a2) == a2 && c1[op.a1] && c3[op.a3] {
p.Optab = uint16(cap(optab) - cap(ops) + i + 1)
return op
}
}
ctxt.Diag("illegal combination %v %v %v %v", obj.Aconv(p.As), DRconv(a1), DRconv(a2), DRconv(a3))
prasm(p)
if ops == nil {
ops = optab
}
return &ops[0]
}
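The expression cap(optab)-cap(ops)+i+1 in oplook caches the 1-based index of the matching entry within the full optab slice, relying on the fact that a reslice optab[k:] has its capacity reduced by exactly k. A small demonstration of that cap arithmetic (illustrative names):

package main

import "fmt"

// indexWithin shows the cap arithmetic oplook relies on when caching
// p.Optab: for ops = optab[k:], cap(optab)-cap(ops) == k, so the position
// of ops[i] inside the full table is cap(optab)-cap(ops)+i.
func indexWithin(optab, ops []string, i int) int {
	return cap(optab) - cap(ops) + i
}

func main() {
	optab := []string{"a", "b", "c", "d", "e"}
	ops := optab[2:]
	fmt.Println(indexWithin(optab, ops, 1)) // 3, and optab[3] == ops[1]
}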
Author: glycerin, Project: zygomy
// movb elimination.
// movb is simulated by the linker
// when a register other than ax, bx, cx, dx
// is used, so rewrite to other instructions
// when possible. a movb into a register
// can smash the entire 64-bit register without
// causing any trouble.
func elimshortmov(g *gc.Graph) {
var p *obj.Prog
for r := g.Start; r != nil; r = r.Link {
p = r.Prog
if regtyp(&p.To) {
switch p.As {
case x86.AINCB,
x86.AINCW:
p.As = x86.AINCL
case x86.ADECB,
x86.ADECW:
p.As = x86.ADECL
case x86.ANEGB,
x86.ANEGW:
p.As = x86.ANEGL
case x86.ANOTB,
x86.ANOTW:
p.As = x86.ANOTL
}
if regtyp(&p.From) || p.From.Type == obj.TYPE_CONST {
// move or arithmetic into partial register.
// from another register or constant can be movl.
// we don't switch to 32-bit arithmetic if it can
// change how the carry bit is set (and the carry bit is needed).
switch p.As {
case x86.AMOVB,
x86.AMOVW:
p.As = x86.AMOVL
case x86.AADDB,
x86.AADDW:
if !needc(p.Link) {
p.As = x86.AADDL
}
case x86.ASUBB,
x86.ASUBW:
if !needc(p.Link) {
p.As = x86.ASUBL
}
case x86.AMULB,
x86.AMULW:
p.As = x86.AMULL
case x86.AIMULB,
x86.AIMULW:
p.As = x86.AIMULL
case x86.AANDB,
x86.AANDW:
p.As = x86.AANDL
case x86.AORB,
x86.AORW:
p.As = x86.AORL
case x86.AXORB,
x86.AXORW:
p.As = x86.AXORL
case x86.ASHLB,
x86.ASHLW:
p.As = x86.ASHLL
}
} else {
// explicit zero extension
switch p.As {
case x86.AMOVB:
p.As = x86.AMOVBLZX
case x86.AMOVW:
p.As = x86.AMOVWLZX
}
}
}
}
}
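The "explicit zero extension" branch above rewrites MOVB/MOVW from memory into MOVBLZX/MOVWLZX, i.e. an unsigned widening of the loaded value. In plain Go, that semantics is simply:

package main

import "fmt"

// MOVBLZX and MOVWLZX load a narrow value and clear the upper bits of the
// destination, which in Go terms is just an unsigned widening.
func movblzx(b uint8) uint32  { return uint32(b) }
func movwlzx(w uint16) uint32 { return uint32(w) }

func main() {
	fmt.Printf("%#08x %#08x\n", movblzx(0xff), movwlzx(0xbeef))
}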
Author: glycerin, Project: zygomy
func Clearp(p *obj.Prog) {
obj.Nopout(p)
p.As = obj.AEND
p.Pc = int64(pcloc)
pcloc++
}
Author: glycerin, Project: zygomy
func peep(firstp *obj.Prog) {
g := gc.Flowstart(firstp, nil)
if g == nil {
return
}
gactive = 0
var p *obj.Prog
var r *gc.Flow
var t obj.As
loop1:
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
gc.Dumpit("loop1", g.Start, 0)
}
t = 0
for r = g.Start; r != nil; r = r.Link {
p = r.Prog
// TODO(austin) Handle smaller moves. arm and amd64
// distinguish between moves that *must*
// sign/zero extend and moves that don't care so they
// can eliminate moves that don't care without
// breaking moves that do care. This might let us
// simplify or remove the next peep loop, too.
if p.As == ppc64.AMOVD || p.As == ppc64.AFMOVD {
if regtyp(&p.To) {
// Try to eliminate reg->reg moves
if regtyp(&p.From) {
if p.From.Type == p.To.Type {
if copyprop(r) {
excise(r)
t++
} else if subprop(r) && copyprop(r) {
excise(r)
t++
}
}
}
// Convert uses to $0 to uses of R0 and
// propagate R0
if regzer(&p.From) {
if p.To.Type == obj.TYPE_REG {
p.From.Type = obj.TYPE_REG
p.From.Reg = ppc64.REGZERO
if copyprop(r) {
excise(r)
t++
} else if subprop(r) && copyprop(r) {
excise(r)
t++
}
}
}
}
}
}
if t != 0 {
goto loop1
}
/*
* look for MOVB x,R; MOVB R,R (for small MOVs not handled above)
*/
var p1 *obj.Prog
var r1 *gc.Flow
for r := g.Start; r != nil; r = r.Link {
p = r.Prog
switch p.As {
default:
continue
case ppc64.AMOVH,
ppc64.AMOVHZ,
ppc64.AMOVB,
ppc64.AMOVBZ,
ppc64.AMOVW,
ppc64.AMOVWZ:
if p.To.Type != obj.TYPE_REG {
continue
}
}
r1 = r.Link
if r1 == nil {
continue
}
p1 = r1.Prog
if p1.As != p.As {
continue
}
if p1.From.Type != obj.TYPE_REG || p1.From.Reg != p.To.Reg {
continue
}
if p1.To.Type != obj.TYPE_REG || p1.To.Reg != p.To.Reg {
continue
}
excise(r1)
//......... (remaining code omitted) .........
Author: glycerin, Project: zygomy
func xfol(ctxt *obj.Link, p *obj.Prog, last **obj.Prog) {
var q *obj.Prog
var r *obj.Prog
var i int
loop:
if p == nil {
return
}
a := p.As
if a == AB {
q = p.Pcond
if q != nil && q.As != obj.ATEXT {
p.Mark |= FOLL
p = q
if p.Mark&FOLL == 0 {
goto loop
}
}
}
if p.Mark&FOLL != 0 {
i = 0
q = p
for ; i < 4; i, q = i+1, q.Link {
if q == *last || q == nil {
break
}
a = q.As
if a == obj.ANOP {
i--
continue
}
if a == AB || (a == obj.ARET && q.Scond == C_SCOND_NONE) || a == ARFE || a == obj.AUNDEF {
goto copy
}
if q.Pcond == nil || (q.Pcond.Mark&FOLL != 0) {
continue
}
if a != ABEQ && a != ABNE {
continue
}
copy:
for {
r = ctxt.NewProg()
*r = *p
if r.Mark&FOLL == 0 {
fmt.Printf("can't happen 1\n")
}
r.Mark |= FOLL
if p != q {
p = p.Link
(*last).Link = r
*last = r
continue
}
(*last).Link = r
*last = r
if a == AB || (a == obj.ARET && q.Scond == C_SCOND_NONE) || a == ARFE || a == obj.AUNDEF {
return
}
r.As = ABNE
if a == ABNE {
r.As = ABEQ
}
r.Pcond = p.Link
r.Link = p.Pcond
if r.Link.Mark&FOLL == 0 {
xfol(ctxt, r.Link, last)
}
if r.Pcond.Mark&FOLL == 0 {
fmt.Printf("can't happen 2\n")
}
return
}
}
a = AB
q = ctxt.NewProg()
q.As = a
q.Lineno = p.Lineno
q.To.Type = obj.TYPE_BRANCH
q.To.Offset = p.Pc
q.Pcond = p
p = q
}
p.Mark |= FOLL
(*last).Link = p
*last = p
if a == AB || (a == obj.ARET && p.Scond == C_SCOND_NONE) || a == ARFE || a == obj.AUNDEF {
return
}
if p.Pcond != nil {
if a != ABL && a != ABX && p.Link != nil {
q = obj.Brchain(ctxt, p.Link)
//......... (remaining code omitted) .........
Author: glycerin, Project: zygomy
func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32) *obj.Prog {
// MOVW g_stackguard(g), R1
p = obj.Appendp(ctxt, p)
p.As = AMOVW
p.From.Type = obj.TYPE_MEM
p.From.Reg = REGG
p.From.Offset = 2 * int64(ctxt.Arch.PtrSize) // G.stackguard0
if ctxt.Cursym.Cfunc {
p.From.Offset = 3 * int64(ctxt.Arch.PtrSize) // G.stackguard1
}
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_R1
if framesize <= obj.StackSmall {
// small stack: SP < stackguard
// CMP stackguard, SP
p = obj.Appendp(ctxt, p)
p.As = ACMP
p.From.Type = obj.TYPE_REG
p.From.Reg = REG_R1
p.Reg = REGSP
} else if framesize <= obj.StackBig {
// large stack: SP-framesize < stackguard-StackSmall
// MOVW $-framesize(SP), R2
// CMP stackguard, R2
p = obj.Appendp(ctxt, p)
p.As = AMOVW
p.From.Type = obj.TYPE_ADDR
p.From.Reg = REGSP
p.From.Offset = int64(-framesize)
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_R2
p = obj.Appendp(ctxt, p)
p.As = ACMP
p.From.Type = obj.TYPE_REG
p.From.Reg = REG_R1
p.Reg = REG_R2
} else {
// Such a large stack we need to protect against wraparound
// if SP is close to zero.
// SP-stackguard+StackGuard < framesize + (StackGuard-StackSmall)
// The +StackGuard on both sides is required to keep the left side positive:
// SP is allowed to be slightly below stackguard. See stack.h.
// CMP $StackPreempt, R1
// MOVW.NE $StackGuard(SP), R2
// SUB.NE R1, R2
// MOVW.NE $(framesize+(StackGuard-StackSmall)), R3
// CMP.NE R3, R2
p = obj.Appendp(ctxt, p)
p.As = ACMP
p.From.Type = obj.TYPE_CONST
p.From.Offset = int64(uint32(obj.StackPreempt & (1<<32 - 1)))
p.Reg = REG_R1
p = obj.Appendp(ctxt, p)
p.As = AMOVW
p.From.Type = obj.TYPE_ADDR
p.From.Reg = REGSP
p.From.Offset = obj.StackGuard
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_R2
p.Scond = C_SCOND_NE
p = obj.Appendp(ctxt, p)
p.As = ASUB
p.From.Type = obj.TYPE_REG
p.From.Reg = REG_R1
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_R2
p.Scond = C_SCOND_NE
p = obj.Appendp(ctxt, p)
p.As = AMOVW
p.From.Type = obj.TYPE_ADDR
p.From.Offset = int64(framesize) + (obj.StackGuard - obj.StackSmall)
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_R3
p.Scond = C_SCOND_NE
p = obj.Appendp(ctxt, p)
p.As = ACMP
p.From.Type = obj.TYPE_REG
p.From.Reg = REG_R3
p.Reg = REG_R2
p.Scond = C_SCOND_NE
}
// BLS call-to-morestack
bls := obj.Appendp(ctxt, p)
bls.As = ABLS
bls.To.Type = obj.TYPE_BRANCH
var last *obj.Prog
for last = ctxt.Cursym.Text; last.Link != nil; last = last.Link {
}
//......... (remaining code omitted) .........
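The three comparisons stacksplit emits correspond to the stack-overflow predicate sketched below in plain Go (constants and names are illustrative, and the StackPreempt special case is omitted):

package main

import "fmt"

// Illustrative constants; the runtime's real values differ.
const (
	stackSmall = 128
	stackBig   = 4096
	stackGuard = 880
)

// needsSplit is a Go-level sketch of the predicate the CMP sequences above
// implement.
func needsSplit(sp, stackguard uint32, framesize int32) bool {
	switch {
	case framesize <= stackSmall:
		// small frame: SP < stackguard
		return sp < stackguard
	case framesize <= stackBig:
		// large frame: SP-framesize < stackguard-StackSmall
		return sp-uint32(framesize) < stackguard-stackSmall
	default:
		// huge frame: keep both sides positive to survive wraparound
		// when SP is close to zero:
		// SP-stackguard+StackGuard < framesize+(StackGuard-StackSmall)
		return sp-stackguard+stackGuard < uint32(framesize)+(stackGuard-stackSmall)
	}
}

func main() {
	fmt.Println(needsSplit(10000, 9000, 64))  // false: plenty of room
	fmt.Println(needsSplit(9050, 9000, 2048)) // true: would dip below the guard
}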
Author: glycerin, Project: zygomy
func progedit(ctxt *obj.Link, p *obj.Prog) {
p.From.Class = 0
p.To.Class = 0
// Rewrite B/BL to symbol as TYPE_BRANCH.
switch p.As {
case AB,
ABL,
obj.ADUFFZERO,
obj.ADUFFCOPY:
if p.To.Type == obj.TYPE_MEM && (p.To.Name == obj.NAME_EXTERN || p.To.Name == obj.NAME_STATIC) && p.To.Sym != nil {
p.To.Type = obj.TYPE_BRANCH
}
}
// Replace TLS register fetches on older ARM processors.
switch p.As {
// Treat MRC 15, 0, <reg>, C13, C0, 3 specially.
case AMRC:
if p.To.Offset&0xffff0fff == 0xee1d0f70 {
// Because the instruction might be rewritten to a BL which returns in R0
// the register must be zero.
if p.To.Offset&0xf000 != 0 {
ctxt.Diag("%v: TLS MRC instruction must write to R0 as it might get translated into a BL instruction", p.Line())
}
if ctxt.Goarm < 7 {
// Replace it with BL runtime.read_tls_fallback(SB) for ARM CPUs that lack the tls extension.
if progedit_tlsfallback == nil {
progedit_tlsfallback = obj.Linklookup(ctxt, "runtime.read_tls_fallback", 0)
}
// MOVW LR, R11
p.As = AMOVW
p.From.Type = obj.TYPE_REG
p.From.Reg = REGLINK
p.To.Type = obj.TYPE_REG
p.To.Reg = REGTMP
// BL runtime.read_tls_fallback(SB)
p = obj.Appendp(ctxt, p)
p.As = ABL
p.To.Type = obj.TYPE_BRANCH
p.To.Sym = progedit_tlsfallback
p.To.Offset = 0
// MOVW R11, LR
p = obj.Appendp(ctxt, p)
p.As = AMOVW
p.From.Type = obj.TYPE_REG
p.From.Reg = REGTMP
p.To.Type = obj.TYPE_REG
p.To.Reg = REGLINK
break
}
}
// Otherwise, MRC/MCR instructions need no further treatment.
p.As = AWORD
}
// Rewrite float constants to values stored in memory.
switch p.As {
case AMOVF:
if p.From.Type == obj.TYPE_FCONST && chipfloat5(ctxt, p.From.Val.(float64)) < 0 && (chipzero5(ctxt, p.From.Val.(float64)) < 0 || p.Scond&C_SCOND != C_SCOND_NONE) {
f32 := float32(p.From.Val.(float64))
i32 := math.Float32bits(f32)
literal := fmt.Sprintf("$f32.%08x", i32)
s := obj.Linklookup(ctxt, literal, 0)
p.From.Type = obj.TYPE_MEM
p.From.Sym = s
p.From.Name = obj.NAME_EXTERN
p.From.Offset = 0
}
case AMOVD:
if p.From.Type == obj.TYPE_FCONST && chipfloat5(ctxt, p.From.Val.(float64)) < 0 && (chipzero5(ctxt, p.From.Val.(float64)) < 0 || p.Scond&C_SCOND != C_SCOND_NONE) {
i64 := math.Float64bits(p.From.Val.(float64))
literal := fmt.Sprintf("$f64.%016x", i64)
s := obj.Linklookup(ctxt, literal, 0)
p.From.Type = obj.TYPE_MEM
p.From.Sym = s
p.From.Name = obj.NAME_EXTERN
p.From.Offset = 0
}
}
if ctxt.Flag_dynlink {
rewriteToUseGot(ctxt, p)
}
}
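The float-constant rewrite above names the pooled memory literal after the bit pattern of the value. A small standalone sketch of that naming scheme:

package main

import (
	"fmt"
	"math"
)

// floatSymName shows how the AMOVF/AMOVD cases above name the memory
// literal substituted for a float constant: the bit pattern of the value,
// formatted as $f32.%08x or $f64.%016x.
func floatSymName(v float64, single bool) string {
	if single {
		return fmt.Sprintf("$f32.%08x", math.Float32bits(float32(v)))
	}
	return fmt.Sprintf("$f64.%016x", math.Float64bits(v))
}

func main() {
	fmt.Println(floatSymName(1.5, true))  // $f32.3fc00000
	fmt.Println(floatSymName(1.5, false)) // $f64.3ff8000000000000
}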
Author: glycerin, Project: zygomy
// If s==nil, copyu returns the set/use of v in p; otherwise, it
// modifies p to replace reads of v with reads of s and returns 0 for
// success or non-zero for failure.
//
// If s==nil, copyu returns one of the following values:
// 1 if v only used
// 2 if v is set and used in one address (read-alter-rewrite;
// can't substitute)
// 3 if v is only set
// 4 if v is set in one address and used in another (so addresses
// can be rewritten independently)
// 0 otherwise (not touched)
func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
if p.From3Type() != obj.TYPE_NONE {
// never generates a from3
fmt.Printf("copyu: from3 (%v) not implemented\n", gc.Ctxt.Dconv(p.From3))
}
switch p.As {
default:
fmt.Printf("copyu: can't find %v\n", obj.Aconv(p.As))
return 2
case obj.ANOP, /* read p->from, write p->to */
mips.AMOVV,
mips.AMOVF,
mips.AMOVD,
mips.AMOVH,
mips.AMOVHU,
mips.AMOVB,
mips.AMOVBU,
mips.AMOVW,
mips.AMOVWU,
mips.AMOVFD,
mips.AMOVDF,
mips.AMOVDW,
mips.AMOVWD,
mips.AMOVFW,
mips.AMOVWF,
mips.AMOVDV,
mips.AMOVVD,
mips.AMOVFV,
mips.AMOVVF,
mips.ATRUNCFV,
mips.ATRUNCDV,
mips.ATRUNCFW,
mips.ATRUNCDW:
if s != nil {
if copysub(&p.From, v, s, true) {
return 1
}
// Update only indirect uses of v in p->to
if !copyas(&p.To, v) {
if copysub(&p.To, v, s, true) {
return 1
}
}
return 0
}
if copyas(&p.To, v) {
// Fix up implicit from
if p.From.Type == obj.TYPE_NONE {
p.From = p.To
}
if copyau(&p.From, v) {
return 4
}
return 3
}
if copyau(&p.From, v) {
return 1
}
if copyau(&p.To, v) {
// p->to only indirectly uses v
return 1
}
return 0
case mips.ASGT, /* read p->from, read p->reg, write p->to */
mips.ASGTU,
mips.AADD,
mips.AADDU,
mips.ASUB,
mips.ASUBU,
mips.ASLL,
mips.ASRL,
mips.ASRA,
mips.AOR,
mips.ANOR,
mips.AAND,
mips.AXOR,
mips.AADDV,
mips.AADDVU,
mips.ASUBV,
//......... (remaining code omitted) .........
Author: glycerin, Project: zygomy
// copysub1 replaces v with s in p1->reg if f==true or indicates if it could if f==false.
// Returns true on failure to substitute (it always succeeds on mips).
// TODO(dfc) remove unused return value, remove calls with f=false as they do nothing.
func copysub1(p1 *obj.Prog, v *obj.Addr, s *obj.Addr, f bool) bool {
if f && copyau1(p1, v) {
p1.Reg = s.Reg
}
return false
}
Author: glycerin, Project: zygomy
// Rewrite p, if necessary, to access global data via the global offset table.
func rewriteToUseGot(ctxt *obj.Link, p *obj.Prog) {
if p.As == obj.ADUFFCOPY || p.As == obj.ADUFFZERO {
// ADUFFxxx $offset
// becomes
// MOVW runtime.duffxxx@GOT, R9
// ADD $offset, R9
// CALL (R9)
var sym *obj.LSym
if p.As == obj.ADUFFZERO {
sym = obj.Linklookup(ctxt, "runtime.duffzero", 0)
} else {
sym = obj.Linklookup(ctxt, "runtime.duffcopy", 0)
}
offset := p.To.Offset
p.As = AMOVW
p.From.Type = obj.TYPE_MEM
p.From.Name = obj.NAME_GOTREF
p.From.Sym = sym
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_R9
p.To.Name = obj.NAME_NONE
p.To.Offset = 0
p.To.Sym = nil
p1 := obj.Appendp(ctxt, p)
p1.As = AADD
p1.From.Type = obj.TYPE_CONST
p1.From.Offset = offset
p1.To.Type = obj.TYPE_REG
p1.To.Reg = REG_R9
p2 := obj.Appendp(ctxt, p1)
p2.As = obj.ACALL
p2.To.Type = obj.TYPE_MEM
p2.To.Reg = REG_R9
return
}
// We only care about global data: NAME_EXTERN means a global
// symbol in the Go sense, and p.Sym.Local is true for a few
// internally defined symbols.
if p.From.Type == obj.TYPE_ADDR && p.From.Name == obj.NAME_EXTERN && !p.From.Sym.Local {
// MOVW $sym, Rx becomes MOVW sym@GOT, Rx
// MOVW $sym+<off>, Rx becomes MOVW sym@GOT, Rx; ADD <off>, Rx
if p.As != AMOVW {
ctxt.Diag("do not know how to handle TYPE_ADDR in %v with -dynlink", p)
}
if p.To.Type != obj.TYPE_REG {
ctxt.Diag("do not know how to handle LEAQ-type insn to non-register in %v with -dynlink", p)
}
p.From.Type = obj.TYPE_MEM
p.From.Name = obj.NAME_GOTREF
if p.From.Offset != 0 {
q := obj.Appendp(ctxt, p)
q.As = AADD
q.From.Type = obj.TYPE_CONST
q.From.Offset = p.From.Offset
q.To = p.To
p.From.Offset = 0
}
}
if p.From3 != nil && p.From3.Name == obj.NAME_EXTERN {
ctxt.Diag("don't know how to handle %v with -dynlink", p)
}
var source *obj.Addr
// MOVx sym, Ry becomes MOVW sym@GOT, R9; MOVx (R9), Ry
// MOVx Ry, sym becomes MOVW sym@GOT, R9; MOVx Ry, (R9)
// An addition may be inserted between the two MOVs if there is an offset.
if p.From.Name == obj.NAME_EXTERN && !p.From.Sym.Local {
if p.To.Name == obj.NAME_EXTERN && !p.To.Sym.Local {
ctxt.Diag("cannot handle NAME_EXTERN on both sides in %v with -dynlink", p)
}
source = &p.From
} else if p.To.Name == obj.NAME_EXTERN && !p.To.Sym.Local {
source = &p.To
} else {
return
}
if p.As == obj.ATEXT || p.As == obj.AFUNCDATA || p.As == obj.ACALL || p.As == obj.ARET || p.As == obj.AJMP {
return
}
if source.Sym.Type == obj.STLSBSS {
return
}
if source.Type != obj.TYPE_MEM {
ctxt.Diag("don't know how to handle %v with -dynlink", p)
}
p1 := obj.Appendp(ctxt, p)
p2 := obj.Appendp(ctxt, p1)
p1.As = AMOVW
p1.From.Type = obj.TYPE_MEM
p1.From.Sym = source.Sym
p1.From.Name = obj.NAME_GOTREF
p1.To.Type = obj.TYPE_REG
p1.To.Reg = REG_R9
p2.As = p.As
p2.From = p.From
p2.To = p.To
if p.From.Name == obj.NAME_EXTERN {
//......... (remaining code omitted) .........