作者:Jeffai
项目:leap
/*
FixPrematureTransform - Used by clients to fix incoming and outgoing transforms when local changes
have been applied to a document before being routed through the server.
In order for a client UI to be non-blocking it must apply local changes as the user types them before
knowing the correct order of the change. Therefore, it is possible to apply a local change before
receiving incoming transforms that are meant to be applied beforehand.
As a solution to those situations this function allows a client to alter an incoming transform
such that if they were to be applied to the local document after our local change they would result
in the same document. The outgoing transform is also modified for sending out to the server.
It is possible that the local change has already been dispatched to the server, in which case it is
the server's responsibility to fix the transform so that other clients end up at the same result.
NOTE: These fixes do not regard or alter the versions of either transform.
*/
// FixPrematureTransform mutates both transforms in place: `unapplied` is an
// incoming transform not yet applied locally, `unsent` is a local transform
// already applied but not yet acknowledged by the server. After the fix,
// applying `unapplied` on top of the local change yields the same document on
// every client. Versions are not inspected or altered.
func FixPrematureTransform(unapplied, unsent *OTransform) {
	var before, after *OTransform
	// Order the OTs by position in the document.
	if unapplied.Position < unsent.Position {
		before = unapplied
		after = unsent
	} else {
		before = unsent
		after = unapplied
	}
	// Get insertion lengths (codepoints)
	bInsert, aInsert := bytes.Runes([]byte(before.Insert)), bytes.Runes([]byte(after.Insert))
	bLength, aLength := len(bInsert), len(aInsert)
	if before.Delete == 0 {
		// Pure insertion ahead of `after`: shift `after` right by the insert length.
		after.Position += bLength
	} else if (before.Delete + before.Position) <= after.Position {
		// `before`'s deletion ends before `after` begins: shift `after` by the
		// net size change (insert length minus delete length).
		after.Position += (bLength - before.Delete)
	} else {
		// The deletion of `before` overlaps the range `after` acts on.
		posGap := after.Position - before.Position
		// excess = how far `before`'s deletion reaches past the start of `after`.
		excess := intMax(0, before.Delete-posGap)
		if excess > after.Delete {
			// `before` deletes beyond `after`'s entire deletion, swallowing
			// `after`'s insertion point as well: grow `before`'s deletion to
			// cover `after`'s insert and merge that insert into `before`.
			before.Delete += (aLength - after.Delete)
			before.Insert = before.Insert + after.Insert
		} else {
			// Otherwise truncate `before`'s deletion where `after` begins.
			before.Delete = posGap
		}
		// Shrink `after`'s deletion by the part `before` already removed, and
		// reposition it just after `before`'s insertion.
		after.Delete = intMax(0, after.Delete-excess)
		after.Position = before.Position + bLength
	}
}
作者:haldea
项目:x
// NewBuffer reads all of r into a new Buffer, splitting the content into
// lines on '\n'. The trailing chunk of input (even when empty) is terminated
// with a '\n' and stored as the final line, preserving the original behavior
// that every stored line ends in a newline. Any read error other than io.EOF
// is returned together with the partially filled buffer.
func NewBuffer(r io.Reader) (Buffer, error) {
	var b Buffer
	b.Lines = make([]Line, 0)
	// bytes are read into this from the reader
	rb := make([]byte, READ_BUFFER_SIZE)
	// bytes are queued here until we find EOL
	lb := make([]byte, 0, READ_BUFFER_SIZE)
	for {
		n, err := r.Read(rb)
		// Process the bytes we got before looking at err: Read may legally
		// return data together with io.EOF.
		for _, c := range rb[:n] {
			lb = append(lb, c)
			if c == '\n' {
				b.Lines = append(b.Lines, Line(bytes.Runes(lb)))
				// Reuse the backing array instead of reallocating per line;
				// safe because bytes.Runes copies into a fresh []rune.
				lb = lb[:0]
			}
		}
		if err == io.EOF {
			// Newline-terminate whatever is pending (possibly nothing) and
			// store it as the last line.
			lb = append(lb, '\n')
			b.Lines = append(b.Lines, Line(bytes.Runes(lb)))
			return b, nil
		}
		if err != nil {
			return b, err
		}
	}
}
作者:Jeffai
项目:leap
/*
FixOutOfDateTransform - When a transform created for a specific version is later determined to come
after one or more other transforms it can be fixed. This fix translates the transform such that
being applied in the correct order will preserve the original intention.
In order to apply these fixes this function should be called with the target transform and the
actual versioned transform that the target currently 'believes' it is. So, for example, if the
transform was written for version 7 and was actually 10 you would call FixOutOfDateTransform in this
order:
FixOutOfDateTransform(target, version7)
FixOutOfDateTransform(target, version8)
FixOutOfDateTransform(target, version9)
Once the transform is adjusted through this fix it can be harmlessly dispatched to all other clients
which will end up with the same document as the client that submitted this transform.
NOTE: These fixes do not regard or alter the versions of either transform.
*/
// FixOutOfDateTransform rebases `sub` over the already-applied transform
// `pre` (see the block comment above). Both may be mutated; versions are not
// touched.
func FixOutOfDateTransform(sub, pre *OTransform) {
	// Get insertion lengths (codepoints)
	subInsert, preInsert := bytes.Runes([]byte(sub.Insert)), bytes.Runes([]byte(pre.Insert))
	subLength, preLength := len(subInsert), len(preInsert)
	if pre.Position <= sub.Position {
		if preLength > 0 && pre.Delete == 0 {
			// Pure insertion before sub: shift sub right by the insert length.
			sub.Position += preLength
		} else if pre.Delete > 0 && (pre.Position+pre.Delete) <= sub.Position {
			// pre's deletion ends before sub starts: shift by net size change.
			sub.Position += (preLength - pre.Delete)
		} else if pre.Delete > 0 && (pre.Position+pre.Delete) > sub.Position {
			// pre's deletion overlaps the start of sub: the overlapping part
			// of sub's own deletion was already removed by pre, so shrink it
			// and move sub to just after pre's insertion.
			overhang := intMin(sub.Delete, (pre.Position+pre.Delete)-sub.Position)
			sub.Delete -= overhang
			sub.Position = pre.Position + preLength
		}
	} else if sub.Delete > 0 && (sub.Position+sub.Delete) > pre.Position {
		// sub starts first and its deletion reaches into pre's range.
		posGap := pre.Position - sub.Position
		// excess = portion of sub's deletion extending past pre's start.
		excess := intMax(0, (sub.Delete - posGap))
		if excess > pre.Delete {
			// sub deletes past pre's entire deletion: absorb pre's insertion
			// into sub so the final document still contains it exactly once.
			sub.Delete += (preLength - pre.Delete)
			newInsert := make([]rune, subLength+preLength)
			copy(newInsert[:], subInsert)
			copy(newInsert[subLength:], preInsert)
			sub.Insert = string(newInsert)
		} else {
			// Otherwise stop sub's deletion where pre begins.
			sub.Delete = posGap
		}
	}
}
作者:andrewhamo
项目:encoj
// makeEncoding builds a rune-to-rune translation table between the base64
// alphabet and the "encoji" alphabet. With decode=false the table maps base64
// runes to encoji runes; with decode=true the direction is reversed. Panics
// when the two alphabets do not contain the same number of runes.
func makeEncoding(decode bool) (encoding map[rune]rune) {
	base64Runes := bytes.Runes([]byte("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/="))
	encojiRunes := bytes.Runes([]byte(":grinning_face::grimacing_face::beaming_face_with_smiling_eyes::face_with_tears_of_joy::grinning_face_with_big_eyes::grinning_face_with_smiling_eyes::grinning_face_with_sweat::grinning_squinting_face::smiling_face_with_halo::winking_face::smiling_face_with_smiling_eyes::face_savoring_food::relieved_face::smiling_face_with_heart-eyes::face_blowing_a_kiss::kissing_face::kissing_face_with_smiling_eyes::kissing_face_with_closed_eyes::winking_face_with_tongue::squinting_face_with_tongue::face_with_tongue::smiling_face_with_sunglasses::smirking_face::pile_of_poo::ghost::grinning_cat::grinning_cat_with_smiling_eyes::cat_with_tears_of_joy::smiling_cat_with_heart-eyes::dog_face::cat_face::mouse_face::hamster::rabbit_face::bear::panda::koala::tiger_face::cow_face::pig_face::pig_nose::frog::octopus::monkey_face::see-no-evil_monkey::hear-no-evil_monkey::speak-no-evil_monkey::monkey::chicken::penguin::bird::baby_chick::hatching_chick::front-facing_baby_chick::wolf::boar::horse_face::honeybee::bug::snail::lady_beetle::ant::elephant::dolphin::spouting_whale:"))
	if len(base64Runes) != len(encojiRunes) {
		panic("Charsets must be of same length")
	}
	// Pick the mapping direction, then fill the table in one pass.
	src, dst := base64Runes, encojiRunes
	if decode {
		src, dst = encojiRunes, base64Runes
	}
	encoding = make(map[rune]rune)
	for i, r := range src {
		encoding[r] = dst[i]
	}
	return encoding
}
作者:gobwa
项目:json.g
// scanIdentifier consumes a run of lowercase letters and returns it as a
// keyword token (NULL/TRUE/FALSE) when the run matches one of the JSON
// literals, or an ILLEGAL token otherwise. The first character that is not a
// lowercase letter (and not eof) is pushed back onto the reader.
func (self *Scanner) scanIdentifier() (*Token, error) {
	var buf bytes.Buffer
	for {
		char := self.read()
		if isLowerCaseLetter(char) {
			buf.WriteRune(char)
			continue
		}
		if char != eof {
			// Not part of the identifier; leave it for the next scan.
			self.unread()
		}
		break
	}
	lit := buf.String()
	tok := ILLEGAL
	switch lit {
	case "null":
		tok = NULL
	case "true":
		tok = TRUE
	case "false":
		tok = FALSE
	}
	return &Token{tok, lit, bytes.Runes(buf.Bytes())}, nil
}
作者:gobwa
项目:json.g
// scanString reads a JSON string token, including both surrounding quotes.
// If the next character is not an opening quote it is pushed back and an
// empty STRING token is returned instead of an error.
func (self *Scanner) scanString() (*Token, error) {
	var (
		buf    bytes.Buffer
		screen bool // previous char was a backslash: next char is an escape code
		stop   bool
	)
	// skip first quote
	char := self.read()
	if char != '"' {
		// todo err here?
		self.unread()
		return &Token{STRING, buf.String(), bytes.Runes(buf.Bytes())}, nil
	}
	buf.WriteRune(char)
	for !stop {
		char := self.read()
		if unicode.IsControl(char) {
			// Raw control characters end the token; push the char back for
			// the caller. NOTE(review): depending on the value of `eof`, this
			// may also intercept eof before the `char == eof` case below —
			// confirm which path eof actually takes.
			self.unread()
			stop = true
			continue
		}
		if screen {
			// We are immediately after a backslash: accept only valid JSON
			// escape codes. NOTE(review): a 'u' escape is accepted here
			// without consuming or validating the four hex digits that must
			// follow it — confirm whether that is handled elsewhere.
			screen = false
			switch char {
			case '"', '\\', '/', 'b', 'f', 'n', 'r', 't', 'u':
				buf.WriteRune(char)
			default:
				return nil, fmt.Errorf("unexpected end of input: %q", string(char))
			}
			continue
		}
		switch {
		case char == '\\':
			// Start of an escape sequence; keep the backslash in the literal.
			screen = true
			buf.WriteRune(char)
		case char == '"':
			// Closing quote terminates the string.
			buf.WriteRune(char)
			stop = true
		case char == eof:
			stop = true
		default:
			buf.WriteRune(char)
		}
	}
	return &Token{STRING, buf.String(), bytes.Runes(buf.Bytes())}, nil
}
作者:dohodge
项目:gofung
// Load populates the FungeSpace from the program text supplied by reader.
// LF and CR advance the cursor to the start of the next row (for 2D+ funges),
// FF advances to the next plane for 3D+ funges or the next row for 2D ones;
// every other rune is stored at the cursor, which then advances along X.
func (fs *FungeSpace) Load(reader io.Reader) error {
	src, err := ioutil.ReadAll(reader)
	if err != nil {
		return err
	}
	pos := fs.funge.Origin()
	for _, r := range bytes.Runes(src) {
		switch r {
		case '\n', '\r': // 012, 015: end of row
			if fs.funge > 1 {
				pos = pos.Add(fs.funge.Delta(YAxis, Forward))
				pos.Set(XAxis, 0)
			}
		case '\f': // 014: end of plane (3D+) or row (2D)
			switch {
			case fs.funge > 2:
				pos = pos.Add(fs.funge.Delta(ZAxis, Forward))
				pos.Set(XAxis, 0)
				pos.Set(YAxis, 0)
			case fs.funge == 2:
				pos = pos.Add(fs.funge.Delta(YAxis, Forward))
				pos.Set(XAxis, 0)
			}
		default:
			fs.Put(pos, r)
			pos = pos.Add(fs.funge.Delta(XAxis, Forward))
		}
	}
	return nil
}
作者:sunfmi
项目:learn_go_the_hard_wa
// main demonstrates the different textual representations of one UTF-8
// byte slice: raw string, decimal bytes, hex, base64, and runes.
func main() {
	data := []byte("Hello, I think you are right.你是对的!")
	fmt.Println(`Say you have a string:`, "\n\t", string(data))
	fmt.Println(`Whose byte array in decimal are:`, "\n\t", data)
	fmt.Println(`So in hex: `, "\n\t", fmt.Sprintf("% x", data))
	hex := fmt.Sprintf("%x", data)
	fmt.Println(`So you see this form a lot:`, "\n\t", hex)
	var b64 bytes.Buffer
	enc := base64.NewEncoder(base64.URLEncoding, &b64)
	enc.Write(data)
	enc.Close()
	fmt.Println(`And base64 of the same data:`, "\n\t", b64.String())
	var bs []byte
	fmt.Sscanf(hex, "%x", &bs)
	fmt.Println(`And you can parse`, "\n\t", hex, "\n", "into bytes", "\n\t", bs)
	fmt.Println(`Whose runes are:`, "\n\t", bytes.Runes(bs))
	fmt.Println(`And each rune matching one charactor not matter its chinese or english:`, "\n\t", string(bs))
}
作者:robert-butt
项目:slackter
// Write appends a byte slice into the view's internal buffer. Because
// View implements the io.Writer interface, it can be passed as parameter
// of functions like fmt.Fprintf, fmt.Fprintln, io.Copy, etc. Clear must
// be called to clear the view's buffer.
func (v *View) Write(p []byte) (n int, err error) {
	v.tainted = true
	for _, r := range bytes.Runes(p) {
		switch r {
		case '\n':
			// Start a fresh, empty line.
			v.lines = append(v.lines, nil)
		case '\r':
			// Carriage return wipes the current (last) line.
			if nl := len(v.lines); nl > 0 {
				v.lines[nl-1] = nil
			} else {
				v.lines = make([][]cell, 1)
			}
		default:
			cells := v.parseInput(r)
			if cells == nil {
				continue
			}
			if nl := len(v.lines); nl > 0 {
				v.lines[nl-1] = append(v.lines[nl-1], cells...)
			} else {
				v.lines = append(v.lines, cells)
			}
		}
	}
	return len(p), nil
}
作者:rjkroeg
项目:acm
// Load reads the full contents of fd, decodes them as UTF-8, and inserts the
// resulting runes into the buffer at rune offset q0. It returns the number of
// runes inserted. The nulls parameter is accepted for interface compatibility
// but is not written to here.
//
// Fixes over the previous revision: the read loop no longer panics on io.EOF
// (os.File.Read reports EOF as an error), and bytes already converted are now
// consumed from the staging buffer — previously `copy(p, p[:m])` was a no-op,
// so earlier content was re-converted and re-inserted on every iteration.
func (b *Buffer) Load(q0 uint, fd *os.File, nulls *int) int {
	if q0 > b.nc {
		panic("internal error: buffer.Load")
	}
	p := make([]byte, MaxBlock+utf8.UTFMax+1)
	m := 0 // number of pending (not yet converted) bytes at the front of p
	q1 := q0
	for {
		n, err := fd.Read(p[m:])
		m += n
		// Convert complete runes only; a trailing partial UTF-8 sequence is
		// held back until the next read supplies the rest of it. At EOF
		// (err != nil) leftovers are decoded anyway, yielding RuneError for
		// genuinely invalid bytes.
		l := 0
		var runes []rune
		for l < m {
			if err == nil && !utf8.FullRune(p[l:m]) {
				break
			}
			ru, size := utf8.DecodeRune(p[l:m])
			runes = append(runes, ru)
			l += size
		}
		if nr := len(runes); nr > 0 {
			b.Insert(q1, uint(nr), runes)
			q1 += uint(nr)
		}
		// Shift unconsumed bytes to the front of the staging buffer.
		copy(p, p[l:m])
		m -= l
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		if n == 0 {
			// Zero-byte read without error: stop instead of spinning.
			break
		}
	}
	return int(q1 - q0)
}
作者:4gopher
项目:transli
// Encode writes the transliteration encoding of data to the stream. Each rune
// with an entry in enc.tbl is replaced by its transliteration (possibly
// adjusted by fixRuleRune based on the neighbouring runes); all other runes
// are copied through unchanged. The writer is flushed before returning.
func (enc *Encoder) Encode(data []byte) error {
	r := bytes.Runes(data)
	l := len(r)
	var rPrev, rNext rune
	for i, rCurr := range r {
		// Look one rune ahead; zero signals "no next rune".
		// (Previously this read r[i] under `i+1 <= l`, i.e. the CURRENT rune,
		// so context-sensitive fix rules never saw the real successor.)
		if i+1 < l {
			rNext = r[i+1]
		} else {
			rNext = 0
		}
		var err error
		if s, ok := enc.tbl[rCurr]; ok {
			if sFix, ok := fixRuleRune(rPrev, rCurr, rNext, enc.sys); ok {
				s = sFix
			}
			_, err = enc.WriteString(s)
			if err != nil {
				return err
			}
		} else {
			_, err = enc.WriteRune(rCurr)
			if err != nil {
				return err
			}
		}
		rPrev = rCurr
	}
	return enc.Flush()
}
作者:keg
项目:k
// truncate shortens s to at most `length` runes, replacing the removed tail
// with "..." when truncation occurs. Counting runes (not bytes) means
// multi-byte characters are never split. Previously a length below 3 caused
// runes[:length-3] to panic with a negative index; such lengths now simply
// cut the string to `length` runes (there is no room for the ellipsis).
func truncate(s string, length int) string {
	runes := bytes.Runes([]byte(s))
	if len(runes) <= length {
		return s
	}
	if length < 3 {
		// Clamp negative lengths to an empty result instead of panicking.
		if length < 0 {
			length = 0
		}
		return string(runes[:length])
	}
	return string(runes[:length-3]) + "..."
}
作者:bcampbel
项目:blev
// Filter normalizes CJK width variants in each token's term: fullwidth ASCII
// forms (U+FF01..U+FF5E) are folded to their ASCII counterparts, and
// halfwidth Katakana forms (U+FF65..U+FF9F) are mapped via kanaNorm, merging
// halfwidth voicing marks (U+FF9E/U+FF9F) into the preceding rune when
// combine() succeeds. Tokens are rewritten in place and the same stream is
// returned.
func (s *CJKWidthFilter) Filter(input analysis.TokenStream) analysis.TokenStream {
	for _, token := range input {
		runeCount := utf8.RuneCount(token.Term)
		runes := bytes.Runes(token.Term)
		for i := 0; i < runeCount; i++ {
			ch := runes[i]
			if ch >= 0xFF01 && ch <= 0xFF5E {
				// fullwidth ASCII variants: constant offset to ASCII
				runes[i] -= 0xFEE0
			} else if ch >= 0xFF65 && ch <= 0xFF9F {
				// halfwidth Katakana variants
				if (ch == 0xFF9E || ch == 0xFF9F) && i > 0 && combine(runes, i, ch) {
					// combine() folded this voicing mark into runes[i-1];
					// drop the mark and re-examine the shortened slice from
					// the same logical position.
					runes = analysis.DeleteRune(runes, i)
					i--
					runeCount = len(runes)
				} else {
					runes[i] = kanaNorm[ch-0xFF65]
				}
			}
		}
		token.Term = analysis.BuildTermFromRunes(runes)
	}
	return input
}
作者:Jack
项目:WebTer
// redirToWs pumps bytes from file descriptor fd to the websocket ws. Only
// whole UTF-8 sequences are forwarded: a partial rune at the end of a read is
// carried over to the front of the buffer for the next iteration. A panic is
// logged to stderr and the goroutine exits.
func redirToWs(fd int, ws *websocket.Conn) {
	defer func() {
		if r := recover(); r != nil {
			fmt.Fprintf(os.Stderr, "Error occurred: %s\n", r)
			runtime.Goexit()
		}
	}()
	var buf [8192]byte
	start, end, buflen := 0, 0, 0
	for {
		switch nr, er := syscall.Read(fd, buf[start:]); {
		case nr < 0:
			// Fix: `er` is an error value; %d printed it as a meaningless
			// number — use %v.
			fmt.Fprintf(os.Stderr, "error reading from websocket %d with code %v\n", fd, er)
			return
		case nr == 0: // EOF
			return
		case nr > 0:
			buflen = start + nr
			// Walk backwards to the start of the last rune so that only
			// complete UTF-8 sequences are shipped this round.
			for end = buflen - 1; end >= 0; end-- {
				if utf8.RuneStart(buf[end]) {
					ch, width := utf8.DecodeRune(buf[end:buflen])
					if ch != utf8.RuneError {
						// Final rune is complete; include it.
						end += width
					}
					break
				}
				if buflen-end >= 6 {
					fmt.Fprintf(os.Stderr, "Invalid UTF-8 sequence in output")
					end = nr
					break
				}
			}
			runes := bytes.Runes(buf[0:end])
			buf_clean := []byte(string(runes))
			nw, ew := ws.Write(buf_clean[:])
			if ew != nil {
				fmt.Fprintf(os.Stderr, "error writing to websocket with code %s\n", ew)
				return
			}
			if nw != len(buf_clean) {
				// Fix: the "expected" figure previously reported `end` (the
				// pre-sanitizing byte count) rather than the written slice's
				// actual length.
				fmt.Fprintf(os.Stderr, "Written %d instead of expected %d\n", nw, len(buf_clean))
			}
			start = buflen - end
			if start > 0 {
				// copy remaining read bytes from the end to the beginning of
				// the buffer so the next read appends after them
				for i := 0; i < start; i++ {
					buf[i] = buf[end+i]
				}
			}
		}
	}
}
作者:zacharyvoas
项目:jpip
// encodeStringAsASCII renders str as a double-quoted, pure-ASCII JSON string
// literal: printable ASCII passes through, quote/backslash and common control
// characters use short escapes, '<', '>' and '&' are escaped for HTML safety,
// and all other runes become \uXXXX escapes. Runes outside the Basic
// Multilingual Plane are now emitted as UTF-16 surrogate pairs — previously a
// single over-long escape such as \u1f600 was produced, which is not a valid
// JSON escape.
func encodeStringAsASCII(str string) string {
	var output bytes.Buffer
	output.WriteByte('"')
	for _, b := range bytes.Runes([]byte(str)) {
		if b < utf8.RuneSelf {
			switch b {
			case '\\', '"':
				output.WriteByte('\\')
				output.WriteByte(byte(b))
			case '\n':
				output.WriteByte('\\')
				output.WriteByte('n')
			case '\r':
				output.WriteByte('\\')
				output.WriteByte('r')
			case '\t':
				output.WriteByte('\\')
				output.WriteByte('t')
			default:
				if b < 0x20 || b == '<' || b == '>' || b == '&' {
					// Lowercase hex, matching the non-ASCII branch below.
					fmt.Fprintf(&output, `\u%04x`, b)
				} else {
					output.WriteByte(byte(b))
				}
			}
		} else if b > 0xFFFF {
			// Encode astral-plane runes as a UTF-16 surrogate pair.
			c := b - 0x10000
			fmt.Fprintf(&output, `\u%04x\u%04x`, 0xD800+(c>>10), 0xDC00+(c&0x3FF))
		} else {
			fmt.Fprintf(&output, `\u%04x`, b)
		}
	}
	output.WriteByte('"')
	return output.String()
}
作者:nouris
项目:blev
// Filter expands every input token into all of its character n-grams with
// sizes between s.minLength and s.maxLength (inclusive). Each n-gram keeps
// the position, offsets and type of the token it came from.
func (s *NgramFilter) Filter(input analysis.TokenStream) analysis.TokenStream {
	rv := make(analysis.TokenStream, 0, len(input))
	for _, tok := range input {
		count := utf8.RuneCount(tok.Term)
		runes := bytes.Runes(tok.Term)
		for start := 0; start < count; start++ {
			// Emit every n-gram size that still fits at this start index.
			for size := s.minLength; size <= s.maxLength && start+size <= count; size++ {
				gram := analysis.Token{
					Position: tok.Position,
					Start:    tok.Start,
					End:      tok.End,
					Type:     tok.Type,
					Term:     buildTermFromRunes(runes[start : start+size]),
				}
				rv = append(rv, &gram)
			}
		}
	}
	return rv
}
作者:acoo
项目:blackligh
// Rmo returns the sequence with the rune at index n removed.
func (t T) Rmo(n N) sequence {
	runes := bytes.Runes([]byte(t))
	return T(append(runes[:n], runes[n+1:]...))
}
作者:grafo
项目:gocu
// Write appends a byte slice into the view's internal buffer. Because
// View implements the io.Writer interface, it can be passed as parameter
// of functions like fmt.Fprintf, fmt.Fprintln, io.Copy, etc. Clear must
// be called to clear the view's buffer.
func (v *View) Write(p []byte) (n int, err error) {
	v.tainted = true
	for _, r := range bytes.Runes(p) {
		switch r {
		case '\n':
			// Open a fresh, empty line.
			v.lines = append(v.lines, nil)
		case '\r':
			// Carriage return wipes the current (last) line.
			if last := len(v.lines) - 1; last >= 0 {
				v.lines[last] = nil
			} else {
				v.lines = make([][]rune, 1)
			}
		default:
			if last := len(v.lines) - 1; last >= 0 {
				v.lines[last] = append(v.lines[last], r)
			} else {
				v.lines = append(v.lines, []rune{r})
			}
		}
	}
	return len(p), nil
}
作者:rolandshoemake
项目:dns-workbenc
// decode transforms punycode input bytes (that represent a DNS label) into a
// Unicode byte stream. On any malformed input the original bytes are returned
// unchanged. NOTE(review): unlike RFC 3492's reference algorithm this loop
// performs no overflow checks on i/w — confirm inputs are bounded upstream.
func decode(b []byte) []byte {
	src := b // b would move and we need to keep it
	n, bias := _N, _BIAS
	if !bytes.HasPrefix(b, []byte(_PREFIX)) {
		// Not a punycode label at all.
		return b
	}
	out := make([]rune, 0, len(b))
	b = b[len(_PREFIX):]
	for pos := len(b) - 1; pos >= 0; pos-- {
		// only last delimiter is our interest: everything before it is
		// literal (already-ASCII) output copied through verbatim.
		if b[pos] == _DELIMITER {
			out = append(out, bytes.Runes(b[:pos])...)
			b = b[pos+1:] // trim source string
			break
		}
	}
	if len(b) == 0 {
		return src
	}
	var (
		i, oldi, w rune
		ch         byte
		t, digit   rune
		ln         int
	)
	// Main decode loop (RFC 3492 section 6.2): each pass decodes one
	// generalized-variable-length integer and inserts one code point.
	for i = 0; len(b) > 0; i++ {
		oldi, w = i, 1
		for k := _BASE; len(b) > 0; k += _BASE {
			ch, b = b[0], b[1:]
			digit = digitval(rune(ch))
			if digit == errdigit {
				// Invalid digit: bail out with the untouched input.
				return src
			}
			i += digit * w
			t = tfunc(k, bias)
			if digit < t {
				break
			}
			w *= _BASE - t
		}
		ln = len(out) + 1
		bias = adapt(i-oldi, ln, oldi == 0)
		// i packs both the code point delta and the insertion position.
		n += i / rune(ln)
		i = i % rune(ln)
		// insert code point n at position i
		out = append(out, 0)
		copy(out[i+1:], out[i:])
		out[i] = n
	}
	var ret bytes.Buffer
	for _, r := range out {
		ret.WriteRune(r)
	}
	return ret.Bytes()
}
作者:acgshar
项目:blev
// decompose splits a compound token into dictionary sub-words. Every rune
// window with a size between f.minSubWordSize and f.maxSubWordSize whose text
// appears in f.dict becomes a candidate token; when f.onlyLongestMatch is
// set, only the longest candidate per start position is emitted.
func (f *DictionaryCompoundFilter) decompose(token *analysis.Token) []*analysis.Token {
	runes := bytes.Runes(token.Term)
	rv := make([]*analysis.Token, 0)
	rlen := len(runes)
	for i := 0; i <= (rlen - f.minSubWordSize); i++ {
		var longestMatchToken *analysis.Token
		for j := f.minSubWordSize; j <= f.maxSubWordSize; j++ {
			if i+j > rlen {
				break
			}
			_, inDict := f.dict[string(runes[i:i+j])]
			if inDict {
				// NOTE(review): Start/End add RUNE indexes (i, i+j) to the
				// token's byte offset; for multi-byte terms these offsets
				// will not be byte-accurate — confirm the intended units.
				newtoken := analysis.Token{
					Term:     []byte(string(runes[i : i+j])),
					Position: token.Position,
					Start:    token.Start + i,
					End:      token.Start + i + j,
					Type:     token.Type,
					KeyWord:  token.KeyWord,
				}
				if f.onlyLongestMatch {
					// Keep only the longest match starting at i.
					if longestMatchToken == nil || utf8.RuneCount(longestMatchToken.Term) < j {
						longestMatchToken = &newtoken
					}
				} else {
					rv = append(rv, &newtoken)
				}
			}
		}
		if f.onlyLongestMatch && longestMatchToken != nil {
			rv = append(rv, longestMatchToken)
		}
	}
	return rv
}