Golang bytes.LastIndexByte 函数实例源码

下面列出了 Golang bytes.LastIndexByte 函数的源码实例,从而了解它的用法。

作者:stella    项目:bridge-serve   
// normalizePath builds a normalized copy of the request path src in dst
// (dst is truncated to length 0 first) and returns the result.
// It ensures a leading '/' (addLeadingSlash), decodes src into dst
// (decodeArgAppend), then collapses duplicate slashes and resolves
// "/./", "/foo/../" and trailing "/foo/.." segments in place.
func normalizePath(dst, src []byte) []byte {
	dst = dst[:0]
	dst = addLeadingSlash(dst, src)
	dst = decodeArgAppend(dst, src, false)

	// remove duplicate slashes
	b := dst
	bSize := len(b)
	for {
		n := bytes.Index(b, strSlashSlash)
		if n < 0 {
			break
		}
		b = b[n:]
		// shift the tail left one byte, dropping one of the two slashes
		copy(b, b[1:])
		b = b[:len(b)-1]
		bSize--
	}
	dst = dst[:bSize]

	// remove /./ parts
	b = dst
	for {
		n := bytes.Index(b, strSlashDotSlash)
		if n < 0 {
			break
		}
		// nn points at the trailing '/' of "/./" so that slash is kept
		nn := n + len(strSlashDotSlash) - 1
		copy(b[n:], b[nn:])
		b = b[:len(b)-nn+n]
	}

	// remove /foo/../ parts
	for {
		n := bytes.Index(b, strSlashDotDotSlash)
		if n < 0 {
			break
		}
		// back up to the '/' that starts the segment cancelled by ".."
		nn := bytes.LastIndexByte(b[:n], '/')
		if nn < 0 {
			nn = 0
		}
		n += len(strSlashDotDotSlash) - 1
		copy(b[nn:], b[n:])
		b = b[:len(b)-n+nn]
	}

	// remove trailing /foo/..
	n := bytes.LastIndex(b, strSlashDotDot)
	if n >= 0 && n+len(strSlashDotDot) == len(b) {
		nn := bytes.LastIndexByte(b[:n], '/')
		if nn < 0 {
			// no segment left to cancel: the path collapses to "/"
			return strSlash
		}
		b = b[:nn+1]
	}

	return b
}

作者:f    项目:1pw   
// parseFolders extracts the JSON object spanning the first '{' through
// the last '}' of data and unmarshals it into a Folders value.
// It returns an error when no such object delimiters exist or when the
// extracted text is not valid JSON for Folders.
func parseFolders(data []byte) (Folders, error) {
	start := bytes.IndexByte(data, '{')
	end := bytes.LastIndexByte(data, '}')
	if start < 0 || end < start {
		return nil, errors.New("invalid folders data")
	}

	var folders Folders
	if err := json.Unmarshal(data[start:end+1], &folders); err != nil {
		return nil, err
	}
	return folders, nil
}

作者:udho    项目:nextho   
// LineBufferComplete writes the completion label autoComplete into the
// client's telnet line buffer and moves the cursor to the end of the
// buffered line. When attach is false, the word in progress (everything
// after the last space before the cursor) is overwritten; when attach is
// true the label is appended to the current content.
func (c *Client) LineBufferComplete(autoComplete string, attach bool) {
	c.mutex.Lock()
	defer c.mutex.Unlock()

	line := c.telnetLine

	if !attach {
		// rewind to just past the last space so the label replaces the
		// partially typed word
		lastSpace := bytes.LastIndexByte(line.lineBuf[:line.linePos], ' ')
		line.lineSize = lastSpace + 1
	}

	// append the label byte-by-byte, truncating if the buffer fills up
	for _, r := range autoComplete {
		if line.lineSize >= len(line.lineBuf) {
			break // buffer full
		}
		// NOTE: byte(r) keeps only the low byte of multi-byte runes,
		// matching the original behavior.
		line.lineBuf[line.lineSize] = byte(r)
		line.lineSize++
	}
	line.linePos = line.lineSize
}

作者:f    项目:1pw   
// parseBand extracts the JSON object spanning the first '{' through the
// last '}' of data and unmarshals it into a Band value.
// It returns an error when no such object delimiters exist or when the
// extracted text is not valid JSON for Band.
func parseBand(data []byte) (Band, error) {
	start := bytes.IndexByte(data, '{')
	end := bytes.LastIndexByte(data, '}')
	if start < 0 || end < start {
		return nil, errors.New("invalid band data")
	}

	var band Band
	if err := json.Unmarshal(data[start:end+1], &band); err != nil {
		return nil, err
	}
	return band, nil
}

作者:f    项目:1pw   
// parseProfile extracts the JSON object spanning the first '{' through
// the last '}' of data and unmarshals it into a freshly allocated
// Profile. It returns an error when no such object delimiters exist or
// when the extracted text is not valid JSON for Profile.
func parseProfile(data []byte) (*Profile, error) {
	start := bytes.IndexByte(data, '{')
	end := bytes.LastIndexByte(data, '}')
	if start < 0 || end < start {
		return nil, errors.New("invalid profile data")
	}

	// Unmarshal into **Profile so encoding/json allocates the struct.
	var profile *Profile
	if err := json.Unmarshal(data[start:end+1], &profile); err != nil {
		return nil, err
	}
	return profile, nil
}

作者:veqry    项目:go-emai   
// Write writes p to the underlying writer, folding the header value
// onto continuation lines ("\r\n" + one space) whenever the current
// line would grow past w.maxLineLen bytes. Folding happens at the last
// space inside the allowed width when one exists; otherwise the line is
// hard-wrapped at the width limit. It returns the total number of bytes
// written to the underlying writer (including the inserted "\r\n "
// sequences) and the first write error, if any.
func (w *headerWriter) Write(p []byte) (int, error) {
	// TODO: logic for wrapping headers is actually pretty complex for some header types, like received headers
	var total int
	for len(p)+w.curLineLen > w.maxLineLen {
		toWrite := w.maxLineLen - w.curLineLen
		// Wrap at last space, if any
		// NOTE(review): assumes w.curLineLen <= w.maxLineLen on entry;
		// if curLineLen ever exceeds maxLineLen, toWrite goes negative
		// and p[:toWrite] panics — confirm callers maintain this.
		lastSpace := bytes.LastIndexByte(p[:toWrite], byte(' '))
		if lastSpace > 0 {
			toWrite = lastSpace
		}
		written, err := w.w.Write(p[:toWrite])
		total += written
		if err != nil {
			return total, err
		}
		written, err = w.w.Write([]byte("\r\n "))
		total += written
		if err != nil {
			return total, err
		}
		p = p[toWrite:]
		w.curLineLen = 1 // Continuation lines are indented
	}
	written, err := w.w.Write(p)
	total += written
	w.curLineLen += written
	return total, err
}

作者:stormgb    项目:fasthtt   
// parseFirstLine parses the HTTP request line
// ("METHOD requestURI [HTTP/1.1]") from buf into h.method and
// h.requestURI and returns the number of bytes consumed from buf.
// Leading empty lines are skipped. h.noHTTP11 is set when no protocol
// token follows the URI or the token is not HTTP/1.1.
func (h *RequestHeader) parseFirstLine(buf []byte) (int, error) {
	bNext := buf
	var b []byte
	var err error
	// skip leading empty lines before the request line
	for len(b) == 0 {
		if b, bNext, err = nextLine(bNext); err != nil {
			return 0, err
		}
	}

	// parse method
	n := bytes.IndexByte(b, ' ')
	if n <= 0 {
		return 0, fmt.Errorf("cannot find http request method in %q", buf)
	}
	h.method = append(h.method[:0], b[:n]...)
	b = b[n+1:]

	// parse requestURI
	n = bytes.LastIndexByte(b, ' ')
	if n < 0 {
		// no protocol token at all: the rest of the line is the URI
		h.noHTTP11 = true
		n = len(b)
	} else if n == 0 {
		return 0, fmt.Errorf("RequestURI cannot be empty in %q", buf)
	} else if !bytes.Equal(b[n+1:], strHTTP11) {
		h.noHTTP11 = true
	}
	h.requestURI = append(h.requestURI[:0], b[:n]...)

	return len(buf) - len(bNext), nil
}

作者:hambste    项目:fasthtt   
// LastPathSegment returns the final component of the uri path, i.e.
// everything after the last '/'.
//
// Examples:
//
//    * For /foo/bar/baz.html path returns baz.html.
//    * For /foo/bar/ returns empty byte slice.
//    * For /foobar.js returns foobar.js.
func (x *URI) LastPathSegment() []byte {
	path := x.Path()
	if n := bytes.LastIndexByte(path, '/'); n >= 0 {
		return path[n+1:]
	}
	// no slash at all: the entire path is the segment
	return path
}

作者:MStoyko    项目:jsonutil   
// getOffsetXLineBack returns the byte offset of the newline that is
// `lines` newlines back from the end of contents, or 0 when contents
// holds fewer than `lines` newlines.
func getOffsetXLineBack(contents []byte, lines int64) int64 {
	offset := int64(len(contents))
	for lines > 0 && offset != -1 {
		offset = int64(bytes.LastIndexByte(contents[:offset], newline))
		lines--
	}
	if offset == -1 {
		return 0
	}
	return offset
}

作者:xi    项目:logro   
// Open opens the file at path for writing in append mode. If it does
// not exist it is created with permissions of perm.
//
// The returned WriteCloser keeps track of the size of the file and
// the position of the most recent newline. If during a call to Write
// a particular byte to be written would cause the file size to exceed
// maxSize bytes, and at least one newline has been written to the
// file already, then a rotation occurs before the byte is written. A
// rotation is the following procedure:
//
// Let N = highest n such that <path>.<n>.gz exists or zero
// otherwise. Let M = maxFiles. Starting at n = N, while n > M-2 and n
// > 0 delete <path>.<n>.gz and decrement n. Then, while n > 0, rename
// <path>.<n>.gz to <path>.<n+1>.gz and decrement n. Next, if M > 1,
// the contents of <path> up to and including the final newline are
// gzipped and saved to the file <path>.1.gz . Lastly, the contents of
// <path> beyond the final newline are copied to the beginning of the
// file and <path> is truncated to contain just those contents.
//
// It is safe to call Write/Close from multiple goroutines.
func Open(path string, perm os.FileMode, maxSize int64, maxFiles int) (io.WriteCloser, error) {
	if maxSize < 1 {
		return nil, errors.New("logrot: maxSize < 1")
	}
	if maxFiles < 1 {
		return nil, errors.New("logrot: maxFiles < 1")
	}
	// if path exists determine size and check path is a regular file.
	var size int64
	fi, err := os.Lstat(path)
	if err != nil && !os.IsNotExist(err) {
		return nil, err
	}
	if err == nil {
		if fi.Mode()&os.ModeType != 0 {
			return nil, fmt.Errorf("logrot: %s is not a regular file", path)
		}
		size = fi.Size()
	}
	// open path for reading/writing, creating it if necessary.
	file, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, perm)
	if err != nil {
		return nil, err
	}
	// determine last newline position within file by reading backwards.
	// The file is scanned in 8KB chunks starting with the chunk holding
	// the final byte; lastNewline stays -1 when the file has no newline.
	var lastNewline int64 = -1
	const bufExp = 13 // 8KB buffer
	buf := make([]byte, 1<<bufExp)
	// off is the chunk-aligned offset of the chunk containing byte
	// size-1. For an empty file (size == 0) off is negative, so the
	// loop below is skipped entirely.
	off := ((size - 1) >> bufExp) << bufExp
	bufSz := size - off
	for off >= 0 {
		_, err = file.ReadAt(buf[:bufSz], off)
		if err != nil {
			_ = file.Close()
			return nil, err
		}
		i := bytes.LastIndexByte(buf[:bufSz], '\n')
		if i != -1 {
			lastNewline = off + int64(i)
			break
		}
		// no newline in this chunk: step back one full chunk.
		off -= 1 << bufExp
		bufSz = 1 << bufExp
	}
	return &writeCloser{
		path:        path,
		perm:        perm,
		maxSize:     maxSize,
		maxFiles:    maxFiles,
		file:        file,
		size:        size,
		lastNewline: lastNewline,
	}, nil
}

作者:cloudfoundry-incubato    项目:consul-releas   
// parseNetstatLine extracts the local port and owning pid from a single
// pre-split netstat output line: line[1] is the "address:port" field
// (the port follows the last ':', which also handles IPv6 addresses)
// and line[4] is the pid field. It returns an error when the line is
// truncated, has no ':' in the address field, or when either numeric
// field fails to parse.
func parseNetstatLine(line [][]byte) (port, pid int, err error) {
	// Guard against truncated lines: fields 1 and 4 are indexed below
	// and would otherwise panic.
	if len(line) < 5 {
		return port, pid, errors.New("parsing port")
	}
	n := bytes.LastIndexByte(line[1], ':')
	if n < 0 {
		return port, pid, errors.New("parsing port")
	}
	port, err = strconv.Atoi(string(line[1][n+1:]))
	if err != nil {
		return
	}
	pid, err = strconv.Atoi(string(line[4]))
	return
}

作者:stella    项目:bridge-serve   
// updateBytes replaces u's contents with newURI, which may be:
//   - an absolute or scheme-relative uri (contains "//"),
//   - a host-relative path starting with '/',
//   - a query-only update starting with '?',
//   - a hash-only update starting with '#', or
//   - a path relative to the current directory.
// buf is scratch space for assembling the full uri; the (possibly
// regrown) buffer is returned so the caller can reuse it.
func (u *URI) updateBytes(newURI, buf []byte) []byte {
	if len(newURI) == 0 {
		return buf
	}

	n := bytes.Index(newURI, strSlashSlash)
	if n >= 0 {
		// absolute uri: re-parse, but restore the previous scheme when
		// newURI is scheme-relative ("//host/path") and parsing yields none.
		// NOTE(review): schemeOriginal is replaced by a fresh copy
		// whenever a scheme exists, so the [32]byte backing array is
		// effectively unused — looks like leftover scratch; confirm.
		var b [32]byte
		schemeOriginal := b[:0]
		if len(u.scheme) > 0 {
			schemeOriginal = append([]byte(nil), u.scheme...)
		}
		u.Parse(nil, newURI)
		if len(schemeOriginal) > 0 && len(u.scheme) == 0 {
			u.scheme = append(u.scheme[:0], schemeOriginal...)
		}
		return buf
	}

	if newURI[0] == '/' {
		// uri without host: keep scheme+host, replace path and beyond
		buf = u.appendSchemeHost(buf[:0])
		buf = append(buf, newURI...)
		u.Parse(nil, buf)
		return buf
	}

	// relative path
	switch newURI[0] {
	case '?':
		// query string only update
		u.SetQueryStringBytes(newURI[1:])
		return append(buf[:0], u.FullURI()...)
	case '#':
		// update only hash
		u.SetHashBytes(newURI[1:])
		return append(buf[:0], u.FullURI()...)
	default:
		// update the last path part after the slash
		path := u.Path()
		n = bytes.LastIndexByte(path, '/')
		if n < 0 {
			panic("BUG: path must contain at least one slash")
		}
		buf = u.appendSchemeHost(buf[:0])
		buf = appendQuotedPath(buf, path[:n+1])
		buf = append(buf, newURI...)
		u.Parse(nil, buf)
		return buf
	}
}

作者:yzx22    项目:scrap   
// json2Filef fetches the JSONP document at url, converts it from GBK to
// UTF-8, strips the JSONP wrapper (everything outside the outermost
// '(' ... ')'), and writes the inner payload to save_to.
// It loops forever — sleeping 5s and retrying — while the body matches
// the anti-spider redirect or contains an empty paginator. When maxPage
// is non-empty, the value between maxPage+":" and "," in the raw body is
// parsed and returned as an int; otherwise 0 is returned.
func json2Filef(url string, save_to string, maxPage string) (int, error) {
	//fmt.Println(url, maxPage)
	for {
		// record the request time in the package-level slice ll
		ll = append(ll, time.Now())
		statusCode, body, err := fasthttp.Get(nil, url)
		if err != nil {
			return 0, err
		}
		if 200 != statusCode {
			return 0, fmt.Errorf("!200")
		}

		// retry later if the server served its anti-spider page or an
		// empty paginator instead of real data
		anti_spider := "smPolicy=tmallrateweb-rate-anti_Spider-checklogin"
		idx_anti_spider := bytes.Index(body, []byte(anti_spider))
		paginator_empty := "\"paginator\":\"\""
		idx_paginator_empty := bytes.Index(body, []byte(paginator_empty))
		if idx_anti_spider >= 0 || idx_paginator_empty >= 0 {
			time.Sleep(5 * time.Second)
			continue
		}

		// GBK -> UTF-8; converted output can be up to twice the input size
		out := make([]byte, len(body)*2)
		_, bytesWritten, err := iconv.Convert(body, out, "gbk", "utf-8")

		//fmt.Println(bytesRead, bytesWritten, err)
		// NOTE(review): the Convert error above is never checked.
		idx := bytes.IndexByte(out, '(')
		if idx < 0 || bytesWritten < idx+1 {
			return 0, fmt.Errorf("idx error")
		}
		out = out[idx+1 : bytesWritten]
		//fmt.Println(string(out))
		idx_end := bytes.LastIndexByte(out, ')')
		if idx_end < 0 {
			return 0, fmt.Errorf("idx_end<0, )")
		}
		out = out[:idx_end]
		fmt.Println(save_to)
		// NOTE(review): the WriteFile error is ignored.
		ioutil.WriteFile(save_to, out, 0666)

		time.Sleep(1 * time.Second)
		if len(maxPage) > 0 {
			return strconv.Atoi(pickString(body, maxPage, ":", ","))
		} else {
			return 0, nil
		}
	}
}

作者:yzx22    项目:scrap   
// json2Filef downloads all rating pages for the given shop type, seller
// and item. Each JSONP response is converted from GBK to UTF-8, its
// wrapper (everything outside the outermost '(' ... ')') is stripped,
// and page p is saved to <root>/<type>/<sellerId>/<itemId>/<itemId>.<p>.log.
// The total page count is taken from the "lastPage" field of page 1.
// A short body matching the rate-limit sentinel triggers a 5 minute
// back-off; a failed fetch simply retries the same page.
func json2Filef(t *ShopType, sellerId, itemId string) (int, error) {
	if ex := os.MkdirAll(fmt.Sprintf("%s/%s/%s/%s", root, t.Type, sellerId, itemId), 0777); ex != nil {
		fmt.Println(ex)
		return 0, ex
	}

	for p, max := 1, 1; p <= max; {
		url := fmt.Sprintf(t.RateFormat, itemId, sellerId, p)
		// fmt.Println(url)
		if body, ok := fetchData(url); ok {
			if len(body) < 100 {
				// server-side rate-limit sentinel: back off for 5 minutes
				if "jsonp({\"status\":1111,\"wait\":5})" == string(body) {
					time.Sleep(5 * time.Minute)
				}
			}
			if p == 1 {
				// learn the total page count from the first page
				lastPage := pickString(body, "\"lastPage\":", ",\"page\"")
				fmt.Println("lastPage", lastPage)
				if v_max, err := strconv.Atoi(lastPage); err == nil {
					max = v_max
				} else {
					fmt.Println(err)
				}
			}
			// GBK -> UTF-8; converted output can be up to twice the size
			out := make([]byte, len(body)*2)
			_, bytesWritten, _ := iconv.Convert(body, out, "gbk", "utf-8")
			// strip the JSONP wrapper: keep what lies inside the
			// outermost '(' ... ')'
			idx := bytes.IndexByte(out, '(')
			if idx < 0 || bytesWritten < idx+1 {
				return 0, fmt.Errorf("idx error")
			}
			out = out[idx+1 : bytesWritten]
			idx_end := bytes.LastIndexByte(out, ')')
			if idx_end < 0 {
				return 0, fmt.Errorf("idx_end<0, )")
			}
			out = out[:idx_end]

			save_to := fmt.Sprintf("%s/%s/%s/%s/%s.%d.log", root, t.Type, sellerId, itemId, itemId, p)
			fmt.Printf("%s %d/%d [%d]\n%s\n", url, p, max, len(out), save_to)
			// NOTE(review): the WriteFile error is ignored.
			ioutil.WriteFile(save_to, out, 0666)
			time.Sleep(time.Duration(rand.Intn(5)) * time.Second)

			p++
		}
	}
	return 0, nil
}

作者:mewme    项目:u   
// NewFromBytes returns a new scanner lexing from input.
// When anything other than whitespace follows the final newline (or no
// newline exists at all), a trailing '\n' is appended so the lexer
// always sees a newline-terminated final line.
func NewFromBytes(input []byte) Scanner {
	last := bytes.LastIndexByte(input, '\n')
	tail := string(input[last+1:])
	for _, r := range tail {
		if strings.ContainsRune(whitespace, r) {
			continue
		}
		// Non-whitespace after the last newline: terminate the line.
		input = append(input, '\n')
		break
	}
	return lexer.NewLexer(input)
}

作者:kaaLabs1    项目:embell   
// expectEcho reads input until the accumulated data ends with match,
// returning the text collected before the match and true on success.
// It gives up — returning everything collected and false — when immed
// is set or a read returns no data (timeout). Complete lines that
// accumulate before the match is seen are handed to flusher and dropped
// from the accumulator.
func expectEcho(match string, immed bool, flusher func(string)) (string, bool) {
	suffix := []byte(match)
	var acc []byte
	for {
		chunk := readWithTimeout()
		acc = append(acc, chunk...)
		if bytes.HasSuffix(acc, suffix) {
			return string(acc[:len(acc)-len(suffix)]), true
		}
		if immed || len(chunk) == 0 {
			return string(acc), false
		}
		// flush finished lines so output is not held back indefinitely
		if nl := bytes.LastIndexByte(acc, '\n'); nl >= 0 {
			flusher(string(acc[:nl+1]))
			acc = acc[nl+1:]
		}
	}
}

作者:vmwar    项目:vi   
// lastIndexLines walks backwards through s from its final byte, visiting
// newline characters. Each newline found decrements *n; scanning stops
// when *n reaches zero, when no earlier newline exists, or at offset 0.
// It returns the offset of the last newline visited (len(s)-1 when s has
// no newline before its final byte; -1 for empty input).
func lastIndexLines(s []byte, n *int) int64 {
	pos := len(s) - 1

	for pos > 0 {
		nl := bytes.LastIndexByte(s[:pos], '\n')
		if nl < 0 {
			break
		}

		pos = nl
		*n--
		if *n == 0 {
			break
		}
	}

	return int64(pos)
}

作者:googl    项目:syzkalle   
// Add starts a goroutine that reads from r and forwards complete
// ('\n'-terminated) chunks to merger.Output, teeing them to merger.tee
// when it is set. Sends to Output and Err are non-blocking (select with
// default), so data is silently dropped when a channel is full. On any
// read error the remaining partial line is flushed with an added '\n',
// r is closed, the error is reported on merger.Err, and the goroutine
// exits after signalling merger.wg.
func (merger *OutputMerger) Add(name string, r io.ReadCloser) {
	merger.wg.Add(1)
	go func() {
		var pending []byte
		var buf [4 << 10]byte
		for {
			n, err := r.Read(buf[:])
			if n != 0 {
				pending = append(pending, buf[:n]...)
				// forward everything up to and including the last newline
				if pos := bytes.LastIndexByte(pending, '\n'); pos != -1 {
					out := pending[:pos+1]
					if merger.tee != nil {
						merger.tee.Write(out)
					}
					select {
					case merger.Output <- append([]byte{}, out...):
						// sent: keep only the incomplete tail in pending
						r := copy(pending[:], pending[pos+1:])
						pending = pending[:r]
					default:
					}
				}
			}
			if err != nil {
				// flush whatever is left as a final, newline-terminated line
				if len(pending) != 0 {
					pending = append(pending, '\n')
					if merger.tee != nil {
						merger.tee.Write(pending)
					}
					select {
					case merger.Output <- pending:
					default:
					}
				}
				r.Close()
				select {
				case merger.Err <- fmt.Errorf("failed to read from %v: %v", name, err):
				default:
				}
				merger.wg.Done()
				return
			}
		}
	}()
}

作者:achand    项目:g   
// scanUntilBoundary scans buf to identify how much of it can be safely
// returned as part of the Part body.
// dashBoundary is "--boundary".
// nlDashBoundary is "\r\n--boundary" or "\n--boundary", depending on what mode we are in.
// The comments below (and the name) assume "\n--boundary", but either is accepted.
// total is the number of bytes read out so far. If total == 0, then a leading "--boundary" is recognized.
// readErr is the read error, if any, that followed reading the bytes in buf.
// scanUntilBoundary returns the number of data bytes from buf that can be
// returned as part of the Part body and also the error to return (if any)
// once those data bytes are done.
func scanUntilBoundary(buf, dashBoundary, nlDashBoundary []byte, total int64, readErr error) (int, error) {
	if total == 0 {
		// At beginning of body, allow dashBoundary.
		if bytes.HasPrefix(buf, dashBoundary) {
			// matchAfterPrefix classifies what follows the prefix:
			// -1, 0 and +1 are handled as data / need-more-data / boundary
			// respectively.
			// NOTE(review): semantics inferred from the switch arms below —
			// confirm against matchAfterPrefix's definition.
			switch matchAfterPrefix(buf, dashBoundary, readErr) {
			case -1:
				return len(dashBoundary), nil
			case 0:
				return 0, nil
			case +1:
				return 0, io.EOF
			}
		}
		if bytes.HasPrefix(dashBoundary, buf) {
			// buf is a proper prefix of the boundary: need more data.
			return 0, readErr
		}
	}

	// Search for "\n--boundary".
	if i := bytes.Index(buf, nlDashBoundary); i >= 0 {
		switch matchAfterPrefix(buf[i:], nlDashBoundary, readErr) {
		case -1:
			return i + len(nlDashBoundary), nil
		case 0:
			return i, nil
		case +1:
			return i, io.EOF
		}
	}
	if bytes.HasPrefix(nlDashBoundary, buf) {
		// buf is a proper prefix of "\n--boundary": need more data.
		return 0, readErr
	}

	// Otherwise, anything up to the final \n is not part of the boundary
	// and so must be part of the body.
	// Also if the section from the final \n onward is not a prefix of the boundary,
	// it too must be part of the body.
	// i is the last occurrence of the boundary's first byte in buf.
	i := bytes.LastIndexByte(buf, nlDashBoundary[0])
	if i >= 0 && bytes.HasPrefix(nlDashBoundary, buf[i:]) {
		return i, nil
	}
	return len(buf), readErr
}

作者:vmwar    项目:vi   
// Find the offset we want to start tailing from.
// This should either be beginning-of-file or tailLines
// newlines from the EOF.
//
// The file is scanned backwards in nBytes-sized chunks; within each
// chunk, newlines are counted from the end. The returned offset is the
// byte just after the (tailLines+1)-th newline from EOF, or 0 when the
// file holds fewer newlines than that (or cannot be read).
func findSeekPos(f *os.File) int64 {
	defer trace.End(trace.Begin(""))
	nlines := tailLines
	readPos, err := f.Seek(0, 2)
	// If for some reason we can't seek, we will just start tailing from beginning-of-file
	if err != nil {
		return int64(0)
	}

	// Buffer so we can seek nBytes (default: 1k) at a time
	buf := make([]byte, nBytes)

	for readPos > 0 {
		// Go back nBytes from the last readPos we've seen (stopping at beginning-of-file)
		// and read the next nBytes
		readPos -= int64(len(buf))
		if readPos < 0 {
			// We don't want to overlap our read with previous reads...
			buf = buf[:(int(readPos) + nBytes)]
			readPos = 0
		}
		bufend, err := f.ReadAt(buf, readPos)

		// It's OK to get io.EOF here.  Anything else is bad.
		if err != nil && err != io.EOF {
			log.Errorf("Error reading from file %s: %s", f.Name(), err)
			return 0
		}

		// Start from the end of the buffer and start looking for newlines
		for bufend > 0 {
			bufend = bytes.LastIndexByte(buf[:bufend], '\n')
			if bufend < 0 {
				break
			}
			nlines--
			if nlines < 0 {
				// One newline more than requested found: the tail starts
				// just after this newline.
				return readPos + int64(bufend) + 1
			}
		}
	}
	return 0
}


问题


面经


文章

微信
公众号

扫码关注公众号