作者:hagn
项目:camlistor
// GetImageInfo returns the cached dimensions of the image file fileRef,
// or os.ErrNotExist if no size was indexed for it.
func (x *Index) GetImageInfo(fileRef *blobref.BlobRef) (*search.ImageInfo, error) {
	// The dimensions key may be absent when image.DecodeConfig failed at
	// index time (e.g. unsupported JPEG features like progressive mode).
	sizeKey := keyImageSize.Key(fileRef.String())
	val, err := x.s.Get(sizeKey)
	if err != nil {
		if err == ErrNotFound {
			return nil, os.ErrNotExist
		}
		return nil, err
	}
	// The value is stored as "<width>|<height>".
	fields := strings.Split(val, "|")
	if len(fields) != 2 {
		return nil, fmt.Errorf("index: bogus key %q = %q", sizeKey, val)
	}
	w, err := strconv.Atoi(fields[0])
	if err != nil {
		return nil, fmt.Errorf("index: bogus integer at position 0 in key %q: %q", sizeKey, fields[0])
	}
	h, err := strconv.Atoi(fields[1])
	if err != nil {
		return nil, fmt.Errorf("index: bogus integer at position 1 in key %q: %q", sizeKey, fields[1])
	}
	return &search.ImageInfo{
		Width:  w,
		Height: h,
	}, nil
}
作者:hagn
项目:camlistor
// blobDirectory returns the directory in which blob b is stored for the
// given partition: <partitionRoot>/<hashName>/<digest[0:3]>/<digest[3:6]>.
func (ds *DiskStorage) blobDirectory(partition string, b *blobref.BlobRef) string {
	digest := b.Digest()
	// Pad short digests so both three-character path levels can be sliced.
	if len(digest) < 6 {
		digest += "______"
	}
	return filepath.Join(ds.PartitionRoot(partition), b.HashName(), digest[:3], digest[3:6])
}
作者:splad
项目:camlistor
// NewShareRef builds a "share" schema map granting authType access to
// target, optionally transitively.
func NewShareRef(authType string, target *blobref.BlobRef, transitive bool) map[string]interface{} {
	share := newCamliMap(1, "share")
	share["target"] = target.String()
	share["authType"] = authType
	share["transitive"] = transitive
	return share
}
作者:robry
项目:camlistor
// ResolvePrefixHop takes a parent blobref and a few hex characters of the
// digest of the next hop, and returns the complete blobref of the prefix,
// if that's a valid next hop.
func (sh *Handler) ResolvePrefixHop(parent *blobref.BlobRef, prefix string) (child *blobref.BlobRef, err error) {
	// TODO: this is a linear scan right now. this should be
	// optimized to use a new database table of members so this is
	// a quick lookup. in the meantime it should be in memcached
	// at least.
	if len(prefix) < 8 {
		return nil, fmt.Errorf("Member prefix %q too small", prefix)
	}
	req := sh.NewDescribeRequest()
	req.Describe(parent, 1)
	described, err := req.Result()
	if err != nil {
		return nil, err
	}
	parentDes, ok := described[parent.String()]
	if !ok {
		return nil, fmt.Errorf("Failed to describe member %q in parent %q", prefix, parent)
	}
	if parentDes.Permanode != nil {
		// The permanode's own content ref may already be the hop we want.
		if cr, ok := parentDes.ContentRef(); ok && strings.HasPrefix(cr.Digest(), prefix) {
			return cr, nil
		}
		// Otherwise scan the members for a matching digest prefix.
		for _, member := range parentDes.Members() {
			if strings.HasPrefix(member.BlobRef.Digest(), prefix) {
				return member.BlobRef, nil
			}
		}
	}
	return nil, fmt.Errorf("Member prefix %q not found in %q", prefix, parent)
}
作者:splad
项目:camlistor
// ReceiveBlob reads a blob from source, verifies that its digest matches
// blobRef, and indexes its contents in a single batch mutation.
// It returns the sized blobref on success.
func (ix *Index) ReceiveBlob(blobRef *blobref.BlobRef, source io.Reader) (retsb blobref.SizedBlobRef, err error) {
	sniffer := new(BlobSniffer)
	hash := blobRef.Hash()
	// Tee the blob into the digest hash and the sniffer as it's consumed.
	var written int64
	written, err = io.Copy(io.MultiWriter(hash, sniffer), source)
	if err != nil {
		return
	}
	// Refuse to index a blob whose content doesn't match its blobref.
	if !blobRef.HashMatches(hash) {
		err = blobserver.ErrCorruptBlob
		return
	}
	sniffer.Parse()
	bm := ix.s.BeginBatch()
	err = ix.populateMutation(blobRef, sniffer, bm)
	if err != nil {
		return
	}
	err = ix.s.CommitBatch(bm)
	if err != nil {
		return
	}
	mimeType := sniffer.MimeType()
	log.Printf("indexer: received %s; type=%v; truncated=%v", blobRef, mimeType, sniffer.IsTruncated())
	// Keyed fields keep this literal valid (and `go vet`-clean) if
	// blobref.SizedBlobRef ever grows or reorders fields.
	return blobref.SizedBlobRef{BlobRef: blobRef, Size: written}, nil
}
作者:splad
项目:camlistor
// AddClaim records a fake claim by owner on permanode. set-attribute
// claims with a "camliPath:" attribute additionally register a path entry.
func (fi *FakeIndex) AddClaim(owner, permanode *blobref.BlobRef, claimType, attr, value string) {
	fi.lk.Lock()
	defer fi.lk.Unlock()
	cl := &search.Claim{
		Permanode: permanode,
		Signer:    nil,
		BlobRef:   nil,
		Date:      fi.nextDate(),
		Type:      claimType,
		Attr:      attr,
		Value:     value,
	}
	key := permanode.String() + "/" + owner.String()
	fi.ownerClaims[key] = append(fi.ownerClaims[key], cl)
	if claimType != "set-attribute" || !strings.HasPrefix(attr, "camliPath:") {
		return
	}
	// camliPath claims also establish a named path from the permanode.
	suffix := strings.TrimPrefix(attr, "camliPath:")
	fi.path[fmt.Sprintf("%s\x00%s\x00%s", owner, permanode, suffix)] = &search.Path{
		Target: blobref.MustParse(value),
		Suffix: suffix,
	}
}
作者:t3rm1n4
项目:camlistor
// scaledCached reads the scaled version of the image in file, if it is in
// cache. On success, the image format (mime subtype, e.g. "jpeg") is
// returned.
func (ih *ImageHandler) scaledCached(buf *bytes.Buffer, file *blobref.BlobRef) (format string, err error) {
	name := cacheKey(file.String(), ih.MaxWidth, ih.MaxHeight)
	br, err := ih.sc.Get(name)
	if err != nil {
		return "", fmt.Errorf("%v: %v", name, err)
	}
	fr, err := ih.cached(br)
	if err != nil {
		return "", fmt.Errorf("No cache hit for %v: %v", br, err)
	}
	if _, err = io.Copy(buf, fr); err != nil {
		return "", fmt.Errorf("error reading cached thumbnail %v: %v", name, err)
	}
	// Sniff the bytes to verify the cached blob really is an image.
	mime := magic.MimeType(buf.Bytes())
	if mime == "" {
		return "", fmt.Errorf("error with cached thumbnail %v: unknown mime type", name)
	}
	parts := strings.Split(mime, "/")
	if len(parts) < 2 {
		return "", fmt.Errorf("error with cached thumbnail %v: bogus mime type", name)
	}
	if parts[0] != "image" {
		return "", fmt.Errorf("error with cached thumbnail %v: not an image", name)
	}
	return parts[1], nil
}
作者:oria
项目:camlistor
// getSuperset fetches and parses the schema Superset for br, memoizing the
// result on the root FileReader so each schema blob is fetched and parsed
// at most once per file tree.
func (fr *FileReader) getSuperset(br *blobref.BlobRef) (*Superset, error) {
	// All caching state (fr.sfg, fr.ssm) lives on the root reader;
	// sub-readers delegate there so the cache is shared.
	if root := fr.rootReader(); root != fr {
		return root.getSuperset(br)
	}
	brStr := br.String()
	// sfg.Do deduplicates concurrent calls for the same key
	// (singleflight-style), so at most one fetch per blobref is in flight.
	ssi, err := fr.sfg.Do(brStr, func() (interface{}, error) {
		// Fast path: already parsed and memoized.
		fr.ssmmu.Lock()
		ss, ok := fr.ssm[brStr]
		fr.ssmmu.Unlock()
		if ok {
			return ss, nil
		}
		rsc, _, err := fr.fetcher.Fetch(br)
		if err != nil {
			return nil, fmt.Errorf("schema/filereader: fetching file schema blob: %v", err)
		}
		defer rsc.Close()
		ss, err = ParseSuperset(rsc)
		if err != nil {
			return nil, err
		}
		// Memoize for future calls.
		fr.ssmmu.Lock()
		defer fr.ssmmu.Unlock()
		fr.ssm[brStr] = ss
		return ss, nil
	})
	if err != nil {
		return nil, err
	}
	return ssi.(*Superset), nil
}
作者:propp
项目:camlistor
// newClaim allocates a Builder for a "claim" schema blob about permaNode,
// stamped with claim time t.
func newClaim(permaNode *blobref.BlobRef, t time.Time, claimType string) *Builder {
	b := newMap(1, "claim")
	b.m["claimType"] = claimType
	b.m["permaNode"] = permaNode.String()
	b.SetClaimDate(t)
	return b
}
作者:t3rm1n4
项目:camlistor
// newClaim builds a "claim" schema Map about permaNode, stamped with claim
// time t.
func newClaim(permaNode *blobref.BlobRef, t time.Time, claimType string) Map {
	claim := newMap(1, "claim")
	claim["claimType"] = claimType
	claim["permaNode"] = permaNode.String()
	claim.SetClaimDate(t)
	return claim
}
作者:t3rm1n4
项目:camlistor
// NewShareRef builds a "share" schema Map granting authType access to
// target, optionally transitively.
func NewShareRef(authType string, target *blobref.BlobRef, transitive bool) Map {
	share := newMap(1, "share")
	share["target"] = target.String()
	share["authType"] = authType
	share["transitive"] = transitive
	return share
}
作者:t3rm1n4
项目:camlistor
// fetchSchemaSuperset fetches br and decodes it as a camli schema blob,
// caching successful results by blobref string.
//
// Errors returned are:
// os.ErrNotExist -- blob not found
// os.ErrInvalid -- not JSON or a camli schema blob
func (fs *CamliFileSystem) fetchSchemaSuperset(br *blobref.BlobRef) (*schema.Superset, error) {
	key := br.String()
	// Fast path: already decoded and cached.
	if cached, ok := fs.blobToSchema.Get(key); ok {
		return cached.(*schema.Superset), nil
	}
	rsc, _, err := fs.fetcher.Fetch(br)
	if err != nil {
		return nil, err
	}
	defer rsc.Close()
	ss := new(schema.Superset)
	if err := json.NewDecoder(rsc).Decode(ss); err != nil {
		log.Printf("Error parsing %s as schema blob: %v", br, err)
		return nil, os.ErrInvalid
	}
	// JSON alone isn't enough: a schema blob must declare its camliType.
	if ss.Type == "" {
		log.Printf("blob %s is JSON but lacks camliType", br)
		return nil, os.ErrInvalid
	}
	ss.BlobRef = br
	fs.blobToSchema.Add(key, ss)
	return ss, nil
}
作者:ngerakine
项目:camlistor
// NoteBlobExists records that br exists with the given size, both in memory
// and by appending a "<ref> <size>" line to the on-disk have-cache file.
// Disk errors are logged and otherwise ignored (the cache is best-effort).
func (c *FlatHaveCache) NoteBlobExists(br *blobref.BlobRef, size int64) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if size < 0 {
		panic("negative size")
	}
	k := br.String()
	if c.m[k] == size {
		return // already recorded; skip the duplicate append
	}
	c.m[k] = size
	// Lazily open the append-only cache file on first write.
	if c.af == nil {
		f, err := os.OpenFile(c.filename, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)
		if err != nil {
			log.Printf("opening have-cache for append: %v", err)
			return
		}
		c.af = f
	}
	// TODO: flocking. see leveldb-go.
	c.af.Seek(0, os.SEEK_END)
	fmt.Fprintf(c.af, "%s %d\n", k, size)
}
作者:splad
项目:camlistor
// NewClaim builds a "claim" schema map about permaNode, stamped with the
// current time as its claim date.
func NewClaim(permaNode *blobref.BlobRef, claimType string) map[string]interface{} {
	claim := newCamliMap(1, "claim")
	claim["claimDate"] = RFC3339FromTime(time.Now())
	claim["permaNode"] = permaNode.String()
	claim["claimType"] = claimType
	return claim
}
作者:robry
项目:camlistor
// NewShareRef allocates a Builder for a "share" schema blob granting
// authType access to target, optionally transitively.
func NewShareRef(authType string, target *blobref.BlobRef, transitive bool) *Builder {
	b := base(1, "share")
	b.m["target"] = target.String()
	b.m["authType"] = authType
	b.m["transitive"] = transitive
	return b
}
作者:robry
项目:camlistor
// StatBlobCache reports whether br is recorded in the SQLite have-cache
// and, if so, its size. Any failure talking to the sqlite child is fatal.
func (c *SQLiteHaveCache) StatBlobCache(br *blobref.BlobRef) (size int64, ok bool) {
	if br == nil {
		return 0, false
	}
	// TODO(mpl): is it enough that we know it's a valid blobref to avoid any injection risk ?
	query := blobSizeQuery + fmt.Sprintf("'%v';\n", br.String())
	c.mu.Lock()
	defer c.mu.Unlock()
	if err := c.startSQLiteChild(); err != nil {
		log.Fatalf("Could not start sqlite child process: %v", err)
	}
	if _, err := c.w.Write([]byte(query)); err != nil {
		log.Fatalf("failed to query have cache: %v", err)
	}
	line, err := c.r.ReadString('\n')
	if err != nil {
		log.Fatalf("failed to read have cache query result: %v", err)
	}
	line = strings.TrimRight(line, "\n")
	// The child prints a sentinel when the blob isn't in the table.
	if line == noResult {
		return 0, false
	}
	size, err = strconv.ParseInt(line, 10, 64)
	if err != nil {
		log.Fatalf("Bogus blob size in %v table: %v", haveTableName, err)
	}
	return size, true
}
作者:capote
项目:camlistor
// populateMutation populates keys & values into the provided BatchMutation.
//
// The blobref can be trusted at this point (it's been fully consumed
// and verified to match), and the sniffer has been populated.
func (ix *Index) populateMutation(br *blobref.BlobRef, sniffer *BlobSniffer, bm BatchMutation) error {
	key := br.String()
	bm.Set("have:"+key, fmt.Sprintf("%d", sniffer.Size()))
	bm.Set("meta:"+key, fmt.Sprintf("%d|%s", sniffer.Size(), sniffer.MimeType()))
	ss, ok := sniffer.Superset()
	if !ok {
		// Not a schema blob; the have/meta rows above are all we index.
		return nil
	}
	switch ss.Type {
	case "claim":
		return ix.populateClaim(br, ss, sniffer, bm)
	case "permanode":
		// Permanode indexing is currently disabled:
		// if err := mi.populatePermanode(blobRef, camli, bm); err != nil {
		// 	return err
		// }
	case "file":
		return ix.populateFile(br, ss, bm)
	case "directory":
		return ix.populateDir(br, ss, bm)
	}
	return nil
}
作者:splad
项目:camlistor
// NotifyBlobReceived fans blob out to every registered listener channel:
// global listeners plus any listeners registered for this specific blobref.
// Delivery happens on a separate goroutine so callers are never blocked.
func (h *SimpleBlobHub) NotifyBlobReceived(blob *blobref.BlobRef) {
	h.l.Lock()
	defer h.l.Unlock()
	// Callback channels to notify, nil until non-empty.
	var notify []chan *blobref.BlobRef
	// Append global listeners.
	for ch := range h.listeners {
		notify = append(notify, ch)
	}
	// Append blob-specific listeners.
	if h.blobListeners != nil {
		blobstr := blob.String()
		if set, ok := h.blobListeners[blobstr]; ok {
			for ch := range set {
				notify = append(notify, ch)
			}
		}
	}
	// Run in a separate goroutine so NotifyBlobReceived doesn't block
	// callers if callbacks are slow.
	go func() {
		for _, ch := range notify {
			ch <- blob
		}
	}()
}
作者:oria
项目:camlistor
// FetchVia fetches blob b from the blobserver, optionally declaring the
// chain of share blobs v through which access is authorized (the "via"
// query parameter). It returns the blob's contents and size. In sharing
// mode (c.via non-nil), the first megabyte is sniffed for blobrefs so
// later fetches can extend the via chain.
func (c *Client) FetchVia(b *blobref.BlobRef, v []*blobref.BlobRef) (io.ReadCloser, int64, error) {
	pfx, err := c.prefix()
	if err != nil {
		return nil, 0, err
	}
	// Named fetchURL (not "url") to avoid shadowing the net/url package.
	fetchURL := fmt.Sprintf("%s/camli/%s", pfx, b)
	if len(v) > 0 {
		// Append the via chain as a comma-separated query parameter.
		buf := bytes.NewBufferString(fetchURL)
		buf.WriteString("?via=")
		for i, br := range v {
			if i != 0 {
				buf.WriteString(",")
			}
			buf.WriteString(br.String())
		}
		fetchURL = buf.String()
	}
	req := c.newRequest("GET", fetchURL)
	resp, err := c.httpClient.Do(req)
	if err != nil {
		return nil, 0, err
	}
	if resp.StatusCode != 200 {
		// fmt.Errorf, not errors.New(fmt.Sprintf(...)).
		return nil, 0, fmt.Errorf("Got status code %d from blobserver for %s", resp.StatusCode, b)
	}
	size := resp.ContentLength
	if size == -1 {
		return nil, 0, errors.New("blobserver didn't return a Content-Length for blob")
	}
	if c.via == nil {
		// Not in sharing mode, so return immediately.
		return resp.Body, size, nil
	}
	// Slurp 1 MB to find references to other blobrefs for the via path.
	const maxSlurp = 1 << 20
	var buf bytes.Buffer
	if _, err = io.Copy(&buf, io.LimitReader(resp.Body, maxSlurp)); err != nil {
		return nil, 0, err
	}
	// If it looks like a JSON schema blob (starts with '{'), remember that
	// every blobref mentioned inside it is reachable via b.
	if schema.LikelySchemaBlob(buf.Bytes()) {
		for _, blobstr := range blobsRx.FindAllString(buf.String(), -1) {
			c.via[blobstr] = b.String()
		}
	}
	// Read from the multireader (slurped bytes + remainder), but close the
	// HTTP response body.
	type rc struct {
		io.Reader
		io.Closer
	}
	return rc{io.MultiReader(&buf, resp.Body), resp.Body}, size, nil
}
作者:robry
项目:camlistor
// DescribeSync describes br at depth 1 and returns its DescribedBlob
// (nil if the result set doesn't contain it).
func (dr *DescribeRequest) DescribeSync(br *blobref.BlobRef) (*DescribedBlob, error) {
	dr.Describe(br, 1)
	described, err := dr.Result()
	if err != nil {
		return nil, err
	}
	return described[br.String()], nil
}