Author: JayBlaze42
Project: camlistor
// Empirically:
// open for read: req.Flags == 0
// open for append: req.Flags == 1
// open for write: req.Flags == 1
// open for read/write (+<) == 2 (bitmask? of?)
//
// open flags are O_WRONLY (1), O_RDONLY (0), or O_RDWR (2), and maybe also
// a bitmask of O_SYMLINK (0x200000). (from
// fuse_filehandle_xlate_to_oflags in macosx/kext/fuse_file.h)
func (n *mutFile) Open(req *fuse.OpenRequest, res *fuse.OpenResponse, intr fuse.Intr) (fuse.Handle, fuse.Error) {
mutFileOpen.Incr()
log.Printf("mutFile.Open: %v: content: %v dir=%v flags=%v mode=%v", n.permanode, n.content, req.Dir, req.Flags, req.Mode)
r, err := schema.NewFileReader(n.fs.fetcher, n.content)
if err != nil {
mutFileOpenError.Incr()
log.Printf("mutFile.Open: %v", err)
return nil, fuse.EIO
}
// Turn off the OpenDirectIO bit (on by default in rsc fuse server.go),
// else append operations don't work for some reason.
res.Flags &= ^fuse.OpenDirectIO
// Read-only.
if req.Flags == 0 {
mutFileOpenRO.Incr()
log.Printf("mutFile.Open returning read-only file")
n := &node{
fs: n.fs,
blobref: n.content,
}
return &nodeReader{n: n, fr: r}, nil
}
mutFileOpenRW.Incr()
log.Printf("mutFile.Open returning read-write filehandle")
defer r.Close()
return n.newHandle(r)
}
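For reference, the write-access check that the later snippets call isWriteFlags only needs the low access-mode bits described in the comment above. A minimal sketch, assuming bazil.org/fuse's fuse.OpenFlags type and the standard os flag constants (the real camlistor helper may differ):
func isWriteFlags(flags fuse.OpenFlags) bool {
	// The low bits carry the access mode: O_RDONLY (0), O_WRONLY (1), O_RDWR (2).
	// A read-only open leaves both write bits clear, so any overlap with
	// O_WRONLY|O_RDWR means the handle was requested for writing (or appending).
	return flags&fuse.OpenFlags(os.O_WRONLY|os.O_RDWR) != 0
}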
Author: camlistor
Project: camlistor
// Empirically:
// open for read: req.Flags == 0
// open for append: req.Flags == 1
// open for write: req.Flags == 1
// open for read/write (+<) == 2 (bitmask? of?)
//
// open flags are O_WRONLY (1), O_RDONLY (0), or O_RDWR (2), and maybe also
// a bitmask of O_SYMLINK (0x200000). (from
// fuse_filehandle_xlate_to_oflags in macosx/kext/fuse_file.h)
func (n *roFile) Open(ctx context.Context, req *fuse.OpenRequest, res *fuse.OpenResponse) (fs.Handle, error) {
roFileOpen.Incr()
if isWriteFlags(req.Flags) {
return nil, fuse.EPERM
}
log.Printf("roFile.Open: %v: content: %v dir=%v flags=%v", n.permanode, n.content, req.Dir, req.Flags)
r, err := schema.NewFileReader(n.fs.fetcher, n.content)
if err != nil {
roFileOpenError.Incr()
log.Printf("roFile.Open: %v", err)
return nil, fuse.EIO
}
// Turn off the OpenDirectIO bit (on by default in rsc fuse server.go),
// else append operations don't work for some reason.
res.Flags &= ^fuse.OpenDirectIO
// Read-only.
nod := &node{
fs: n.fs,
blobref: n.content,
}
return &nodeReader{n: nod, fr: r}, nil
}
Author: sfrdm
Project: camlistor
// Empirically:
// open for read: req.Flags == 0
// open for append: req.Flags == 1
// open for write: req.Flags == 1
// open for read/write (+<) == 2 (bitmask? of?)
//
// open flags are O_WRONLY (1), O_RDONLY (0), or O_RDWR (2), and maybe also
// a bitmask of O_SYMLINK (0x200000). (from
// fuse_filehandle_xlate_to_oflags in macosx/kext/fuse_file.h)
func (n *mutFile) Open(req *fuse.OpenRequest, res *fuse.OpenResponse, intr fs.Intr) (fs.Handle, fuse.Error) {
mutFileOpen.Incr()
log.Printf("mutFile.Open: %v: content: %v dir=%v flags=%v", n.permanode, n.content, req.Dir, req.Flags)
r, err := schema.NewFileReader(n.fs.fetcher, n.content)
if err != nil {
mutFileOpenError.Incr()
log.Printf("mutFile.Open: %v", err)
return nil, fuse.EIO
}
// Read-only.
if !isWriteFlags(req.Flags) {
mutFileOpenRO.Incr()
log.Printf("mutFile.Open returning read-only file")
n := &node{
fs: n.fs,
blobref: n.content,
}
return &nodeReader{n: n, fr: r}, nil
}
mutFileOpenRW.Incr()
log.Printf("mutFile.Open returning read-write filehandle")
defer r.Close()
return n.newHandle(r)
}
Author: stevear
Project: camlistor
func (dh *DownloadHandler) fileInfo(req *http.Request, file blob.Ref) (fi fileInfo, packed bool, err error) {
// Fast path for blobpacked.
fi, ok := fileInfoPacked(dh.Search, dh.Fetcher, req, file)
if debugPack {
log.Printf("download.go: fileInfoPacked: ok=%v, %+v", ok, fi)
}
if ok {
return fi, true, nil
}
fr, err := schema.NewFileReader(dh.blobSource(), file)
if err != nil {
return
}
mime := dh.ForceMIME
if mime == "" {
mime = magic.MIMETypeFromReaderAt(fr)
}
if mime == "" {
mime = "application/octet-stream"
}
return fileInfo{
mime: mime,
name: fr.FileName(),
size: fr.Size(),
rs: fr,
close: fr.Close,
}, false, nil
}
Author: rayleyv
Project: camlistor
func (ih *ImageHandler) scaleImage(fileRef blob.Ref) (*formatAndImage, error) {
fr, err := schema.NewFileReader(ih.storageSeekFetcher(), fileRef)
if err != nil {
return nil, err
}
defer fr.Close()
var buf bytes.Buffer
scaleImageGateSlurp.Start()
n, err := io.Copy(&buf, fr)
scaleImageGateSlurp.Done()
imageBytesFetchedVar.Add(n)
if err != nil {
return nil, fmt.Errorf("image resize: error reading image %s: %v", fileRef, err)
}
scaleImageGateResize.Start()
defer scaleImageGateResize.Done()
i, imConfig, err := images.Decode(bytes.NewReader(buf.Bytes()),
&images.DecodeOpts{MaxWidth: ih.MaxWidth, MaxHeight: ih.MaxHeight})
if err != nil {
return nil, err
}
b := i.Bounds()
format := imConfig.Format
useBytesUnchanged := !imConfig.Modified &&
format != "cr2" // always recompress CR2 files
isSquare := b.Dx() == b.Dy()
if ih.Square && !isSquare {
useBytesUnchanged = false
i = squareImage(i)
b = i.Bounds()
}
if !useBytesUnchanged {
// Encode as a new image
buf.Reset()
switch format {
case "png":
err = png.Encode(&buf, i)
case "cr":
// Recompress CR2 files as JPEG
format = "jpeg"
fallthrough
default:
err = jpeg.Encode(&buf, i, &jpeg.Options{
Quality: 90,
})
}
if err != nil {
return nil, err
}
}
return &formatAndImage{format: format, image: buf.Bytes()}, nil
}
Author: kristofe
Project: camlistor
func (dh *DownloadHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request, file blob.Ref) {
if req.Method != "GET" && req.Method != "HEAD" {
http.Error(rw, "Invalid download method", 400)
return
}
if req.Header.Get("If-Modified-Since") != "" {
// Immutable, so any copy's a good copy.
rw.WriteHeader(http.StatusNotModified)
return
}
fr, err := schema.NewFileReader(dh.blobSource(), file)
if err != nil {
http.Error(rw, "Can't serve file: "+err.Error(), 500)
return
}
defer fr.Close()
schema := fr.FileSchema()
h := rw.Header()
h.Set("Content-Length", fmt.Sprintf("%d", schema.SumPartsSize()))
h.Set("Expires", time.Now().Add(oneYear).Format(http.TimeFormat))
mimeType := magic.MIMETypeFromReaderAt(fr)
if dh.ForceMime != "" {
mimeType = dh.ForceMime
}
if mimeType == "" {
mimeType = "application/octet-stream"
}
h.Set("Content-Type", mimeType)
if mimeType == "application/octet-stream" {
// Chrome seems to silently do nothing on
// application/octet-stream unless this is set.
// Maybe it's confused by lack of URL it recognizes
// along with lack of mime type?
rw.Header().Set("Content-Disposition", "attachment; filename=file-"+file.String()+".dat")
}
if req.Method == "HEAD" && req.FormValue("verifycontents") != "" {
vbr, ok := blob.Parse(req.FormValue("verifycontents"))
if !ok {
return
}
hash := vbr.Hash()
if hash == nil {
return
}
io.Copy(hash, fr) // ignore errors, caught later
if vbr.HashMatches(hash) {
rw.Header().Set("X-Camli-Contents", vbr.String())
}
return
}
http.ServeContent(rw, req, "", time.Now(), fr)
}
Author: t3rm1n4
Project: camlistor
func (ih *ImageHandler) cached(br *blobref.BlobRef) (fr *schema.FileReader, err error) {
fetchSeeker := blobref.SeekerFromStreamingFetcher(ih.Cache)
fr, err = schema.NewFileReader(fetchSeeker, br)
if err != nil {
return nil, err
}
if imageDebug {
log.Printf("Image Cache: hit: %v\n", br)
}
return fr, nil
}
Author: rayleyv
Project: camlistor
// cached returns a FileReader for the given file schema blobref.
// The FileReader should be closed when done reading.
func (ih *ImageHandler) cached(fileRef blob.Ref) (*schema.FileReader, error) {
fetchSeeker := blob.SeekerFromStreamingFetcher(ih.Cache)
fr, err := schema.NewFileReader(fetchSeeker, fileRef)
if err != nil {
return nil, err
}
if imageDebug {
log.Printf("Image Cache: hit: %v\n", fileRef)
}
return fr, nil
}
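A minimal usage sketch for the contract in the doc comment above; dumpCached and w are illustrative placeholders, and the io and camlistore.org/pkg/blob imports are assumed:
// dumpCached is a hypothetical caller: it opens the cached FileReader,
// streams the file contents to w, and closes the reader when done.
func dumpCached(w io.Writer, ih *ImageHandler, fileRef blob.Ref) error {
	fr, err := ih.cached(fileRef)
	if err != nil {
		return err
	}
	defer fr.Close() // per the doc comment, the caller must close the FileReader
	_, err = io.Copy(w, fr)
	return err
}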
Author: rakyl
Project: camlistor
func (ih *ImageHandler) scaleImage(buf *bytes.Buffer, file blob.Ref) (format string, err error) {
fr, err := schema.NewFileReader(ih.storageSeekFetcher(), file)
if err != nil {
return format, err
}
defer fr.Close()
_, err = io.Copy(buf, fr)
if err != nil {
return format, fmt.Errorf("image resize: error reading image %s: %v", file, err)
}
i, imConfig, err := images.Decode(bytes.NewReader(buf.Bytes()),
&images.DecodeOpts{MaxWidth: ih.MaxWidth, MaxHeight: ih.MaxHeight})
if err != nil {
return format, err
}
b := i.Bounds()
format = imConfig.Format
useBytesUnchanged := !imConfig.Modified &&
format != "cr2" // always recompress CR2 files
isSquare := b.Dx() == b.Dy()
if ih.Square && !isSquare {
useBytesUnchanged = false
i = squareImage(i)
b = i.Bounds()
}
if !useBytesUnchanged {
// Encode as a new image
buf.Reset()
switch format {
case "png":
err = png.Encode(buf, i)
case "cr":
// Recompress CR2 files as JPEG
format = "jpeg"
fallthrough
default:
err = jpeg.Encode(buf, i, nil)
}
if err != nil {
return format, err
}
}
return format, nil
}
Author: camlistor
Project: camlistor
func (s *storage) packFile(fileRef blob.Ref) (err error) {
s.Logf("Packing file %s ...", fileRef)
defer func() {
if err == nil {
s.Logf("Packed file %s", fileRef)
} else {
s.Logf("Error packing file %s: %v", fileRef, err)
}
}()
fr, err := schema.NewFileReader(s, fileRef)
if err != nil {
return err
}
return newPacker(s, fileRef, fr).pack()
}
Author: camarox5
Project: coreos-baremeta
// serveRef gets the file at ref from fetcher and serves its contents.
// It is used by Service as a one time handler to serve to the thumbnail child process on localhost.
func serveRef(rw http.ResponseWriter, req *http.Request, ref blob.Ref, fetcher blob.Fetcher) {
if !httputil.IsGet(req) {
http.Error(rw, "Invalid download method.", 400)
return
}
if !httputil.IsLocalhost(req) {
http.Error(rw, "Forbidden.", 403)
return
}
parts := strings.Split(req.URL.Path, "/")
if len(parts) < 2 {
http.Error(rw, "Malformed GET URL.", 400)
return
}
blobRef, ok := blob.Parse(parts[1])
if !ok {
http.Error(rw, "Malformed GET URL.", 400)
return
}
// only serves its ref
if blobRef != ref {
log.Printf("videothumbnail: access to %v forbidden; wrong blobref for handler", blobRef)
http.Error(rw, "Forbidden.", 403)
return
}
rw.Header().Set("Content-Type", "application/octet-stream")
fr, err := schema.NewFileReader(fetcher, ref)
if err != nil {
httputil.ServeError(rw, req, err)
return
}
defer fr.Close()
http.ServeContent(rw, req, "", time.Now(), fr)
}
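A rough sketch of how a one-shot localhost handler like serveRef might be wired up for the thumbnail child process; listenForRef, the ephemeral-port choice, and the use of http.Serve are assumptions here, not the actual Service code:
// listenForRef serves exactly one blob ref on an ephemeral localhost port
// and returns the address the child process should fetch it from.
func listenForRef(ref blob.Ref, fetcher blob.Fetcher) (addr string, err error) {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		return "", err
	}
	go http.Serve(ln, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		serveRef(w, r, ref, fetcher) // serveRef itself rejects non-GET and non-localhost requests
	}))
	return ln.Addr().String(), nil
}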
Author: stevear
Project: camlistor
func (ih *ImageHandler) newFileReader(fileRef blob.Ref) (io.ReadCloser, error) {
fi, ok := fileInfoPacked(ih.Search, ih.Fetcher, nil, fileRef)
if debugPack {
log.Printf("pkg/server/image.go: fileInfoPacked: ok=%v, %+v", ok, fi)
}
if ok {
// This would be less gross if fileInfoPacked just
// returned an io.ReadCloser, but then the download
// handler would need more invasive changes for
// ServeContent. So tolerate this for now.
return struct {
io.Reader
io.Closer
}{
fi.rs,
types.CloseFunc(fi.close),
}, nil
}
// Default path, not going through blobpacked's fast path:
return schema.NewFileReader(ih.Fetcher, fileRef)
}
Author: camarox5
Project: coreos-baremeta
func TestStorage(t *testing.T) {
store, ref := storageAndBlobRef(t)
fr, err := schema.NewFileReader(store, ref)
if err != nil {
t.Fatal(err)
}
inFile, err := os.Open(testFilepath)
if err != nil {
t.Fatal(err)
}
data, err := ioutil.ReadAll(inFile)
if err != nil {
t.Fatal(err)
}
bd, err := ioutil.ReadAll(fr)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(bd, data) {
t.Error("expected to be the same")
}
}
Author: rakyl
Project: camlistor
// vivify verifies that all the chunks for the file described by fileblob are on the blobserver.
// It makes a planned permanode, signs it, and uploads it. It finally makes a camliContent claim
// on that permanode for fileblob, signs it, and uploads it to the blobserver.
func vivify(blobReceiver blobserver.BlobReceiveConfiger, fileblob blob.SizedRef) error {
sf, ok := blobReceiver.(blob.StreamingFetcher)
if !ok {
return fmt.Errorf("BlobReceiver is not a StreamingFetcher")
}
fetcher := blob.SeekerFromStreamingFetcher(sf)
fr, err := schema.NewFileReader(fetcher, fileblob.Ref)
if err != nil {
return fmt.Errorf("Filereader error for blobref %v: %v", fileblob.Ref.String(), err)
}
defer fr.Close()
h := sha1.New()
n, err := io.Copy(h, fr)
if err != nil {
return fmt.Errorf("Could not read all file of blobref %v: %v", fileblob.Ref.String(), err)
}
if n != fr.Size() {
return fmt.Errorf("Could not read all file of blobref %v. Wanted %v, got %v", fileblob.Ref.String(), fr.Size(), n)
}
config := blobReceiver.Config()
if config == nil {
return errors.New("blobReceiver has no config")
}
hf := config.HandlerFinder
if hf == nil {
return errors.New("blobReceiver config has no HandlerFinder")
}
JSONSignRoot, sh, err := hf.FindHandlerByType("jsonsign")
if err != nil || sh == nil {
return errors.New("jsonsign handler not found")
}
sigHelper, ok := sh.(*signhandler.Handler)
if !ok {
return errors.New("handler is not a JSON signhandler")
}
discoMap := sigHelper.DiscoveryMap(JSONSignRoot)
publicKeyBlobRef, ok := discoMap["publicKeyBlobRef"].(string)
if !ok {
return fmt.Errorf("Discovery: json decoding error: %v", err)
}
// The file schema must have a modtime to vivify, as the modtime is used for all three of:
// 1) the permanode's signature
// 2) the camliContent attribute claim's "claimDate"
// 3) the signature time of 2)
claimDate, err := time.Parse(time.RFC3339, fr.FileSchema().UnixMtime)
if err != nil {
return fmt.Errorf("While parsing modtime for file %v: %v", fr.FileSchema().FileName, err)
}
permanodeBB := schema.NewHashPlannedPermanode(h)
permanodeBB.SetSigner(blob.MustParse(publicKeyBlobRef))
permanodeBB.SetClaimDate(claimDate)
permanodeSigned, err := sigHelper.Sign(permanodeBB)
if err != nil {
return fmt.Errorf("Signing permanode %v: %v", permanodeSigned, err)
}
permanodeRef := blob.SHA1FromString(permanodeSigned)
_, err = blobserver.ReceiveNoHash(blobReceiver, permanodeRef, strings.NewReader(permanodeSigned))
if err != nil {
return fmt.Errorf("While uploading signed permanode %v, %v: %v", permanodeRef, permanodeSigned, err)
}
contentClaimBB := schema.NewSetAttributeClaim(permanodeRef, "camliContent", fileblob.Ref.String())
contentClaimBB.SetSigner(blob.MustParse(publicKeyBlobRef))
contentClaimBB.SetClaimDate(claimDate)
contentClaimSigned, err := sigHelper.Sign(contentClaimBB)
if err != nil {
return fmt.Errorf("Signing camliContent claim: %v", err)
}
contentClaimRef := blob.SHA1FromString(contentClaimSigned)
_, err = blobserver.ReceiveNoHash(blobReceiver, contentClaimRef, strings.NewReader(contentClaimSigned))
if err != nil {
return fmt.Errorf("While uploading signed camliContent claim %v, %v: %v", contentClaimRef, contentClaimSigned, err)
}
return nil
}
Author: hagn
Project: camlistor
func main() {
client.AddFlags()
flag.Parse()
if *flagVersion {
fmt.Fprintf(os.Stderr, "camget version: %s\n", buildinfo.Version())
return
}
if *flagGraph && flag.NArg() != 1 {
log.Fatalf("The --graph option requires exactly one parameter.")
}
var cl *client.Client
var items []*blobref.BlobRef
if *flagShared != "" {
if client.ExplicitServer() != "" {
log.Fatal("Can't use --shared with an explicit blobserver; blobserver is implicit from the --shared URL.")
}
if flag.NArg() != 0 {
log.Fatal("No arguments permitted when using --shared")
}
cl1, target, err := client.NewFromShareRoot(*flagShared,
client.OptionInsecure(*flagInsecureTLS))
if err != nil {
log.Fatal(err)
}
cl = cl1
items = append(items, target)
} else {
cl = client.NewOrFail()
for n := 0; n < flag.NArg(); n++ {
arg := flag.Arg(n)
br := blobref.Parse(arg)
if br == nil {
log.Fatalf("Failed to parse argument %q as a blobref.", arg)
}
items = append(items, br)
}
}
cl.InsecureTLS = *flagInsecureTLS
tr := cl.TransportForConfig(&client.TransportConfig{
Verbose: *flagHTTP,
})
httpStats, _ := tr.(*httputil.StatsTransport)
cl.SetHTTPClient(&http.Client{Transport: tr})
diskCacheFetcher, err := cacher.NewDiskCache(cl)
if err != nil {
log.Fatalf("Error setting up local disk cache: %v", err)
}
defer diskCacheFetcher.Clean()
if *flagVerbose {
log.Printf("Using temp blob cache directory %s", diskCacheFetcher.Root)
}
for _, br := range items {
if *flagGraph {
printGraph(diskCacheFetcher, br)
return
}
if *flagCheck {
// TODO: do HEAD requests checking if the blobs exists.
log.Fatal("not implemented")
return
}
if *flagOutput == "-" {
var rc io.ReadCloser
var err error
if *flagContents {
rc, err = schema.NewFileReader(diskCacheFetcher, br)
if err == nil {
rc.(*schema.FileReader).LoadAllChunks()
}
} else {
rc, err = fetch(diskCacheFetcher, br)
}
if err != nil {
log.Fatal(err)
}
defer rc.Close()
if _, err := io.Copy(os.Stdout, rc); err != nil {
log.Fatalf("Failed reading %q: %v", br, err)
}
} else {
if err := smartFetch(diskCacheFetcher, *flagOutput, br); err != nil {
log.Fatal(err)
}
}
}
if *flagVerbose {
log.Printf("HTTP requests: %d\n", httpStats.Requests())
}
}
Author: hagn
Project: camlistor
// smartFetch the things that blobs point to, not just blobs.
func smartFetch(src blobref.StreamingFetcher, targ string, br *blobref.BlobRef) error {
rc, err := fetch(src, br)
if err != nil {
return err
}
defer rc.Close()
sniffer := index.NewBlobSniffer(br)
_, err = io.CopyN(sniffer, rc, sniffSize)
if err != nil && err != io.EOF {
return err
}
sniffer.Parse()
blob, ok := sniffer.SchemaBlob()
if !ok {
if *flagVerbose {
log.Printf("Fetching opaque data %v into %q", br, targ)
}
// opaque data - put it in a file
f, err := os.Create(targ)
if err != nil {
return fmt.Errorf("opaque: %v", err)
}
defer f.Close()
body, _ := sniffer.Body()
r := io.MultiReader(bytes.NewReader(body), rc)
_, err = io.Copy(f, r)
return err
}
switch blob.Type() {
case "directory":
dir := filepath.Join(targ, blob.FileName())
if *flagVerbose {
log.Printf("Fetching directory %v into %s", br, dir)
}
if err := os.MkdirAll(dir, blob.FileMode()); err != nil {
return err
}
if err := setFileMeta(dir, blob); err != nil {
log.Print(err)
}
entries := blob.DirectoryEntries()
if entries == nil {
return fmt.Errorf("bad entries blobref in dir %v", blob.BlobRef())
}
return smartFetch(src, dir, entries)
case "static-set":
if *flagVerbose {
log.Printf("Fetching directory entries %v into %s", br, targ)
}
// directory entries
const numWorkers = 10
type work struct {
br *blobref.BlobRef
errc chan<- error
}
members := blob.StaticSetMembers()
workc := make(chan work, len(members))
defer close(workc)
for i := 0; i < numWorkers; i++ {
go func() {
for wi := range workc {
wi.errc <- smartFetch(src, targ, wi.br)
}
}()
}
var errcs []<-chan error
for _, mref := range members {
errc := make(chan error, 1)
errcs = append(errcs, errc)
workc <- work{mref, errc}
}
for _, errc := range errcs {
if err := <-errc; err != nil {
return err
}
}
return nil
case "file":
seekFetcher := blobref.SeekerFromStreamingFetcher(src)
fr, err := schema.NewFileReader(seekFetcher, br)
if err != nil {
return fmt.Errorf("NewFileReader: %v", err)
}
fr.LoadAllChunks()
defer fr.Close()
name := filepath.Join(targ, blob.FileName())
if fi, err := os.Stat(name); err == nil && fi.Size() == fr.Size() {
if *flagVerbose {
log.Printf("Skipping %s; already exists.", name)
}
return nil
//......... (the rest of this code is omitted here) .........
Author: t3rm1n4
Project: camlistor
// vivify verifies that all the chunks for the file described by fileblob are on the blobserver.
// It makes a planned permanode, signs it, and uploads it. It finally makes a camliContent claim
// on that permanode for fileblob, signs it, and uploads it to the blobserver.
func vivify(blobReceiver blobserver.BlobReceiveConfiger, fileblob blobref.SizedBlobRef) error {
sf, ok := blobReceiver.(blobref.StreamingFetcher)
if !ok {
return fmt.Errorf("BlobReceiver is not a StreamingFetcher")
}
fetcher := blobref.SeekerFromStreamingFetcher(sf)
fr, err := schema.NewFileReader(fetcher, fileblob.BlobRef)
if err != nil {
return fmt.Errorf("Filereader error for blobref %v: %v", fileblob.BlobRef.String(), err)
}
defer fr.Close()
h := sha1.New()
n, err := io.Copy(h, fr)
if err != nil {
return fmt.Errorf("Could not read all file of blobref %v: %v", fileblob.BlobRef.String(), err)
}
if n != fr.Size() {
return fmt.Errorf("Could not read all file of blobref %v. Wanted %v, got %v", fileblob.BlobRef.String(), fr.Size(), n)
}
config := blobReceiver.Config()
if config == nil {
return errors.New("blobReceiver has no config")
}
hf := config.HandlerFinder
if hf == nil {
return errors.New("blobReceiver config has no HandlerFinder")
}
JSONSignRoot, sh, err := hf.FindHandlerByType("jsonsign")
// TODO(mpl): second check should not be necessary, and yet it happens. Figure it out.
if err != nil || sh == nil {
return errors.New("jsonsign handler not found")
}
sigHelper, ok := sh.(*signhandler.Handler)
if !ok {
return errors.New("handler is not a JSON signhandler")
}
discoMap := sigHelper.DiscoveryMap(JSONSignRoot)
publicKeyBlobRef, ok := discoMap["publicKeyBlobRef"].(string)
if !ok {
return fmt.Errorf("Discovery: json decoding error: %v", err)
}
unsigned := schema.NewHashPlannedPermanode(h)
unsigned["camliSigner"] = publicKeyBlobRef
signed, err := sigHelper.SignMap(unsigned)
if err != nil {
return fmt.Errorf("Signing permanode %v: %v", signed, err)
}
signedPerm := blobref.SHA1FromString(signed)
_, err = blobReceiver.ReceiveBlob(signedPerm, strings.NewReader(signed))
if err != nil {
return fmt.Errorf("While uploading signed permanode %v: %v", signed, err)
}
contentAttr := schema.NewSetAttributeClaim(signedPerm, "camliContent", fileblob.BlobRef.String())
claimDate, err := time.Parse(time.RFC3339, fr.FileSchema().UnixMtime)
contentAttr.SetClaimDate(claimDate)
contentAttr["camliSigner"] = publicKeyBlobRef
signed, err = sigHelper.SignMap(contentAttr)
if err != nil {
return fmt.Errorf("Signing camliContent claim: %v", err)
}
signedClaim := blobref.SHA1FromString(signed)
_, err = blobReceiver.ReceiveBlob(signedClaim, strings.NewReader(signed))
if err != nil {
return fmt.Errorf("While uploading signed camliContent claim %v: %v", signed, err)
}
return nil
}
Author: kristofe
Project: camlistor
func (ih *ImageHandler) scaleImage(fileRef blob.Ref) (*formatAndImage, error) {
fr, err := schema.NewFileReader(ih.Fetcher, fileRef)
if err != nil {
return nil, err
}
defer fr.Close()
sr := types.NewStatsReader(imageBytesFetchedVar, fr)
sr, conf, err := imageConfigFromReader(sr)
if err != nil {
return nil, err
}
// TODO(wathiede): build a size table keyed by conf.ColorModel for
// common color models for a more exact size estimate.
// This value is an estimate of the memory required to decode an image.
// PNGs range from 1-64 bits per pixel (not all of which are supported by
// the Go standard parser). JPEGs encoded in YCbCr 4:4:4 are 3 byte/pixel.
// For all other JPEGs this is an overestimate. For GIFs it is 3x larger
// than needed. How accurate this estimate is depends on the mix of
// images being resized concurrently.
ramSize := int64(conf.Width) * int64(conf.Height) * 3
if err = ih.resizeSem.Acquire(ramSize); err != nil {
return nil, err
}
defer ih.resizeSem.Release(ramSize)
i, imConfig, err := images.Decode(sr, &images.DecodeOpts{
MaxWidth: ih.MaxWidth,
MaxHeight: ih.MaxHeight,
})
if err != nil {
return nil, err
}
b := i.Bounds()
format := imConfig.Format
isSquare := b.Dx() == b.Dy()
if ih.Square && !isSquare {
i = squareImage(i)
b = i.Bounds()
}
// Encode as a new image
var buf bytes.Buffer
switch format {
case "png":
err = png.Encode(&buf, i)
case "cr2":
// Recompress CR2 files as JPEG
format = "jpeg"
fallthrough
default:
err = jpeg.Encode(&buf, i, &jpeg.Options{
Quality: 90,
})
}
if err != nil {
return nil, err
}
return &formatAndImage{format: format, image: buf.Bytes()}, nil
}
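To make the memory estimate in the comment above concrete: at roughly 3 bytes per pixel, a hypothetical 4000x3000 photo reserves 4000 * 3000 * 3 = 36,000,000 bytes (about 34 MiB) from resizeSem before decoding begins. The same arithmetic as a tiny helper:
// estimateDecodeRAM mirrors the per-pixel estimate used above (illustrative only).
func estimateDecodeRAM(width, height int) int64 {
	return int64(width) * int64(height) * 3 // ~3 bytes per pixel
}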
Author: t3rm1n4
Project: camlistor
func (dh *DownloadHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request, file *blobref.BlobRef) {
if req.Method != "GET" && req.Method != "HEAD" {
http.Error(rw, "Invalid download method", 400)
return
}
fr, err := schema.NewFileReader(dh.storageSeekFetcher(), file)
if err != nil {
http.Error(rw, "Can't serve file: "+err.Error(), 500)
return
}
defer fr.Close()
schema := fr.FileSchema()
rw.Header().Set("Content-Length", fmt.Sprintf("%d", schema.SumPartsSize()))
mimeType, reader := magic.MimeTypeFromReader(fr)
if dh.ForceMime != "" {
mimeType = dh.ForceMime
}
if mimeType == "" {
mimeType = "application/octet-stream"
}
rw.Header().Set("Content-Type", mimeType)
if mimeType == "application/octet-stream" {
// Chrome seems to silently do nothing on
// application/octet-stream unless this is set.
// Maybe it's confused by lack of URL it recognizes
// along with lack of mime type?
rw.Header().Set("Content-Disposition", "attachment; filename=file-"+file.String()+".dat")
}
if req.Method == "HEAD" {
vbr := blobref.Parse(req.FormValue("verifycontents"))
if vbr == nil {
return
}
hash := vbr.Hash()
if hash == nil {
return
}
io.Copy(hash, reader) // ignore errors, caught later
if vbr.HashMatches(hash) {
rw.Header().Set("X-Camli-Contents", vbr.String())
}
return
}
n, err := io.Copy(rw, reader)
log.Printf("For %q request of %s: copied %d, %v", req.Method, req.URL.Path, n, err)
if err != nil {
log.Printf("error serving download of file schema %s: %v", file, err)
return
}
if size := schema.SumPartsSize(); n != int64(size) {
log.Printf("error serving download of file schema %s: sent %d, expected size of %d",
file, n, size)
return
}
}
Author: t3rm1n4
Project: camlistor
// smartFetch the things that blobs point to, not just blobs.
func smartFetch(cl *client.Client, targ string, br *blobref.BlobRef) error {
if *flagVerbose {
log.Printf("Fetching %v into %q", br, targ)
}
rc, err := fetch(cl, br)
if err != nil {
return err
}
defer rc.Close()
sniffer := new(index.BlobSniffer)
_, err = io.CopyN(sniffer, rc, sniffSize)
if err != nil && err != io.EOF {
return err
}
sniffer.Parse()
sc, ok := sniffer.Superset()
if !ok {
// opaque data - put it in a file
f, err := os.Create(targ)
if err != nil {
return fmt.Errorf("opaque: %v", err)
}
defer f.Close()
body, _ := sniffer.Body()
r := io.MultiReader(bytes.NewBuffer(body), rc)
_, err = io.Copy(f, r)
return err
}
sc.BlobRef = br
switch sc.Type {
case "directory":
dir := filepath.Join(targ, sc.FileName)
if err := os.MkdirAll(dir, sc.FileMode()); err != nil {
return err
}
if err := setFileMeta(dir, sc); err != nil {
log.Print(err)
}
entries := blobref.Parse(sc.Entries)
if entries == nil {
return fmt.Errorf("bad entries blobref: %v", sc.Entries)
}
return smartFetch(cl, dir, entries)
case "static-set":
// directory entries
for _, m := range sc.Members {
dref := blobref.Parse(m)
if dref == nil {
return fmt.Errorf("bad member blobref: %v", m)
}
if err := smartFetch(cl, targ, dref); err != nil {
return err
}
}
return nil
case "file":
name := filepath.Join(targ, sc.FileName)
f, err := os.Create(name)
if err != nil {
return fmt.Errorf("file type: %v", err)
}
defer f.Close()
seekFetcher := blobref.SeekerFromStreamingFetcher(cl)
fr, err := schema.NewFileReader(seekFetcher, br)
if err != nil {
return fmt.Errorf("NewFileReader: %v", err)
}
defer fr.Close()
// Copy the file contents out of the blob store into the newly created local file.
if _, err := io.Copy(f, fr); err != nil {
return fmt.Errorf("file copy: %v", err)
}
if err := setFileMeta(name, sc); err != nil {
log.Print(err)
}
return nil
default:
return errors.New("unknown blob type: " + sc.Type)
}
panic("unreachable")
}