Author: kdevroed
Project: camlistor
func newFromConfig(_ blobserver.Loader, config jsonconfig.Obj) (storage blobserver.Storage, err error) {
url := config.RequiredString("url")
auth := config.RequiredString("auth")
skipStartupCheck := config.OptionalBool("skipStartupCheck", false)
if err := config.Validate(); err != nil {
return nil, err
}
client := client.New(url)
if err = client.SetupAuthFromString(auth); err != nil {
return nil, err
}
client.SetLogger(log.New(os.Stderr, "remote", log.LstdFlags))
sto := &remoteStorage{
client: client,
}
if !skipStartupCheck {
// Do a quick dummy operation to check that our credentials are
// correct.
// TODO(bradfitz,mpl): skip this operation smartly if it turns out this is annoying/slow for whatever reason.
c := make(chan blob.SizedRef, 1)
err = sto.EnumerateBlobs(context.TODO(), c, "", 1)
if err != nil {
return nil, err
}
}
return sto, nil
}
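For context, here is a minimal sketch of the JSON object this newFromConfig expects. The key names come directly from the RequiredString/OptionalBool calls above; the URL and auth values are made up for illustration.

package main

import (
    "encoding/json"
    "fmt"
)

func main() {
    // Key names mirror the lookups in newFromConfig; all values are hypothetical.
    conf := map[string]interface{}{
        "url":              "http://blobs.example.com:3179", // hypothetical remote blobserver URL
        "auth":             "userpass:alice:secret",         // hypothetical auth string
        "skipStartupCheck": false,                           // keep the EnumerateBlobs credential probe
    }
    out, err := json.MarshalIndent(conf, "", "  ")
    if err != nil {
        panic(err)
    }
    fmt.Println(string(out))
}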
Author: kdevroed
Project: camlistor
func TestIsolation(t *testing.T) {
ld := test.NewLoader()
master, _ := ld.GetStorage("/good-storage/")
ns1 := newNamespace(t, ld)
ns2 := newNamespace(t, ld)
stoMap := map[string]blobserver.Storage{
"ns1": ns1,
"ns2": ns2,
"master": master,
}
want := func(src string, want ...blob.Ref) {
if _, ok := stoMap[src]; !ok {
t.Fatalf("undefined storage %q", src)
}
sort.Sort(blob.ByRef(want))
var got []blob.Ref
if err := blobserver.EnumerateAll(context.TODO(), stoMap[src], func(sb blob.SizedRef) error {
got = append(got, sb.Ref)
return nil
}); err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("server %q = %q; want %q", src, got, want)
}
}
b1 := &test.Blob{Contents: "Blob 1"}
b1r := b1.BlobRef()
b2 := &test.Blob{Contents: "Blob 2"}
b2r := b2.BlobRef()
b3 := &test.Blob{Contents: "Shared Blob"}
b3r := b3.BlobRef()
b1.MustUpload(t, ns1)
want("ns1", b1r)
want("ns2")
want("master", b1r)
b2.MustUpload(t, ns2)
want("ns1", b1r)
want("ns2", b2r)
want("master", b1r, b2r)
b3.MustUpload(t, ns2)
want("ns1", b1r)
want("ns2", b2r, b3r)
want("master", b1r, b2r, b3r)
b3.MustUpload(t, ns1)
want("ns1", b1r, b3r)
want("ns2", b2r, b3r)
want("master", b1r, b2r, b3r)
if _, _, err := ns2.FetchStreaming(b1r); err == nil {
t.Errorf("b1 shouldn't be accessible via ns2")
}
}
Author: jiaqub
Project: camlistor
// Reindex rewrites the index files of the diskpacked .pack files
func Reindex(root string, overwrite bool, indexConf jsonconfig.Obj) (err error) {
// there is newStorage, but that may open a file for writing
var s = &storage{root: root}
index, err := newIndex(root, indexConf)
if err != nil {
return err
}
defer func() {
closeErr := index.Close()
// Return only the first error: if the index or disk is corrupt
// and can't close, the two errors are very likely related and
// share the same root cause.
if err == nil {
err = closeErr
}
}()
ctx := context.TODO() // TODO(tgulacsi): get the verbosity from context
for i := 0; i >= 0; i++ {
fh, err := os.Open(s.filename(i))
if err != nil {
if os.IsNotExist(err) {
break
}
return err
}
err = s.reindexOne(ctx, index, overwrite, i)
fh.Close()
if err != nil {
return err
}
}
return nil
}
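The for loop above walks numbered pack files until os.Open reports the first missing one. A standalone sketch of that probe-until-IsNotExist pattern follows; the file-naming scheme is a guess, not what s.filename() actually produces.

package main

import (
    "fmt"
    "os"
)

// packFilename mimics a numbered pack-file layout; the exact format is
// hypothetical.
func packFilename(root string, i int) string {
    return fmt.Sprintf("%s/pack-%05d.blobs", root, i)
}

func main() {
    root := "/tmp/diskpacked-example" // hypothetical root directory
    for i := 0; ; i++ {
        fh, err := os.Open(packFilename(root, i))
        if err != nil {
            if os.IsNotExist(err) {
                break // the first missing pack file ends the scan, as in Reindex
            }
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
        fmt.Println("would reindex", fh.Name())
        fh.Close()
    }
}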
Author: kristofe
Project: camlistor
func TestParseExpression(t *testing.T) {
qj := func(sq *SearchQuery) []byte {
v, err := json.MarshalIndent(sq, "", " ")
if err != nil {
panic(err)
}
return v
}
for _, tt := range parseExpressionTests {
ins := tt.inList
if len(ins) == 0 {
ins = []string{tt.in}
}
for _, in := range ins {
got, err := parseExpression(context.TODO(), in)
if err != nil {
if tt.errContains != "" && strings.Contains(err.Error(), tt.errContains) {
continue
}
t.Errorf("%s: parseExpression(%q) error: %v", tt.name, in, err)
continue
}
if tt.errContains != "" {
t.Errorf("%s: parseExpression(%q) succeeded; want error containing %q", tt.name, in, tt.errContains)
continue
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("%s: parseExpression(%q) got:\n%s\n\nwant:%s\n", tt.name, in, qj(got), qj(tt.want))
}
}
}
}
Author: Jimmy9
Project: camlistor
// Serves oauth callback at http://host/importer/TYPE/callback
func (h *Host) serveImporterAcctCallback(w http.ResponseWriter, r *http.Request, imp *importer) {
if r.Method != "GET" {
http.Error(w, "invalid method", 400)
return
}
acctRef, err := imp.impl.CallbackRequestAccount(r)
if err != nil {
httputil.ServeError(w, r, err)
return
}
if !acctRef.Valid() {
httputil.ServeError(w, r, errors.New("No valid blobref returned from CallbackRequestAccount(r)"))
return
}
ia, err := imp.account(acctRef)
if err != nil {
http.Error(w, "invalid 'acct' param: "+err.Error(), 400)
return
}
imp.impl.ServeCallback(w, r, &SetupContext{
Context: context.TODO(),
Host: h,
AccountNode: ia.acct,
ia: ia,
})
}
Author: peterwatt
Project: camlistor
func (ia *importerAcct) setup(w http.ResponseWriter, r *http.Request) {
ia.im.impl.ServeSetup(w, r, &SetupContext{
Context: context.TODO(),
Host: ia.im.host,
AccountNode: ia.acct,
ia: ia,
})
}
Author: kristofe
Project: camlistor
func TestParseOperand(t *testing.T) {
for _, tt := range parseOperandTests {
p := newParser(tt.in)
got, err := p.parseOperand(context.TODO())
doChecking("parseOperand", t, tt, got, err, p)
}
}
Author: kristofe
Project: camlistor
func TestParseConjuction(t *testing.T) {
for _, tt := range parseAndRHSTests {
p := newParser(tt.in)
got, err := p.parseAndRHS(context.TODO(), tt.lhs)
doSticherChecking("parseAndRHS", t, tt, got, err, p)
}
}
Author: Jimmy9
Project: camlistor
func (ia *importerAcct) setup(w http.ResponseWriter, r *http.Request) {
if err := ia.im.impl.ServeSetup(w, r, &SetupContext{
Context: context.TODO(),
Host: ia.im.host,
AccountNode: ia.acct,
ia: ia,
}); err != nil {
log.Printf("%v", err)
}
}
Author: sfrdm
Project: camlistor
func (c *gceCmd) RunCommand(args []string) error {
if c.verbose {
gce.Verbose = true
}
if c.project == "" {
return cmdmain.UsageError("Missing --project flag.")
}
if (c.certFile == "") != (c.keyFile == "") {
return cmdmain.UsageError("--cert and --key must both be given together.")
}
if c.certFile == "" && c.hostname == "" {
return cmdmain.UsageError("Either --hostname, or --cert & --key must provided.")
}
config := gce.NewOAuthConfig(readFile(clientIdDat), readFile(clientSecretDat))
config.RedirectURL = "urn:ietf:wg:oauth:2.0:oob"
instConf := &gce.InstanceConf{
Name: c.instName,
Project: c.project,
Machine: c.machine,
Zone: c.zone,
CertFile: c.certFile,
KeyFile: c.keyFile,
Hostname: c.hostname,
}
if c.sshPub != "" {
instConf.SSHPub = strings.TrimSpace(readFile(c.sshPub))
}
depl := &gce.Deployer{
Client: oauth2.NewClient(oauth2.NoContext, oauth2.ReuseTokenSource(nil, &oauthutil.TokenSource{
Config: config,
CacheFile: c.project + "-token.json",
AuthCode: func() string {
fmt.Println("Get auth code from:")
fmt.Printf("%v\n", config.AuthCodeURL("my-state", oauth2.AccessTypeOffline, oauth2.ApprovalForce))
fmt.Println("Enter auth code:")
sc := bufio.NewScanner(os.Stdin)
sc.Scan()
return strings.TrimSpace(sc.Text())
},
})),
Conf: instConf,
}
inst, err := depl.Create(context.TODO())
if err != nil {
return err
}
log.Printf("Instance is up at %s", inst.NetworkInterfaces[0].AccessConfigs[0].NatIP)
return nil
}
Author: peterwatt
Project: camlistor
// Serves oauth callback at http://host/importer/TYPE/callback
func (h *Host) serveImporterAcctCallback(w http.ResponseWriter, r *http.Request, imp *importer) {
if r.Method != "GET" {
http.Error(w, "invalid method", 400)
return
}
acctRef, ok := blob.Parse(r.FormValue("acct"))
if !ok {
http.Error(w, "missing 'acct' blobref param", 400)
return
}
ia, err := imp.account(acctRef)
if err != nil {
http.Error(w, "invalid 'acct' param: "+err.Error(), 400)
return
}
imp.impl.ServeCallback(w, r, &SetupContext{
Context: context.TODO(),
Host: h,
AccountNode: ia.acct,
ia: ia,
})
}
Author: Jimmy9
Project: camlistor
func (h *DeployHandler) serveCallback(w http.ResponseWriter, r *http.Request) {
ck, err := r.Cookie("user")
if err != nil {
http.Error(w,
fmt.Sprintf("Cookie expired, or CSRF attempt. Restart from %s%s%s", h.scheme, h.host, h.prefix),
http.StatusBadRequest)
h.Printf("Cookie expired, or CSRF attempt on callback.")
return
}
code := r.FormValue("code")
if code == "" {
httputil.ServeError(w, r, errors.New("No oauth code parameter in callback URL"))
return
}
h.Printf("successful authentication: %v", r.URL.RawQuery)
br, tk, err := fromState(r)
if err != nil {
httputil.ServeError(w, r, err)
return
}
if !xsrftoken.Valid(tk, h.xsrfKey, ck.Value, br.String()) {
httputil.ServeError(w, r, fmt.Errorf("Invalid xsrf token: %q", tk))
return
}
oAuthConf := h.oAuthConfig()
tok, err := oAuthConf.Exchange(oauth2.NoContext, code)
if err != nil {
httputil.ServeError(w, r, fmt.Errorf("could not obtain a token: %v", err))
return
}
h.Printf("successful authorization with token: %v", tok)
instConf, err := h.instanceConf(br)
if err != nil {
httputil.ServeError(w, r, err)
return
}
depl := &Deployer{
Client: oAuthConf.Client(oauth2.NoContext, tok),
Conf: instConf,
}
if found := h.serveOldInstance(w, br, depl); found {
return
}
if err := h.recordState(br, &creationState{
InstConf: br,
}); err != nil {
httputil.ServeError(w, r, err)
return
}
if h.debug {
// We simulate an instance creation, without actually ever doing anything on Google Cloud,
// by sleeping for a while. Then, as we would do in the real case, we record a creation
// state (but a made-up one). In the meantime, the progress page/animation is served as
// usual.
go func() {
time.Sleep(7 * time.Second)
if err := h.recordState(br, &creationState{
InstConf: br,
InstAddr: "fake.instance.com",
Success: true,
CertFingerprintSHA1: "XXXXXXXXXXXXXXXXXXXX",
CertFingerprintSHA256: "YYYYYYYYYYYYYYYYYYYY",
}); err != nil {
h.Printf("Could not record creation state for %v: %v", br, err)
h.recordStateErrMu.Lock()
defer h.recordStateErrMu.Unlock()
h.recordStateErr[br.String()] = err
}
}()
h.serveProgress(w, br)
return
}
go func() {
inst, err := depl.Create(context.TODO())
state := &creationState{
InstConf: br,
}
if err != nil {
h.Printf("could not create instance: %v", err)
switch e := err.(type) {
case instanceExistsError:
state.Err = fmt.Sprintf("%v %v", e, helpDeleteInstance)
case projectIDError:
state.Err = fmt.Sprintf("%v", e)
default:
state.Err = fmt.Sprintf("%v. %v", err, fileIssue(br.String()))
}
} else {
state.InstAddr = addr(inst)
state.Success = true
state.CertFingerprintSHA1 = depl.certFingerprints["SHA-1"]
state.CertFingerprintSHA256 = depl.certFingerprints["SHA-256"]
//......... some code omitted here .........
Author: ndarile
Project: camlistor
func newSyncFromConfig(ld blobserver.Loader, conf jsonconfig.Obj) (http.Handler, error) {
var (
from = conf.RequiredString("from")
to = conf.RequiredString("to")
fullSync = conf.OptionalBool("fullSyncOnStart", false)
blockFullSync = conf.OptionalBool("blockingFullSyncOnStart", false)
idle = conf.OptionalBool("idle", false)
queueConf = conf.OptionalObject("queue")
copierPoolSize = conf.OptionalInt("copierPoolSize", 5)
validate = conf.OptionalBool("validateOnStart", validateOnStartDefault)
)
if err := conf.Validate(); err != nil {
return nil, err
}
if idle {
return newIdleSyncHandler(from, to), nil
}
if len(queueConf) == 0 {
return nil, errors.New(`Missing required "queue" object`)
}
q, err := sorted.NewKeyValue(queueConf)
if err != nil {
return nil, err
}
isToIndex := false
fromBs, err := ld.GetStorage(from)
if err != nil {
return nil, err
}
toBs, err := ld.GetStorage(to)
if err != nil {
return nil, err
}
if _, ok := fromBs.(*index.Index); !ok {
if _, ok := toBs.(*index.Index); ok {
isToIndex = true
}
}
sh := newSyncHandler(from, to, fromBs, toBs, q)
sh.toIndex = isToIndex
sh.copierPoolSize = copierPoolSize
if err := sh.readQueueToMemory(); err != nil {
return nil, fmt.Errorf("Error reading sync queue to memory: %v", err)
}
if fullSync || blockFullSync {
sh.logf("Doing full sync")
didFullSync := make(chan bool, 1)
go func() {
for {
n := sh.runSync("queue", sh.enumeratePendingBlobs)
if n > 0 {
sh.logf("Queue sync copied %d blobs", n)
continue
}
break
}
n := sh.runSync("full", blobserverEnumerator(context.TODO(), fromBs))
sh.logf("Full sync copied %d blobs", n)
didFullSync <- true
sh.syncLoop()
}()
if blockFullSync {
sh.logf("Blocking startup, waiting for full sync from %q to %q", from, to)
<-didFullSync
sh.logf("Full sync complete.")
}
} else {
go sh.syncLoop()
}
if validate {
go sh.startFullValidation()
}
blobserver.GetHub(fromBs).AddReceiveHook(sh.enqueue)
return sh, nil
}
Author: rayleyv
Project: camlistor
// src: non-nil source
// dest: non-nil destination
// thirdLeg: optional third-leg client. If not nil, anything on src
// but not on dest is copied to thirdLeg instead of directly to dest
// (sneakernet mode: copy to a portable drive, then transport thirdLeg
// to dest).
func (c *syncCmd) doPass(src, dest, thirdLeg blobserver.Storage) (stats SyncStats, retErr error) {
srcBlobs := make(chan blob.SizedRef, 100)
destBlobs := make(chan blob.SizedRef, 100)
srcErr := make(chan error, 1)
destErr := make(chan error, 1)
ctx := context.TODO()
defer ctx.Cancel()
go func() {
srcErr <- enumerateAllBlobs(ctx, src, srcBlobs)
}()
checkSourceError := func() {
if err := <-srcErr; err != nil {
retErr = fmt.Errorf("Enumerate error from source: %v", err)
}
}
if c.dest == "stdout" {
for sb := range srcBlobs {
fmt.Printf("%s %d\n", sb.Ref, sb.Size)
}
checkSourceError()
return
}
go func() {
destErr <- enumerateAllBlobs(ctx, dest, destBlobs)
}()
checkDestError := func() {
if err := <-destErr; err != nil {
retErr = fmt.Errorf("Enumerate error from destination: %v", err)
}
}
destNotHaveBlobs := make(chan blob.SizedRef)
sizeMismatch := make(chan blob.Ref)
readSrcBlobs := srcBlobs
if c.verbose {
readSrcBlobs = loggingBlobRefChannel(srcBlobs)
}
mismatches := []blob.Ref{}
go client.ListMissingDestinationBlobs(destNotHaveBlobs, sizeMismatch, readSrcBlobs, destBlobs)
// Handle three-legged mode if thirdLeg is provided.
checkThirdError := func() {} // default nop
syncBlobs := destNotHaveBlobs
firstHopDest := dest
if thirdLeg != nil {
thirdBlobs := make(chan blob.SizedRef, 100)
thirdErr := make(chan error, 1)
go func() {
thirdErr <- enumerateAllBlobs(ctx, thirdLeg, thirdBlobs)
}()
checkThirdError = func() {
if err := <-thirdErr; err != nil {
retErr = fmt.Errorf("Enumerate error from third leg: %v", err)
}
}
thirdNeedBlobs := make(chan blob.SizedRef)
go client.ListMissingDestinationBlobs(thirdNeedBlobs, sizeMismatch, destNotHaveBlobs, thirdBlobs)
syncBlobs = thirdNeedBlobs
firstHopDest = thirdLeg
}
For:
for {
select {
case br := <-sizeMismatch:
// TODO(bradfitz): check both sides and repair, carefully. For now, fail.
log.Printf("WARNING: blobref %v has differing sizes on source and dest", br)
stats.ErrorCount++
mismatches = append(mismatches, br)
case sb, ok := <-syncBlobs:
if !ok {
break For
}
fmt.Printf("Destination needs blob: %s\n", sb)
blobReader, size, err := src.FetchStreaming(sb.Ref)
if err != nil {
stats.ErrorCount++
log.Printf("Error fetching %s: %v", sb.Ref, err)
continue
}
if size != sb.Size {
stats.ErrorCount++
log.Printf("Source blobserver's enumerate size of %d for blob %s doesn't match its Get size of %d",
sb.Size, sb.Ref, size)
continue
}
if _, err := blobserver.Receive(firstHopDest, sb.Ref, blobReader); err != nil {
stats.ErrorCount++
log.Printf("Upload of %s to destination blobserver failed: %v", sb.Ref, err)
continue
//......... some code omitted here .........
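The chained client.ListMissingDestinationBlobs calls above are what make sneakernet mode work: first compute what dest lacks, then narrow that to what thirdLeg also lacks. Below is a toy, slice-based sketch of that set-difference chaining; the real code streams blob.SizedRef values over channels, and the missing helper here is purely illustrative.

package main

import "fmt"

// missing returns the refs present in src but absent from have; a slice-based
// stand-in for the channel-based client.ListMissingDestinationBlobs.
func missing(src, have []string) []string {
    haveSet := make(map[string]bool, len(have))
    for _, r := range have {
        haveSet[r] = true
    }
    var out []string
    for _, r := range src {
        if !haveSet[r] {
            out = append(out, r)
        }
    }
    return out
}

func main() {
    src := []string{"a", "b", "c", "d"} // hypothetical refs on the source
    dest := []string{"a", "c"}          // refs the destination already has
    third := []string{"b"}              // refs already on the portable third leg

    // Two-leg sync: copy to dest whatever dest lacks.
    fmt.Println("copy to dest:", missing(src, dest)) // [b d]

    // Three-leg (sneakernet) mode: of the blobs dest lacks, copy to thirdLeg
    // only the ones thirdLeg does not already hold.
    fmt.Println("copy to thirdLeg:", missing(missing(src, dest), third)) // [d]
}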
Author: kristofe
Project: camlistor
func TestParseCoreAtom(t *testing.T) {
for _, tt := range parseCoreAtomTests {
got, err := parseCoreAtom(context.TODO(), tt.in)
doAtomChecking("parseCoreAtom", t, tt, got, err)
}
}
Author: kdevroed
Project: camlistor
func newSyncFromConfig(ld blobserver.Loader, conf jsonconfig.Obj) (http.Handler, error) {
var (
from = conf.RequiredString("from")
to = conf.RequiredString("to")
fullSync = conf.OptionalBool("fullSyncOnStart", false)
blockFullSync = conf.OptionalBool("blockingFullSyncOnStart", false)
idle = conf.OptionalBool("idle", false)
queueConf = conf.OptionalObject("queue")
)
if err := conf.Validate(); err != nil {
return nil, err
}
if idle {
synch, err := createIdleSyncHandler(from, to)
if err != nil {
return nil, err
}
return synch, nil
}
if len(queueConf) == 0 {
return nil, errors.New(`Missing required "queue" object`)
}
q, err := sorted.NewKeyValue(queueConf)
if err != nil {
return nil, err
}
isToIndex := false
fromBs, err := ld.GetStorage(from)
if err != nil {
return nil, err
}
toBs, err := ld.GetStorage(to)
if err != nil {
return nil, err
}
if _, ok := fromBs.(*index.Index); !ok {
if _, ok := toBs.(*index.Index); ok {
isToIndex = true
}
}
sh, err := createSyncHandler(from, to, fromBs, toBs, q, isToIndex)
if err != nil {
return nil, err
}
if fullSync || blockFullSync {
didFullSync := make(chan bool, 1)
go func() {
n := sh.runSync("queue", sh.enumerateQueuedBlobs)
sh.logf("Queue sync copied %d blobs", n)
n = sh.runSync("full", blobserverEnumerator(context.TODO(), fromBs))
sh.logf("Full sync copied %d blobs", n)
didFullSync <- true
sh.syncQueueLoop()
}()
if blockFullSync {
sh.logf("Blocking startup, waiting for full sync from %q to %q", from, to)
<-didFullSync
sh.logf("Full sync complete.")
}
} else {
go sh.syncQueueLoop()
}
blobserver.GetHub(fromBs).AddReceiveHook(sh.enqueue)
return sh, nil
}
Author: rayleyv
Project: camlistor
func handleEnumerateBlobs(conn http.ResponseWriter, req *http.Request, storage blobserver.BlobEnumerator) {
// Potential input parameters
formValueLimit := req.FormValue("limit")
formValueMaxWaitSec := req.FormValue("maxwaitsec")
formValueAfter := req.FormValue("after")
maxEnumerate := defaultMaxEnumerate
if config, ok := storage.(blobserver.MaxEnumerateConfig); ok {
maxEnumerate = config.MaxEnumerate() - 1 // Since we'll add one below.
}
limit := defaultEnumerateSize
if formValueLimit != "" {
n, err := strconv.ParseUint(formValueLimit, 10, 32)
if err != nil || n > uint64(maxEnumerate) {
limit = maxEnumerate
} else {
limit = int(n)
}
}
waitSeconds := 0
if formValueMaxWaitSec != "" {
waitSeconds, _ = strconv.Atoi(formValueMaxWaitSec)
if waitSeconds != 0 && formValueAfter != "" {
conn.WriteHeader(http.StatusBadRequest)
fmt.Fprint(conn, errMsgMaxWaitSecWithAfter)
return
}
switch {
case waitSeconds < 0:
waitSeconds = 0
case waitSeconds > 30:
// TODO: don't hard-code 30. push this up into a blobserver interface
// for getting the configuration of the server (ultimately a flag in
// the binary)
waitSeconds = 30
}
}
conn.Header().Set("Content-Type", "text/javascript; charset=utf-8")
fmt.Fprintf(conn, "{\n \"blobs\": [\n")
loop := true
needsComma := false
deadline := time.Now().Add(time.Duration(waitSeconds) * time.Second)
after := ""
for loop && (waitSeconds == 0 || !time.Now().After(deadline)) {
if waitSeconds == 0 {
loop = false
}
blobch := make(chan blob.SizedRef, 100)
resultch := make(chan error, 1)
go func() {
resultch <- storage.EnumerateBlobs(context.TODO(), blobch, formValueAfter, limit+1)
}()
endsReached := 0
gotBlobs := 0
for endsReached < 2 {
select {
case sb, ok := <-blobch:
if !ok {
endsReached++
if gotBlobs <= limit {
after = ""
}
continue
}
gotBlobs++
loop = false
if gotBlobs > limit {
// We requested one more from storage than the user asked for.
// Now we know to return a "continueAfter" response key.
// But we don't return this blob.
continue
}
blobName := sb.Ref.String()
if needsComma {
fmt.Fprintf(conn, ",\n")
}
fmt.Fprintf(conn, " {\"blobRef\": \"%s\", \"size\": %d}",
blobName, sb.Size)
after = blobName
needsComma = true
case err := <-resultch:
if err != nil {
log.Printf("Error during enumerate: %v", err)
fmt.Fprintf(conn, "{{{ SERVER ERROR }}}")
return
}
endsReached++
}
}
if loop {
blobserver.WaitForBlob(storage, deadline, nil)
}
}
//......... some code omitted here .........
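The handler above asks storage for limit+1 blobs so it can tell whether a continueAfter key is needed without returning the extra blob to the client. A self-contained sketch of that pattern with fake refs; the enumerate function here is a stand-in, not the blobserver API.

package main

import "fmt"

// enumerate returns up to max fake refs greater than after; it stands in for
// a storage.EnumerateBlobs call.
func enumerate(after string, max int) []string {
    all := []string{"sha1-aa", "sha1-bb", "sha1-cc", "sha1-dd"}
    var out []string
    for _, ref := range all {
        if ref <= after {
            continue
        }
        out = append(out, ref)
        if len(out) == max {
            break
        }
    }
    return out
}

func main() {
    const limit = 2
    // Ask for one more than the client's limit: a full limit+1 batch means
    // more blobs remain, so a continueAfter key should be emitted, but the
    // extra blob itself is not returned.
    got := enumerate("", limit+1)
    continueAfter := ""
    if len(got) > limit {
        got = got[:limit]
        continueAfter = got[len(got)-1]
    }
    fmt.Println("blobs:", got, "continueAfter:", continueAfter)
}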
Author: camarox5
Project: coreos-baremeta
func (h *Handler) Query(rawq *SearchQuery) (*SearchResult, error) {
ctx := context.TODO() // TODO: set from rawq
exprResult, err := rawq.checkValid(ctx)
if err != nil {
return nil, fmt.Errorf("Invalid SearchQuery: %v", err)
}
q := rawq.plannedQuery(exprResult)
res := new(SearchResult)
s := &search{
h: h,
q: q,
res: res,
ctx: context.TODO(),
}
defer s.ctx.Cancel()
corpus := h.corpus
var unlockOnce sync.Once
if corpus != nil {
corpus.RLock()
defer unlockOnce.Do(corpus.RUnlock)
}
ch := make(chan camtypes.BlobMeta, buffered)
errc := make(chan error, 1)
cands := q.pickCandidateSource(s)
if candSourceHook != nil {
candSourceHook(cands.name)
}
sendCtx := s.ctx.New()
defer sendCtx.Cancel()
go func() { errc <- cands.send(sendCtx, s, ch) }()
wantAround, foundAround := false, false
if q.Around.Valid() {
wantAround = true
}
blobMatches := q.Constraint.matcher()
for meta := range ch {
match, err := blobMatches(s, meta.Ref, meta)
if err != nil {
return nil, err
}
if match {
res.Blobs = append(res.Blobs, &SearchResultBlob{
Blob: meta.Ref,
})
if q.Limit <= 0 || !cands.sorted {
continue
}
if !wantAround || foundAround {
if len(res.Blobs) == q.Limit {
sendCtx.Cancel()
break
}
continue
}
if q.Around == meta.Ref {
foundAround = true
if len(res.Blobs)*2 > q.Limit {
// If we've already collected more than half of the Limit when Around is found,
// we ditch the surplus from the beginning of the slice of results.
// If Limit is even, and the number of results before and after Around
// are both greater than half the limit, then there will be one more result before
// than after.
discard := len(res.Blobs) - q.Limit/2 - 1
if discard < 0 {
discard = 0
}
res.Blobs = res.Blobs[discard:]
}
if len(res.Blobs) == q.Limit {
sendCtx.Cancel()
break
}
continue
}
if len(res.Blobs) == q.Limit {
n := copy(res.Blobs, res.Blobs[len(res.Blobs)/2:])
res.Blobs = res.Blobs[:n]
}
}
}
if err := <-errc; err != nil && err != context.ErrCanceled {
return nil, err
}
if q.Limit > 0 && cands.sorted && wantAround && !foundAround {
// results are ignored if Around was not found
res.Blobs = nil
}
if !cands.sorted {
switch q.Sort {
case UnspecifiedSort, Unsorted:
// Nothing to do.
case BlobRefAsc:
sort.Sort(sortSearchResultBlobs{res.Blobs, func(a, b *SearchResultBlob) bool {
return a.Blob.Less(b.Blob)
}})
//......... some code omitted here .........
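When the Around blob is found, Query trims the front of res.Blobs so that at most Limit/2 earlier results are kept ahead of it. A small sketch of just that arithmetic, with hypothetical ref names:

package main

import "fmt"

// trimAroundWindow mirrors the arithmetic in Query: once the Around blob has
// just been appended as the last element, keep at most limit/2 results before
// it and discard the older surplus from the front.
func trimAroundWindow(results []string, limit int) []string {
    discard := len(results) - limit/2 - 1
    if discard < 0 {
        discard = 0
    }
    return results[discard:]
}

func main() {
    // Hypothetical refs; "around" is the Around blob that was just matched.
    results := []string{"r1", "r2", "r3", "r4", "r5", "around"}
    fmt.Println(trimAroundWindow(results, 4)) // [r4 r5 around]
}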
Author: camarox5
Project: coreos-baremeta
func (s *storage) readAllMetaBlobs() error {
type metaBlob struct {
br blob.Ref
dat []byte // encrypted blob
err error
}
metac := make(chan metaBlob, 16)
const maxInFlight = 50
var gate = make(chan bool, maxInFlight)
var stopEnumerate = make(chan bool) // closed on error
enumErrc := make(chan error, 1)
go func() {
var wg sync.WaitGroup
enumErrc <- blobserver.EnumerateAll(context.TODO(), s.meta, func(sb blob.SizedRef) error {
select {
case <-stopEnumerate:
return errors.New("enumeration stopped")
default:
}
wg.Add(1)
gate <- true
go func() {
defer wg.Done()
defer func() { <-gate }()
rc, _, err := s.meta.Fetch(sb.Ref)
var all []byte
if err == nil {
all, err = ioutil.ReadAll(rc)
rc.Close()
}
metac <- metaBlob{sb.Ref, all, err}
}()
return nil
})
wg.Wait()
close(metac)
}()
for mi := range metac {
err := mi.err
if err == nil {
err = s.processEncryptedMetaBlob(mi.br, mi.dat)
}
if err != nil {
close(stopEnumerate)
go func() {
for range metac {
}
}()
// TODO: advertise in this error message a new option or environment variable
// to skip a certain or all meta blobs, to allow partial recovery, if some
// are corrupt. For now, require all to be correct.
return fmt.Errorf("Error with meta blob %v: %v", mi.br, err)
}
}
return <-enumErrc
}
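The gate channel plus sync.WaitGroup in readAllMetaBlobs caps the number of concurrent meta-blob fetches at maxInFlight. Here is a runnable sketch of the same bounded-concurrency pattern, with made-up work in place of s.meta.Fetch:

package main

import (
    "fmt"
    "sync"
)

func main() {
    const maxInFlight = 3 // same idea as the gate above, smaller for the sketch
    gate := make(chan bool, maxInFlight)
    results := make(chan int, 16)

    var wg sync.WaitGroup
    go func() {
        for i := 0; i < 10; i++ {
            wg.Add(1)
            gate <- true // blocks while maxInFlight workers are already running
            go func(n int) {
                defer wg.Done()
                defer func() { <-gate }()
                results <- n * n // stand-in for fetching and decoding a meta blob
            }(i)
        }
        wg.Wait()
        close(results)
    }()

    for r := range results {
        fmt.Println(r)
    }
}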
Author: Micra
Project: camlistor
func (h *DeployHandler) serveCallback(w http.ResponseWriter, r *http.Request) {
ck, err := r.Cookie("user")
if err != nil {
http.Error(w,
fmt.Sprintf("Cookie expired, or CSRF attempt. Restart from %s%s%s", h.scheme, h.host, h.prefix),
http.StatusBadRequest)
h.Printf("Cookie expired, or CSRF attempt on callback.")
return
}
code := r.FormValue("code")
if code == "" {
httputil.ServeError(w, r, errors.New("No oauth code parameter in callback URL"))
return
}
h.Printf("successful authentication: %v", r.URL.RawQuery)
br, tk, err := fromState(r)
if err != nil {
httputil.ServeError(w, r, err)
return
}
if !xsrftoken.Valid(tk, h.xsrfKey, ck.Value, br.String()) {
httputil.ServeError(w, r, fmt.Errorf("Invalid xsrf token: %q", tk))
return
}
oAuthConf := h.oAuthConfig()
tok, err := oAuthConf.Exchange(oauth2.NoContext, code)
if err != nil {
httputil.ServeError(w, r, fmt.Errorf("could not obtain a token: %v", err))
return
}
h.Printf("successful authorization with token: %v", tok)
instConf, err := h.instanceConf(br)
if err != nil {
httputil.ServeError(w, r, err)
return
}
depl := &Deployer{
Client: oAuthConf.Client(oauth2.NoContext, tok),
Conf: instConf,
}
if found := h.serveOldInstance(w, br, depl); found {
return
}
if err := h.recordState(br, &creationState{
InstConf: br,
}); err != nil {
httputil.ServeError(w, r, err)
return
}
go func() {
inst, err := depl.Create(context.TODO())
state := &creationState{
InstConf: br,
}
if err != nil {
h.Printf("could not create instance: %v", err)
switch e := err.(type) {
case instanceExistsError:
state.Err = fmt.Sprintf("%v %v", e, helpDeleteInstance)
case projectIDError:
state.Err = fmt.Sprintf("%v", e)
default:
state.Err = fmt.Sprintf("%v. %v", err, fileIssue(br.String()))
}
} else {
state.InstAddr = addr(inst)
state.Success = true
state.CertFingerprintSHA1 = depl.certFingerprints["SHA-1"]
state.CertFingerprintSHA256 = depl.certFingerprints["SHA-256"]
}
if err := h.recordState(br, state); err != nil {
h.Printf("Could not record creation state for %v: %v", br, err)
h.recordStateErrMu.Lock()
defer h.recordStateErrMu.Unlock()
h.recordStateErr[br.String()] = err
}
}()
h.serveProgress(w, br)
}