Author: rfistma
Project: camlistor
func TestBuffer(t *testing.T) {
var (
toBack = []mod{
{false, "b", "b1"},
{false, "d", "d1"},
{false, "f", "f1"},
}
toBuf = []mod{
{false, "a", "a2"},
{false, "b", "b2"},
{false, "c", "c2"},
{false, "e", "e2"},
{true, "f", ""},
{false, "g", "g2"},
}
backBeforeFlush = []mod{
{false, "b", "b1"},
{false, "d", "d1"},
// f deleted
}
want = []mod{
{false, "a", "a2"},
{false, "b", "b2"},
{false, "c", "c2"},
{false, "d", "d1"},
{false, "e", "e2"},
// f deleted
{false, "g", "g2"},
}
)
// Populate backing storage.
backing := sorted.NewMemoryKeyValue()
for _, m := range toBack {
backing.Set(m.key, m.value)
}
// Wrap with buffered storage, populate.
buf := New(sorted.NewMemoryKeyValue(), backing, 1<<20)
for _, m := range toBuf {
if m.isDelete {
buf.Delete(m.key)
} else {
buf.Set(m.key, m.value)
}
}
// Check contents of buffered storage.
check(t, buf, "buffered", want)
check(t, backing, "backing before flush", backBeforeFlush)
// Flush.
if err := buf.Flush(); err != nil {
t.Fatal("flush error: ", err)
}
// Check contents of backing storage.
check(t, backing, "backing after flush", want)
}
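TestBuffer relies on a mod helper type and a check function defined elsewhere in the buffer package. A minimal sketch of what they might look like, reconstructed only from how they are used above (the field names and the check signature are assumptions, not the original definitions):

// Hypothetical reconstruction of the helpers used by TestBuffer.
type mod struct {
    isDelete bool
    key      string
    value    string
}

// check verifies that kv contains exactly the rows listed in want, in order.
func check(t *testing.T, kv sorted.KeyValue, name string, want []mod) {
    it := kv.Find("", "")
    i := 0
    for it.Next() {
        if i >= len(want) {
            t.Errorf("%s: unexpected extra row %q = %q", name, it.Key(), it.Value())
            i++
            continue
        }
        if it.Key() != want[i].key || it.Value() != want[i].value {
            t.Errorf("%s: row %d = (%q, %q); want (%q, %q)",
                name, i, it.Key(), it.Value(), want[i].key, want[i].value)
        }
        i++
    }
    if err := it.Close(); err != nil {
        t.Errorf("%s: iterator close: %v", name, err)
    }
    if i != len(want) {
        t.Errorf("%s: got %d rows; want %d", name, i, len(want))
    }
}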
Author: rayleyv
Project: camlistor
func BenchmarkCorpusFromStorage(b *testing.B) {
defer test.TLog(b)()
buildKvOnce.Do(func() {
kvForBenchmark = sorted.NewMemoryKeyValue()
idx := index.New(kvForBenchmark)
id := indextest.NewIndexDeps(idx)
id.Fataler = b
for i := 0; i < 10; i++ {
fileRef, _ := id.UploadFile("file.txt", fmt.Sprintf("some file %d", i), time.Unix(1382073153, 0))
pn := id.NewPlannedPermanode(fmt.Sprint(i))
id.SetAttribute(pn, "camliContent", fileRef.String())
}
})
defer index.SetVerboseCorpusLogging(true)
index.SetVerboseCorpusLogging(false)
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := index.NewCorpusFromStorage(kvForBenchmark)
if err != nil {
b.Fatal(err)
}
}
}
Author: kristofe
Project: camlistor
// NewMemoryIndex returns an Index backed only by memory, for use in tests.
func NewMemoryIndex() *Index {
ix, err := New(sorted.NewMemoryKeyValue())
if err != nil {
// Nothing can fail with an in-memory store, so it's worth panicking
// if we ever see an error here.
panic(err)
}
return ix
}
Author: rfistma
Project: camlistor
// TODO(mpl): move this test into kvtest. But that might require
// kvtest taking a "func () sorted.KeyValue) constructor param,
// so kvtest can create several and close in different ways.
func TestMemoryKV_DoubleClose(t *testing.T) {
kv := sorted.NewMemoryKeyValue()
it := kv.Find("", "")
it.Close()
it.Close()
kv.Close()
kv.Close()
}
Author: rfistma
Project: camlistor
func TestStorage(t *testing.T) {
storagetest.Test(t, func(t *testing.T) (sto blobserver.Storage, cleanup func()) {
s := &storage{
small: new(test.Fetcher),
large: new(test.Fetcher),
meta: sorted.NewMemoryKeyValue(),
log: test.NewLogger(t, "blobpacked: "),
}
s.init()
return s, func() {}
})
}
Author: rfistma
Project: camlistor
func TestIndexingClaimMissingPubkey(t *testing.T) {
s := sorted.NewMemoryKeyValue()
idx, err := index.New(s)
if err != nil {
t.Fatal(err)
}
id := indextest.NewIndexDeps(idx)
id.Fataler = t
goodKeyFetcher := id.Index.KeyFetcher
emptyFetcher := new(test.Fetcher)
pn := id.NewPermanode()
// Prevent the index from being able to find the public key:
idx.KeyFetcher = emptyFetcher
// This previously failed to upload, since the signer's public key was
// unavailable.
claimRef := id.SetAttribute(pn, "tag", "foo")
t.Logf(" Claim is %v", claimRef)
t.Logf("Signer is %v", id.SignerBlobRef)
// Verify that populateClaim noted the missing public key blob:
{
key := fmt.Sprintf("missing|%s|%s", claimRef, id.SignerBlobRef)
if got, err := s.Get(key); got == "" || err != nil {
t.Errorf("key %q missing (err: %v); want 1", key, err)
}
}
// Now make it available again:
idx.KeyFetcher = idx.Exp_BlobSource()
if err := copyBlob(id.SignerBlobRef, idx.Exp_BlobSource().(*test.Fetcher), goodKeyFetcher); err != nil {
t.Errorf("Error copying public key to BlobSource: %v", err)
}
if err := copyBlob(id.SignerBlobRef, idx, goodKeyFetcher); err != nil {
t.Errorf("Error uploading public key to indexer: %v", err)
}
idx.Exp_AwaitReindexing(t)
// Verify that the missing-pubkey row has now been cleaned up:
{
key := fmt.Sprintf("missing|%s|%s", claimRef, id.SignerBlobRef)
if got, err := s.Get(key); got != "" || err == nil {
t.Errorf("row %q still exists", key)
}
}
}
Author: ndarile
Project: camlistor
func TestOutOfOrderIndexing(t *testing.T) {
tf := new(test.Fetcher)
s := sorted.NewMemoryKeyValue()
ix, err := index.New(s)
if err != nil {
t.Fatal(err)
}
ix.BlobSource = tf
t.Logf("file ref = %v", fileBlobRef)
t.Logf("missing data chunks = %v, %v, %v", chunk1ref, chunk2ref, chunk3ref)
add := func(b *test.Blob) {
tf.AddBlob(b)
if _, err := ix.ReceiveBlob(b.BlobRef(), b.Reader()); err != nil {
t.Fatalf("ReceiveBlob(%v): %v", b.BlobRef(), err)
}
}
add(fileBlob)
{
key := fmt.Sprintf("missing|%s|%s", fileBlobRef, chunk1ref)
if got, err := s.Get(key); got == "" || err != nil {
t.Errorf("key %q missing (err: %v); want 1", key, err)
}
}
add(chunk1)
add(chunk2)
ix.Exp_AwaitReindexing(t)
{
key := fmt.Sprintf("missing|%s|%s", fileBlobRef, chunk3ref)
if got, err := s.Get(key); got == "" || err != nil {
t.Errorf("key %q missing (err: %v); want 1", key, err)
}
}
add(chunk3)
ix.Exp_AwaitReindexing(t)
foreachSorted(t, s, func(k, v string) {
if strings.HasPrefix(k, "missing|") {
t.Errorf("Shouldn't have missing key: %q", k)
}
})
}
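foreachSorted is not shown in this listing; a plausible sketch, assuming it simply walks every row of the sorted.KeyValue with the iterator API used elsewhere on this page:

// Hypothetical sketch of the foreachSorted helper: iterate over all rows and
// hand each (key, value) pair to fn.
func foreachSorted(t *testing.T, s sorted.KeyValue, fn func(k, v string)) {
    it := s.Find("", "")
    for it.Next() {
        fn(it.Key(), it.Value())
    }
    if err := it.Close(); err != nil {
        t.Fatalf("iterator close: %v", err)
    }
}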
Author: rfistma
Project: camlistor
func TestStorageNoSmallSubfetch(t *testing.T) {
storagetest.Test(t, func(t *testing.T) (sto blobserver.Storage, cleanup func()) {
s := &storage{
// We hide small's SubFetcher so that *storage's own SubFetch gets exercised;
// otherwise it delegates to the underlying SubFetcher when small implements that interface.
small: hideSubFetcher(new(test.Fetcher)),
large: new(test.Fetcher),
meta: sorted.NewMemoryKeyValue(),
log: test.NewLogger(t, "blobpacked: "),
}
s.init()
return s, func() {}
})
}
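hideSubFetcher is what forces *storage down its own SubFetch path. A minimal sketch, assuming it just wraps the small storage in a type that no longer satisfies the SubFetcher interface:

// Hypothetical sketch: embed only blobserver.Storage so a type assertion for
// the SubFetcher interface on the wrapper fails, and *storage cannot delegate.
type noSubFetch struct {
    blobserver.Storage
}

func hideSubFetcher(sto blobserver.Storage) blobserver.Storage {
    return noSubFetch{sto}
}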
Author: rfistma
Project: camlistor
func testStreamBlobs(t *testing.T,
small blobserver.Storage,
large subFetcherStorage,
populate func(*testing.T, *storage) []storagetest.StreamerTestOpt) {
s := &storage{
small: small,
large: large,
meta: sorted.NewMemoryKeyValue(),
log: test.NewLogger(t, "blobpacked: "),
}
s.init()
wants := populate(t, s)
storagetest.TestStreamer(t, s, wants...)
}
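A sketch of how this helper might be invoked; the test name and the single-blob populate callback below are made up for illustration:

// Hypothetical usage of testStreamBlobs: the populate callback uploads blobs
// to the blobpacked storage and returns the streamer-test expectations.
func TestStreamOneSmallBlob(t *testing.T) {
    testStreamBlobs(t,
        new(test.Fetcher), // small
        new(test.Fetcher), // large
        func(t *testing.T, s *storage) []storagetest.StreamerTestOpt {
            (&test.Blob{"small blob"}).MustUpload(t, s)
            return []storagetest.StreamerTestOpt{storagetest.WantN(1)}
        })
}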
Author: rfistma
Project: camlistor
// see if storage proxies through to small for Fetch, Stat, and Enumerate.
func TestSmallFallback(t *testing.T) {
small := new(test.Fetcher)
s := &storage{
small: small,
large: new(test.Fetcher),
meta: sorted.NewMemoryKeyValue(),
log: test.NewLogger(t, "blobpacked: "),
}
s.init()
b1 := &test.Blob{"foo"}
b1.MustUpload(t, small)
wantSB := b1.SizedRef()
// Fetch
rc, _, err := s.Fetch(b1.BlobRef())
if err != nil {
t.Errorf("failed to Get blob: %v", err)
} else {
rc.Close()
}
// Stat.
sb, err := blobserver.StatBlob(s, b1.BlobRef())
if err != nil {
t.Errorf("failed to Stat blob: %v", err)
} else if sb != wantSB {
t.Errorf("Stat = %v; want %v", sb, wantSB)
}
// Enumerate
saw := false
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
if err := blobserver.EnumerateAll(ctx, s, func(sb blob.SizedRef) error {
if sb != wantSB {
return fmt.Errorf("saw blob %v; want %v", sb, wantSB)
}
saw = true
return nil
}); err != nil {
t.Errorf("EnuerateAll: %v", err)
}
if !saw {
t.Error("didn't see blob in Enumerate")
}
}
Author: rfistma
Project: camlistor
func TestStreamBlobs(t *testing.T) {
small := new(test.Fetcher)
s := &storage{
small: small,
large: new(test.Fetcher),
meta: sorted.NewMemoryKeyValue(),
log: test.NewLogger(t, "blobpacked: "),
}
s.init()
all := map[blob.Ref]bool{}
const nBlobs = 10
for i := 0; i < nBlobs; i++ {
b := &test.Blob{strconv.Itoa(i)}
b.MustUpload(t, small)
all[b.BlobRef()] = true
}
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
token := "" // beginning
got := map[blob.Ref]bool{}
dest := make(chan blobserver.BlobAndToken, 16)
done := make(chan bool)
go func() {
defer close(done)
for bt := range dest {
got[bt.Blob.Ref()] = true
}
}()
err := s.StreamBlobs(ctx, dest, token)
if err != nil {
t.Fatalf("StreamBlobs = %v", err)
}
<-done
if !reflect.DeepEqual(got, all) {
t.Errorf("Got blobs %v; want %v", got, all)
}
storagetest.TestStreamer(t, s, storagetest.WantN(nBlobs))
}
Author: Jimmy9
Project: camlistor
// dataStores returns the blobserver that stores the instances' configurations, and the kv
// store for the instances' states.
func dataStores() (blobserver.Storage, sorted.KeyValue, error) {
if DevHandler {
return &memory.Storage{}, sorted.NewMemoryKeyValue(), nil
}
dataDir := os.Getenv("CAMLI_GCE_DATA")
if dataDir == "" {
dataDir = "camli-gce-data"
log.Printf("data dir not provided as env var CAMLI_GCE_DATA, so defaulting to %v", dataDir)
}
blobsDir := filepath.Join(dataDir, "instance-conf")
if err := os.MkdirAll(blobsDir, 0700); err != nil {
return nil, nil, err
}
instConf, err := localdisk.New(blobsDir)
if err != nil {
return nil, nil, err
}
instState, err := leveldb.NewStorage(filepath.Join(dataDir, "instance-state"))
if err != nil {
return nil, nil, err
}
return instConf, instState, nil
}
Author: pombredann
Project: camlistor
func main() {
launchConfig.MaybeDeploy()
addr := flag.String("addr", defaultListenAddr(), "specify address for server to listen on")
flag.Parse()
memkv := sorted.NewMemoryKeyValue()
if err := memkv.Set("6401800c.camlistore.net.", "159.203.246.79"); err != nil {
panic(err)
}
if err := memkv.Set("camlistore.net.", "104.154.231.160"); err != nil {
panic(err)
}
if err := memkv.Set("www.camlistore.net.", "104.154.231.160"); err != nil {
panic(err)
}
ds := NewDNSServer(memkv)
log.Printf("serving DNS on %s\n", *addr)
if err := dns.ListenAndServe(*addr, "udp", ds); err != nil {
log.Fatal(err)
}
}
Author: rayleyv
Project: camlistor
func newTestStorage() *testStorage {
sto := &storage{
index: sorted.NewMemoryKeyValue(),
}
if err := sto.setKey(testKey); err != nil {
panic(err)
}
ts := &testStorage{
sto: sto,
blobs: new(test.Fetcher),
meta: new(test.Fetcher),
}
sto.blobs = ts.blobs
sto.meta = ts.meta
sto.testRandIV = func() []byte {
ts.mu.Lock()
defer ts.mu.Unlock()
var ret [16]byte
ts.iv++
binary.BigEndian.PutUint64(ret[8:], ts.iv)
return ret[:]
}
return ts
}
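The testStorage wrapper is defined elsewhere in these storage tests; a hypothetical reconstruction inferred only from the fields this constructor touches (sync is assumed to be imported for the mutex):

// Hypothetical reconstruction of testStorage.
type testStorage struct {
    sto   *storage      // the storage under test
    blobs *test.Fetcher // holds the data blobs
    meta  *test.Fetcher // holds the meta blobs

    mu sync.Mutex // guards iv
    iv uint64     // counter used to fabricate deterministic IVs in testRandIV
}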
Author: rayleyv
Project: camlistor
// NewMemoryIndex returns an Index backed only by memory, for use in tests.
func NewMemoryIndex() *Index {
return New(sorted.NewMemoryKeyValue())
}
Author: camlistor
Project: camlistor
func main() {
launchConfig.MaybeDeploy()
flag.Parse()
var kv keyValue
var httpsListenAddr string
if metadata.OnGCE() {
httpsListenAddr = ":443"
dsClient, err := datastore.NewClient(context.Background(), GCEProjectID)
if err != nil {
log.Fatalf("Error creating datastore client for records: %v", err)
}
kv = cachedStore{
dsClient: dsClient,
cache: lru.New(cacheSize),
}
} else {
httpsListenAddr = ":4430"
kv = memkv{skv: sorted.NewMemoryKeyValue()}
}
if err := kv.Set("6401800c.camlistore.net.", "159.203.246.79"); err != nil {
log.Fatalf("Error adding %v:%v record: %v", "6401800c.camlistore.net.", "159.203.246.79", err)
}
if err := kv.Set(domain, *flagServerIP); err != nil {
log.Fatalf("Error adding %v:%v record: %v", domain, *flagServerIP, err)
}
if err := kv.Set("www.camlistore.net.", *flagServerIP); err != nil {
log.Fatalf("Error adding %v:%v record: %v", "www.camlistore.net.", *flagServerIP, err)
}
ds := newDNSServer(kv)
cs := &gpgchallenge.Server{
OnSuccess: func(identity string, address string) error {
log.Printf("Adding %v.camlistore.net. as %v", identity, address)
return ds.dataSource.Set(strings.ToLower(identity+".camlistore.net."), address)
},
}
tcperr := make(chan error, 1)
udperr := make(chan error, 1)
httperr := make(chan error, 1)
log.Printf("serving DNS on %s\n", *addr)
go func() {
tcperr <- dns.ListenAndServe(*addr, "tcp", ds)
}()
go func() {
udperr <- dns.ListenAndServe(*addr, "udp", ds)
}()
if metadata.OnGCE() {
// TODO(mpl): if we want to get a cert for anything
// *.camlistore.net, it's a bit of a chicken and egg problem, since
// we need camnetdns itself to be already running and answering DNS
// queries. It's probably doable, but easier for now to just ask
// one for camnetdns.camlistore.org, since that name is not
// resolved by camnetdns.
hostname := strings.TrimSuffix(authorityNS, ".")
m := autocert.Manager{
Prompt: autocert.AcceptTOS,
HostPolicy: autocert.HostWhitelist(hostname),
Cache: autocert.DirCache(osutil.DefaultLetsEncryptCache()),
}
ln, err := tls.Listen("tcp", httpsListenAddr, &tls.Config{
Rand: rand.Reader,
Time: time.Now,
NextProtos: []string{http2.NextProtoTLS, "http/1.1"},
MinVersion: tls.VersionTLS12,
GetCertificate: m.GetCertificate,
})
if err != nil {
log.Fatalf("Error listening on %v: %v", httpsListenAddr, err)
}
go func() {
httperr <- http.Serve(ln, cs)
}()
}
select {
case err := <-tcperr:
log.Fatalf("DNS over TCP error: %v", err)
case err := <-udperr:
log.Fatalf("DNS error: %v", err)
case err := <-httperr:
log.Fatalf("HTTP server error: %v", err)
}
}
Author: ndarile
Project: camlistor
func TestInitNeededMaps(t *testing.T) {
s := sorted.NewMemoryKeyValue()
// Start out with the index noting that all the data chunks are missing:
s.Set("schemaversion", fmt.Sprint(index.Exp_schemaVersion()))
s.Set(index.Exp_missingKey(fileBlobRef, chunk1ref), "1")
s.Set(index.Exp_missingKey(fileBlobRef, chunk2ref), "1")
s.Set(index.Exp_missingKey(fileBlobRef, chunk3ref), "1")
ix, err := index.New(s)
if err != nil {
t.Fatal(err)
}
{
needs, neededBy, _ := ix.NeededMapsForTest()
needsWant := map[blob.Ref][]blob.Ref{
fileBlobRef: []blob.Ref{chunk1ref, chunk2ref, chunk3ref},
}
neededByWant := map[blob.Ref][]blob.Ref{
chunk1ref: []blob.Ref{fileBlobRef},
chunk2ref: []blob.Ref{fileBlobRef},
chunk3ref: []blob.Ref{fileBlobRef},
}
if !reflect.DeepEqual(needs, needsWant) {
t.Errorf("needs = %v; want %v", needs, needsWant)
}
if !reflect.DeepEqual(neededBy, neededByWant) {
t.Errorf("neededBy = %v; want %v", neededBy, neededByWant)
}
}
ix.Exp_noteBlobIndexed(chunk2ref)
{
needs, neededBy, ready := ix.NeededMapsForTest()
needsWant := map[blob.Ref][]blob.Ref{
fileBlobRef: []blob.Ref{chunk1ref, chunk3ref},
}
neededByWant := map[blob.Ref][]blob.Ref{
chunk1ref: []blob.Ref{fileBlobRef},
chunk3ref: []blob.Ref{fileBlobRef},
}
if !reflect.DeepEqual(needs, needsWant) {
t.Errorf("needs = %v; want %v", needs, needsWant)
}
if !reflect.DeepEqual(neededBy, neededByWant) {
t.Errorf("neededBy = %v; want %v", neededBy, neededByWant)
}
if len(ready) != 0 {
t.Errorf("ready = %v; want nothing", ready)
}
}
ix.Exp_noteBlobIndexed(chunk1ref)
{
needs, neededBy, ready := ix.NeededMapsForTest()
needsWant := map[blob.Ref][]blob.Ref{
fileBlobRef: []blob.Ref{chunk3ref},
}
neededByWant := map[blob.Ref][]blob.Ref{
chunk3ref: []blob.Ref{fileBlobRef},
}
if !reflect.DeepEqual(needs, needsWant) {
t.Errorf("needs = %v; want %v", needs, needsWant)
}
if !reflect.DeepEqual(neededBy, neededByWant) {
t.Errorf("neededBy = %v; want %v", neededBy, neededByWant)
}
if len(ready) != 0 {
t.Errorf("ready = %v; want nothing", ready)
}
}
ix.Exp_noteBlobIndexed(chunk3ref)
{
needs, neededBy, ready := ix.NeededMapsForTest()
needsWant := map[blob.Ref][]blob.Ref{}
neededByWant := map[blob.Ref][]blob.Ref{}
if !reflect.DeepEqual(needs, needsWant) {
t.Errorf("needs = %v; want %v", needs, needsWant)
}
if !reflect.DeepEqual(neededBy, neededByWant) {
t.Errorf("neededBy = %v; want %v", neededBy, neededByWant)
}
if !ready[fileBlobRef] {
t.Error("fileBlobRef not ready")
}
}
dumpSorted(t, s)
}
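dumpSorted is not included in this listing; a minimal sketch, assuming it just logs every row (it reuses the foreachSorted helper that appears with TestOutOfOrderIndexing above):

// Hypothetical sketch of dumpSorted: log every index row, which helps when a
// needed-maps test fails.
func dumpSorted(t *testing.T, s sorted.KeyValue) {
    foreachSorted(t, s, func(k, v string) {
        t.Logf("index %q = %q", k, v)
    })
}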
Author: stevear
Project: camlistor
// populate fills the blobserver and, through the sync handler, the index at the same time.
func populate(b *testing.B, dbfile string,
sortedProvider func(dbfile string) (sorted.KeyValue, error)) *index.Index {
b.Logf("populating %v", dbfile)
kv, err := sortedProvider(dbfile)
if err != nil {
b.Fatal(err)
}
bsRoot := filepath.Join(filepath.Dir(dbfile), "bs")
if err := os.MkdirAll(bsRoot, 0700); err != nil {
b.Fatal(err)
}
dataDir, err := os.Open("testdata")
if err != nil {
b.Fatal(err)
}
fis, err := dataDir.Readdir(-1)
if err != nil {
b.Fatal(err)
}
if len(fis) == 0 {
b.Fatalf("no files in %s dir", "testdata")
}
ks := doKeyStuff(b)
bs, err := localdisk.New(bsRoot)
if err != nil {
b.Fatal(err)
}
if _, err := blobserver.Receive(bs, ks.pubKeyRef, strings.NewReader(ks.pubKey)); err != nil {
b.Fatal(err)
}
idx, err := index.New(kv)
if err != nil {
b.Fatal(err)
}
idx.InitBlobSource(bs)
sh := server.NewSyncHandler("/bs/", "/index/", bs, idx, sorted.NewMemoryKeyValue())
b.ResetTimer()
for _, v := range fis {
f, err := os.Open(filepath.Join(dataDir.Name(), v.Name()))
if err != nil {
b.Fatal(err)
}
td := &trackDigestReader{r: f}
fm := schema.NewFileMap(v.Name())
fm.SetModTime(v.ModTime())
fileRef, err := schema.WriteFileMap(bs, fm, td)
if err != nil {
b.Fatal(err)
}
f.Close()
unsigned := schema.NewPlannedPermanode(td.Sum())
unsigned.SetSigner(ks.pubKeyRef)
sr := &jsonsign.SignRequest{
UnsignedJSON: unsigned.Blob().JSON(),
// TODO(mpl): if we make a bs that discards, replace this with a memory bs that has only the pubkey
Fetcher: bs,
EntityFetcher: ks.entityFetcher,
SignatureTime: time.Unix(0, 0),
}
signed, err := sr.Sign()
if err != nil {
b.Fatal("problem signing: " + err.Error())
}
pn := blob.SHA1FromString(signed)
// N.B: use blobserver.Receive so that the blob hub gets notified, and the blob gets enqueued into the index
if _, err := blobserver.Receive(bs, pn, strings.NewReader(signed)); err != nil {
b.Fatal(err)
}
contentAttr := schema.NewSetAttributeClaim(pn, "camliContent", fileRef.String())
claimTime, ok := fm.ModTime()
if !ok {
b.Fatalf("no ModTime in FileMap for %v", v.Name())
}
contentAttr.SetClaimDate(claimTime)
contentAttr.SetSigner(ks.pubKeyRef)
sr = &jsonsign.SignRequest{
UnsignedJSON: contentAttr.Blob().JSON(),
// TODO(mpl): if we make a bs that discards, replace this with a memory bs that has only the pubkey
Fetcher: bs,
EntityFetcher: ks.entityFetcher,
SignatureTime: claimTime,
}
signed, err = sr.Sign()
if err != nil {
b.Fatal("problem signing: " + err.Error())
}
cl := blob.SHA1FromString(signed)
if _, err := blobserver.Receive(bs, cl, strings.NewReader(signed)); err != nil {
b.Fatal(err)
}
}
sh.IdleWait()
return idx
// ......... (remainder of this function omitted) .........
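The populate benchmark helper above wraps each file in a trackDigestReader. A plausible sketch of that wrapper, assuming it is an io.Reader that hashes every byte passing through it so the planned permanode can be seeded from the file contents (SHA-1 is assumed to match the blob.SHA1FromString calls above; requires crypto/sha1, fmt, hash, and io):

// Hypothetical sketch of trackDigestReader.
type trackDigestReader struct {
    r io.Reader
    h hash.Hash
}

func (t *trackDigestReader) Read(p []byte) (int, error) {
    if t.h == nil {
        t.h = sha1.New()
    }
    n, err := t.r.Read(p)
    t.h.Write(p[:n])
    return n, err
}

// Sum returns the hex digest of everything read so far.
func (t *trackDigestReader) Sum() string {
    return fmt.Sprintf("%x", t.h.Sum(nil))
}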
Author: rfistma
Project: camlistor
// tests that we add the missing wholeRef entries in FileInfo rows when going from
// a version 4 to a version 5 index.
func TestFixMissingWholeref(t *testing.T) {
tf := new(test.Fetcher)
s := sorted.NewMemoryKeyValue()
ix, err := index.New(s)
if err != nil {
t.Fatal(err)
}
ix.InitBlobSource(tf)
// populate with a file
add := func(b *test.Blob) {
tf.AddBlob(b)
if _, err := ix.ReceiveBlob(b.BlobRef(), b.Reader()); err != nil {
t.Fatalf("ReceiveBlob(%v): %v", b.BlobRef(), err)
}
}
add(chunk1)
add(chunk2)
add(chunk3)
add(fileBlob)
// revert the row to the old form, by stripping the wholeRef suffix
key := "fileinfo|" + fileBlobRef.String()
val5, err := s.Get(key)
if err != nil {
t.Fatalf("could not get %v: %v", key, err)
}
parts := strings.SplitN(val5, "|", 4)
val4 := strings.Join(parts[:3], "|")
if err := s.Set(key, val4); err != nil {
t.Fatalf("could not set (%v, %v): %v", key, val4, err)
}
// revert index version at 4 to trigger the fix
if err := s.Set("schemaversion", "4"); err != nil {
t.Fatal(err)
}
// init broken index
ix, err = index.New(s)
if err != index.Exp_ErrMissingWholeRef {
t.Fatalf("wrong error upon index initialization: got %v, wanted %v", err, index.Exp_ErrMissingWholeRef)
}
// and fix it
if err := ix.Exp_FixMissingWholeRef(tf); err != nil {
t.Fatal(err)
}
// init fixed index
ix, err = index.New(s)
if err != nil {
t.Fatal(err)
}
// and check that the value is now actually fixed
fi, err := ix.GetFileInfo(fileBlobRef)
if err != nil {
t.Fatal(err)
}
if fi.WholeRef.String() != parts[3] {
t.Fatalf("index fileInfo wholeref was not fixed: got %q, wanted %v", fi.WholeRef, parts[3])
}
}
Author: stevear
Project: camlistor
func TestReindex(t *testing.T) {
if testing.Short() {
t.Skip("skipping in short mode")
}
type file struct {
size int64
name string
contents []byte
}
files := []file{
{17 << 20, "foo.dat", randBytesSrc(17<<20, 42)},
{10 << 20, "bar.dat", randBytesSrc(10<<20, 43)},
{5 << 20, "baz.dat", randBytesSrc(5<<20, 44)},
}
pt := testPack(t,
func(sto blobserver.Storage) error {
for _, f := range files {
if _, err := schema.WriteFileFromReader(sto, f.name, bytes.NewReader(f.contents)); err != nil {
return err
}
}
return nil
},
wantNumLargeBlobs(4),
wantNumSmallBlobs(0),
)
// back up the meta that is supposed to be lost/erased.
// pt.sto.reindex allocates a new pt.sto.meta, so meta != pt.sto.meta after it is called.
meta := pt.sto.meta
// and build new meta index
if err := pt.sto.reindex(context.TODO(), func() (sorted.KeyValue, error) {
return sorted.NewMemoryKeyValue(), nil
}); err != nil {
t.Fatal(err)
}
// check that new meta is identical to "lost" one
newRows := 0
if err := sorted.Foreach(pt.sto.meta, func(key, newValue string) error {
oldValue, err := meta.Get(key)
if err != nil {
t.Fatalf("Could not get value for %v in old meta: %v", key, err)
}
if oldValue != newValue {
t.Fatalf("Reindexing error: for key %v, got %v, want %v", key, newValue, oldValue)
}
newRows++
return nil
}); err != nil {
t.Fatal(err)
}
// make sure they have the same number of entries too, to be sure that the reindexing
// did not miss entries that the old meta had.
oldRows := countSortedRows(t, meta)
if oldRows != newRows {
t.Fatalf("index number of entries mismatch: got %d entries in new index, wanted %d (as in index before reindexing)", newRows, oldRows)
}
// And verify we can read one of the files back out.
hash := blob.NewHash()
hash.Write(files[0].contents)
pt.testOpenWholeRef(t, blob.RefFromHash(hash), files[0].size)
}
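TestReindex depends on two small helpers that are not part of this listing. Plausible sketches, assuming countSortedRows counts rows with sorted.Foreach (used above) and randBytesSrc returns n deterministic pseudo-random bytes from a seed (math/rand assumed):

// Hypothetical sketch: count the rows in a sorted.KeyValue.
func countSortedRows(t *testing.T, kv sorted.KeyValue) int {
    rows := 0
    if err := sorted.Foreach(kv, func(key, value string) error {
        rows++
        return nil
    }); err != nil {
        t.Fatal(err)
    }
    return rows
}

// Hypothetical sketch: deterministic pseudo-random bytes, so test inputs are
// reproducible across runs.
func randBytesSrc(n int, seed int64) []byte {
    r := rand.New(rand.NewSource(seed))
    buf := make([]byte, n)
    for i := range buf {
        buf[i] = byte(r.Int63())
    }
    return buf
}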