refactor filer_pb.Entry and filer.Entry to use GetChunks()

This prepares for adding locking around chunk reads in a later change.
chrislu 2022-11-15 06:33:36 -08:00
parent 371972a1c2
commit 70a4c98b00
56 changed files with 107 additions and 103 deletions
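The change itself is mechanical: every read of the `Chunks` field, on both the protobuf `filer_pb.Entry` and the in-memory `filer.Entry`, now goes through a `GetChunks()` accessor. The payoff comes later: with all reads funneled through one method, a lock can be introduced in a single place. A minimal sketch of where this is headed, with stand-in types and an assumed `chunksLock` field that is *not* part of this commit:

```go
package filer

import "sync"

// FileChunk stands in for filer_pb.FileChunk in this sketch.
type FileChunk struct{ FileId string }

// Entry is a trimmed stand-in for filer.Entry; chunksLock is the
// assumed future field that this refactor prepares for.
type Entry struct {
	chunksLock sync.RWMutex
	Chunks     []*FileChunk
}

// GetChunks is now the single choke point for chunk reads. In this
// commit it simply returns the field; later it can take a read lock
// without touching any of the ~100 call sites again.
func (entry *Entry) GetChunks() []*FileChunk {
	entry.chunksLock.RLock()
	defer entry.chunksLock.RUnlock()
	return entry.Chunks
}

// AddChunks sketches the matching write side under the same lock.
func (entry *Entry) AddChunks(chunks ...*FileChunk) {
	entry.chunksLock.Lock()
	defer entry.chunksLock.Unlock()
	entry.Chunks = append(entry.Chunks, chunks...)
}
```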

View file

@@ -59,7 +59,7 @@ func walkMetaFile(dst *os.File) error {
 }
 fmt.Fprintf(os.Stdout, "file %s %v\n", util.FullPath(fullEntry.Dir).Child(fullEntry.Entry.Name), fullEntry.Entry.Attributes.String())
-for i, chunk := range fullEntry.Entry.Chunks {
+for i, chunk := range fullEntry.Entry.GetChunks() {
 fmt.Fprintf(os.Stdout, " chunk: %d %v %d,%x%08x\n", i+1, chunk, chunk.Fid.VolumeId, chunk.Fid.FileKey, chunk.Fid.Cookie)
 }

View file

@@ -114,7 +114,7 @@ func runFilerCat(cmd *Command, args []string) bool {
 filerCat.filerClient = client
-return filer.StreamContent(&filerCat, writer, respLookupEntry.Entry.Chunks, 0, int64(filer.FileSize(respLookupEntry.Entry)))
+return filer.StreamContent(&filerCat, writer, respLookupEntry.Entry.GetChunks(), 0, int64(filer.FileSize(respLookupEntry.Entry)))
 })

View file

@@ -158,7 +158,7 @@ func (store *AbstractSqlStore) InsertEntry(ctx context.Context, entry *filer.Ent
 return fmt.Errorf("encode %s: %s", entry.FullPath, err)
 }
-if len(entry.Chunks) > filer.CountEntryChunksForGzip {
+if len(entry.GetChunks()) > filer.CountEntryChunksForGzip {
 meta = util.MaybeGzipData(meta)
 }
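This same guard repeats in each store backend below (ArangoDB, Cassandra, etcd, HBase, LevelDB, MongoDB, Redis, RocksDB, YDB): once an entry carries more chunks than `filer.CountEntryChunksForGzip`, its serialized metadata is gzip-compressed before being persisted. Switching the length check to `len(entry.GetChunks())` keeps even this counting read on the accessor, so a future read lock covers it as well.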

View file

@@ -157,7 +157,7 @@ func (store *ArangodbStore) InsertEntry(ctx context.Context, entry *filer.Entry)
 return fmt.Errorf("encode %s: %s", entry.FullPath, err)
 }
-if len(entry.Chunks) > filer.CountEntryChunksForGzip {
+if len(entry.GetChunks()) > filer.CountEntryChunksForGzip {
 meta = util.MaybeGzipData(meta)
 }
 model := &Model{
@@ -196,7 +196,7 @@ func (store *ArangodbStore) UpdateEntry(ctx context.Context, entry *filer.Entry)
 return fmt.Errorf("encode %s: %s", entry.FullPath, err)
 }
-if len(entry.Chunks) > filer.CountEntryChunksForGzip {
+if len(entry.GetChunks()) > filer.CountEntryChunksForGzip {
 meta = util.MaybeGzipData(meta)
 }
 model := &Model{

View file

@@ -100,7 +100,7 @@ func (store *CassandraStore) InsertEntry(ctx context.Context, entry *filer.Entry
 return fmt.Errorf("encode %s: %s", entry.FullPath, err)
 }
-if len(entry.Chunks) > filer.CountEntryChunksForGzip {
+if len(entry.GetChunks()) > filer.CountEntryChunksForGzip {
 meta = util.MaybeGzipData(meta)
 }

View file

@@ -46,7 +46,7 @@ type Entry struct {
 }
 func (entry *Entry) Size() uint64 {
-return maxUint64(maxUint64(TotalSize(entry.Chunks), entry.FileSize), uint64(len(entry.Content)))
+return maxUint64(maxUint64(TotalSize(entry.GetChunks()), entry.FileSize), uint64(len(entry.Content)))
 }
 func (entry *Entry) Timestamp() time.Time {
@@ -91,7 +91,7 @@ func (entry *Entry) ToExistingProtoEntry(message *filer_pb.Entry) {
 }
 message.IsDirectory = entry.IsDirectory()
 message.Attributes = EntryAttributeToPb(entry)
-message.Chunks = entry.Chunks
+message.Chunks = entry.GetChunks()
 message.Extended = entry.Extended
 message.HardLinkId = entry.HardLinkId
 message.HardLinkCounter = entry.HardLinkCounter
@@ -123,6 +123,10 @@ func (entry *Entry) ToProtoFullEntry() *filer_pb.FullEntry {
 }
 }
+func (entry *Entry) GetChunks() []*filer_pb.FileChunk {
+return entry.Chunks
+}
 func FromPbEntry(dir string, entry *filer_pb.Entry) *Entry {
 t := &Entry{}
 t.FullPath = util.NewFullPath(dir, entry.Name)
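The accessor added above is the only new code in the commit. On the protobuf side nothing needs to be written by hand: protoc-gen-go already generates a getter for every message field, so `filer_pb.Entry.GetChunks()` exists in the generated `filer.pb.go`. Generated getters also check the receiver for nil, roughly this shape (abbreviated from typical protoc-gen-go output, not copied from the repo):

```go
// Typical protoc-gen-go getter: nil-safe, unlike a direct field read,
// so entry.GetChunks() works even when entry is a nil *Entry.
func (x *Entry) GetChunks() []*FileChunk {
	if x != nil {
		return x.Chunks
	}
	return nil
}
```

Note that the hand-written `filer.Entry` accessor dereferences its receiver directly, so unlike the generated getter it would still panic on a nil entry; presumably callers there always hold a concrete entry.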

View file

@@ -82,7 +82,7 @@ func (store *EtcdStore) InsertEntry(ctx context.Context, entry *filer.Entry) (er
 return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
 }
-if len(entry.Chunks) > filer.CountEntryChunksForGzip {
+if len(entry.GetChunks()) > filer.CountEntryChunksForGzip {
 meta = weed_util.MaybeGzipData(meta)
 }

View file

@@ -31,19 +31,19 @@ func FileSize(entry *filer_pb.Entry) (size uint64) {
 fileSize = maxUint64(fileSize, uint64(entry.RemoteEntry.RemoteSize))
 }
 }
-return maxUint64(TotalSize(entry.Chunks), fileSize)
+return maxUint64(TotalSize(entry.GetChunks()), fileSize)
 }
 func ETag(entry *filer_pb.Entry) (etag string) {
 if entry.Attributes == nil || entry.Attributes.Md5 == nil {
-return ETagChunks(entry.Chunks)
+return ETagChunks(entry.GetChunks())
 }
 return fmt.Sprintf("%x", entry.Attributes.Md5)
 }
 func ETagEntry(entry *Entry) (etag string) {
 if entry.Attr.Md5 == nil {
-return ETagChunks(entry.Chunks)
+return ETagChunks(entry.GetChunks())
 }
 return fmt.Sprintf("%x", entry.Attr.Md5)
 }

View file

@@ -75,7 +75,7 @@ func (fc *FilerConf) loadFromFiler(filer *Filer) (err error) {
 return fc.LoadFromBytes(entry.Content)
 }
-return fc.loadFromChunks(filer, entry.Content, entry.Chunks, entry.Size())
+return fc.loadFromChunks(filer, entry.Content, entry.GetChunks(), entry.Size())
 }
 func (fc *FilerConf) loadFromChunks(filer *Filer, content []byte, chunks []*filer_pb.FileChunk, size uint64) (err error) {

View file

@@ -48,7 +48,7 @@ func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p util.FullPath, isR
 }
 if shouldDeleteChunks && !isDeleteCollection {
-f.DirectDeleteChunks(entry.Chunks)
+f.DirectDeleteChunks(entry.GetChunks())
 }
 // delete the file or folder
@@ -93,7 +93,7 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry
 // hard link chunk data are deleted separately
 err = onHardLinkIdsFn([]HardLinkId{sub.HardLinkId})
 } else {
-err = onChunksFn(sub.Chunks)
+err = onChunksFn(sub.GetChunks())
 }
 }
 if err != nil && !ignoreRecursiveError {

View file

@@ -143,17 +143,17 @@ func (f *Filer) deleteChunksIfNotNew(oldEntry, newEntry *Entry) {
 return
 }
 if newEntry == nil {
-f.DeleteChunks(oldEntry.Chunks)
+f.DeleteChunks(oldEntry.GetChunks())
 return
 }
 var toDelete []*filer_pb.FileChunk
 newChunkIds := make(map[string]bool)
 newDataChunks, newManifestChunks, err := ResolveChunkManifest(f.MasterClient.GetLookupFileIdFunction(),
-newEntry.Chunks, 0, math.MaxInt64)
+newEntry.GetChunks(), 0, math.MaxInt64)
 if err != nil {
 glog.Errorf("Failed to resolve new entry chunks when delete old entry chunks. new: %s, old: %s",
-newEntry.Chunks, oldEntry.Chunks)
+newEntry.GetChunks(), oldEntry.Chunks)
 return
 }
 for _, newChunk := range newDataChunks {
@@ -164,10 +164,10 @@ func (f *Filer) deleteChunksIfNotNew(oldEntry, newEntry *Entry) {
 }
 oldDataChunks, oldManifestChunks, err := ResolveChunkManifest(f.MasterClient.GetLookupFileIdFunction(),
-oldEntry.Chunks, 0, math.MaxInt64)
+oldEntry.GetChunks(), 0, math.MaxInt64)
 if err != nil {
 glog.Errorf("Failed to resolve old entry chunks when delete old entry chunks. new: %s, old: %s",
-newEntry.Chunks, oldEntry.Chunks)
+newEntry.GetChunks(), oldEntry.GetChunks())
 return
 }
 for _, oldChunk := range oldDataChunks {

View file

@@ -153,7 +153,7 @@ func (f *Filer) ReadPersistedLogBuffer(startTime time.Time, stopTsNs int64, each
 }
 }
 // println("processing", hourMinuteEntry.FullPath)
-chunkedFileReader := NewChunkStreamReaderFromFiler(f.MasterClient, hourMinuteEntry.Chunks)
+chunkedFileReader := NewChunkStreamReaderFromFiler(f.MasterClient, hourMinuteEntry.GetChunks())
 if lastTsNs, err = ReadEachLogEntry(chunkedFileReader, sizeBuf, startTsNs, stopTsNs, eachLogEntryFn); err != nil {
 chunkedFileReader.Close()
 if err == io.EOF {

View file

@@ -36,11 +36,11 @@ func (f *Filer) appendToFile(targetFile string, data []byte) error {
 } else if err != nil {
 return fmt.Errorf("find %s: %v", fullpath, err)
 } else {
-offset = int64(TotalSize(entry.Chunks))
+offset = int64(TotalSize(entry.GetChunks()))
 }
 // append to existing chunks
-entry.Chunks = append(entry.Chunks, uploadResult.ToPbFileChunk(assignResult.Fid, offset))
+entry.Chunks = append(entry.GetChunks(), uploadResult.ToPbFileChunk(assignResult.Fid, offset))
 // update the entry
 err = f.CreateEntry(context.Background(), entry, false, false, nil, false)
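This hunk shows the pattern's deliberate asymmetry, repeated at many call sites below: the slice is read via `entry.GetChunks()`, but the append result is still assigned straight to the `entry.Chunks` field. With today's plain getter the two spellings behave identically, since the getter returns the same slice header; the refactor only funnels reads through one place, and presumably the write side gets its own guarded setter when actual locking lands.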

View file

@@ -44,7 +44,7 @@ func TestProtoMarshal(t *testing.T) {
 notification2 := &filer_pb.EventNotification{}
 proto.Unmarshal(text, notification2)
-if notification2.OldEntry.Chunks[0].SourceFileId != notification.OldEntry.Chunks[0].SourceFileId {
+if notification2.OldEntry.GetChunks()[0].SourceFileId != notification.OldEntry.GetChunks()[0].SourceFileId {
 t.Fatalf("marshal/unmarshal error: %s", text)
 }

View file

@@ -60,7 +60,7 @@ func (f *Filer) readEntry(chunks []*filer_pb.FileChunk, size uint64) ([]byte, er
 func (f *Filer) reloadFilerConfiguration(entry *filer_pb.Entry) {
 fc := NewFilerConf()
-err := fc.loadFromChunks(f, entry.Content, entry.Chunks, FileSize(entry))
+err := fc.loadFromChunks(f, entry.Content, entry.GetChunks(), FileSize(entry))
 if err != nil {
 glog.Errorf("read filer conf chunks: %v", err)
 return

View file

@@ -118,7 +118,7 @@ func (fsw *FilerStoreWrapper) InsertEntry(ctx context.Context, entry *Entry) err
 stats.FilerStoreHistogram.WithLabelValues(actualStore.GetName(), "insert").Observe(time.Since(start).Seconds())
 }()
-filer_pb.BeforeEntrySerialization(entry.Chunks)
+filer_pb.BeforeEntrySerialization(entry.GetChunks())
 if entry.Mime == "application/octet-stream" {
 entry.Mime = ""
 }
@@ -139,7 +139,7 @@ func (fsw *FilerStoreWrapper) UpdateEntry(ctx context.Context, entry *Entry) err
 stats.FilerStoreHistogram.WithLabelValues(actualStore.GetName(), "update").Observe(time.Since(start).Seconds())
 }()
-filer_pb.BeforeEntrySerialization(entry.Chunks)
+filer_pb.BeforeEntrySerialization(entry.GetChunks())
 if entry.Mime == "application/octet-stream" {
 entry.Mime = ""
 }
@@ -168,7 +168,7 @@ func (fsw *FilerStoreWrapper) FindEntry(ctx context.Context, fp util.FullPath) (
 fsw.maybeReadHardLink(ctx, entry)
-filer_pb.AfterEntryDeserialization(entry.Chunks)
+filer_pb.AfterEntryDeserialization(entry.GetChunks())
 return
 }
@@ -239,7 +239,7 @@ func (fsw *FilerStoreWrapper) ListDirectoryEntries(ctx context.Context, dirPath
 // glog.V(4).Infof("ListDirectoryEntries %s from %s limit %d", dirPath, startFileName, limit)
 return actualStore.ListDirectoryEntries(ctx, dirPath, startFileName, includeStartFile, limit, func(entry *Entry) bool {
 fsw.maybeReadHardLink(ctx, entry)
-filer_pb.AfterEntryDeserialization(entry.Chunks)
+filer_pb.AfterEntryDeserialization(entry.GetChunks())
 return eachEntryFunc(entry)
 })
 }
@@ -257,7 +257,7 @@ func (fsw *FilerStoreWrapper) ListDirectoryPrefixedEntries(ctx context.Context,
 // glog.V(4).Infof("ListDirectoryPrefixedEntries %s from %s prefix %s limit %d", dirPath, startFileName, prefix, limit)
 adjustedEntryFunc := func(entry *Entry) bool {
 fsw.maybeReadHardLink(ctx, entry)
-filer_pb.AfterEntryDeserialization(entry.Chunks)
+filer_pb.AfterEntryDeserialization(entry.GetChunks())
 return eachEntryFunc(entry)
 }
 lastFileName, err = actualStore.ListDirectoryPrefixedEntries(ctx, dirPath, startFileName, includeStartFile, limit, prefix, adjustedEntryFunc)

View file

@@ -75,7 +75,7 @@ func (store *HbaseStore) InsertEntry(ctx context.Context, entry *filer.Entry) er
 if err != nil {
 return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
 }
-if len(entry.Chunks) > filer.CountEntryChunksForGzip {
+if len(entry.GetChunks()) > filer.CountEntryChunksForGzip {
 value = util.MaybeGzipData(value)
 }

View file

@@ -86,7 +86,7 @@ func (store *LevelDBStore) InsertEntry(ctx context.Context, entry *filer.Entry)
 return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
 }
-if len(entry.Chunks) > filer.CountEntryChunksForGzip {
+if len(entry.GetChunks()) > filer.CountEntryChunksForGzip {
 value = weed_util.MaybeGzipData(value)
 }
@@ -96,7 +96,7 @@ func (store *LevelDBStore) InsertEntry(ctx context.Context, entry *filer.Entry)
 return fmt.Errorf("persisting %s : %v", entry.FullPath, err)
 }
-// println("saved", entry.FullPath, "chunks", len(entry.Chunks))
+// println("saved", entry.FullPath, "chunks", len(entry.GetChunks()))
 return nil
 }
@@ -126,7 +126,7 @@ func (store *LevelDBStore) FindEntry(ctx context.Context, fullpath weed_util.Ful
 return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err)
 }
-// println("read", entry.FullPath, "chunks", len(entry.Chunks), "data", len(data), string(data))
+// println("read", entry.FullPath, "chunks", len(entry.GetChunks()), "data", len(data), string(data))
 return entry, nil
 }

View file

@@ -88,7 +88,7 @@ func (store *LevelDB2Store) InsertEntry(ctx context.Context, entry *filer.Entry)
 return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
 }
-if len(entry.Chunks) > filer.CountEntryChunksForGzip {
+if len(entry.GetChunks()) > filer.CountEntryChunksForGzip {
 value = weed_util.MaybeGzipData(value)
 }
@@ -98,7 +98,7 @@ func (store *LevelDB2Store) InsertEntry(ctx context.Context, entry *filer.Entry)
 return fmt.Errorf("persisting %s : %v", entry.FullPath, err)
 }
-// println("saved", entry.FullPath, "chunks", len(entry.Chunks))
+// println("saved", entry.FullPath, "chunks", len(entry.GetChunks()))
 return nil
 }
@@ -129,7 +129,7 @@ func (store *LevelDB2Store) FindEntry(ctx context.Context, fullpath weed_util.Fu
 return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err)
 }
-// println("read", entry.FullPath, "chunks", len(entry.Chunks), "data", len(data), string(data))
+// println("read", entry.FullPath, "chunks", len(entry.GetChunks()), "data", len(data), string(data))
 return entry, nil
 }
@@ -208,7 +208,7 @@ func (store *LevelDB2Store) ListDirectoryPrefixedEntries(ctx context.Context, di
 FullPath: weed_util.NewFullPath(string(dirPath), fileName),
 }
-// println("list", entry.FullPath, "chunks", len(entry.Chunks))
+// println("list", entry.FullPath, "chunks", len(entry.GetChunks()))
 if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(iter.Value())); decodeErr != nil {
 err = decodeErr
 glog.V(0).Infof("list %s : %v", entry.FullPath, err)

View file

@@ -185,7 +185,7 @@ func (store *LevelDB3Store) InsertEntry(ctx context.Context, entry *filer.Entry)
 return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
 }
-if len(entry.Chunks) > filer.CountEntryChunksForGzip {
+if len(entry.GetChunks()) > filer.CountEntryChunksForGzip {
 value = weed_util.MaybeGzipData(value)
 }
@@ -195,7 +195,7 @@ func (store *LevelDB3Store) InsertEntry(ctx context.Context, entry *filer.Entry)
 return fmt.Errorf("persisting %s : %v", entry.FullPath, err)
 }
-// println("saved", entry.FullPath, "chunks", len(entry.Chunks))
+// println("saved", entry.FullPath, "chunks", len(entry.GetChunks()))
 return nil
 }
@@ -232,7 +232,7 @@ func (store *LevelDB3Store) FindEntry(ctx context.Context, fullpath weed_util.Fu
 return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err)
 }
-// println("read", entry.FullPath, "chunks", len(entry.Chunks), "data", len(data), string(data))
+// println("read", entry.FullPath, "chunks", len(entry.GetChunks()), "data", len(data), string(data))
 return entry, nil
 }
@@ -336,7 +336,7 @@ func (store *LevelDB3Store) ListDirectoryPrefixedEntries(ctx context.Context, di
 FullPath: weed_util.NewFullPath(string(dirPath), fileName),
 }
-// println("list", entry.FullPath, "chunks", len(entry.Chunks))
+// println("list", entry.FullPath, "chunks", len(entry.GetChunks()))
 if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(iter.Value())); decodeErr != nil {
 err = decodeErr
 glog.V(0).Infof("list %s : %v", entry.FullPath, err)

View file

@@ -107,7 +107,7 @@ func (store *MongodbStore) UpdateEntry(ctx context.Context, entry *filer.Entry)
 return fmt.Errorf("encode %s: %s", entry.FullPath, err)
 }
-if len(entry.Chunks) > filer.CountEntryChunksForGzip {
+if len(entry.GetChunks()) > filer.CountEntryChunksForGzip {
 meta = util.MaybeGzipData(meta)
 }

View file

@@ -8,7 +8,7 @@ import (
 )
 func (entry *Entry) IsInRemoteOnly() bool {
-return len(entry.Chunks) == 0 && entry.Remote != nil && entry.Remote.RemoteSize > 0
+return len(entry.GetChunks()) == 0 && entry.Remote != nil && entry.Remote.RemoteSize > 0
 }
 func MapFullPathToRemoteStorageLocation(localMountedDir util.FullPath, remoteMountedLocation *remote_pb.RemoteStorageLocation, fp util.FullPath) *remote_pb.RemoteStorageLocation {

View file

@@ -23,7 +23,7 @@ func ReadEntry(masterClient *wdclient.MasterClient, filerClient filer_pb.Seawee
 return err
 }
-return StreamContent(masterClient, byteBuffer, respLookupEntry.Entry.Chunks, 0, int64(FileSize(respLookupEntry.Entry)))
+return StreamContent(masterClient, byteBuffer, respLookupEntry.Entry.GetChunks(), 0, int64(FileSize(respLookupEntry.Entry)))
 }

View file

@@ -56,7 +56,7 @@ func (store *UniversalRedisStore) doInsertEntry(ctx context.Context, entry *file
 return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
 }
-if len(entry.Chunks) > filer.CountEntryChunksForGzip {
+if len(entry.GetChunks()) > filer.CountEntryChunksForGzip {
 value = util.MaybeGzipData(value)
 }

View file

@@ -71,7 +71,7 @@ func (store *UniversalRedis2Store) doInsertEntry(ctx context.Context, entry *fil
 return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
 }
-if len(entry.Chunks) > filer.CountEntryChunksForGzip {
+if len(entry.GetChunks()) > filer.CountEntryChunksForGzip {
 value = util.MaybeGzipData(value)
 }

View file

@@ -56,7 +56,7 @@ func (store *UniversalRedis3Store) doInsertEntry(ctx context.Context, entry *fil
 return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
 }
-if len(entry.Chunks) > filer.CountEntryChunksForGzip {
+if len(entry.GetChunks()) > filer.CountEntryChunksForGzip {
 value = util.MaybeGzipData(value)
 }

View file

@@ -53,7 +53,7 @@ func (store *UniversalRedisLuaStore) InsertEntry(ctx context.Context, entry *fil
 return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
 }
-if len(entry.Chunks) > filer.CountEntryChunksForGzip {
+if len(entry.GetChunks()) > filer.CountEntryChunksForGzip {
 value = util.MaybeGzipData(value)
 }

View file

@@ -108,7 +108,7 @@ func (store *RocksDBStore) InsertEntry(ctx context.Context, entry *filer.Entry)
 return fmt.Errorf("persisting %s : %v", entry.FullPath, err)
 }
-// println("saved", entry.FullPath, "chunks", len(entry.Chunks))
+// println("saved", entry.FullPath, "chunks", len(entry.GetChunks()))
 return nil
 }
@@ -140,7 +140,7 @@ func (store *RocksDBStore) FindEntry(ctx context.Context, fullpath weed_util.Ful
 return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err)
 }
-// println("read", entry.FullPath, "chunks", len(entry.Chunks), "data", len(data), string(data))
+// println("read", entry.FullPath, "chunks", len(entry.GetChunks()), "data", len(data), string(data))
 return entry, nil
 }
@@ -259,7 +259,7 @@ func (store *RocksDBStore) ListDirectoryPrefixedEntries(ctx context.Context, dir
 }
 lastFileName = fileName
-// println("list", entry.FullPath, "chunks", len(entry.Chunks))
+// println("list", entry.FullPath, "chunks", len(entry.GetChunks()))
 if decodeErr := entry.DecodeAttributesAndChunks(value); decodeErr != nil {
 err = decodeErr
 glog.V(0).Infof("list %s : %v", entry.FullPath, err)

View file

@@ -30,7 +30,7 @@ func HasData(entry *filer_pb.Entry) bool {
 return true
 }
-return len(entry.Chunks) > 0
+return len(entry.GetChunks()) > 0
 }
 func IsSameData(a, b *filer_pb.Entry) bool {
@@ -64,7 +64,7 @@ func NewFileReader(filerClient filer_pb.FilerClient, entry *filer_pb.Entry) io.R
 if len(entry.Content) > 0 {
 return bytes.NewReader(entry.Content)
 }
-return NewChunkStreamReader(filerClient, entry.Chunks)
+return NewChunkStreamReader(filerClient, entry.GetChunks())
 }
 func StreamContent(masterClient wdclient.HasLookupFileIdFunction, writer io.Writer, chunks []*filer_pb.FileChunk, offset int64, size int64) error {

View file

@@ -144,7 +144,7 @@ func (store *YdbStore) insertOrUpdateEntry(ctx context.Context, entry *filer.Ent
 return fmt.Errorf("encode %s: %s", entry.FullPath, err)
 }
-if len(entry.Chunks) > filer.CountEntryChunksForGzip {
+if len(entry.GetChunks()) > filer.CountEntryChunksForGzip {
 meta = util.MaybeGzipData(meta)
 }
 tablePathPrefix, shortDir := store.getPrefix(ctx, &dir)

View file

@@ -94,7 +94,7 @@ func (fh *FileHandle) AddChunks(chunks []*filer_pb.FileChunk) {
 }
 // pick out-of-order chunks from existing chunks
-for _, chunk := range fh.entry.Chunks {
+for _, chunk := range fh.entry.GetChunks() {
 if lessThan(earliestChunk, chunk) {
 chunks = append(chunks, chunk)
 }
@@ -105,9 +105,9 @@ func (fh *FileHandle) AddChunks(chunks []*filer_pb.FileChunk) {
 return lessThan(a, b)
 })
-glog.V(4).Infof("%s existing %d chunks adds %d more", fh.FullPath(), len(fh.entry.Chunks), len(chunks))
+glog.V(4).Infof("%s existing %d chunks adds %d more", fh.FullPath(), len(fh.entry.GetChunks()), len(chunks))
-fh.entry.Chunks = append(fh.entry.Chunks, newChunks...)
+fh.entry.Chunks = append(fh.entry.GetChunks(), newChunks...)
 fh.entryViewCache = nil
 }
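FileHandle.AddChunks is the kind of call site that motivates the whole commit: under FUSE, one goroutine can merge newly flushed chunks here while another iterates the same slice in readFromChunks (next file). Once GetChunks() takes a read lock and this write path takes the matching write lock, that race is closed without revisiting every caller.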

View file

@@ -56,7 +56,7 @@ func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) {
 var chunkResolveErr error
 if fh.entryViewCache == nil {
-fh.entryViewCache, chunkResolveErr = filer.NonOverlappingVisibleIntervals(fh.wfs.LookupFn(), entry.Chunks, 0, fileSize)
+fh.entryViewCache, chunkResolveErr = filer.NonOverlappingVisibleIntervals(fh.wfs.LookupFn(), entry.GetChunks(), 0, fileSize)
 if chunkResolveErr != nil {
 return 0, fmt.Errorf("fail to resolve chunk manifest: %v", chunkResolveErr)
 }

View file

@@ -50,12 +50,12 @@ func (wfs *WFS) SetAttr(cancel <-chan struct{}, input *fuse.SetAttrIn, out *fuse
 }
 if size, ok := input.GetSize(); ok && entry != nil {
-glog.V(4).Infof("%v setattr set size=%v chunks=%d", path, size, len(entry.Chunks))
+glog.V(4).Infof("%v setattr set size=%v chunks=%d", path, size, len(entry.GetChunks()))
 if size < filer.FileSize(entry) {
 // fmt.Printf("truncate %v \n", fullPath)
 var chunks []*filer_pb.FileChunk
 var truncatedChunks []*filer_pb.FileChunk
-for _, chunk := range entry.Chunks {
+for _, chunk := range entry.GetChunks() {
 int64Size := int64(chunk.Size)
 if chunk.Offset+int64Size > int64(size) {
 // this chunk is truncated

View file

@@ -59,7 +59,7 @@ func (wfs *WFS) Lseek(cancel <-chan struct{}, in *fuse.LseekIn, out *fuse.LseekO
 // refresh view cache if necessary
 if fh.entryViewCache == nil {
 var err error
-fh.entryViewCache, err = filer.NonOverlappingVisibleIntervals(fh.wfs.LookupFn(), fh.entry.Chunks, 0, fileSize)
+fh.entryViewCache, err = filer.NonOverlappingVisibleIntervals(fh.wfs.LookupFn(), fh.entry.GetChunks(), 0, fileSize)
 if err != nil {
 return fuse.EIO
 }

View file

@@ -148,12 +148,12 @@ func (wfs *WFS) doFlush(fh *FileHandle, uid, gid uint32) fuse.Status {
 SkipCheckParentDirectory: true,
 }
-glog.V(4).Infof("%s set chunks: %v", fileFullPath, len(entry.Chunks))
-for i, chunk := range entry.Chunks {
+glog.V(4).Infof("%s set chunks: %v", fileFullPath, len(entry.GetChunks()))
+for i, chunk := range entry.GetChunks() {
 glog.V(4).Infof("%s chunks %d: %v [%d,%d)", fileFullPath, i, chunk.GetFileIdString(), chunk.Offset, chunk.Offset+int64(chunk.Size))
 }
-manifestChunks, nonManifestChunks := filer.SeparateManifestChunks(entry.Chunks)
+manifestChunks, nonManifestChunks := filer.SeparateManifestChunks(entry.GetChunks())
 chunks, _ := filer.CompactFileChunks(wfs.LookupFn(), nonManifestChunks)
 chunks, manifestErr := filer.MaybeManifestize(wfs.saveDataAsChunk(fileFullPath), chunks)

View file

@@ -67,7 +67,7 @@ func (wfs *WFS) Link(cancel <-chan struct{}, in *fuse.LinkIn, name string, out *
 Name: name,
 IsDirectory: false,
 Attributes: oldEntry.Attributes,
-Chunks: oldEntry.Chunks,
+Chunks: oldEntry.GetChunks(),
 Extended: oldEntry.Extended,
 HardLinkId: oldEntry.HardLinkId,
 HardLinkCounter: oldEntry.HardLinkCounter,

View file

@@ -4480,7 +4480,7 @@ var file_filer_proto_goTypes = []interface{}{
 var file_filer_proto_depIdxs = []int32{
 5, // 0: filer_pb.LookupDirectoryEntryResponse.entry:type_name -> filer_pb.Entry
 5, // 1: filer_pb.ListEntriesResponse.entry:type_name -> filer_pb.Entry
-8, // 2: filer_pb.Entry.chunks:type_name -> filer_pb.FileChunk
+8, // 2: filer_pb.Entry.GetChunks():type_name -> filer_pb.FileChunk
 11, // 3: filer_pb.Entry.attributes:type_name -> filer_pb.FuseAttributes
 55, // 4: filer_pb.Entry.extended:type_name -> filer_pb.Entry.ExtendedEntry
 4, // 5: filer_pb.Entry.remote_entry:type_name -> filer_pb.RemoteEntry

View file

@@ -14,7 +14,7 @@ import (
 )
 func (entry *Entry) IsInRemoteOnly() bool {
-return len(entry.Chunks) == 0 && entry.RemoteEntry != nil && entry.RemoteEntry.RemoteSize > 0
+return len(entry.GetChunks()) == 0 && entry.RemoteEntry != nil && entry.RemoteEntry.RemoteSize > 0
 }
 func (entry *Entry) IsDirectoryKeyObject() bool {

View file

@@ -103,7 +103,7 @@ func (g *AzureSink) CreateEntry(key string, entry *filer_pb.Entry, signatures []
 }
 totalSize := filer.FileSize(entry)
-chunkViews := filer.ViewFromChunks(g.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize))
+chunkViews := filer.ViewFromChunks(g.filerSource.LookupFileId, entry.GetChunks(), 0, int64(totalSize))
 // Create a URL that references a to-be-created blob in your
 // Azure Storage account's container.

View file

@@ -92,7 +92,7 @@ func (g *B2Sink) CreateEntry(key string, entry *filer_pb.Entry, signatures []int
 }
 totalSize := filer.FileSize(entry)
-chunkViews := filer.ViewFromChunks(g.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize))
+chunkViews := filer.ViewFromChunks(g.filerSource.LookupFileId, entry.GetChunks(), 0, int64(totalSize))
 bucket, err := g.client.Bucket(context.Background(), g.bucket)
 if err != nil {

View file

@@ -120,14 +120,14 @@ func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry, signatures [
 }
 }
-replicatedChunks, err := fs.replicateChunks(entry.Chunks, key)
+replicatedChunks, err := fs.replicateChunks(entry.GetChunks(), key)
 if err != nil {
 // only warning here since the source chunk may have been deleted already
 glog.Warningf("replicate entry chunks %s: %v", key, err)
 }
-glog.V(4).Infof("replicated %s %+v ===> %+v", key, entry.Chunks, replicatedChunks)
+glog.V(4).Infof("replicated %s %+v ===> %+v", key, entry.GetChunks(), replicatedChunks)
 request := &filer_pb.CreateEntryRequest{
 Directory: dir,
@@ -199,7 +199,7 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParent
 // delete the chunks that are deleted from the source
 if deleteIncludeChunks {
 // remove the deleted chunks. Actual data deletion happens in filer UpdateEntry FindUnusedFileChunks
-existingEntry.Chunks = filer.DoMinusChunksBySourceFileId(existingEntry.Chunks, deletedChunks)
+existingEntry.Chunks = filer.DoMinusChunksBySourceFileId(existingEntry.GetChunks(), deletedChunks)
 }
 // replicate the chunks that are new in the source
@@ -207,7 +207,7 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParent
 if err != nil {
 return true, fmt.Errorf("replicate %s chunks error: %v", key, err)
 }
-existingEntry.Chunks = append(existingEntry.Chunks, replicatedChunks...)
+existingEntry.Chunks = append(existingEntry.GetChunks(), replicatedChunks...)
 existingEntry.Attributes = newEntry.Attributes
 existingEntry.Extended = newEntry.Extended
 existingEntry.HardLinkId = newEntry.HardLinkId
@@ -235,11 +235,11 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParent
 }
 func compareChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, oldEntry, newEntry *filer_pb.Entry) (deletedChunks, newChunks []*filer_pb.FileChunk, err error) {
-aData, aMeta, aErr := filer.ResolveChunkManifest(lookupFileIdFn, oldEntry.Chunks, 0, math.MaxInt64)
+aData, aMeta, aErr := filer.ResolveChunkManifest(lookupFileIdFn, oldEntry.GetChunks(), 0, math.MaxInt64)
 if aErr != nil {
 return nil, nil, aErr
 }
-bData, bMeta, bErr := filer.ResolveChunkManifest(lookupFileIdFn, newEntry.Chunks, 0, math.MaxInt64)
+bData, bMeta, bErr := filer.ResolveChunkManifest(lookupFileIdFn, newEntry.GetChunks(), 0, math.MaxInt64)
 if bErr != nil {
 return nil, nil, bErr
 }

View file

@@ -97,7 +97,7 @@ func (g *GcsSink) CreateEntry(key string, entry *filer_pb.Entry, signatures []in
 }
 totalSize := filer.FileSize(entry)
-chunkViews := filer.ViewFromChunks(g.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize))
+chunkViews := filer.ViewFromChunks(g.filerSource.LookupFileId, entry.GetChunks(), 0, int64(totalSize))
 wc := g.client.Bucket(g.bucket).Object(key).NewWriter(context.Background())
 defer wc.Close()

View file

@@ -75,7 +75,7 @@ func (localsink *LocalSink) CreateEntry(key string, entry *filer_pb.Entry, signa
 glog.V(4).Infof("Create Entry key: %s", key)
 totalSize := filer.FileSize(entry)
-chunkViews := filer.ViewFromChunks(localsink.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize))
+chunkViews := filer.ViewFromChunks(localsink.filerSource.LookupFileId, entry.GetChunks(), 0, int64(totalSize))
 dir := filepath.Dir(key)

View file

@@ -106,7 +106,7 @@ func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploa
 glog.Errorf("completeMultipartUpload %s ETag mismatch chunk: %s part: %s", entry.Name, entryETag, partETag)
 return nil, s3err.ErrInvalidPart
 }
-for _, chunk := range entry.Chunks {
+for _, chunk := range entry.GetChunks() {
 p := &filer_pb.FileChunk{
 FileId: chunk.GetFileIdString(),
 Offset: offset,

View file

@@ -203,14 +203,14 @@ func (fs *FilerServer) cleanupChunks(fullpath string, existingEntry *filer.Entry
 // remove old chunks if not included in the new ones
 if existingEntry != nil {
-garbage, err = filer.MinusChunks(fs.lookupFileId, existingEntry.Chunks, newEntry.Chunks)
+garbage, err = filer.MinusChunks(fs.lookupFileId, existingEntry.GetChunks(), newEntry.GetChunks())
 if err != nil {
-return newEntry.Chunks, nil, fmt.Errorf("MinusChunks: %v", err)
+return newEntry.GetChunks(), nil, fmt.Errorf("MinusChunks: %v", err)
 }
 }
 // files with manifest chunks are usually large and append only, skip calculating covered chunks
-manifestChunks, nonManifestChunks := filer.SeparateManifestChunks(newEntry.Chunks)
+manifestChunks, nonManifestChunks := filer.SeparateManifestChunks(newEntry.GetChunks())
 chunks, coveredChunks := filer.CompactFileChunks(fs.lookupFileId, nonManifestChunks)
 garbage = append(garbage, coveredChunks...)
@@ -256,7 +256,7 @@ func (fs *FilerServer) AppendToEntry(ctx context.Context, req *filer_pb.AppendTo
 },
 }
 } else {
-offset = int64(filer.TotalSize(entry.Chunks))
+offset = int64(filer.TotalSize(entry.GetChunks()))
 }
 for _, chunk := range req.Chunks {
@@ -264,13 +264,13 @@ func (fs *FilerServer) AppendToEntry(ctx context.Context, req *filer_pb.AppendTo
 offset += int64(chunk.Size)
 }
-entry.Chunks = append(entry.Chunks, req.Chunks...)
+entry.Chunks = append(entry.GetChunks(), req.Chunks...)
 so, err := fs.detectStorageOption(string(fullpath), "", "", entry.TtlSec, "", "", "", "")
 if err != nil {
 glog.Warningf("detectStorageOption: %v", err)
 return &filer_pb.AppendToEntryResponse{}, err
 }
-entry.Chunks, err = filer.MaybeManifestize(fs.saveAsChunk(so), entry.Chunks)
+entry.Chunks, err = filer.MaybeManifestize(fs.saveAsChunk(so), entry.GetChunks())
 if err != nil {
 // not good, but should be ok
 glog.V(0).Infof("MaybeManifestize: %v", err)

View file

@@ -169,7 +169,7 @@ func (fs *FilerServer) CacheRemoteObjectToLocalCluster(ctx context.Context, req
 return nil, fetchAndWriteErr
 }
-garbage := entry.Chunks
+garbage := entry.GetChunks()
 newEntry := entry.ShallowClone()
 newEntry.Chunks = chunks

View file

@@ -165,7 +165,7 @@ func (fs *FilerServer) moveSelfEntry(ctx context.Context, stream filer_pb.Seawee
 newEntry := &filer.Entry{
 FullPath: newPath,
 Attr: entry.Attr,
-Chunks: entry.Chunks,
+Chunks: entry.GetChunks(),
 Extended: entry.Extended,
 Content: entry.Content,
 HardLinkCounter: entry.HardLinkCounter,

View file

@@ -135,7 +135,7 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
 if query.Get("resolveManifest") == "true" {
 if entry.Chunks, _, err = filer.ResolveChunkManifest(
 fs.filer.MasterClient.GetLookupFileIdFunction(),
-entry.Chunks, 0, math.MaxInt64); err != nil {
+entry.GetChunks(), 0, math.MaxInt64); err != nil {
 err = fmt.Errorf("failed to resolve chunk manifest, err: %s", err.Error())
 writeJsonError(w, r, http.StatusInternalServerError, err)
 }
@@ -212,7 +212,7 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
 if shouldResize {
 data := mem.Allocate(int(totalSize))
 defer mem.Free(data)
-err := filer.ReadAll(data, fs.filer.MasterClient, entry.Chunks)
+err := filer.ReadAll(data, fs.filer.MasterClient, entry.GetChunks())
 if err != nil {
 glog.Errorf("failed to read %s: %v", path, err)
 w.WriteHeader(http.StatusInternalServerError)
@@ -233,7 +233,7 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
 }
 return err
 }
-chunks := entry.Chunks
+chunks := entry.GetChunks()
 if entry.IsInRemoteOnly() {
 dir, name := entry.FullPath.DirAndName()
 if resp, err := fs.CacheRemoteObjectToLocalCluster(context.Background(), &filer_pb.CacheRemoteObjectToLocalClusterRequest{
@@ -244,7 +244,7 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
 glog.Errorf("CacheRemoteObjectToLocalCluster %s: %v", entry.FullPath, err)
 return fmt.Errorf("cache %s: %v", entry.FullPath, err)
 } else {
-chunks = resp.Entry.Chunks
+chunks = resp.Entry.GetChunks()
 }
 }

View file

@@ -181,7 +181,7 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa
 }
 entry.FileSize += uint64(chunkOffset)
 }
-newChunks = append(entry.Chunks, fileChunks...)
+newChunks = append(entry.GetChunks(), fileChunks...)
 // TODO
 if len(entry.Content) > 0 {

View file

@@ -91,7 +91,7 @@ func (fs *FilerServer) encrypt(ctx context.Context, w http.ResponseWriter, r *ht
 }
 if dbErr := fs.filer.CreateEntry(ctx, entry, false, false, nil, false); dbErr != nil {
-fs.filer.DeleteChunks(entry.Chunks)
+fs.filer.DeleteChunks(entry.GetChunks())
 err = dbErr
 filerResult.Error = dbErr.Error()
 return

View file

@@ -438,13 +438,13 @@ func (f *WebDavFile) Write(buf []byte) (int, error) {
 }
 f.entry.Content = nil
-f.entry.Chunks = append(f.entry.Chunks, chunk)
+f.entry.Chunks = append(f.entry.GetChunks(), chunk)
 return flushErr
 }
 f.bufWriter.CloseFunc = func() error {
-manifestedChunks, manifestErr := filer.MaybeManifestize(f.saveDataAsChunk, f.entry.Chunks)
+manifestedChunks, manifestErr := filer.MaybeManifestize(f.saveDataAsChunk, f.entry.GetChunks())
 if manifestErr != nil {
 // not good, but should be ok
 glog.V(0).Infof("file %s close MaybeManifestize: %v", f.name, manifestErr)
@@ -514,7 +514,7 @@ func (f *WebDavFile) Read(p []byte) (readSize int, err error) {
 return 0, io.EOF
 }
 if f.entryViewCache == nil {
-f.entryViewCache, _ = filer.NonOverlappingVisibleIntervals(filer.LookupFn(f.fs), f.entry.Chunks, 0, fileSize)
+f.entryViewCache, _ = filer.NonOverlappingVisibleIntervals(filer.LookupFn(f.fs), f.entry.GetChunks(), 0, fileSize)
 f.reader = nil
 }
 if f.reader == nil {

View file

@@ -55,7 +55,7 @@ func (c *commandFsCat) Do(args []string, commandEnv *CommandEnv, writer io.Write
 return err
 }
-return filer.StreamContent(commandEnv.MasterClient, writer, respLookupEntry.Entry.Chunks, 0, int64(filer.FileSize(respLookupEntry.Entry)))
+return filer.StreamContent(commandEnv.MasterClient, writer, respLookupEntry.Entry.GetChunks(), 0, int64(filer.FileSize(respLookupEntry.Entry)))
 })

View file

@@ -69,7 +69,7 @@ func duTraverseDirectory(writer io.Writer, filerClient filer_pb.FilerClient, dir
 byteCount += numByte
 }
 } else {
-fileBlockCount = uint64(len(entry.Chunks))
+fileBlockCount = uint64(len(entry.GetChunks()))
 fileByteCount = filer.FileSize(entry)
 blockCount += fileBlockCount
 byteCount += fileByteCount

View file

@@ -93,7 +93,7 @@ func (c *commandFsLs) Do(args []string, commandEnv *CommandEnv, writer io.Writer
 dir = dir[:len(dir)-1]
 }
 fmt.Fprintf(writer, "%s %3d %s %s %6d %s/%s\n",
-fileMode, len(entry.Chunks),
+fileMode, len(entry.GetChunks()),
 userName, groupName,
 filer.FileSize(entry), dir, entry.Name)
 } else {

View file

@@ -54,8 +54,8 @@ func (c *commandFsMetaCat) Do(args []string, commandEnv *CommandEnv, writer io.W
 bytes, _ := proto.Marshal(respLookupEntry.Entry)
 gzippedBytes, _ := util.GzipData(bytes)
 // zstdBytes, _ := util.ZstdData(bytes)
-// fmt.Fprintf(writer, "chunks %d meta size: %d gzip:%d zstd:%d\n", len(respLookupEntry.Entry.Chunks), len(bytes), len(gzippedBytes), len(zstdBytes))
-fmt.Fprintf(writer, "chunks %d meta size: %d gzip:%d\n", len(respLookupEntry.Entry.Chunks), len(bytes), len(gzippedBytes))
+// fmt.Fprintf(writer, "chunks %d meta size: %d gzip:%d zstd:%d\n", len(respLookupEntry.Entry.GetChunks()), len(bytes), len(gzippedBytes), len(zstdBytes))
+fmt.Fprintf(writer, "chunks %d meta size: %d gzip:%d\n", len(respLookupEntry.Entry.GetChunks()), len(bytes), len(gzippedBytes))
 return nil

View file

@@ -216,7 +216,7 @@ func (c *commandVolumeFsck) collectFilerFileIdAndPaths(dataNodeVolumeIdToVInfo m
 if *c.verbose && entry.Entry.IsDirectory {
 fmt.Fprintf(c.writer, "checking directory %s\n", util.NewFullPath(entry.Dir, entry.Entry.Name))
 }
-dataChunks, manifestChunks, resolveErr := filer.ResolveChunkManifest(filer.LookupFn(c.env), entry.Entry.Chunks, 0, math.MaxInt64)
+dataChunks, manifestChunks, resolveErr := filer.ResolveChunkManifest(filer.LookupFn(c.env), entry.Entry.GetChunks(), 0, math.MaxInt64)
 if resolveErr != nil {
 return fmt.Errorf("failed to ResolveChunkManifest: %+v", resolveErr)
 }