refactoring: rename filer2.FindUnusedFileChunks to MinusChunks, drop the replicate sink's duplicate minusChunks helper, and read chunk file ids through GetFileIdString()

Chris Lu 2019-06-22 20:04:56 -07:00
parent 1babec00e7
commit 3fa1f150d9
8 changed files with 20 additions and 36 deletions

@@ -40,7 +40,7 @@ func CompactFileChunks(chunks []*filer_pb.FileChunk) (compacted, garbage []*file
 	fileIds[interval.fileId] = true
 }
 for _, chunk := range chunks {
-	if found := fileIds[chunk.FileId]; found {
+	if _, found := fileIds[chunk.GetFileIdString()]; found {
 		compacted = append(compacted, chunk)
 	} else {
 		garbage = append(garbage, chunk)
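
The change repeated throughout this commit is mechanical: every direct read of chunk.FileId becomes a call to the GetFileIdString() accessor. The diff does not show the accessor itself; a plausible sketch, assuming FileChunk can carry its id either as the legacy string field or as a structured Fid message that needs converting, would be:

// Sketch only, not shown in this diff: the real helper lives in the
// filer_pb package and may differ. toFileIdString() is assumed to
// render "volumeId,fileKey+cookie".
func (fc *FileChunk) GetFileIdString() string {
	if fc.FileId != "" {
		return fc.FileId // legacy string form already populated
	}
	if fc.Fid != nil {
		fc.FileId = fc.Fid.toFileIdString() // assumed conversion helper
		return fc.FileId
	}
	return ""
}
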
@@ -50,15 +50,15 @@ func CompactFileChunks(chunks []*filer_pb.FileChunk) (compacted, garbage []*file
 	return
 }
 
-func FindUnusedFileChunks(oldChunks, newChunks []*filer_pb.FileChunk) (unused []*filer_pb.FileChunk) {
+func MinusChunks(as, bs []*filer_pb.FileChunk) (delta []*filer_pb.FileChunk) {
 
 	fileIds := make(map[string]bool)
-	for _, interval := range newChunks {
-		fileIds[interval.FileId] = true
+	for _, interval := range bs {
+		fileIds[interval.GetFileIdString()] = true
 	}
-	for _, chunk := range oldChunks {
-		if found := fileIds[chunk.FileId]; !found {
-			unused = append(unused, chunk)
+	for _, chunk := range as {
+		if _, found := fileIds[chunk.GetFileIdString()]; !found {
+			delta = append(delta, chunk)
 		}
 	}
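
The rename from FindUnusedFileChunks to MinusChunks makes the semantics explicit: a set difference over file ids, returning every chunk of as whose id is absent from bs. A small usage sketch, with invented file ids:

// Illustrative values only.
as := []*filer_pb.FileChunk{{FileId: "3,01"}, {FileId: "3,02"}}
bs := []*filer_pb.FileChunk{{FileId: "3,02"}}

deleted := filer2.MinusChunks(as, bs) // -> just the "3,01" chunk
added := filer2.MinusChunks(bs, as)   // -> empty: every chunk of bs is in as
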
@@ -123,7 +123,7 @@ func MergeIntoVisibles(visibles, newVisibles []VisibleInterval, chunk *filer_pb.
 	newV := newVisibleInterval(
 		chunk.Offset,
 		chunk.Offset+int64(chunk.Size),
-		chunk.FileId,
+		chunk.GetFileIdString(),
 		chunk.Mtime,
 		true,
 	)

@@ -54,7 +54,7 @@ func (f *Filer) loopProcessingDeletion() {
 
 func (f *Filer) DeleteChunks(fullpath FullPath, chunks []*filer_pb.FileChunk) {
 	for _, chunk := range chunks {
 		glog.V(3).Infof("deleting %s chunk %s", fullpath, chunk.String())
-		f.fileIdDeletionChan <- chunk.FileId
+		f.fileIdDeletionChan <- chunk.GetFileIdString()
 	}
 }
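
Worth noting for context: DeleteChunks deletes nothing itself; it only pushes file ids onto fileIdDeletionChan, and the loopProcessingDeletion goroutine named in the hunk header drains that channel and issues the actual deletes. A minimal sketch of that producer/consumer shape, with deleteFileIds as an invented stand-in for the real batch-delete call:

// Sketch only, not the repo's code: a channel decouples request
// handling from the goroutine that performs batched deletions.
type filerSketch struct {
	fileIdDeletionChan chan string
}

func (f *filerSketch) loopProcessingDeletion(deleteFileIds func([]string)) {
	var batch []string
	for fileId := range f.fileIdDeletionChan {
		batch = append(batch, fileId)
		if len(batch) >= 1000 {
			deleteFileIds(batch) // hypothetical batch-delete hook
			batch = nil
		}
	}
	if len(batch) > 0 {
		deleteFileIds(batch) // flush the remainder when the channel closes
	}
}
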

@@ -17,7 +17,7 @@ func (wfs *WFS) deleteFileChunks(ctx context.Context, chunks []*filer_pb.FileChu
 	var fileIds []string
 	for _, chunk := range chunks {
-		fileIds = append(fileIds, chunk.FileId)
+		fileIds = append(fileIds, chunk.GetFileIdString())
 	}
 
 	wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {

@@ -39,7 +39,7 @@ func (fs *FilerSink) replicateOneChunk(ctx context.Context, sourceChunk *filer_p
 	fileId, err := fs.fetchAndWrite(ctx, sourceChunk)
 	if err != nil {
-		return nil, fmt.Errorf("copy %s: %v", sourceChunk.FileId, err)
+		return nil, fmt.Errorf("copy %s: %v", sourceChunk.GetFileIdString(), err)
 	}
 
 	return &filer_pb.FileChunk{
@@ -48,15 +48,15 @@ func (fs *FilerSink) replicateOneChunk(ctx context.Context, sourceChunk *filer_p
 		Size:         sourceChunk.Size,
 		Mtime:        sourceChunk.Mtime,
 		ETag:         sourceChunk.ETag,
-		SourceFileId: sourceChunk.FileId,
+		SourceFileId: sourceChunk.GetFileIdString(),
 	}, nil
 }
 
 func (fs *FilerSink) fetchAndWrite(ctx context.Context, sourceChunk *filer_pb.FileChunk) (fileId string, err error) {
 
-	filename, header, readCloser, err := fs.filerSource.ReadPart(ctx, sourceChunk.FileId)
+	filename, header, readCloser, err := fs.filerSource.ReadPart(ctx, sourceChunk.GetFileIdString())
 	if err != nil {
-		return "", fmt.Errorf("read part %s: %v", sourceChunk.FileId, err)
+		return "", fmt.Errorf("read part %s: %v", sourceChunk.GetFileIdString(), err)
 	}
 	defer readCloser.Close()

@@ -179,7 +179,7 @@ func (fs *FilerSink) UpdateEntry(ctx context.Context, key string, oldEntry *file
 	// delete the chunks that are deleted from the source
 	if deleteIncludeChunks {
 		// remove the deleted chunks. Actual data deletion happens in filer UpdateEntry FindUnusedFileChunks
-		existingEntry.Chunks = minusChunks(existingEntry.Chunks, deletedChunks)
+		existingEntry.Chunks = filer2.MinusChunks(existingEntry.Chunks, deletedChunks)
 	}
 
 	// replicate the chunks that are new in the source
@@ -207,23 +207,7 @@ func (fs *FilerSink) UpdateEntry(ctx context.Context, key string, oldEntry *file
 }
 
 func compareChunks(oldEntry, newEntry *filer_pb.Entry) (deletedChunks, newChunks []*filer_pb.FileChunk) {
-	deletedChunks = minusChunks(oldEntry.Chunks, newEntry.Chunks)
-	newChunks = minusChunks(newEntry.Chunks, oldEntry.Chunks)
-	return
-}
-
-func minusChunks(as, bs []*filer_pb.FileChunk) (delta []*filer_pb.FileChunk) {
-	for _, a := range as {
-		found := false
-		for _, b := range bs {
-			if a.FileId == b.FileId {
-				found = true
-				break
-			}
-		}
-		if !found {
-			delta = append(delta, a)
-		}
-	}
+	deletedChunks = filer2.MinusChunks(oldEntry.Chunks, newEntry.Chunks)
+	newChunks = filer2.MinusChunks(newEntry.Chunks, oldEntry.Chunks)
 	return
 }
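
The deleted local minusChunks compared every chunk of as against every chunk of bs, an O(len(as)·len(bs)) nested loop; the shared filer2.MinusChunks it now defers to builds a map of file ids first, so the difference runs in linear time. A hypothetical test (not from the repo) pinning down the expected behavior:

package filer2_test

import (
	"testing"

	"github.com/chrislusf/seaweedfs/weed/filer2"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)

// Hypothetical test; file ids are invented for illustration.
func TestMinusChunks(t *testing.T) {
	as := []*filer_pb.FileChunk{{FileId: "3,01"}, {FileId: "3,02"}, {FileId: "3,03"}}
	bs := []*filer_pb.FileChunk{{FileId: "3,02"}}

	delta := filer2.MinusChunks(as, bs)

	// Order of as is preserved; only the chunk also present in bs drops out.
	if len(delta) != 2 || delta[0].FileId != "3,01" || delta[1].FileId != "3,03" {
		t.Fatalf("unexpected delta: %+v", delta)
	}
}
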

@@ -69,7 +69,7 @@ func (s3a *S3ApiServer) completeMultipartUpload(ctx context.Context, input *s3.C
 		if strings.HasSuffix(entry.Name, ".part") && !entry.IsDirectory {
 			for _, chunk := range entry.Chunks {
 				p := &filer_pb.FileChunk{
-					FileId: chunk.FileId,
+					FileId: chunk.GetFileIdString(),
 					Offset: offset,
 					Size:   chunk.Size,
 					Mtime:  chunk.Mtime,

@@ -140,7 +140,7 @@ func (fs *FilerServer) UpdateEntry(ctx context.Context, req *filer_pb.UpdateEntr
 	}
 
 	// remove old chunks if not included in the new ones
-	unusedChunks := filer2.FindUnusedFileChunks(entry.Chunks, req.Entry.Chunks)
+	unusedChunks := filer2.MinusChunks(entry.Chunks, req.Entry.Chunks)
 
 	chunks, garbages := filer2.CompactFileChunks(req.Entry.Chunks)

@@ -78,7 +78,7 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request,
 func (fs *FilerServer) handleSingleChunk(w http.ResponseWriter, r *http.Request, entry *filer2.Entry) {
 
-	fileId := entry.Chunks[0].FileId
+	fileId := entry.Chunks[0].GetFileIdString()
 
 	urlString, err := fs.filer.MasterClient.LookupFileId(fileId)
 	if err != nil {