refactoring

Chris Lu 2019-12-08 19:44:16 -08:00
parent 2b8e20f122
commit 10bd3c6b4b
10 changed files with 15 additions and 24 deletions

@@ -64,7 +64,7 @@ func (vs *VolumeServer) VolumeTierCopyDatToRemote(req *volume_server_pb.VolumeTi
 	// copy the data file
 	key, size, err := backendStorage.CopyFile(diskFile.File, fn)
 	if err != nil {
-		return fmt.Errorf("backend %s copy file %s: %v", req.DestinationBackendName, diskFile.String(), err)
+		return fmt.Errorf("backend %s copy file %s: %v", req.DestinationBackendName, diskFile.Name(), err)
 	}
 	// save the remote file to volume tier info

@@ -18,7 +18,7 @@ type BackendStorageFile interface {
 	Truncate(off int64) error
 	io.Closer
 	GetStat() (datSize int64, modTime time.Time, err error)
-	String() string
+	Name() string
 }
 type BackendStorage interface {

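For orientation, a minimal usage sketch (not part of this commit; the helper name and import path are assumptions) of calling the renamed accessor through this interface:

package example

import (
	"fmt"
	"time"

	"github.com/chrislusf/seaweedfs/weed/storage/backend" // assumed import path
)

// describeBackendFile is a hypothetical helper: after this change, callers
// identify a backend file via Name() instead of the old String() method.
func describeBackendFile(f backend.BackendStorageFile) string {
	datSize, modTime, err := f.GetStat()
	if err != nil {
		return fmt.Sprintf("%s: stat failed: %v", f.Name(), err)
	}
	return fmt.Sprintf("%s: %d bytes, modified %s", f.Name(), datSize, modTime.Format(time.RFC3339))
}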

@@ -45,10 +45,6 @@ func (df *DiskFile) GetStat() (datSize int64, modTime time.Time, err error) {
 	return 0, time.Time{}, err
 }
-func (df *DiskFile) String() string {
+func (df *DiskFile) Name() string {
 	return df.fullFilePath
 }
-func (df *DiskFile) Instantiate(src *os.File) error {
-	panic("should not implement Instantiate for DiskFile")
-}

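A small usage sketch of the disk-backed implementation (hypothetical program; the scratch path and import path are assumptions), whose Name() now returns the full file path:

package main

import (
	"fmt"
	"os"

	"github.com/chrislusf/seaweedfs/weed/storage/backend" // assumed import path
)

func main() {
	// hypothetical scratch file, for illustration only
	f, err := os.Create("/tmp/example.dat")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// NewDiskFile wraps an *os.File as a BackendStorageFile (see its use in maybeWriteSuperBlock below)
	var bsf backend.BackendStorageFile = backend.NewDiskFile(f)
	fmt.Println(bsf.Name()) // prints the full file path, e.g. /tmp/example.dat
}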
@@ -55,10 +55,6 @@ func (mmf *MemoryMappedFile) GetStat() (datSize int64, modTime time.Time, err er
 	return 0, time.Time{}, err
 }
-func (mmf *MemoryMappedFile) String() string {
+func (mmf *MemoryMappedFile) Name() string {
 	return mmf.mm.File.Name()
 }
-func (mmf *MemoryMappedFile) Instantiate(src *os.File) error {
-	panic("should not implement Instantiate for MemoryMappedFile")
-}

@@ -153,7 +153,6 @@ func (s3backendStorageFile S3BackendStorageFile) GetStat() (datSize int64, modTi
 	return
 }
-func (s3backendStorageFile S3BackendStorageFile) String() string {
+func (s3backendStorageFile S3BackendStorageFile) Name() string {
 	return s3backendStorageFile.key
 }

@@ -131,7 +131,7 @@ func (n *Needle) Append(w backend.BackendStorageFile, version Version) (offset u
 	defer func(w backend.BackendStorageFile, off int64) {
 		if err != nil {
 			if te := w.Truncate(end); te != nil {
-				glog.V(0).Infof("Failed to truncate %s back to %d with error: %v", w.String(), end, te)
+				glog.V(0).Infof("Failed to truncate %s back to %d with error: %v", w.Name(), end, te)
 			}
 		}
 	}(w, end)

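The hunk above keeps the existing rollback logic and only switches the log message to Name(). A standalone sketch of that pattern (hypothetical helper; it assumes only the interface methods shown in this commit and the usual seaweedfs import paths):

package example

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/glog"            // assumed import path
	"github.com/chrislusf/seaweedfs/weed/storage/backend" // assumed import path
)

// appendWithRollback distills the deferred-truncate pattern above: remember the
// current end of the file, append, and truncate back on failure so a partial
// append cannot leave a corrupt tail behind.
func appendWithRollback(w backend.BackendStorageFile, data []byte) (err error) {
	end, _, err := w.GetStat()
	if err != nil {
		return fmt.Errorf("cannot stat %s: %v", w.Name(), err)
	}
	defer func() {
		if err != nil {
			if te := w.Truncate(end); te != nil {
				glog.V(0).Infof("Failed to truncate %s back to %d with error: %v", w.Name(), end, te)
			}
		}
	}()
	_, err = w.WriteAt(data, end)
	return
}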

@@ -83,7 +83,7 @@ func (v *Volume) FileStat() (datSize uint64, idxSize uint64, modTime time.Time)
 	if e == nil {
 		return uint64(datFileSize), v.nm.IndexFileSize(), modTime
 	}
-	glog.V(0).Infof("Failed to read file size %s %v", v.DataBackend.String(), e)
+	glog.V(0).Infof("Failed to read file size %s %v", v.DataBackend.Name(), e)
 	return // -1 causes integer overflow and the volume to become unwritable.
 }

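The trailing comment above is the reason the failure path returns zero values: a stat error could otherwise surface as -1, and converting -1 to uint64 wraps around to the maximum value, making the volume look full and therefore unwritable. A tiny illustration:

package main

import "fmt"

func main() {
	var datFileSize int64 = -1 // e.g. a sentinel from a failed stat call
	// converting a negative size to uint64 wraps around to 18446744073709551615
	fmt.Println(uint64(datFileSize))
}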

@@ -59,7 +59,7 @@ func (v *Volume) Destroy() (err error) {
 func (v *Volume) writeNeedle(n *needle.Needle) (offset uint64, size uint32, isUnchanged bool, err error) {
 	glog.V(4).Infof("writing needle %s", needle.NewFileIdFromNeedle(v.Id, n).String())
 	if v.readOnly {
-		err = fmt.Errorf("%s is read-only", v.DataBackend.String())
+		err = fmt.Errorf("%s is read-only", v.DataBackend.Name())
 		return
 	}
 	v.dataFileAccessLock.Lock()
@@ -112,7 +112,7 @@ func (v *Volume) writeNeedle(n *needle.Needle) (offset uint64, size uint32, isUn
 func (v *Volume) deleteNeedle(n *needle.Needle) (uint32, error) {
 	glog.V(4).Infof("delete needle %s", needle.NewFileIdFromNeedle(v.Id, n).String())
 	if v.readOnly {
-		return 0, fmt.Errorf("%s is read-only", v.DataBackend.String())
+		return 0, fmt.Errorf("%s is read-only", v.DataBackend.Name())
 	}
 	v.dataFileAccessLock.Lock()
 	defer v.dataFileAccessLock.Unlock()
@@ -202,7 +202,7 @@ func ScanVolumeFileFrom(version needle.Version, datBackend backend.BackendStorag
 		if e == io.EOF {
 			return nil
 		}
-		return fmt.Errorf("cannot read %s at offset %d: %v", datBackend.String(), offset, e)
+		return fmt.Errorf("cannot read %s at offset %d: %v", datBackend.Name(), offset, e)
 	}
 	for n != nil {
 		var needleBody []byte

@@ -78,7 +78,7 @@ func (v *Volume) maybeWriteSuperBlock() error {
 	datSize, _, e := v.DataBackend.GetStat()
 	if e != nil {
-		glog.V(0).Infof("failed to stat datafile %s: %v", v.DataBackend.String(), e)
+		glog.V(0).Infof("failed to stat datafile %s: %v", v.DataBackend.Name(), e)
 		return e
 	}
 	if datSize == 0 {
@@ -87,7 +87,7 @@ func (v *Volume) maybeWriteSuperBlock() error {
 		if e != nil && os.IsPermission(e) {
 			//read-only, but zero length - recreate it!
 			var dataFile *os.File
-			if dataFile, e = os.Create(v.DataBackend.String()); e == nil {
+			if dataFile, e = os.Create(v.DataBackend.Name()); e == nil {
 				v.DataBackend = backend.NewDiskFile(dataFile)
 				if _, e = v.DataBackend.WriteAt(v.SuperBlock.Bytes(), 0); e == nil {
 					v.readOnly = false
@@ -108,7 +108,7 @@ func ReadSuperBlock(datBackend backend.BackendStorageFile) (superBlock SuperBloc
 	header := make([]byte, _SuperBlockSize)
 	if _, e := datBackend.ReadAt(header, 0); e != nil {
-		err = fmt.Errorf("cannot read volume %s super block: %v", datBackend.String(), e)
+		err = fmt.Errorf("cannot read volume %s super block: %v", datBackend.Name(), e)
 		return
 	}
@@ -127,7 +127,7 @@ func ReadSuperBlock(datBackend backend.BackendStorageFile) (superBlock SuperBloc
 		superBlock.Extra = &master_pb.SuperBlockExtra{}
 		err = proto.Unmarshal(extraData, superBlock.Extra)
 		if err != nil {
-			err = fmt.Errorf("cannot read volume %s super block extra: %v", datBackend.String(), err)
+			err = fmt.Errorf("cannot read volume %s super block extra: %v", datBackend.Name(), err)
 			return
 		}
 	}

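For context, the super block read path above is a single positioned read at offset 0. A minimal sketch (hypothetical helper; it assumes the same package as _SuperBlockSize, with fmt and the backend package imported as in the hunks above):

// readSuperBlockBytes mirrors the read above: the super block occupies the
// first _SuperBlockSize bytes of the backend file.
func readSuperBlockBytes(datBackend backend.BackendStorageFile) ([]byte, error) {
	header := make([]byte, _SuperBlockSize)
	if _, e := datBackend.ReadAt(header, 0); e != nil {
		return nil, fmt.Errorf("cannot read volume %s super block: %v", datBackend.Name(), e)
	}
	return header, nil
}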

@@ -58,4 +58,4 @@ func (q *Queue) Dequeue() interface{} {
 	q.count--
 	return n.data
-}
+}