switch to ReadAt() for thread-safe read
fix bugs during volume compaction

commit 0563773944 (parent 463589da01)
mirror of https://github.com/seaweedfs/seaweedfs.git
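The first line of the title refers to switching reads to positional ReadAt() calls; the hunks below cover the topology and compaction fixes. As a minimal standalone sketch (not code from this commit), ReadAt takes an explicit offset and never moves the shared file position, so concurrent readers of one *os.File do not race the way Seek-then-Read does; "example.dat" is a hypothetical file name used only for the demo:

package main

import (
    "fmt"
    "os"
    "sync"
)

func main() {
    // Hypothetical data file; any file a few hundred bytes long works here.
    f, err := os.Open("example.dat")
    if err != nil {
        panic(err)
    }
    defer f.Close()

    var wg sync.WaitGroup
    for i := 0; i < 4; i++ {
        wg.Add(1)
        go func(off int64) {
            defer wg.Done()
            buf := make([]byte, 16)
            // ReadAt reads at an explicit offset and does not touch the
            // file's seek position, so these goroutines do not interfere.
            n, err := f.ReadAt(buf, off)
            fmt.Println("offset", off, "read", n, "err", err)
        }(int64(i) * 16)
    }
    wg.Wait()
}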
@@ -137,7 +137,7 @@ func (vg *VolumeGrowth) grow(topo *topology.Topology, vid storage.VolumeId, coll
     if err := AllocateVolume(server, vid, collection, rp); err == nil {
         vi := storage.VolumeInfo{Id: vid, Size: 0, Collection: collection, ReplicaPlacement: rp, Version: storage.CurrentVersion}
         server.AddOrUpdateVolume(vi)
-        topo.RegisterVolumeLayout(&vi, server)
+        topo.RegisterVolumeLayout(vi, server)
         glog.V(0).Infoln("Created Volume", vid, "on", server)
     } else {
         glog.V(0).Infoln("Failed to assign", vid, "to", servers, "error", err)
@@ -106,12 +106,12 @@ func (v *Volume) load(alsoLoadIndex bool, createDatIfMissing bool) error {
     if v.readOnly {
         glog.V(1).Infoln("open to read file", fileName+".idx")
         if indexFile, e = os.OpenFile(fileName+".idx", os.O_RDONLY, 0644); e != nil {
-            return fmt.Errorf("cannot read Volume Data %s.dat: %s", fileName, e.Error())
+            return fmt.Errorf("cannot read Volume Index %s.idx: %s", fileName, e.Error())
         }
     } else {
         glog.V(1).Infoln("open to write file", fileName+".idx")
         if indexFile, e = os.OpenFile(fileName+".idx", os.O_RDWR|os.O_CREATE, 0644); e != nil {
-            return fmt.Errorf("cannot write Volume Data %s.dat: %s", fileName, e.Error())
+            return fmt.Errorf("cannot write Volume Index %s.idx: %s", fileName, e.Error())
         }
     }
     glog.V(0).Infoln("loading file", fileName+".idx", "readonly", v.readOnly)
@@ -287,7 +287,8 @@ func (v *Volume) Compact() error {
     v.accessLock.Lock()
     defer v.accessLock.Unlock()

-    filePath := path.Join(v.dir, v.Id.String())
+    filePath := v.FileName()
+    glog.V(3).Infof("creating copies for volume %d ...", v.Id)
     return v.copyDataAndGenerateIndexFile(filePath+".cpd", filePath+".cpx")
 }

 func (v *Volume) commitCompact() error {
@@ -295,10 +296,10 @@ func (v *Volume) commitCompact() error {
     defer v.accessLock.Unlock()
     _ = v.dataFile.Close()
     var e error
-    if e = os.Rename(path.Join(v.dir, v.Id.String()+".cpd"), path.Join(v.dir, v.Id.String()+".dat")); e != nil {
+    if e = os.Rename(v.FileName()+".cpd", v.FileName()+".dat"); e != nil {
         return e
     }
-    if e = os.Rename(path.Join(v.dir, v.Id.String()+".cpx"), path.Join(v.dir, v.Id.String()+".idx")); e != nil {
+    if e = os.Rename(v.FileName()+".cpx", v.FileName()+".idx"); e != nil {
         return e
     }
     if e = v.load(true, false); e != nil {
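One plausible reading of the two compaction hunks above: for a volume that belongs to a collection, the data file carries the collection prefix in its name, so building the .cpd/.cpx paths from v.dir and v.Id alone points at the wrong files; v.FileName() presumably accounts for that. A hedged illustration, with fileName standing in for whatever Volume.FileName actually returns:

package main

import (
    "fmt"
    "path"
)

// fileName is a hypothetical stand-in for Volume.FileName: it includes the
// collection prefix when the volume belongs to a collection.
func fileName(dir, collection, id string) string {
    if collection == "" {
        return path.Join(dir, id)
    }
    return path.Join(dir, collection+"_"+id)
}

func main() {
    fmt.Println(path.Join("/data", "7"))            // old compaction path: /data/7
    fmt.Println(fileName("/data", "pictures", "7")) // actual volume file:  /data/pictures_7
}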
@@ -337,10 +338,10 @@ func ScanVolumeFile(dirname string, collection string, id VolumeId,
     visitNeedle func(n *Needle, offset int64) error) (err error) {
     var v *Volume
     if v, err = loadVolumeWithoutIndex(dirname, collection, id); err != nil {
-        return
+        return errors.New("Failed to load volume:" + err.Error())
     }
     if err = visitSuperBlock(v.SuperBlock); err != nil {
-        return
+        return errors.New("Failed to read super block:" + err.Error())
     }

     version := v.Version()
@@ -352,15 +353,14 @@ func ScanVolumeFile(dirname string, collection string, id VolumeId,
         return
     }
     for n != nil {
-        offset += int64(NeedleHeaderSize)
-        if err = n.ReadNeedleBody(v.dataFile, version, offset, rest); err != nil {
+        if err = n.ReadNeedleBody(v.dataFile, version, offset+int64(NeedleHeaderSize), rest); err != nil {
             err = fmt.Errorf("cannot read needle body: %s", err)
             return
         }
         if err = visitNeedle(n, offset); err != nil {
             return
         }
-        offset += int64(rest)
+        offset += int64(NeedleHeaderSize) + int64(rest)
         if n, rest, err = ReadNeedleHeader(v.dataFile, version, offset); err != nil {
             if err == io.EOF {
                 return nil
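The ScanVolumeFile change keeps offset pointing at the needle header (the value handed to visitNeedle and the value the needle map stores), while the body is read just past the header. A small sketch of the bookkeeping the new loop implies; the constants and values here are illustrative stand-ins for the ones in the storage package:

package main

import "fmt"

// NeedleHeaderSize is an illustrative stand-in for the storage package constant.
const NeedleHeaderSize = 16

func main() {
    offset := int64(8) // e.g. first needle, right after the superblock
    rest := int64(48)  // body size returned by ReadNeedleHeader

    bodyAt := offset + int64(NeedleHeaderSize)      // where ReadNeedleBody now reads
    next := offset + int64(NeedleHeaderSize) + rest // where the next header starts

    // visitNeedle(n, offset) still receives the header offset, which is what
    // the compaction callback compares against the needle map entry.
    fmt.Println("header at", offset, "body at", bodyAt, "next header at", next)
}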
@@ -386,6 +386,7 @@ func (v *Volume) copyDataAndGenerateIndexFile(dstName, idxName string) (err erro
     }
     defer idx.Close()

     nm := NewNeedleMap(idx)
+    new_offset := int64(SuperBlockSize)

     err = ScanVolumeFile(v.dir, v.Collection, v.Id, func(superBlock SuperBlock) error {
@@ -393,13 +394,16 @@ func (v *Volume) copyDataAndGenerateIndexFile(dstName, idxName string) (err erro
         return err
     }, func(n *Needle, offset int64) error {
         nv, ok := v.nm.Get(n.Id)
-        //glog.V(0).Infoln("file size is", n.Size, "rest", rest)
+        glog.V(3).Infoln("needle expected offset ", offset, "ok", ok, "nv", nv)
         if ok && int64(nv.Offset)*NeedlePaddingSize == offset && nv.Size > 0 {
             if _, err = nm.Put(n.Id, uint32(new_offset/NeedlePaddingSize), n.Size); err != nil {
                 return fmt.Errorf("cannot put needle: %s", err)
             }
             if _, err = n.Append(dst, v.Version()); err != nil {
                 return fmt.Errorf("cannot append needle: %s", err)
             }
             new_offset += n.DiskSize()
-            //glog.V(0).Infoln("saving key", n.Id, "volume offset", old_offset, "=>", new_offset, "data_size", n.Size, "rest", rest)
+            glog.V(3).Infoln("saving key", n.Id, "volume offset", offset, "=>", new_offset, "data_size", n.Size)
         }
         return nil
     })
@@ -129,8 +129,8 @@ func (t *Topology) DeleteCollection(collectionName string) {
     delete(t.collectionMap, collectionName)
 }

-func (t *Topology) RegisterVolumeLayout(v *storage.VolumeInfo, dn *DataNode) {
-    t.GetVolumeLayout(v.Collection, v.ReplicaPlacement).RegisterVolume(v, dn)
+func (t *Topology) RegisterVolumeLayout(v storage.VolumeInfo, dn *DataNode) {
+    t.GetVolumeLayout(v.Collection, v.ReplicaPlacement).RegisterVolume(&v, dn)
 }

 func (t *Topology) RegisterVolumes(init bool, volumeInfos []storage.VolumeInfo, ip string, port int, publicUrl string, maxVolumeCount int, dcName string, rackName string) {
@@ -144,7 +144,7 @@ func (t *Topology) RegisterVolumes(init bool, volumeInfos []storage.VolumeInfo,
         dn = rack.GetOrCreateDataNode(ip, port, publicUrl, maxVolumeCount)
         dn.UpdateVolumes(volumeInfos)
         for _, v := range volumeInfos {
-            t.RegisterVolumeLayout(&v, dn)
+            t.RegisterVolumeLayout(v, dn)
         }
     }

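A likely motivation for passing storage.VolumeInfo by value in the two topology hunks above: in Go versions before 1.22 the range variable is a single reused location, so taking &v inside the loop hands every call the same pointer. A minimal standalone demonstration of that pitfall, not code from this repository:

package main

import "fmt"

type VolumeInfo struct{ Id int }

func register(p *VolumeInfo, out *[]*VolumeInfo) { *out = append(*out, p) }

func main() {
    infos := []VolumeInfo{{1}, {2}, {3}}
    var seen []*VolumeInfo
    for _, v := range infos {
        // On Go versions before 1.22 the loop variable v is one reused
        // location, so &v is the same pointer on every iteration.
        register(&v, &seen)
    }
    for _, p := range seen {
        fmt.Println(p.Id) // pre-1.22: 3 3 3 — not 1 2 3
    }
}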
@@ -33,15 +33,22 @@ func (vl *VolumeLayout) RegisterVolume(v *storage.VolumeInfo, dn *DataNode) {
     if _, ok := vl.vid2location[v.Id]; !ok {
         vl.vid2location[v.Id] = NewVolumeLocationList()
     }
-    if vl.vid2location[v.Id].Add(dn) {
-        if len(vl.vid2location[v.Id].list) == v.ReplicaPlacement.GetCopyCount() {
-            if vl.isWritable(v) {
-                vl.writables = append(vl.writables, v.Id)
-            } else {
-                vl.removeFromWritable(v.Id)
-            }
+    vl.vid2location[v.Id].Set(dn)
+    glog.V(3).Infoln("volume", v.Id, "added to dn", dn, "len", vl.vid2location[v.Id].Length(), "copy", v.ReplicaPlacement.GetCopyCount())
+    if vl.vid2location[v.Id].Length() == vl.rp.GetCopyCount() && vl.isWritable(v) {
+        vl.AddToWritable(v.Id)
+    } else {
+        vl.removeFromWritable(v.Id)
+    }
 }

+func (vl *VolumeLayout) AddToWritable(vid storage.VolumeId) {
+    for _, id := range vl.writables {
+        if vid == id {
+            return
+        }
+    }
+    vl.writables = append(vl.writables, vid)
+}
+
 func (vl *VolumeLayout) isWritable(v *storage.VolumeInfo) bool {
@@ -156,10 +163,9 @@ func (vl *VolumeLayout) SetVolumeAvailable(dn *DataNode, vid storage.VolumeId) b
     vl.accessLock.Lock()
     defer vl.accessLock.Unlock()

-    if vl.vid2location[vid].Add(dn) {
-        if vl.vid2location[vid].Length() >= vl.rp.GetCopyCount() {
-            return vl.setVolumeWritable(vid)
-        }
+    vl.vid2location[vid].Set(dn)
+    if vl.vid2location[vid].Length() >= vl.rp.GetCopyCount() {
+        return vl.setVolumeWritable(vid)
     }
     return false
 }
@@ -18,14 +18,14 @@ func (dnll *VolumeLocationList) Length() int {
     return len(dnll.list)
 }

-func (dnll *VolumeLocationList) Add(loc *DataNode) bool {
-    for _, dnl := range dnll.list {
-        if loc.Ip == dnl.Ip && loc.Port == dnl.Port {
-            return false
+func (dnll *VolumeLocationList) Set(loc *DataNode) {
+    for i := 0; i < len(dnll.list); i++ {
+        if loc.Ip == dnll.list[i].Ip && loc.Port == dnll.list[i].Port {
+            dnll.list[i] = loc
+            return
         }
     }
     dnll.list = append(dnll.list, loc)
-    return true
 }

 func (dnll *VolumeLocationList) Remove(loc *DataNode) bool {
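The Add-to-Set rename changes the semantics from "refuse duplicates" to "update in place": re-registering a DataNode whose ip:port is already in the list replaces the stored pointer instead of being dropped, and new locations are still appended. A small sketch of that behaviour using a simplified stand-in type (not the repository's DataNode):

package main

import "fmt"

// node is a simplified stand-in for the repository's DataNode.
type node struct {
    Ip   string
    Port int
    Free int
}

type locationList struct{ list []*node }

// set mirrors the logic of VolumeLocationList.Set in the hunk above.
func (l *locationList) set(loc *node) {
    for i := 0; i < len(l.list); i++ {
        if loc.Ip == l.list[i].Ip && loc.Port == l.list[i].Port {
            l.list[i] = loc // same ip:port: replace the stale entry
            return
        }
    }
    l.list = append(l.list, loc) // otherwise append as before
}

func main() {
    var ll locationList
    ll.set(&node{Ip: "10.0.0.1", Port: 8080, Free: 100})
    ll.set(&node{Ip: "10.0.0.2", Port: 8080, Free: 100})
    ll.set(&node{Ip: "10.0.0.1", Port: 8080, Free: 42}) // refresh, not a new replica

    for _, n := range ll.list {
        fmt.Println(n.Ip, n.Port, n.Free) // two entries, the first one refreshed
    }
}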