go fmt
This commit is contained in:
parent ef75ce8a34
commit 99ecf63276
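
The hunks below are whitespace-only gofmt normalizations: operator spacing, slice-expression spacing, and column alignment of struct fields, values, and tags. As a sketch, a sweep like this is typically produced by one of the following commands run from the repository root (the exact invocation is not recorded in the commit):

    # list files whose formatting differs, then rewrite them in place
    gofmt -l -w .

    # equivalent via the go tool for the whole module
    go fmt ./...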
@@ -73,7 +73,7 @@ func (store *AbstractSqlStore) InsertEntry(ctx context.Context, entry *filer2.En
 	}
 
 	affectedRows, err := res.RowsAffected()
-	if err == nil && affectedRows > 0{
+	if err == nil && affectedRows > 0 {
 		return nil
 	}
 
@@ -37,7 +37,6 @@ func SeparateManifestChunks(chunks []*filer_pb.FileChunk) (manifestChunks, nonMa
 	return
 }
 
-
 func ResolveChunkManifest(lookupFileIdFn LookupFileIdFunctionType, chunks []*filer_pb.FileChunk) (dataChunks, manifestChunks []*filer_pb.FileChunk, manefestResolveErr error) {
 	// TODO maybe parallel this
 	for _, chunk := range chunks {
@@ -160,7 +160,7 @@ func logPrintf(name string, visibles []VisibleInterval) {
 	for _, v := range visibles {
 		glog.V(0).Infof("%s: [%d,%d) %s %d", name, v.start, v.stop, v.fileId, v.chunkOffset)
 	}
 	*/
 }
 
 var bufPool = sync.Pool{
@@ -11,17 +11,17 @@ import (
 func TestCompactFileChunksRealCase(t *testing.T) {
 
 	chunks := []*filer_pb.FileChunk{
-		{FileId:"2,512f31f2c0700a", Offset: 0, Size: 25- 0, Mtime: 5320497},
-		{FileId:"6,512f2c2e24e9e8", Offset: 868352, Size: 917585- 868352, Mtime: 5320492},
-		{FileId:"7,514468dd5954ca", Offset: 884736, Size: 901120- 884736, Mtime: 5325928},
-		{FileId:"5,5144463173fe77", Offset: 917504, Size: 2297856- 917504, Mtime: 5325894},
-		{FileId:"4,51444c7ab54e2d", Offset: 2301952, Size: 2367488-2301952, Mtime: 5325900},
-		{FileId:"4,514450e643ad22", Offset: 2371584, Size: 2420736-2371584, Mtime: 5325904},
-		{FileId:"6,514456a5e9e4d7", Offset: 2449408, Size: 2490368-2449408, Mtime: 5325910},
-		{FileId:"3,51444f8d53eebe", Offset: 2494464, Size: 2555904-2494464, Mtime: 5325903},
-		{FileId:"4,5144578b097c7e", Offset: 2560000, Size: 2596864-2560000, Mtime: 5325911},
-		{FileId:"3,51445500b6b4ac", Offset: 2637824, Size: 2678784-2637824, Mtime: 5325909},
-		{FileId:"1,51446285e52a61", Offset: 2695168, Size: 2715648-2695168, Mtime: 5325922},
+		{FileId: "2,512f31f2c0700a", Offset: 0, Size: 25 - 0, Mtime: 5320497},
+		{FileId: "6,512f2c2e24e9e8", Offset: 868352, Size: 917585 - 868352, Mtime: 5320492},
+		{FileId: "7,514468dd5954ca", Offset: 884736, Size: 901120 - 884736, Mtime: 5325928},
+		{FileId: "5,5144463173fe77", Offset: 917504, Size: 2297856 - 917504, Mtime: 5325894},
+		{FileId: "4,51444c7ab54e2d", Offset: 2301952, Size: 2367488 - 2301952, Mtime: 5325900},
+		{FileId: "4,514450e643ad22", Offset: 2371584, Size: 2420736 - 2371584, Mtime: 5325904},
+		{FileId: "6,514456a5e9e4d7", Offset: 2449408, Size: 2490368 - 2449408, Mtime: 5325910},
+		{FileId: "3,51444f8d53eebe", Offset: 2494464, Size: 2555904 - 2494464, Mtime: 5325903},
+		{FileId: "4,5144578b097c7e", Offset: 2560000, Size: 2596864 - 2560000, Mtime: 5325911},
+		{FileId: "3,51445500b6b4ac", Offset: 2637824, Size: 2678784 - 2637824, Mtime: 5325909},
+		{FileId: "1,51446285e52a61", Offset: 2695168, Size: 2715648 - 2695168, Mtime: 5325922},
 	}
 
 	printChunks("before", chunks)
@@ -43,4 +43,4 @@ func printChunks(name string, chunks []*filer_pb.FileChunk) {
 	for _, chunk := range chunks {
 		glog.V(0).Infof("%s chunk %s [%10d,%10d)", name, chunk.GetFileIdString(), chunk.Offset, chunk.Offset+int64(chunk.Size))
 	}
 }
@@ -74,7 +74,7 @@ func TestRandomFileChunksCompact(t *testing.T) {
 		if start > stop {
 			start, stop = stop, start
 		}
-		if start + 16 < stop {
+		if start+16 < stop {
 			stop = start + 16
 		}
 		chunk := &filer_pb.FileChunk{
@@ -352,9 +352,9 @@ func TestChunksReading(t *testing.T) {
 		// case 6: same updates
 		{
 			Chunks: []*filer_pb.FileChunk{
 				{Offset: 0, Size: 100, FileId: "abc", Fid: &filer_pb.FileId{FileKey: 1}, Mtime: 123},
 				{Offset: 0, Size: 100, FileId: "def", Fid: &filer_pb.FileId{FileKey: 2}, Mtime: 123},
 				{Offset: 0, Size: 100, FileId: "xyz", Fid: &filer_pb.FileId{FileKey: 3}, Mtime: 123},
 			},
 			Offset: 0,
 			Size:   100,
@@ -18,7 +18,7 @@ import (
 
 const (
 	LogFlushInterval = time.Minute
 	PaginationSize   = 1024 * 256
 )
 
 var (
@@ -109,7 +109,7 @@ func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, err error) {
 	glog.V(4).Infof("doReadAt [%d,%d), n:%v, err:%v", offset, offset+int64(len(p)), n, err)
 
 	if err == nil && remaining > 0 && c.fileSize > startOffset {
-		delta := int(min(remaining, c.fileSize - startOffset))
+		delta := int(min(remaining, c.fileSize-startOffset))
 		glog.V(4).Infof("zero2 [%d,%d) of file size %d bytes", startOffset, startOffset+int64(delta), c.fileSize)
 		n += delta
 	}
@@ -27,33 +27,33 @@ func TestReaderAt(t *testing.T) {
 
 	visibles := []VisibleInterval{
 		{
 			start:     1,
 			stop:      2,
 			fileId:    "1",
 			chunkSize: 9,
 		},
 		{
 			start:     3,
 			stop:      4,
 			fileId:    "3",
 			chunkSize: 1,
 		},
 		{
 			start:     5,
 			stop:      6,
 			fileId:    "5",
 			chunkSize: 2,
 		},
 		{
 			start:     7,
 			stop:      9,
 			fileId:    "7",
 			chunkSize: 2,
 		},
 		{
 			start:     9,
 			stop:      10,
 			fileId:    "9",
 			chunkSize: 2,
 		},
 	}
@@ -95,15 +95,15 @@ func TestReaderAt0(t *testing.T) {
 
 	visibles := []VisibleInterval{
 		{
 			start:     2,
 			stop:      5,
 			fileId:    "1",
 			chunkSize: 9,
 		},
 		{
 			start:     7,
 			stop:      9,
 			fileId:    "2",
 			chunkSize: 9,
 		},
 	}
@@ -129,9 +129,9 @@ func TestReaderAt1(t *testing.T) {
 
 	visibles := []VisibleInterval{
 		{
 			start:     2,
 			stop:      5,
 			fileId:    "1",
 			chunkSize: 9,
 		},
 	}
@@ -73,14 +73,14 @@ func TestRandomWrites(t *testing.T) {
 
 	data := make([]byte, 1024)
 
-	for i:=0;i<1024;i++ {
+	for i := 0; i < 1024; i++ {
 
 		start, stop := rand.Intn(len(data)), rand.Intn(len(data))
 		if start > stop {
-			start,stop = stop, start
+			start, stop = stop, start
 		}
 
-		rand.Read(data[start:stop+1])
+		rand.Read(data[start : stop+1])
 
 		c.AddInterval(data[start:stop+1], int64(start))
 
@@ -95,7 +95,6 @@ func TestFsCacheMove(t *testing.T) {
 
 }
 
-
 func TestFsCacheMove2(t *testing.T) {
 
 	cache := newFsCache(nil)
@@ -114,4 +113,3 @@ func TestFsCacheMove2(t *testing.T) {
 	}
 
 }
-
@@ -17,18 +17,18 @@ import (
 )
 
 type ListBucketResultV2 struct {
 	XMLName               xml.Name      `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListBucketResult"`
 	Name                  string        `xml:"Name"`
 	Prefix                string        `xml:"Prefix"`
 	MaxKeys               int           `xml:"MaxKeys"`
 	Delimiter             string        `xml:"Delimiter,omitempty"`
 	IsTruncated           bool          `xml:"IsTruncated"`
 	Contents              []ListEntry   `xml:"Contents,omitempty"`
 	CommonPrefixes        []PrefixEntry `xml:"CommonPrefixes,omitempty"`
 	ContinuationToken     string        `xml:"ContinuationToken,omitempty"`
 	NextContinuationToken string        `xml:"NextContinuationToken,omitempty"`
 	KeyCount              int           `xml:"KeyCount"`
 	StartAfter            string        `xml:"StartAfter,omitempty"`
 }
 
 func (s3a *S3ApiServer) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) {
@@ -148,7 +148,6 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa
 		crTime = existingEntry.Crtime
 	}
 
-
 	glog.V(4).Infoln("saving", path)
 	entry := &filer2.Entry{
 		FullPath: util.FullPath(path),
@@ -24,7 +24,7 @@ const (
 type Needle struct {
 	Cookie Cookie   `comment:"random number to mitigate brute force lookups"`
 	Id     NeedleId `comment:"needle id"`
 	Size   Size     `comment:"sum of DataSize,Data,NameSize,Name,MimeSize,Mime"`
 
 	DataSize uint32 `comment:"Data size"` //version2
 	Data     []byte `comment:"The actual file data"`
@@ -18,7 +18,7 @@ const SectionalNeedleIdLimit = 1<<32 - 1
 type SectionalNeedleValue struct {
 	Key         SectionalNeedleId
 	OffsetLower OffsetLower `comment:"Volume offset"` //since aligned to 8 bytes, range is 4G*8=32G
 	Size        Size        `comment:"Size of the data portion"`
 }
 
 type SectionalNeedleValueExtra struct {
@@ -116,7 +116,7 @@ func (cs *CompactSection) deleteOverflowEntry(key SectionalNeedleId) {
 	})
 	if deleteCandidate != length && cs.overflow[deleteCandidate].Key == key {
 		if cs.overflow[deleteCandidate].Size.IsValid() {
-			cs.overflow[deleteCandidate].Size = - cs.overflow[deleteCandidate].Size
+			cs.overflow[deleteCandidate].Size = -cs.overflow[deleteCandidate].Size
 		}
 	}
 }
@@ -9,7 +9,7 @@ import (
 type NeedleValue struct {
 	Key    NeedleId
 	Offset Offset `comment:"Volume offset"` //since aligned to 8 bytes, range is 4G*8=32G
 	Size   Size   `comment:"Size of the data portion"`
 }
 
 func (this NeedleValue) Less(than btree.Item) bool {
@@ -18,7 +18,7 @@ func (s Size) IsDeleted() bool {
 	return s < 0 || s == TombstoneFileSize
 }
 func (s Size) IsValid() bool {
-	return s >0 && s != TombstoneFileSize
+	return s > 0 && s != TombstoneFileSize
 }
 
 type OffsetLower struct {
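
Several hunks above (the struct definitions and the test fixtures) show no -/+ token changes because gofmt's only edit there is column alignment, which the diff view collapses. A minimal before/after sketch of that behavior on a hypothetical struct:

	// before gofmt: fields separated by single spaces
	type example struct {
		Key NeedleId
		OffsetLower OffsetLower
	}

	// after gofmt: names and types padded into aligned columns
	type example struct {
		Key         NeedleId
		OffsetLower OffsetLower
	}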