Merge branch 'filer2_development'

Chris Lu 2018-06-02 00:35:03 -07:00
commit c546c309f1
88 changed files with 4530 additions and 3355 deletions


@@ -2,17 +2,18 @@ package command
import (
"net/http"
"os"
"strconv"
"time"
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/server"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/soheilhy/cmux"
"google.golang.org/grpc/reflection"
"google.golang.org/grpc"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"google.golang.org/grpc/reflection"
"strings"
)
var (
@@ -20,50 +21,35 @@ var (
)
type FilerOptions struct {
master *string
masters *string
ip *string
port *int
publicPort *int
collection *string
defaultReplicaPlacement *string
dir *string
redirectOnRead *bool
disableDirListing *bool
confFile *string
maxMB *int
secretKey *string
cassandra_server *string
cassandra_keyspace *string
redis_server *string
redis_password *string
redis_database *int
}
func init() {
cmdFiler.Run = runFiler // break init cycle
f.master = cmdFiler.Flag.String("master", "localhost:9333", "master server location")
f.masters = cmdFiler.Flag.String("master", "localhost:9333", "comma-separated master servers")
f.collection = cmdFiler.Flag.String("collection", "", "all data will be stored in this collection")
f.ip = cmdFiler.Flag.String("ip", "", "filer server http listen ip address")
f.port = cmdFiler.Flag.Int("port", 8888, "filer server http listen port")
f.publicPort = cmdFiler.Flag.Int("port.public", 0, "port opened to public")
f.dir = cmdFiler.Flag.String("dir", os.TempDir(), "directory to store meta data")
f.defaultReplicaPlacement = cmdFiler.Flag.String("defaultReplicaPlacement", "000", "default replication type if not specified")
f.redirectOnRead = cmdFiler.Flag.Bool("redirectOnRead", false, "whether proxy or redirect to volume server during file GET request")
f.disableDirListing = cmdFiler.Flag.Bool("disableDirListing", false, "turn off directory listing")
f.confFile = cmdFiler.Flag.String("confFile", "", "json encoded filer conf file")
f.maxMB = cmdFiler.Flag.Int("maxMB", 32, "split files larger than the limit")
f.cassandra_server = cmdFiler.Flag.String("cassandra.server", "", "host[:port] of the cassandra server")
f.cassandra_keyspace = cmdFiler.Flag.String("cassandra.keyspace", "seaweed", "keyspace of the cassandra server")
f.redis_server = cmdFiler.Flag.String("redis.server", "", "comma separated host:port[,host2:port2]* of the redis server, e.g., 127.0.0.1:6379")
f.redis_password = cmdFiler.Flag.String("redis.password", "", "password in clear text")
f.redis_database = cmdFiler.Flag.Int("redis.database", 0, "the database on the redis server")
f.secretKey = cmdFiler.Flag.String("secure.secret", "", "secret to encrypt Json Web Token(JWT)")
}
var cmdFiler = &Command{
UsageLine: "filer -port=8888 -dir=/tmp -master=<ip:port>",
Short: "start a file server that points to a master server",
UsageLine: "filer -port=8888 -master=<ip:port>[,<ip:port>]*",
Short: "start a file server that points to a master server, or a list of master servers",
Long: `start a file server which accepts REST operation for any files.
//create or overwrite the file, the directories /path/to will be automatically created
@@ -75,20 +61,15 @@ var cmdFiler = &Command{
//return a json format subdirectory and files listing
GET /path/to/
The current <fullpath~fileid> mapping metadata store is a local embedded leveldb.
It should be highly scalable to hundreds of millions of files on a modest machine.
The configuration file "filer.toml" is read from ".", "$HOME/.seaweedfs/", or "/etc/seaweedfs/", in that order.
In the future we will ensure it can avoid being a SPOF.
The following is an example filer.toml configuration file.
`,
` + filer2.FILER_TOML_EXAMPLE + "\n",
}
func runFiler(cmd *Command, args []string) bool {
if err := util.TestFolderWritable(*f.dir); err != nil {
glog.Fatalf("Check Meta Folder (-dir) Writable %s : %s", *f.dir, err)
}
f.start()
return true
@@ -103,14 +84,13 @@ func (fo *FilerOptions) start() {
publicVolumeMux = http.NewServeMux()
}
masters := *f.masters
fs, nfs_err := weed_server.NewFilerServer(defaultMux, publicVolumeMux,
*fo.ip, *fo.port, *fo.master, *fo.dir, *fo.collection,
*fo.ip, *fo.port, strings.Split(masters, ","), *fo.collection,
*fo.defaultReplicaPlacement, *fo.redirectOnRead, *fo.disableDirListing,
*fo.confFile,
*fo.maxMB,
*fo.secretKey,
*fo.cassandra_server, *fo.cassandra_keyspace,
*fo.redis_server, *fo.redis_password, *fo.redis_database,
)
if nfs_err != nil {
glog.Fatalf("Filer startup error: %v", nfs_err)


@@ -9,8 +9,15 @@ import (
"strings"
"github.com/chrislusf/seaweedfs/weed/operation"
filer_operation "github.com/chrislusf/seaweedfs/weed/operation/filer"
"github.com/chrislusf/seaweedfs/weed/security"
"path"
"net/http"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"strconv"
"io"
"time"
"google.golang.org/grpc"
"context"
)
var (
@@ -68,20 +75,20 @@ func runCopy(cmd *Command, args []string) bool {
return false
}
filerDestination := args[len(args)-1]
fileOrDirs := args[0 : len(args)-1]
fileOrDirs := args[0: len(args)-1]
filerUrl, err := url.Parse(filerDestination)
if err != nil {
fmt.Printf("The last argument should be a URL on filer: %v\n", err)
return false
}
path := filerUrl.Path
if !strings.HasSuffix(path, "/") {
path = path + "/"
urlPath := filerUrl.Path
if !strings.HasSuffix(urlPath, "/") {
urlPath = urlPath + "/"
}
for _, fileOrDir := range fileOrDirs {
if !doEachCopy(fileOrDir, filerUrl.Host, path) {
if !doEachCopy(fileOrDir, filerUrl.Host, urlPath) {
return false
}
}
@@ -91,14 +98,14 @@ func runCopy(cmd *Command, args []string) bool {
func doEachCopy(fileOrDir string, host string, path string) bool {
f, err := os.Open(fileOrDir)
if err != nil {
fmt.Printf("Failed to open file %s: %v", fileOrDir, err)
fmt.Printf("Failed to open file %s: %v\n", fileOrDir, err)
return false
}
defer f.Close()
fi, err := f.Stat()
if err != nil {
fmt.Printf("Failed to get stat for file %s: %v", fileOrDir, err)
fmt.Printf("Failed to get stat for file %s: %v\n", fileOrDir, err)
return false
}
@@ -120,28 +127,199 @@ func doEachCopy(fileOrDir string, host string, path string) bool {
}
}
parts, err := operation.NewFileParts([]string{fileOrDir})
// find the chunk count
chunkSize := int64(*copy.maxMB * 1024 * 1024)
chunkCount := 1
if chunkSize > 0 && fi.Size() > chunkSize {
chunkCount = int(fi.Size()/chunkSize) + 1
}
if chunkCount == 1 {
return uploadFileAsOne(host, path, f, fi)
}
return uploadFileInChunks(host, path, f, fi, chunkCount, chunkSize)
}
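As a quick sanity check of the chunk-count arithmetic above, here is a hedged, self-contained sketch (not part of the commit; the 100 MiB file size is a made-up example):

package main

import "fmt"

func main() {
	const maxMB = 32                        // mirrors the -maxMB default above
	chunkSize := int64(maxMB * 1024 * 1024) // 33,554,432 bytes
	fileSize := int64(100 * 1024 * 1024)    // hypothetical 100 MiB upload
	chunkCount := 1
	if chunkSize > 0 && fileSize > chunkSize {
		chunkCount = int(fileSize/chunkSize) + 1
	}
	fmt.Println(chunkCount) // 4: three full 32 MiB chunks plus a 4 MiB tail
}

Note that a size that is an exact multiple of chunkSize still yields int(size/chunkSize)+1 chunks, so the final chunk can be empty.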
func uploadFileAsOne(filerUrl string, urlFolder string, f *os.File, fi os.FileInfo) bool {
// upload the file content
fileName := filepath.Base(f.Name())
mimeType := detectMimeType(f)
isGzipped := isGzipped(fileName)
var chunks []*filer_pb.FileChunk
if fi.Size() > 0 {
// assign a volume
assignResult, err := operation.Assign(*copy.master, &operation.VolumeAssignRequest{
Count: 1,
Replication: *copy.replication,
Collection: *copy.collection,
Ttl: *copy.ttl,
})
if err != nil {
fmt.Printf("Failed to read file %s: %v", fileOrDir, err)
fmt.Printf("Failed to assign from %s: %v\n", *copy.master, err)
}
results, err := operation.SubmitFiles(*copy.master, parts,
*copy.replication, *copy.collection, "",
*copy.ttl, *copy.maxMB, copy.secret)
targetUrl := "http://" + assignResult.Url + "/" + assignResult.Fid
uploadResult, err := operation.Upload(targetUrl, fileName, f, isGzipped, mimeType, nil, "")
if err != nil {
fmt.Printf("Failed to submit file %s: %v", fileOrDir, err)
fmt.Printf("upload data %v to %s: %v\n", fileName, targetUrl, err)
return false
}
if uploadResult.Error != "" {
fmt.Printf("upload %v to %s result: %v\n", fileName, targetUrl, uploadResult.Error)
return false
}
fmt.Printf("uploaded %s to %s\n", fileName, targetUrl)
chunks = append(chunks, &filer_pb.FileChunk{
FileId: assignResult.Fid,
Offset: 0,
Size: uint64(uploadResult.Size),
Mtime: time.Now().UnixNano(),
})
fmt.Printf("copied %s => http://%s%s%s\n", fileName, filerUrl, urlFolder, fileName)
}
if strings.HasSuffix(path, "/") {
path = path + fi.Name()
if err := withFilerClient(filerUrl, func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.CreateEntryRequest{
Directory: urlFolder,
Entry: &filer_pb.Entry{
Name: fileName,
Attributes: &filer_pb.FuseAttributes{
Crtime: time.Now().Unix(),
Mtime: time.Now().Unix(),
Gid: uint32(os.Getgid()),
Uid: uint32(os.Getuid()),
FileSize: uint64(fi.Size()),
FileMode: uint32(fi.Mode()),
Mime: mimeType,
},
Chunks: chunks,
},
}
if err = filer_operation.RegisterFile(host, path, results[0].Fid, copy.secret); err != nil {
fmt.Printf("Failed to register file %s on %s: %v", fileOrDir, host, err)
if _, err := client.CreateEntry(context.Background(), request); err != nil {
return fmt.Errorf("update fh: %v", err)
}
return nil
}); err != nil {
fmt.Printf("upload data %v to http://%s%s%s: %v\n", fileName, filerUrl, urlFolder, fileName, err)
return false
}
fmt.Printf("Copy %s => http://%s%s\n", fileOrDir, host, path)
return true
}
func uploadFileInChunks(filerUrl string, urlFolder string, f *os.File, fi os.FileInfo, chunkCount int, chunkSize int64) bool {
fileName := filepath.Base(f.Name())
mimeType := detectMimeType(f)
var chunks []*filer_pb.FileChunk
for i := int64(0); i < int64(chunkCount); i++ {
// assign a volume
assignResult, err := operation.Assign(*copy.master, &operation.VolumeAssignRequest{
Count: 1,
Replication: *copy.replication,
Collection: *copy.collection,
Ttl: *copy.ttl,
})
if err != nil {
fmt.Printf("Failed to assign from %s: %v\n", *copy.master, err)
}
targetUrl := "http://" + assignResult.Url + "/" + assignResult.Fid
uploadResult, err := operation.Upload(targetUrl,
fileName+"-"+strconv.FormatInt(i+1, 10),
io.LimitReader(f, chunkSize),
false, "application/octet-stream", nil, "")
if err != nil {
fmt.Printf("upload data %v to %s: %v\n", fileName, targetUrl, err)
return false
}
if uploadResult.Error != "" {
fmt.Printf("upload %v to %s result: %v\n", fileName, targetUrl, uploadResult.Error)
return false
}
chunks = append(chunks, &filer_pb.FileChunk{
FileId: assignResult.Fid,
Offset: i * chunkSize,
Size: uint64(uploadResult.Size),
Mtime: time.Now().UnixNano(),
})
fmt.Printf("uploaded %s-%d to %s [%d,%d)\n", fileName, i+1, targetUrl, i*chunkSize, i*chunkSize+int64(uploadResult.Size))
}
if err := withFilerClient(filerUrl, func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.CreateEntryRequest{
Directory: urlFolder,
Entry: &filer_pb.Entry{
Name: fileName,
Attributes: &filer_pb.FuseAttributes{
Crtime: time.Now().Unix(),
Mtime: time.Now().Unix(),
Gid: uint32(os.Getgid()),
Uid: uint32(os.Getuid()),
FileSize: uint64(fi.Size()),
FileMode: uint32(fi.Mode()),
Mime: mimeType,
},
Chunks: chunks,
},
}
if _, err := client.CreateEntry(context.Background(), request); err != nil {
return fmt.Errorf("update fh: %v", err)
}
return nil
}); err != nil {
fmt.Printf("upload data %v to http://%s%s%s: %v\n", fileName, filerUrl, urlFolder, fileName, err)
return false
}
fmt.Printf("copied %s => http://%s%s%s\n", fileName, filerUrl, urlFolder, fileName)
return true
}
func isGzipped(filename string) bool {
return strings.ToLower(path.Ext(filename)) == ".gz"
}
func detectMimeType(f *os.File) string {
head := make([]byte, 512)
f.Seek(0, 0)
n, err := f.Read(head)
if err == io.EOF {
return ""
}
if err != nil {
fmt.Printf("read head of %v: %v\n", f.Name(), err)
return "application/octet-stream"
}
f.Seek(0, 0)
mimeType := http.DetectContentType(head[:n])
return mimeType
}
func withFilerClient(filerAddress string, fn func(filer_pb.SeaweedFilerClient) error) error {
grpcConnection, err := grpc.Dial(filerAddress, grpc.WithInsecure())
if err != nil {
return fmt.Errorf("fail to dial %s: %v", filerAddress, err)
}
defer grpcConnection.Close()
client := filer_pb.NewSeaweedFilerClient(grpcConnection)
return fn(client)
}
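As a usage note, any other filer gRPC call can be issued through the same helper. A minimal hedged sketch, assuming the filer_pb package also exposes a LookupDirectoryEntry RPC (the address, directory, and file name below are hypothetical):

err := withFilerClient("localhost:8888", func(client filer_pb.SeaweedFilerClient) error {
	resp, err := client.LookupDirectoryEntry(context.Background(), &filer_pb.LookupDirectoryEntryRequest{
		Directory: "/some/dir", // hypothetical values for illustration
		Name:      "a.txt",
	})
	if err != nil {
		return err
	}
	fmt.Printf("found entry: %v\n", resp.Entry)
	return nil
})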


@@ -3,6 +3,9 @@ package command
type MountOptions struct {
filer *string
dir *string
collection *string
replication *string
chunkSizeLimitMB *int
}
var (
@@ -11,9 +14,11 @@ var (
func init() {
cmdMount.Run = runMount // break init cycle
cmdMount.IsDebug = cmdMount.Flag.Bool("debug", false, "verbose debug information")
mountOptions.filer = cmdMount.Flag.String("filer", "localhost:8888", "weed filer location")
mountOptions.dir = cmdMount.Flag.String("dir", ".", "mount weed filer to this directory")
mountOptions.collection = cmdMount.Flag.String("collection", "", "collection to create the files")
mountOptions.replication = cmdMount.Flag.String("replication", "000", "replication to create to files")
mountOptions.chunkSizeLimitMB = cmdMount.Flag.Int("chunkSizeLimitMB", 16, "local write buffer size, also chunk large files")
}
var cmdMount = &Command{


@@ -8,9 +8,9 @@ import (
"bazil.org/fuse"
"bazil.org/fuse/fs"
"github.com/chrislusf/seaweedfs/weed/filesys"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/chrislusf/seaweedfs/weed/filesys"
)
func runMount(cmd *Command, args []string) bool {
@@ -19,6 +19,10 @@ func runMount(cmd *Command, args []string) bool {
fmt.Printf("Please specify the mount directory via \"-dir\"")
return false
}
if *mountOptions.chunkSizeLimitMB <= 0 {
fmt.Printf("Please specify a reasonable buffer size.")
return false
}
fuse.Unmount(*mountOptions.dir)
@@ -47,7 +51,8 @@ func runMount(cmd *Command, args []string) bool {
c.Close()
})
err = fs.Serve(c, filesys.NewSeaweedFileSystem(*mountOptions.filer))
err = fs.Serve(c, filesys.NewSeaweedFileSystem(
*mountOptions.filer, *mountOptions.collection, *mountOptions.replication, *mountOptions.chunkSizeLimitMB))
if err != nil {
fuse.Unmount(*mountOptions.dir)
}


@@ -83,21 +83,13 @@ var (
func init() {
serverOptions.cpuprofile = cmdServer.Flag.String("cpuprofile", "", "cpu profile output file")
filerOptions.master = cmdServer.Flag.String("filer.master", "", "default to current master server")
filerOptions.collection = cmdServer.Flag.String("filer.collection", "", "all data will be stored in this collection")
filerOptions.port = cmdServer.Flag.Int("filer.port", 8888, "filer server http listen port")
filerOptions.publicPort = cmdServer.Flag.Int("filer.port.public", 0, "filer server public http listen port")
filerOptions.dir = cmdServer.Flag.String("filer.dir", "", "directory to store meta data, default to a 'filer' sub directory of what -dir is specified")
filerOptions.defaultReplicaPlacement = cmdServer.Flag.String("filer.defaultReplicaPlacement", "", "Default replication type if not specified during runtime.")
filerOptions.redirectOnRead = cmdServer.Flag.Bool("filer.redirectOnRead", false, "whether proxy or redirect to volume server during file GET request")
filerOptions.disableDirListing = cmdServer.Flag.Bool("filer.disableDirListing", false, "turn off directory listing")
filerOptions.confFile = cmdServer.Flag.String("filer.confFile", "", "json encoded filer conf file")
filerOptions.maxMB = cmdServer.Flag.Int("filer.maxMB", 32, "split files larger than the limit")
filerOptions.cassandra_server = cmdServer.Flag.String("filer.cassandra.server", "", "host[:port] of the cassandra server")
filerOptions.cassandra_keyspace = cmdServer.Flag.String("filer.cassandra.keyspace", "seaweed", "keyspace of the cassandra server")
filerOptions.redis_server = cmdServer.Flag.String("filer.redis.server", "", "host:port of the redis server, e.g., 127.0.0.1:6379")
filerOptions.redis_password = cmdServer.Flag.String("filer.redis.password", "", "redis password in clear text")
filerOptions.redis_database = cmdServer.Flag.Int("filer.redis.database", 0, "the database on the redis server")
}
func runServer(cmd *Command, args []string) bool {
@@ -115,7 +107,7 @@ func runServer(cmd *Command, args []string) bool {
*isStartingFiler = true
}
*filerOptions.master = *serverIp + ":" + strconv.Itoa(*masterPort)
master := *serverIp + ":" + strconv.Itoa(*masterPort)
filerOptions.ip = serverIp
if *filerOptions.defaultReplicaPlacement == "" {
@@ -157,15 +149,6 @@ func runServer(cmd *Command, args []string) bool {
if *masterMetaFolder == "" {
*masterMetaFolder = folders[0]
}
if *isStartingFiler {
if *filerOptions.dir == "" {
*filerOptions.dir = *masterMetaFolder + "/filer"
os.MkdirAll(*filerOptions.dir, 0700)
}
if err := util.TestFolderWritable(*filerOptions.dir); err != nil {
glog.Fatalf("Check Mapping Meta Folder (-filer.dir=\"%s\") Writable: %s", *filerOptions.dir, err)
}
}
if err := util.TestFolderWritable(*masterMetaFolder); err != nil {
glog.Fatalf("Check Meta Folder (-mdir=\"%s\") Writable: %s", *masterMetaFolder, err)
}
@@ -267,7 +250,7 @@ func runServer(cmd *Command, args []string) bool {
*serverIp, *volumePort, *volumeServerPublicUrl,
folders, maxCounts,
volumeNeedleMapKind,
*serverIp+":"+strconv.Itoa(*masterPort), *volumePulse, *serverDataCenter, *serverRack,
[]string{master}, *volumePulse, *serverDataCenter, *serverRack,
serverWhiteList, *volumeFixJpgOrientation, *volumeReadRedirect,
)


@@ -26,7 +26,7 @@ type VolumeServerOptions struct {
ip *string
publicUrl *string
bindIp *string
master *string
masters *string
pulseSeconds *int
idleConnectionTimeout *int
maxCpu *int
@@ -47,7 +47,7 @@ func init() {
v.ip = cmdVolume.Flag.String("ip", "", "ip or server name")
v.publicUrl = cmdVolume.Flag.String("publicUrl", "", "Publicly accessible address")
v.bindIp = cmdVolume.Flag.String("ip.bind", "0.0.0.0", "ip address to bind to")
v.master = cmdVolume.Flag.String("mserver", "localhost:9333", "master server location")
v.masters = cmdVolume.Flag.String("mserver", "localhost:9333", "comma-separated master servers")
v.pulseSeconds = cmdVolume.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats, must be smaller than or equal to the master's setting")
v.idleConnectionTimeout = cmdVolume.Flag.Int("idleTimeout", 30, "connection idle seconds")
v.maxCpu = cmdVolume.Flag.Int("maxCpu", 0, "maximum number of CPUs. 0 means all available CPUs")
@@ -132,11 +132,14 @@ func runVolume(cmd *Command, args []string) bool {
case "btree":
volumeNeedleMapKind = storage.NeedleMapBtree
}
masters := *v.masters
volumeServer := weed_server.NewVolumeServer(volumeMux, publicVolumeMux,
*v.ip, *v.port, *v.publicUrl,
v.folders, v.folderMaxLimits,
volumeNeedleMapKind,
*v.master, *v.pulseSeconds, *v.dataCenter, *v.rack,
strings.Split(masters, ","), *v.pulseSeconds, *v.dataCenter, *v.rack,
v.whiteList,
*v.fixJpgOrientation, *v.readRedirect,
)


@@ -1,96 +0,0 @@
package cassandra_store
import (
"fmt"
"strings"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/gocql/gocql"
)
/*
Basically you need a table just like this:
CREATE TABLE seaweed_files (
path varchar,
fids list<varchar>,
PRIMARY KEY (path)
);
Need to match flat_namespace.FlatNamespaceStore interface
Put(fullFileName string, fid string) (err error)
Get(fullFileName string) (fid string, err error)
Delete(fullFileName string) (fid string, err error)
*/
type CassandraStore struct {
cluster *gocql.ClusterConfig
session *gocql.Session
}
func NewCassandraStore(keyspace string, hosts string) (c *CassandraStore, err error) {
c = &CassandraStore{}
s := strings.Split(hosts, ",")
if len(s) == 1 {
glog.V(2).Info("Only one cassandra node to connect! A cluster is Recommended! Now using:", string(hosts))
c.cluster = gocql.NewCluster(hosts)
} else if len(s) > 1 {
c.cluster = gocql.NewCluster(s...)
}
c.cluster.Keyspace = keyspace
c.cluster.Consistency = gocql.LocalQuorum
c.session, err = c.cluster.CreateSession()
if err != nil {
glog.V(0).Infof("Failed to open cassandra store, hosts %v, keyspace %s", hosts, keyspace)
}
return
}
func (c *CassandraStore) Put(fullFileName string, fid string) (err error) {
var input []string
input = append(input, fid)
if err := c.session.Query(
`INSERT INTO seaweed_files (path, fids) VALUES (?, ?)`,
fullFileName, input).Exec(); err != nil {
glog.V(0).Infof("Failed to save file %s with id %s: %v", fullFileName, fid, err)
return err
}
return nil
}
func (c *CassandraStore) Get(fullFileName string) (fid string, err error) {
var output []string
if err := c.session.Query(
`select fids FROM seaweed_files WHERE path = ? LIMIT 1`,
fullFileName).Consistency(gocql.One).Scan(&output); err != nil {
if err != gocql.ErrNotFound {
glog.V(0).Infof("Failed to find file %s: %v", fullFileName, fid, err)
return "", filer.ErrNotFound
}
}
if len(output) == 0 {
return "", fmt.Errorf("No file id found for %s", fullFileName)
}
return output[0], nil
}
// Currently the fid is not returned
func (c *CassandraStore) Delete(fullFileName string) (err error) {
if err := c.session.Query(
`DELETE FROM seaweed_files WHERE path = ?`,
fullFileName).Exec(); err != nil {
if err != gocql.ErrNotFound {
glog.V(0).Infof("Failed to delete file %s: %v", fullFileName, err)
}
return err
}
return nil
}
func (c *CassandraStore) Close() {
if c.session != nil {
c.session.Close()
}
}


@@ -1,22 +0,0 @@
/*
Here is the CQL to create the table used by CassandraStore.
Optionally you can adjust the keyspace name and replication settings.
For a production server, you will very likely want to set replication_factor to 3.
*/
create keyspace seaweed WITH replication = {
'class':'SimpleStrategy',
'replication_factor':1
};
use seaweed;
CREATE TABLE seaweed_files (
path varchar,
fids list<varchar>,
PRIMARY KEY (path)
);


@@ -1,26 +0,0 @@
Design Assumptions:
1. the number of directories is orders of magnitude smaller than the number of files
2. unlimited number of files under any directory
Philosophy:
metadata for directories and files should be separated
Design:
Store directories in a normal map
hopefully all directories fit in memory
efficient to move/rename/list_directories
Log directory changes to an append-only log file
Store files in a sorted string table in <dir_id/filename> format
efficient to list_files, just a simple iterator
efficient to locate files, binary search
Testing:
1. starting server, "weed server -filer=true"
2. posting files to different folders
curl -F "filename=@design.txt" "http://localhost:8888/sources/"
curl -F "filename=@design.txt" "http://localhost:8888/design/"
curl -F "filename=@directory.go" "http://localhost:8888/sources/weed/go/"
curl -F "filename=@directory.go" "http://localhost:8888/sources/testing/go/"
curl -F "filename=@filer.go" "http://localhost:8888/sources/weed/go/"
curl -F "filename=@filer_in_leveldb.go" "http://localhost:8888/sources/weed/go/"
curl "http://localhost:8888/?pretty=y"
curl "http://localhost:8888/sources/weed/go/?pretty=y"
curl "http://localhost:8888/sources/weed/go/?pretty=y"


@@ -1,15 +0,0 @@
package embedded_filer
import (
"github.com/chrislusf/seaweedfs/weed/filer"
)
type DirectoryManager interface {
FindDirectory(dirPath string) (DirectoryId, error)
ListDirectories(dirPath string) (dirs []filer.DirectoryName, err error)
MakeDirectory(currentDirPath string, dirName string) (DirectoryId, error)
MoveUnderDirectory(oldDirPath string, newParentDirPath string) error
DeleteDirectory(dirPath string) error
//functions used by FUSE
FindDirectoryById(DirectoryId, error)
}


@@ -1,312 +0,0 @@
package embedded_filer
import (
"bufio"
"fmt"
"io"
"os"
"path/filepath"
"strconv"
"strings"
"sync"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/util"
)
var writeLock sync.Mutex //serialize changes to dir.log
type DirectoryId int32
type DirectoryEntryInMap struct {
sync.Mutex
Name string
Parent *DirectoryEntryInMap
subDirectories map[string]*DirectoryEntryInMap
Id DirectoryId
}
func (de *DirectoryEntryInMap) getChild(dirName string) (*DirectoryEntryInMap, bool) {
de.Lock()
defer de.Unlock()
child, ok := de.subDirectories[dirName]
return child, ok
}
func (de *DirectoryEntryInMap) addChild(dirName string, child *DirectoryEntryInMap) {
de.Lock()
defer de.Unlock()
de.subDirectories[dirName] = child
}
func (de *DirectoryEntryInMap) removeChild(dirName string) {
de.Lock()
defer de.Unlock()
delete(de.subDirectories, dirName)
}
func (de *DirectoryEntryInMap) hasChildren() bool {
de.Lock()
defer de.Unlock()
return len(de.subDirectories) > 0
}
func (de *DirectoryEntryInMap) children() (dirNames []filer.DirectoryName) {
de.Lock()
defer de.Unlock()
for k, _ := range de.subDirectories {
dirNames = append(dirNames, filer.DirectoryName(k))
}
return dirNames
}
type DirectoryManagerInMap struct {
Root *DirectoryEntryInMap
max DirectoryId
logFile *os.File
isLoading bool
}
func (dm *DirectoryManagerInMap) newDirectoryEntryInMap(parent *DirectoryEntryInMap, name string) (d *DirectoryEntryInMap, err error) {
d = &DirectoryEntryInMap{Name: name, Parent: parent, subDirectories: make(map[string]*DirectoryEntryInMap)}
var parts []string
for p := d; p != nil && p.Name != ""; p = p.Parent {
parts = append(parts, p.Name)
}
n := len(parts)
if n <= 0 {
return nil, fmt.Errorf("Failed to create folder %s/%s", parent.Name, name)
}
for i := 0; i < n/2; i++ {
parts[i], parts[n-1-i] = parts[n-1-i], parts[i]
}
dm.max++
d.Id = dm.max
dm.log("add", "/"+strings.Join(parts, "/"), strconv.Itoa(int(d.Id)))
return d, nil
}
func (dm *DirectoryManagerInMap) log(words ...string) {
if !dm.isLoading {
dm.logFile.WriteString(strings.Join(words, "\t") + "\n")
}
}
func NewDirectoryManagerInMap(dirLogFile string) (dm *DirectoryManagerInMap, err error) {
dm = &DirectoryManagerInMap{}
//dm.Root do not use newDirectoryEntryInMap, since dm.max will be changed
dm.Root = &DirectoryEntryInMap{subDirectories: make(map[string]*DirectoryEntryInMap)}
if dm.logFile, err = os.OpenFile(dirLogFile, os.O_RDWR|os.O_CREATE, 0644); err != nil {
return nil, fmt.Errorf("cannot write directory log file %s: %v", dirLogFile, err)
}
return dm, dm.load()
}
func (dm *DirectoryManagerInMap) processEachLine(line string) error {
if strings.HasPrefix(line, "#") {
return nil
}
if line == "" {
return nil
}
parts := strings.Split(line, "\t")
if len(parts) == 0 {
return nil
}
switch parts[0] {
case "add":
v, pe := strconv.Atoi(parts[2])
if pe != nil {
return pe
}
if e := dm.loadDirectory(parts[1], DirectoryId(v)); e != nil {
return e
}
case "mov":
newName := ""
if len(parts) >= 4 {
newName = parts[3]
}
if e := dm.MoveUnderDirectory(parts[1], parts[2], newName); e != nil {
return e
}
case "del":
if e := dm.DeleteDirectory(parts[1]); e != nil {
return e
}
default:
fmt.Printf("line %s has %s!\n", line, parts[0])
return nil
}
return nil
}
func (dm *DirectoryManagerInMap) load() error {
dm.max = 0
lines := bufio.NewReader(dm.logFile)
dm.isLoading = true
defer func() { dm.isLoading = false }()
for {
line, err := util.Readln(lines)
if err != nil && err != io.EOF {
return err
}
if pe := dm.processEachLine(string(line)); pe != nil {
return pe
}
if err == io.EOF {
return nil
}
}
}
func (dm *DirectoryManagerInMap) findDirectory(dirPath string) (*DirectoryEntryInMap, error) {
if dirPath == "" {
return dm.Root, nil
}
dirPath = CleanFilePath(dirPath)
if dirPath == "/" {
return dm.Root, nil
}
parts := strings.Split(dirPath, "/")
dir := dm.Root
for i := 1; i < len(parts); i++ {
if sub, ok := dir.getChild(parts[i]); ok {
dir = sub
} else {
return dm.Root, filer.ErrNotFound
}
}
return dir, nil
}
func (dm *DirectoryManagerInMap) findDirectoryId(dirPath string) (DirectoryId, error) {
d, e := dm.findDirectory(dirPath)
if e == nil {
return d.Id, nil
}
return dm.Root.Id, e
}
func (dm *DirectoryManagerInMap) loadDirectory(dirPath string, dirId DirectoryId) error {
dirPath = CleanFilePath(dirPath)
if dirPath == "/" {
return nil
}
parts := strings.Split(dirPath, "/")
dir := dm.Root
for i := 1; i < len(parts); i++ {
sub, ok := dir.getChild(parts[i])
if !ok {
writeLock.Lock()
if sub2, createdByOtherThread := dir.getChild(parts[i]); createdByOtherThread {
sub = sub2
} else {
if i != len(parts)-1 {
writeLock.Unlock()
return fmt.Errorf("%s should be created after parent %s", dirPath, parts[i])
}
var err error
sub, err = dm.newDirectoryEntryInMap(dir, parts[i])
if err != nil {
writeLock.Unlock()
return err
}
if sub.Id != dirId {
writeLock.Unlock()
// the dir.log should be the same order as in-memory directory id
return fmt.Errorf("%s should be have id %v instead of %v", dirPath, sub.Id, dirId)
}
dir.addChild(parts[i], sub)
}
writeLock.Unlock()
}
dir = sub
}
return nil
}
func (dm *DirectoryManagerInMap) makeDirectory(dirPath string) (dir *DirectoryEntryInMap, created bool) {
dirPath = CleanFilePath(dirPath)
if dirPath == "/" {
return dm.Root, false
}
parts := strings.Split(dirPath, "/")
dir = dm.Root
for i := 1; i < len(parts); i++ {
sub, ok := dir.getChild(parts[i])
if !ok {
writeLock.Lock()
if sub2, createdByOtherThread := dir.getChild(parts[i]); createdByOtherThread {
sub = sub2
} else {
var err error
sub, err = dm.newDirectoryEntryInMap(dir, parts[i])
if err != nil {
writeLock.Unlock()
return nil, false
}
dir.addChild(parts[i], sub)
created = true
}
writeLock.Unlock()
}
dir = sub
}
return dir, created
}
func (dm *DirectoryManagerInMap) MakeDirectory(dirPath string) (DirectoryId, error) {
dir, _ := dm.makeDirectory(dirPath)
return dir.Id, nil
}
func (dm *DirectoryManagerInMap) MoveUnderDirectory(oldDirPath string, newParentDirPath string, newName string) error {
writeLock.Lock()
defer writeLock.Unlock()
oldDir, oe := dm.findDirectory(oldDirPath)
if oe != nil {
return oe
}
parentDir, pe := dm.findDirectory(newParentDirPath)
if pe != nil {
return pe
}
dm.log("mov", oldDirPath, newParentDirPath, newName)
oldDir.Parent.removeChild(oldDir.Name)
if newName == "" {
newName = oldDir.Name
}
parentDir.addChild(newName, oldDir)
oldDir.Name = newName
oldDir.Parent = parentDir
return nil
}
func (dm *DirectoryManagerInMap) ListDirectories(dirPath string) (dirNames []filer.DirectoryName, err error) {
d, e := dm.findDirectory(dirPath)
if e != nil {
return dirNames, e
}
return d.children(), nil
}
func (dm *DirectoryManagerInMap) DeleteDirectory(dirPath string) error {
writeLock.Lock()
defer writeLock.Unlock()
if dirPath == "/" {
return fmt.Errorf("Can not delete %s", dirPath)
}
d, e := dm.findDirectory(dirPath)
if e != nil {
return e
}
if d.hasChildren() {
return fmt.Errorf("dir %s still has sub directories", dirPath)
}
d.Parent.removeChild(d.Name)
d.Parent = nil
dm.log("del", dirPath)
return nil
}
func CleanFilePath(fp string) string {
ret := filepath.Clean(fp)
if os.PathSeparator == '\\' {
return strings.Replace(ret, "\\", "/", -1)
}
return ret
}
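A brief hedged illustration (not from the commit) of what CleanFilePath normalizes:

// On Windows (os.PathSeparator == '\\'), filepath.Clean keeps backslashes,
// so CleanFilePath(`\a\b\..\c`) returns "/a/c" after the replacement.
// On POSIX systems only the Clean step applies: CleanFilePath("/a/b/../c") == "/a/c".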


@@ -1,86 +0,0 @@
package embedded_filer
import (
"os"
"strings"
"testing"
)
func TestDirectory(t *testing.T) {
dm, _ := NewDirectoryManagerInMap("/tmp/dir.log")
defer func() {
if true {
os.Remove("/tmp/dir.log")
}
}()
dm.MakeDirectory("/a/b/c")
dm.MakeDirectory("/a/b/d")
dm.MakeDirectory("/a/b/e")
dm.MakeDirectory("/a/b/e/f")
dm.MakeDirectory("/a/b/e/f/g")
dm.MoveUnderDirectory("/a/b/e/f/g", "/a/b", "t")
if _, err := dm.findDirectoryId("/a/b/e/f/g"); err == nil {
t.Fatal("/a/b/e/f/g should not exist any more after moving")
}
if _, err := dm.findDirectoryId("/a/b/t"); err != nil {
t.Fatal("/a/b/t should exist after moving")
}
if _, err := dm.findDirectoryId("/a/b/g"); err == nil {
t.Fatal("/a/b/g should not exist after moving")
}
dm.MoveUnderDirectory("/a/b/e/f", "/a/b", "")
if _, err := dm.findDirectoryId("/a/b/f"); err != nil {
t.Fatal("/a/b/g should not exist after moving")
}
dm.MakeDirectory("/a/b/g/h/i")
dm.DeleteDirectory("/a/b/e/f")
dm.DeleteDirectory("/a/b/e")
dirNames, _ := dm.ListDirectories("/a/b/e")
for _, v := range dirNames {
println("sub1 dir:", v)
}
dm.logFile.Close()
var path []string
printTree(dm.Root, path)
dm2, e := NewDirectoryManagerInMap("/tmp/dir.log")
if e != nil {
println("load error", e.Error())
}
if !compare(dm.Root, dm2.Root) {
t.Fatal("restored dir not the same!")
}
printTree(dm2.Root, path)
}
func printTree(node *DirectoryEntryInMap, path []string) {
println(strings.Join(path, "/") + "/" + node.Name)
path = append(path, node.Name)
for _, v := range node.subDirectories {
printTree(v, path)
}
}
func compare(root1 *DirectoryEntryInMap, root2 *DirectoryEntryInMap) bool {
if len(root1.subDirectories) != len(root2.subDirectories) {
return false
}
if root1.Name != root2.Name {
return false
}
if root1.Id != root2.Id {
return false
}
if !(root1.Parent == nil && root2.Parent == nil) {
if root1.Parent.Id != root2.Parent.Id {
return false
}
}
for k, v := range root1.subDirectories {
if !compare(v, root2.subDirectories[k]) {
return false
}
}
return true
}


@@ -1,156 +0,0 @@
package embedded_filer
import (
"errors"
"fmt"
"path/filepath"
"strings"
"sync"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/operation"
)
type FilerEmbedded struct {
master string
directories *DirectoryManagerInMap
files *FileListInLevelDb
mvMutex sync.Mutex
}
func NewFilerEmbedded(master string, dir string) (filer *FilerEmbedded, err error) {
dm, de := NewDirectoryManagerInMap(filepath.Join(dir, "dir.log"))
if de != nil {
return nil, de
}
fl, fe := NewFileListInLevelDb(dir)
if fe != nil {
return nil, fe
}
filer = &FilerEmbedded{
master: master,
directories: dm,
files: fl,
}
return
}
func (filer *FilerEmbedded) CreateFile(filePath string, fid string) (err error) {
dir, file := filepath.Split(filePath)
dirId, e := filer.directories.MakeDirectory(dir)
if e != nil {
return e
}
return filer.files.CreateFile(dirId, file, fid)
}
func (filer *FilerEmbedded) FindFile(filePath string) (fid string, err error) {
dir, file := filepath.Split(filePath)
return filer.findFileEntry(dir, file)
}
func (filer *FilerEmbedded) findFileEntry(parentPath string, fileName string) (fid string, err error) {
dirId, e := filer.directories.findDirectoryId(parentPath)
if e != nil {
return "", e
}
return filer.files.FindFile(dirId, fileName)
}
func (filer *FilerEmbedded) LookupDirectoryEntry(dirPath string, name string) (found bool, fileId string, err error) {
if _, err = filer.directories.findDirectory(filepath.Join(dirPath, name)); err == nil {
return true, "", nil
}
if fileId, err = filer.findFileEntry(dirPath, name); err == nil {
return true, fileId, nil
}
return false, "", err
}
func (filer *FilerEmbedded) ListDirectories(dirPath string) (dirs []filer.DirectoryName, err error) {
return filer.directories.ListDirectories(dirPath)
}
func (filer *FilerEmbedded) ListFiles(dirPath string, lastFileName string, limit int) (files []filer.FileEntry, err error) {
dirId, e := filer.directories.findDirectoryId(dirPath)
if e != nil {
return nil, e
}
return filer.files.ListFiles(dirId, lastFileName, limit), nil
}
func (filer *FilerEmbedded) DeleteDirectory(dirPath string, recursive bool) (err error) {
dirId, e := filer.directories.findDirectoryId(dirPath)
if e != nil {
return e
}
if sub_dirs, sub_err := filer.directories.ListDirectories(dirPath); sub_err == nil {
if len(sub_dirs) > 0 && !recursive {
return fmt.Errorf("Fail to delete directory %s: %d sub directories found!", dirPath, len(sub_dirs))
}
for _, sub := range sub_dirs {
if delete_sub_err := filer.DeleteDirectory(filepath.Join(dirPath, string(sub)), recursive); delete_sub_err != nil {
return delete_sub_err
}
}
}
list := filer.files.ListFiles(dirId, "", 100)
if len(list) != 0 && !recursive {
return fmt.Errorf("Fail to delete non-empty directory %s!", dirPath)
}
for {
if len(list) == 0 {
return filer.directories.DeleteDirectory(dirPath)
}
var fids []string
for _, fileEntry := range list {
fids = append(fids, string(fileEntry.Id))
}
if result_list, delete_file_err := operation.DeleteFiles(filer.master, fids); delete_file_err != nil {
return delete_file_err
} else {
if len(result_list.Errors) > 0 {
return errors.New(strings.Join(result_list.Errors, "\n"))
}
}
lastFile := list[len(list)-1]
list = filer.files.ListFiles(dirId, lastFile.Name, 100)
}
}
func (filer *FilerEmbedded) DeleteFile(filePath string) (fid string, err error) {
dir, file := filepath.Split(filePath)
dirId, e := filer.directories.findDirectoryId(dir)
if e != nil {
return "", e
}
return filer.files.DeleteFile(dirId, file)
}
/*
Move a folder or a file, with 4 Use cases:
mv fromDir toNewDir
mv fromDir toOldDir
mv fromFile toDir
mv fromFile toFile
*/
func (filer *FilerEmbedded) Move(fromPath string, toPath string) error {
filer.mvMutex.Lock()
defer filer.mvMutex.Unlock()
if _, dir_err := filer.directories.findDirectoryId(fromPath); dir_err == nil {
if _, err := filer.directories.findDirectoryId(toPath); err == nil {
// move folder under an existing folder
return filer.directories.MoveUnderDirectory(fromPath, toPath, "")
}
// move folder to a new folder
return filer.directories.MoveUnderDirectory(fromPath, filepath.Dir(toPath), filepath.Base(toPath))
}
if fid, file_err := filer.DeleteFile(fromPath); file_err == nil {
if _, err := filer.directories.findDirectoryId(toPath); err == nil {
// move file under an existing folder
return filer.CreateFile(filepath.Join(toPath, filepath.Base(fromPath)), fid)
}
// move to a folder with new name
return filer.CreateFile(toPath, fid)
}
return fmt.Errorf("File %s is not found!", fromPath)
}


@@ -1,87 +0,0 @@
package embedded_filer
import (
"bytes"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/util"
)
/*
The entry in level db has this format:
key: genKey(dirId, fileName)
value: []byte(fid)
And genKey(dirId, fileName) use first 4 bytes to store dirId, and rest for fileName
*/
type FileListInLevelDb struct {
db *leveldb.DB
}
func NewFileListInLevelDb(dir string) (fl *FileListInLevelDb, err error) {
fl = &FileListInLevelDb{}
if fl.db, err = leveldb.OpenFile(dir, nil); err != nil {
return
}
return
}
func genKey(dirId DirectoryId, fileName string) []byte {
ret := make([]byte, 0, 4+len(fileName))
for i := 3; i >= 0; i-- {
ret = append(ret, byte(dirId>>(uint(i)*8)))
}
ret = append(ret, []byte(fileName)...)
return ret
}
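For illustration, a hedged sketch (not part of the commit) of the key layout genKey produces and why directory listing becomes a prefix scan:

// genKey(DirectoryId(1), "a.txt") yields
//   []byte{0x00, 0x00, 0x00, 0x01, 'a', '.', 't', 'x', 't'}
// i.e. a big-endian 4-byte directory id followed by the raw file name.
// All files of one directory share the same 4-byte prefix, so ListFiles below
// can iterate a contiguous LevelDB range starting at genKey(dirId, lastFileName).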
func (fl *FileListInLevelDb) CreateFile(dirId DirectoryId, fileName string, fid string) (err error) {
glog.V(4).Infoln("directory", dirId, "fileName", fileName, "fid", fid)
return fl.db.Put(genKey(dirId, fileName), []byte(fid), nil)
}
func (fl *FileListInLevelDb) DeleteFile(dirId DirectoryId, fileName string) (fid string, err error) {
if fid, err = fl.FindFile(dirId, fileName); err != nil {
if err == leveldb.ErrNotFound {
return "", nil
}
return
}
err = fl.db.Delete(genKey(dirId, fileName), nil)
return fid, err
}
func (fl *FileListInLevelDb) FindFile(dirId DirectoryId, fileName string) (fid string, err error) {
data, e := fl.db.Get(genKey(dirId, fileName), nil)
if e == leveldb.ErrNotFound {
return "", filer.ErrNotFound
} else if e != nil {
return "", e
}
return string(data), nil
}
func (fl *FileListInLevelDb) ListFiles(dirId DirectoryId, lastFileName string, limit int) (files []filer.FileEntry) {
glog.V(4).Infoln("directory", dirId, "lastFileName", lastFileName, "limit", limit)
dirKey := genKey(dirId, "")
iter := fl.db.NewIterator(&util.Range{Start: genKey(dirId, lastFileName)}, nil)
limitCounter := 0
for iter.Next() {
key := iter.Key()
if !bytes.HasPrefix(key, dirKey) {
break
}
fileName := string(key[len(dirKey):])
if fileName == lastFileName {
continue
}
limitCounter++
if limit > 0 {
if limitCounter > limit {
break
}
}
files = append(files, filer.FileEntry{Name: fileName, Id: filer.FileId(string(iter.Value()))})
}
iter.Release()
return
}


@@ -1,29 +0,0 @@
package filer
import (
"errors"
)
type FileId string //file id in SeaweedFS
type FileEntry struct {
Name string `json:"name,omitempty"` //file name without path
Id FileId `json:"fid,omitempty"`
}
type DirectoryName string
type Filer interface {
CreateFile(fullFileName string, fid string) (err error)
FindFile(fullFileName string) (fid string, err error)
DeleteFile(fullFileName string) (fid string, err error)
//Optional functions. embedded filer support these
ListDirectories(dirPath string) (dirs []DirectoryName, err error)
ListFiles(dirPath string, lastFileName string, limit int) (files []FileEntry, err error)
DeleteDirectory(dirPath string, recursive bool) (err error)
Move(fromPath string, toPath string) (err error)
LookupDirectoryEntry(dirPath string, name string) (found bool, fileId string, err error)
}
var ErrNotFound = errors.New("filer: no entry is found in filer store")


@@ -1,66 +0,0 @@
package flat_namespace
import (
"errors"
"github.com/chrislusf/seaweedfs/weed/filer"
"path/filepath"
)
type FlatNamespaceFiler struct {
master string
store FlatNamespaceStore
}
var (
ErrNotImplemented = errors.New("Not Implemented for flat namespace meta data store")
)
func NewFlatNamespaceFiler(master string, store FlatNamespaceStore) *FlatNamespaceFiler {
return &FlatNamespaceFiler{
master: master,
store: store,
}
}
func (filer *FlatNamespaceFiler) CreateFile(fullFileName string, fid string) (err error) {
return filer.store.Put(fullFileName, fid)
}
func (filer *FlatNamespaceFiler) FindFile(fullFileName string) (fid string, err error) {
return filer.store.Get(fullFileName)
}
func (filer *FlatNamespaceFiler) LookupDirectoryEntry(dirPath string, name string) (found bool, fileId string, err error) {
if fileId, err = filer.FindFile(filepath.Join(dirPath, name)); err == nil {
return true, fileId, nil
}
return false, "", err
}
func (filer *FlatNamespaceFiler) ListDirectories(dirPath string) (dirs []filer.DirectoryName, err error) {
return nil, ErrNotImplemented
}
func (filer *FlatNamespaceFiler) ListFiles(dirPath string, lastFileName string, limit int) (files []filer.FileEntry, err error) {
return nil, ErrNotImplemented
}
func (filer *FlatNamespaceFiler) DeleteDirectory(dirPath string, recursive bool) (err error) {
return ErrNotImplemented
}
func (filer *FlatNamespaceFiler) DeleteFile(fullFileName string) (fid string, err error) {
fid, err = filer.FindFile(fullFileName)
if err != nil {
return "", err
}
err = filer.store.Delete(fullFileName)
if err != nil {
return "", err
}
return fid, nil
//return filer.store.Delete(fullFileName)
//are you kidding me!!!!
}
func (filer *FlatNamespaceFiler) Move(fromPath string, toPath string) error {
return ErrNotImplemented
}


@@ -1,9 +0,0 @@
package flat_namespace
import ()
type FlatNamespaceStore interface {
Put(fullFileName string, fid string) (err error)
Get(fullFileName string) (fid string, err error)
Delete(fullFileName string) (err error)
}


@@ -1,67 +0,0 @@
# MySQL filer mapping store
## Schema format
Basically, uriPath and fid are the key elements stored in MySQL. For optimization and typical usage,
adding an integer primary key plus createTime, updateTime, and status fields is worthwhile.
Of course, you can freely customize the schema for your specific circumstances.
<pre><code>
CREATE TABLE IF NOT EXISTS `filer_mapping` (
`id` bigint(20) NOT NULL AUTO_INCREMENT,
`uriPath` char(256) NOT NULL DEFAULT "" COMMENT 'http uriPath',
`fid` char(36) NOT NULL DEFAULT "" COMMENT 'seaweedfs fid',
`createTime` int(10) NOT NULL DEFAULT 0 COMMENT 'createdTime in unix timestamp',
`updateTime` int(10) NOT NULL DEFAULT 0 COMMENT 'updatedTime in unix timestamp',
`remark` varchar(20) NOT NULL DEFAULT "" COMMENT 'reserved field',
`status` tinyint(2) DEFAULT '1' COMMENT 'resource status',
PRIMARY KEY (`id`),
UNIQUE KEY `index_uriPath` (`uriPath`)
) DEFAULT CHARSET=utf8;
</code></pre>
MySQL's config params are not added to the weed command options as the other stores (redis, cassandra) are. Instead,
we created a config file (JSON format) for them. TOML, YAML or XML would also be OK, but TOML and YAML need a
third-party package, while XML is a little complex.
The sample config file's content is below:
<pre><code>
{
"mysql": [
{
"User": "root",
"Password": "root",
"HostName": "127.0.0.1",
"Port": 3306,
"DataBase": "seaweedfs"
},
{
"User": "root",
"Password": "root",
"HostName": "127.0.0.2",
"Port": 3306,
"DataBase": "seaweedfs"
}
],
"IsSharding":true,
"ShardCount":1024
}
</code></pre>
The "mysql" field in above conf file is an array which include all mysql instances you prepared to store sharding data.
1. If one mysql instance is enough, just keep one instance in "mysql" field.
2. If table sharding at a specific mysql instance is needed , mark "IsSharding" field with true and specify total table sharding numbers using "ShardCount" field.
3. If the mysql service could be auto scaled transparently in your environment, just config one mysql instance(usually it's a frondend proxy or VIP),and mark "IsSharding" with false value
4. If you prepare more than one mysql instance and have no plan to use table sharding for any instance(mark isSharding with false), instance sharding will still be done implicitly
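For illustration, here is a minimal hedged sketch of the crc32-based placement the sharding implies (the instance count and path are made-up values; the table name format follows the filer_mapping_%04d scheme used by the store code):
<pre><code>
package main

import (
	"fmt"
	"hash/crc32"
)

func main() {
	const instanceCount = 2 // length of the "mysql" array above
	const shardCount = 1024 // the "ShardCount" value above

	path := "/prod-bucket/some-file.pdf" // hypothetical full file path
	h := crc32.ChecksumIEEE([]byte(path))

	instance := int(h) % instanceCount // which mysql instance holds the row
	table := fmt.Sprintf("filer_mapping_%04d", int(h)%shardCount)
	fmt.Println(instance, table)
}
</code></pre>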


@@ -1,274 +0,0 @@
package mysql_store
import (
"database/sql"
"fmt"
"hash/crc32"
"sync"
"time"
"github.com/chrislusf/seaweedfs/weed/filer"
_ "github.com/go-sql-driver/mysql"
)
const (
sqlUrl = "%s:%s@tcp(%s:%d)/%s?charset=utf8"
default_maxIdleConnections = 100
default_maxOpenConnections = 50
default_maxTableNums = 1024
tableName = "filer_mapping"
)
var (
_init_db sync.Once
_db_connections []*sql.DB
)
type MySqlConf struct {
User string
Password string
HostName string
Port int
DataBase string
MaxIdleConnections int
MaxOpenConnections int
}
type ShardingConf struct {
IsSharding bool `json:"isSharding"`
ShardCount int `json:"shardCount"`
}
type MySqlStore struct {
dbs []*sql.DB
isSharding bool
shardCount int
}
func getDbConnection(confs []MySqlConf) []*sql.DB {
_init_db.Do(func() {
for _, conf := range confs {
sqlUrl := fmt.Sprintf(sqlUrl, conf.User, conf.Password, conf.HostName, conf.Port, conf.DataBase)
var dbErr error
_db_connection, dbErr := sql.Open("mysql", sqlUrl)
if dbErr != nil {
_db_connection.Close()
_db_connection = nil
panic(dbErr)
}
var maxIdleConnections, maxOpenConnections int
if conf.MaxIdleConnections != 0 {
maxIdleConnections = conf.MaxIdleConnections
} else {
maxIdleConnections = default_maxIdleConnections
}
if conf.MaxOpenConnections != 0 {
maxOpenConnections = conf.MaxOpenConnections
} else {
maxOpenConnections = default_maxOpenConnections
}
_db_connection.SetMaxIdleConns(maxIdleConnections)
_db_connection.SetMaxOpenConns(maxOpenConnections)
_db_connections = append(_db_connections, _db_connection)
}
})
return _db_connections
}
func NewMysqlStore(confs []MySqlConf, isSharding bool, shardCount int) *MySqlStore {
ms := &MySqlStore{
dbs: getDbConnection(confs),
isSharding: isSharding,
shardCount: shardCount,
}
for _, db := range ms.dbs {
if !isSharding {
ms.shardCount = 1
} else {
if ms.shardCount == 0 {
ms.shardCount = default_maxTableNums
}
}
for i := 0; i < ms.shardCount; i++ {
if err := ms.createTables(db, tableName, i); err != nil {
fmt.Printf("create table failed %v", err)
}
}
}
return ms
}
func (s *MySqlStore) hash(fullFileName string) (instance_offset, table_postfix int) {
hash_value := crc32.ChecksumIEEE([]byte(fullFileName))
instance_offset = int(hash_value) % len(s.dbs)
table_postfix = int(hash_value) % s.shardCount
return
}
func (s *MySqlStore) parseFilerMappingInfo(path string) (instanceId int, tableFullName string, err error) {
instance_offset, table_postfix := s.hash(path)
instanceId = instance_offset
if s.isSharding {
tableFullName = fmt.Sprintf("%s_%04d", tableName, table_postfix)
} else {
tableFullName = tableName
}
return
}
func (s *MySqlStore) Get(fullFilePath string) (fid string, err error) {
instance_offset, tableFullName, err := s.parseFilerMappingInfo(fullFilePath)
if err != nil {
return "", fmt.Errorf("MySqlStore Get operation can not parse file path %s: err is %v", fullFilePath, err)
}
fid, err = s.query(fullFilePath, s.dbs[instance_offset], tableFullName)
if err == sql.ErrNoRows {
//Not found
err = filer.ErrNotFound
}
return fid, err
}
func (s *MySqlStore) Put(fullFilePath string, fid string) (err error) {
var tableFullName string
instance_offset, tableFullName, err := s.parseFilerMappingInfo(fullFilePath)
if err != nil {
return fmt.Errorf("MySqlStore Put operation can not parse file path %s: err is %v", fullFilePath, err)
}
var old_fid string
if old_fid, err = s.query(fullFilePath, s.dbs[instance_offset], tableFullName); err != nil && err != sql.ErrNoRows {
return fmt.Errorf("MySqlStore Put operation failed when querying path %s: err is %v", fullFilePath, err)
} else {
if len(old_fid) == 0 {
err = s.insert(fullFilePath, fid, s.dbs[instance_offset], tableFullName)
if err != nil {
err = fmt.Errorf("MySqlStore Put operation failed when inserting path %s with fid %s : err is %v", fullFilePath, fid, err)
}
} else {
err = s.update(fullFilePath, fid, s.dbs[instance_offset], tableFullName)
if err != nil {
err = fmt.Errorf("MySqlStore Put operation failed when updating path %s with fid %s : err is %v", fullFilePath, fid, err)
}
}
}
return
}
func (s *MySqlStore) Delete(fullFilePath string) (err error) {
var fid string
instance_offset, tableFullName, err := s.parseFilerMappingInfo(fullFilePath)
if err != nil {
return fmt.Errorf("MySqlStore Delete operation can not parse file path %s: err is %v", fullFilePath, err)
}
if fid, err = s.query(fullFilePath, s.dbs[instance_offset], tableFullName); err != nil {
return fmt.Errorf("MySqlStore Delete operation failed when querying path %s: err is %v", fullFilePath, err)
} else if fid == "" {
return nil
}
if err = s.delete(fullFilePath, s.dbs[instance_offset], tableFullName); err != nil {
return fmt.Errorf("MySqlStore Delete operation failed when deleting path %s: err is %v", fullFilePath, err)
} else {
return nil
}
}
func (s *MySqlStore) Close() {
for _, db := range s.dbs {
db.Close()
}
}
var createTable = `
CREATE TABLE IF NOT EXISTS %s (
id bigint(20) NOT NULL AUTO_INCREMENT,
uriPath char(255) NOT NULL DEFAULT "" COMMENT 'http uriPath',
fid char(36) NOT NULL DEFAULT "" COMMENT 'seaweedfs fid',
createTime int(10) NOT NULL DEFAULT 0 COMMENT 'createdTime in unix timestamp',
updateTime int(10) NOT NULL DEFAULT 0 COMMENT 'updatedTime in unix timestamp',
remark varchar(20) NOT NULL DEFAULT "" COMMENT 'reserved field',
status tinyint(2) DEFAULT '1' COMMENT 'resource status',
PRIMARY KEY (id),
UNIQUE KEY index_uriPath (uriPath)
) DEFAULT CHARSET=utf8;
`
func (s *MySqlStore) createTables(db *sql.DB, tableName string, postfix int) error {
var realTableName string
if s.isSharding {
realTableName = fmt.Sprintf("%s_%04d", tableName, postfix)
} else {
realTableName = tableName
}
stmt, err := db.Prepare(fmt.Sprintf(createTable, realTableName))
if err != nil {
return err
}
defer stmt.Close()
_, err = stmt.Exec()
if err != nil {
return err
}
return nil
}
func (s *MySqlStore) query(uriPath string, db *sql.DB, tableName string) (string, error) {
sqlStatement := "SELECT fid FROM %s WHERE uriPath=?"
row := db.QueryRow(fmt.Sprintf(sqlStatement, tableName), uriPath)
var fid string
err := row.Scan(&fid)
if err != nil {
return "", err
}
return fid, nil
}
func (s *MySqlStore) update(uriPath string, fid string, db *sql.DB, tableName string) error {
sqlStatement := "UPDATE %s SET fid=?, updateTime=? WHERE uriPath=?"
res, err := db.Exec(fmt.Sprintf(sqlStatement, tableName), fid, time.Now().Unix(), uriPath)
if err != nil {
return err
}
_, err = res.RowsAffected()
if err != nil {
return err
}
return nil
}
func (s *MySqlStore) insert(uriPath string, fid string, db *sql.DB, tableName string) error {
sqlStatement := "INSERT INTO %s (uriPath,fid,createTime) VALUES(?,?,?)"
res, err := db.Exec(fmt.Sprintf(sqlStatement, tableName), uriPath, fid, time.Now().Unix())
if err != nil {
return err
}
_, err = res.RowsAffected()
if err != nil {
return err
}
return nil
}
func (s *MySqlStore) delete(uriPath string, db *sql.DB, tableName string) error {
sqlStatement := "DELETE FROM %s WHERE uriPath=?"
res, err := db.Exec(fmt.Sprintf(sqlStatement, tableName), uriPath)
if err != nil {
return err
}
_, err = res.RowsAffected()
if err != nil {
return err
}
return nil
}


@@ -1,30 +0,0 @@
package mysql_store
import (
"encoding/json"
"hash/crc32"
"testing"
)
func TestGenerateMysqlConf(t *testing.T) {
var conf []MySqlConf
conf = append(conf, MySqlConf{
User: "root",
Password: "root",
HostName: "localhost",
Port: 3306,
DataBase: "seaweedfs",
})
body, err := json.Marshal(conf)
if err != nil {
t.Errorf("json encoding err %s", err.Error())
}
t.Logf("json output is %s", string(body))
}
func TestCRC32FullPathName(t *testing.T) {
fullPathName := "/prod-bucket/law632191483895612493300-signed.pdf"
hash_value := crc32.ChecksumIEEE([]byte(fullPathName))
table_postfix := int(hash_value) % 1024
t.Logf("table postfix %d", table_postfix)
}


@@ -1,456 +0,0 @@
package postgres_store
import (
"database/sql"
"fmt"
"path/filepath"
"time"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
_ "github.com/lib/pq"
_ "path/filepath"
"strings"
)
type DirectoryId int32
func databaseExists(db *sql.DB, databaseName string) (bool, error) {
sqlStatement := "SELECT datname from pg_database WHERE datname='%s'"
row := db.QueryRow(fmt.Sprintf(sqlStatement, databaseName))
var dbName string
err := row.Scan(&dbName)
if err != nil {
if err == sql.ErrNoRows {
return false, nil
}
return false, err
}
return true, nil
}
func createDatabase(db *sql.DB, databaseName string) error {
sqlStatement := "CREATE DATABASE %s ENCODING='UTF8'"
_, err := db.Exec(fmt.Sprintf(sqlStatement, databaseName))
return err
}
func getDbConnection(conf PostgresConf) *sql.DB {
_init_db.Do(func() {
sqlUrl := fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=%s sslmode=%s connect_timeout=30", conf.HostName, conf.Port, conf.User, conf.Password, "postgres", conf.SslMode)
glog.V(3).Infoln("Opening postgres master database")
var dbErr error
_db_connection, dbErr := sql.Open("postgres", sqlUrl)
if dbErr != nil {
_db_connection.Close()
_db_connection = nil
panic(dbErr)
}
pingErr := _db_connection.Ping()
if pingErr != nil {
_db_connection.Close()
_db_connection = nil
panic(pingErr)
}
glog.V(3).Infoln("Checking to see if DB exists: ", conf.DataBase)
var existsErr error
dbExists, existsErr := databaseExists(_db_connection, conf.DataBase)
if existsErr != nil {
_db_connection.Close()
_db_connection = nil
panic(existsErr)
}
if !dbExists {
glog.V(3).Infoln("Database doesn't exist. Attempting to create one: ", conf.DataBase)
createErr := createDatabase(_db_connection, conf.DataBase)
if createErr != nil {
_db_connection.Close()
_db_connection = nil
panic(createErr)
}
}
glog.V(3).Infoln("Closing master postgres database and opening configured database: ", conf.DataBase)
_db_connection.Close()
_db_connection = nil
sqlUrl = fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=%s sslmode=%s connect_timeout=30", conf.HostName, conf.Port, conf.User, conf.Password, conf.DataBase, conf.SslMode)
_db_connection, dbErr = sql.Open("postgres", sqlUrl)
if dbErr != nil {
_db_connection.Close()
_db_connection = nil
panic(dbErr)
}
pingErr = _db_connection.Ping()
if pingErr != nil {
_db_connection.Close()
_db_connection = nil
panic(pingErr)
}
maxIdleConnections, maxOpenConnections := default_maxIdleConnections, default_maxOpenConnections
if conf.MaxIdleConnections != 0 {
maxIdleConnections = conf.MaxIdleConnections
}
if conf.MaxOpenConnections != 0 {
maxOpenConnections = conf.MaxOpenConnections
}
_db_connection.SetMaxIdleConns(maxIdleConnections)
_db_connection.SetMaxOpenConns(maxOpenConnections)
})
return _db_connection
}
var createDirectoryTable = `
CREATE TABLE IF NOT EXISTS %s (
id BIGSERIAL NOT NULL,
directoryRoot VARCHAR(1024) NOT NULL DEFAULT '',
directoryName VARCHAR(1024) NOT NULL DEFAULT '',
CONSTRAINT unique_directory UNIQUE (directoryRoot, directoryName)
);
`
var createFileTable = `
CREATE TABLE IF NOT EXISTS %s (
id BIGSERIAL NOT NULL,
directoryPart VARCHAR(1024) NOT NULL DEFAULT '',
filePart VARCHAR(1024) NOT NULL DEFAULT '',
fid VARCHAR(36) NOT NULL DEFAULT '',
createTime BIGINT NOT NULL DEFAULT 0,
updateTime BIGINT NOT NULL DEFAULT 0,
remark VARCHAR(20) NOT NULL DEFAULT '',
status SMALLINT NOT NULL DEFAULT '1',
PRIMARY KEY (id),
CONSTRAINT %s_unique_file UNIQUE (directoryPart, filePart)
);
`
func (s *PostgresStore) createDirectoriesTable() error {
glog.V(3).Infoln("Creating postgres table if it doesn't exist: ", directoriesTableName)
sqlCreate := fmt.Sprintf(createDirectoryTable, directoriesTableName)
stmt, err := s.db.Prepare(sqlCreate)
if err != nil {
return err
}
defer stmt.Close()
_, err = stmt.Exec()
if err != nil {
return err
}
return nil
}
func (s *PostgresStore) createFilesTable() error {
glog.V(3).Infoln("Creating postgres table if it doesn't exist: ", filesTableName)
sqlCreate := fmt.Sprintf(createFileTable, filesTableName, filesTableName)
stmt, err := s.db.Prepare(sqlCreate)
if err != nil {
return err
}
defer stmt.Close()
_, err = stmt.Exec()
if err != nil {
return err
}
return nil
}
func (s *PostgresStore) query(uriPath string) (string, error) {
directoryPart, filePart := filepath.Split(uriPath)
sqlStatement := fmt.Sprintf("SELECT fid FROM %s WHERE directoryPart=$1 AND filePart=$2", filesTableName)
row := s.db.QueryRow(sqlStatement, directoryPart, filePart)
var fid string
err := row.Scan(&fid)
glog.V(3).Infof("Postgres query -- looking up path '%s' and found id '%s' ", uriPath, fid)
if err != nil {
return "", err
}
return fid, nil
}
func (s *PostgresStore) update(uriPath string, fid string) error {
directoryPart, filePart := filepath.Split(uriPath)
sqlStatement := fmt.Sprintf("UPDATE %s SET fid=$1, updateTime=$2 WHERE directoryPart=$3 AND filePart=$4", filesTableName)
glog.V(3).Infof("Postgres query -- updating path '%s' with id '%s'", uriPath, fid)
res, err := s.db.Exec(sqlStatement, fid, time.Now().Unix(), directoryPart, filePart)
if err != nil {
return err
}
_, err = res.RowsAffected()
if err != nil {
return err
}
return nil
}
func (s *PostgresStore) insert(uriPath string, fid string) error {
directoryPart, filePart := filepath.Split(uriPath)
existingId, _, _ := s.lookupDirectory(directoryPart)
if existingId == 0 {
s.recursiveInsertDirectory(directoryPart)
}
sqlStatement := fmt.Sprintf("INSERT INTO %s (directoryPart,filePart,fid,createTime) VALUES($1, $2, $3, $4)", filesTableName)
glog.V(3).Infof("Postgres query -- inserting path '%s' with id '%s'", uriPath, fid)
res, err := s.db.Exec(sqlStatement, directoryPart, filePart, fid, time.Now().Unix())
if err != nil {
return err
}
rows, err := res.RowsAffected()
if err != nil {
return err
}
if rows != 1 {
return fmt.Errorf("Postgres insert -- rows affected = %d. Expecting 1", rows)
}
return nil
}
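// For example, inserting "/a/b/c/file.jpg" when no directories exist yet
// walks dirPath "/a/b/c/" below and inserts "/a/", "/a/b/", then "/a/b/c/",
// so every ancestor directory row exists before the file row.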
func (s *PostgresStore) recursiveInsertDirectory(dirPath string) {
pathParts := strings.Split(dirPath, "/")
var workingPath string = "/"
for _, part := range pathParts {
if part == "" {
continue
}
workingPath += (part + "/")
existingId, _, _ := s.lookupDirectory(workingPath)
if existingId == 0 {
s.insertDirectory(workingPath)
}
}
}
func (s *PostgresStore) insertDirectory(dirPath string) {
pathParts := strings.Split(dirPath, "/")
directoryRoot := "/"
directoryName := ""
if len(pathParts) > 1 {
directoryRoot = strings.Join(pathParts[0:len(pathParts)-2], "/") + "/"
directoryName = strings.Join(pathParts[len(pathParts)-2:], "/")
} else if len(pathParts) == 1 {
directoryRoot = "/"
directoryName = pathParts[0] + "/"
}
sqlInsertDirectoryStatement := fmt.Sprintf("INSERT INTO %s (directoryroot, directoryname) "+
"SELECT $1, $2 WHERE NOT EXISTS ( SELECT id FROM %s WHERE directoryroot=$3 AND directoryname=$4 )",
directoriesTableName, directoriesTableName)
glog.V(4).Infof("Postgres query -- Inserting directory (if it doesn't exist) - root = %s, name = %s",
directoryRoot, directoryName)
_, err := s.db.Exec(sqlInsertDirectoryStatement, directoryRoot, directoryName, directoryRoot, directoryName)
if err != nil {
glog.V(0).Infof("Postgres query -- Error inserting directory - root = %s, name = %s: %s",
directoryRoot, directoryName, err)
}
}
func (s *PostgresStore) delete(uriPath string) error {
directoryPart, filePart := filepath.Split(uriPath)
sqlStatement := fmt.Sprintf("DELETE FROM %s WHERE directoryPart=$1 AND filePart=$2", filesTableName)
glog.V(3).Infof("Postgres query -- deleting path '%s'", uriPath)
res, err := s.db.Exec(sqlStatement, directoryPart, filePart)
if err != nil {
return err
}
_, err = res.RowsAffected()
if err != nil {
return err
}
return nil
}
func (s *PostgresStore) lookupDirectory(dirPath string) (DirectoryId, string, error) {
directoryRoot, directoryName := s.mySplitPath(dirPath)
sqlStatement := fmt.Sprintf("SELECT id, directoryroot, directoryname FROM %s WHERE directoryRoot=$1 AND directoryName=$2", directoriesTableName)
row := s.db.QueryRow(sqlStatement, directoryRoot, directoryName)
var id DirectoryId
var dirRoot string
var dirName string
err := row.Scan(&id, &dirRoot, &dirName)
glog.V(3).Infof("Postgres lookupDirectory -- looking up directory '%s' and found id '%d', root '%s', name '%s' ", dirPath, id, dirRoot, dirName)
if err != nil {
return 0, "", err
}
return id, filepath.Join(dirRoot, dirName), err
}
func (s *PostgresStore) findDirectories(dirPath string, limit int) (dirs []filer.DirectoryName, err error) {
sqlStatement := fmt.Sprintf("SELECT id, directoryroot, directoryname FROM %s WHERE directoryRoot=$1 AND directoryName != '' ORDER BY id LIMIT $2", directoriesTableName)
rows, err := s.db.Query(sqlStatement, dirPath, limit)
if err != nil {
glog.V(0).Infof("Postgres findDirectories error: %s", err)
}
if rows != nil {
defer rows.Close()
for rows.Next() {
var id DirectoryId
var directoryRoot string
var directoryName string
scanErr := rows.Scan(&id, &directoryRoot, &directoryName)
if scanErr != nil {
err = scanErr
}
dirs = append(dirs, filer.DirectoryName(directoryName))
}
}
return
}
func (s *PostgresStore) safeToDeleteDirectory(dirPath string, recursive bool) bool {
if recursive {
return true
}
sqlStatement := fmt.Sprintf("SELECT id FROM %s WHERE directoryRoot LIKE $1 LIMIT 1", directoriesTableName)
row := s.db.QueryRow(sqlStatement, dirPath+"%")
var id DirectoryId
err := row.Scan(&id)
if err != nil {
if err == sql.ErrNoRows {
return true
}
}
return false
}
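// Example: mySplitPath("/a/b/c/") returns directoryRoot "/a/b/" and directoryName "c/".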
func (s *PostgresStore) mySplitPath(dirPath string) (directoryRoot string, directoryName string) {
pathParts := strings.Split(dirPath, "/")
directoryRoot = "/"
directoryName = ""
if len(pathParts) > 1 {
directoryRoot = strings.Join(pathParts[0:len(pathParts)-2], "/") + "/"
directoryName = strings.Join(pathParts[len(pathParts)-2:], "/")
} else if len(pathParts) == 1 {
directoryRoot = "/"
directoryName = pathParts[0] + "/"
}
return directoryRoot, directoryName
}
func (s *PostgresStore) deleteDirectory(dirPath string, recursive bool) (err error) {
directoryRoot, directoryName := s.mySplitPath(dirPath)
// delete files
sqlStatement := fmt.Sprintf("DELETE FROM %s WHERE directorypart=$1", filesTableName)
_, err = s.db.Exec(sqlStatement, dirPath)
if err != nil {
return err
}
// delete specific directory if it is empty or recursive delete was requested
safeToDelete := s.safeToDeleteDirectory(dirPath, recursive)
if safeToDelete {
sqlStatement = fmt.Sprintf("DELETE FROM %s WHERE directoryRoot=$1 AND directoryName=$2", directoriesTableName)
_, err = s.db.Exec(sqlStatement, directoryRoot, directoryName)
if err != nil {
return err
}
}
if recursive {
// delete descendant files
sqlStatement = fmt.Sprintf("DELETE FROM %s WHERE directorypart LIKE $1", filesTableName)
_, err = s.db.Exec(sqlStatement, dirPath+"%")
if err != nil {
return err
}
// delete descendant directories
sqlStatement = fmt.Sprintf("DELETE FROM %s WHERE directoryRoot LIKE $1", directoriesTableName)
_, err = s.db.Exec(sqlStatement, dirPath+"%")
if err != nil {
return err
}
}
return err
}
func (s *PostgresStore) findFiles(dirPath string, lastFileName string, limit int) (files []filer.FileEntry, err error) {
var rows *sql.Rows = nil
if lastFileName == "" {
sqlStatement :=
fmt.Sprintf("SELECT fid, directorypart, filepart FROM %s WHERE directorypart=$1 ORDER BY id LIMIT $2", filesTableName)
rows, err = s.db.Query(sqlStatement, dirPath, limit)
} else {
sqlStatement :=
fmt.Sprintf("SELECT fid, directorypart, filepart FROM %s WHERE directorypart=$1 "+
"AND id > (SELECT id FROM %s WHERE directoryPart=$2 AND filepart=$3) ORDER BY id LIMIT $4",
filesTableName, filesTableName)
_, lastFileNameName := filepath.Split(lastFileName)
rows, err = s.db.Query(sqlStatement, dirPath, dirPath, lastFileNameName, limit)
}
if err != nil {
glog.V(0).Infof("Postgres find files error: %s", err)
}
if rows != nil {
defer rows.Close()
for rows.Next() {
var fid filer.FileId
var directoryPart string
var filePart string
scanErr := rows.Scan(&fid, &directoryPart, &filePart)
if scanErr != nil {
err = scanErr
}
files = append(files, filer.FileEntry{Name: filepath.Join(directoryPart, filePart), Id: fid})
if len(files) >= limit {
break
}
}
}
glog.V(3).Infof("Postgres findFiles -- looking up files under '%s' and found %d files. Limit=%d, lastFileName=%s",
dirPath, len(files), limit, lastFileName)
return files, err
}


@ -1,149 +0,0 @@
package postgres_store
import (
"database/sql"
"errors"
"fmt"
"sync"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
_ "github.com/lib/pq"
_ "path/filepath"
"path/filepath"
)
const (
default_maxIdleConnections = 100
default_maxOpenConnections = 50
filesTableName = "files"
directoriesTableName = "directories"
)
var (
_init_db sync.Once
_db_connection *sql.DB
)
type PostgresConf struct {
User string
Password string
HostName string
Port int
DataBase string
SslMode string
MaxIdleConnections int
MaxOpenConnections int
}
type PostgresStore struct {
db *sql.DB
server string
user string
password string
}
func (s *PostgresStore) CreateFile(fullFilePath string, fid string) (err error) {
var old_fid string
if old_fid, err = s.query(fullFilePath); err != nil && err != sql.ErrNoRows {
return fmt.Errorf("PostgresStore Put operation failed when querying path %s: err is %v", fullFilePath, err)
} else {
if len(old_fid) == 0 {
err = s.insert(fullFilePath, fid)
if err != nil {
return fmt.Errorf("PostgresStore Put operation failed when inserting path %s with fid %s : err is %v", fullFilePath, fid, err)
}
} else {
err = s.update(fullFilePath, fid)
if err != nil {
return fmt.Errorf("PostgresStore Put operation failed when updating path %s with fid %s : err is %v", fullFilePath, fid, err)
}
}
}
return
}
func (s *PostgresStore) FindFile(fullFilePath string) (fid string, err error) {
fid, err = s.query(fullFilePath)
return fid, err
}
func (s *PostgresStore) LookupDirectoryEntry(dirPath string, name string) (found bool, fileId string, err error) {
fullPath := filepath.Join(dirPath, name)
if fileId, err = s.FindFile(fullPath); err == nil {
return true, fileId, nil
}
if _, _, err := s.lookupDirectory(fullPath); err == nil {
return true, "", err
}
return false, "", err
}
func (s *PostgresStore) DeleteFile(fullFilePath string) (fid string, err error) {
if fid, err = s.query(fullFilePath); err != nil {
return "", fmt.Errorf("PostgresStore Delete operation failed when querying path %s: err is %v", fullFilePath, err)
} else if fid == "" {
return "", nil
}
if err = s.delete(fullFilePath); err != nil {
return "", fmt.Errorf("PostgresStore Delete operation failed when deleting path %s: err is %v", fullFilePath, err)
} else {
return "", nil
}
}
func (s *PostgresStore) ListDirectories(dirPath string) (dirs []filer.DirectoryName, err error) {
dirs, err = s.findDirectories(dirPath, 1000)
glog.V(3).Infof("Postgres ListDirs = found %d directories under %s", len(dirs), dirPath)
return dirs, err
}
func (s *PostgresStore) ListFiles(dirPath string, lastFileName string, limit int) (files []filer.FileEntry, err error) {
files, err = s.findFiles(dirPath, lastFileName, limit)
return files, err
}
func (s *PostgresStore) DeleteDirectory(dirPath string, recursive bool) (err error) {
err = s.deleteDirectory(dirPath, recursive)
if err != nil {
glog.V(0).Infof("Error in Postgres DeleteDir '%s' (recursive = '%t'): %s", err)
}
return err
}
func (s *PostgresStore) Move(fromPath string, toPath string) (err error) {
glog.V(3).Infoln("Calling posgres_store Move")
return errors.New("Move is not yet implemented for the PostgreSQL store.")
}
//func NewPostgresStore(master string, confs []PostgresConf, isSharding bool, shardCount int) *PostgresStore {
func NewPostgresStore(master string, conf PostgresConf) *PostgresStore {
pg := &PostgresStore{
db: getDbConnection(conf),
}
pg.createDirectoriesTable()
if err := pg.createFilesTable(); err != nil {
fmt.Printf("create table failed %v", err)
}
return pg
}
func (s *PostgresStore) Close() {
s.db.Close()
}


@ -1,50 +0,0 @@
package redis_store
import (
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/go-redis/redis"
)
type RedisStore struct {
Client *redis.Client
}
func NewRedisStore(hostPort string, password string, database int) *RedisStore {
client := redis.NewClient(&redis.Options{
Addr: hostPort,
Password: password,
DB: database,
})
return &RedisStore{Client: client}
}
func (s *RedisStore) Get(fullFileName string) (fid string, err error) {
fid, err = s.Client.Get(fullFileName).Result()
if err == redis.Nil {
err = filer.ErrNotFound
}
return fid, err
}
func (s *RedisStore) Put(fullFileName string, fid string) (err error) {
_, err = s.Client.Set(fullFileName, fid, 0).Result()
if err == redis.Nil {
err = nil
}
return err
}
// Currently the fid is not returned
func (s *RedisStore) Delete(fullFileName string) (err error) {
_, err = s.Client.Del(fullFileName).Result()
if err == redis.Nil {
err = nil
}
return err
}
func (s *RedisStore) Close() {
if s.Client != nil {
s.Client.Close()
}
}


@ -1,45 +0,0 @@
There are two main components of a filer: directories and files.
My previous approach was to use a sequence number to generate each directoryId.
However, this is not scalable: the id generation itself is a bottleneck,
and it needs careful locking and deduplication checking to obtain a directoryId.
In a second design, each directory is deterministically mapped to a version 3 UUID,
which uses MD5 to map a tuple of <uuid, name> to the UUID.
However, this UUID3 approach is logically the same as storing the full path.
Storing the full path is the simplest design.
The separator is a special byte, 0x00.
When writing a file:
<file parent full path, separator, file name> => fileId, file properties
For folders:
The filer breaks the directory path into folders.
for each folder:
if it is not in cache:
check whether the folder is created in the KVS, if not:
set <folder parent full path, separator, folder name> => directory properties
if no permission for the folder:
break
The filer caches the most recently used folder permissions with a TTL.
So any folder permission change needs to wait TTL interval to take effect.
When listing the directory:
prefix scan using (the folder full path + separator) as the prefix (see the sketch at the end of this note)
The downside:
1. Rename a folder will need to recursively process all sub folders and files.
2. Move a folder will need to recursively process all sub folders and files.
So these operations are not allowed if the folder is not empty.
Allowing:
1. Rename a file
2. Move a file to a different folder
3. Delete an empty folder
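
As a sketch of the key scheme above (illustrative Go, not part of the filer code; names are hypothetical):

package main

import "fmt"

const separator = byte(0x00)

// key layout: <parent full path> 0x00 <name>
func genKey(parentFullPath, name string) []byte {
	key := []byte(parentFullPath)
	key = append(key, separator)
	key = append(key, []byte(name)...)
	return key
}

func main() {
	// writing /path/to/file.jpg stores under this key;
	// listing /path/to is a prefix scan over genKey("/path/to", "")
	fmt.Printf("%q\n", genKey("/path/to", "file.jpg")) // "/path/to\x00file.jpg"
}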


@ -0,0 +1,130 @@
package abstract_sql
import (
"database/sql"
"fmt"
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
)
type AbstractSqlStore struct {
DB *sql.DB
SqlInsert string
SqlUpdate string
SqlFind string
SqlDelete string
SqlListExclusive string
SqlListInclusive string
}
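// Each concrete store supplies the SQL text for these fields. A hypothetical
// MySQL-style wiring, matching the argument order of the Exec/Query calls below
// (a sketch only; the real statements live in the per-database store packages):
//
//	store := &AbstractSqlStore{
//		DB:               db, // an assumed, already-opened *sql.DB
//		SqlInsert:        "INSERT INTO filemeta (dirhash,name,directory,meta) VALUES(?,?,?,?)",
//		SqlUpdate:        "UPDATE filemeta SET meta=? WHERE dirhash=? AND name=? AND directory=?",
//		SqlFind:          "SELECT meta FROM filemeta WHERE dirhash=? AND name=? AND directory=?",
//		SqlDelete:        "DELETE FROM filemeta WHERE dirhash=? AND name=? AND directory=?",
//		SqlListExclusive: "SELECT name, meta FROM filemeta WHERE dirhash=? AND name>? AND directory=? ORDER BY name ASC LIMIT ?",
//		SqlListInclusive: "SELECT name, meta FROM filemeta WHERE dirhash=? AND name>=? AND directory=? ORDER BY name ASC LIMIT ?",
//	}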
func (store *AbstractSqlStore) InsertEntry(entry *filer2.Entry) (err error) {
dir, name := entry.FullPath.DirAndName()
meta, err := entry.EncodeAttributesAndChunks()
if err != nil {
return fmt.Errorf("encode %s: %s", entry.FullPath, err)
}
res, err := store.DB.Exec(store.SqlInsert, hashToLong(dir), name, dir, meta)
if err != nil {
return fmt.Errorf("insert %s: %s", entry.FullPath, err)
}
_, err = res.RowsAffected()
if err != nil {
return fmt.Errorf("insert %s but no rows affected: %s", entry.FullPath, err)
}
return nil
}
func (store *AbstractSqlStore) UpdateEntry(entry *filer2.Entry) (err error) {
dir, name := entry.FullPath.DirAndName()
meta, err := entry.EncodeAttributesAndChunks()
if err != nil {
return fmt.Errorf("encode %s: %s", entry.FullPath, err)
}
res, err := store.DB.Exec(store.SqlUpdate, meta, hashToLong(dir), name, dir)
if err != nil {
return fmt.Errorf("update %s: %s", entry.FullPath, err)
}
_, err = res.RowsAffected()
if err != nil {
return fmt.Errorf("update %s but no rows affected: %s", entry.FullPath, err)
}
return nil
}
func (store *AbstractSqlStore) FindEntry(fullpath filer2.FullPath) (*filer2.Entry, error) {
dir, name := fullpath.DirAndName()
row := store.DB.QueryRow(store.SqlFind, hashToLong(dir), name, dir)
var data []byte
if err := row.Scan(&data); err != nil {
return nil, fmt.Errorf("read entry %s: %v", fullpath, err)
}
entry := &filer2.Entry{
FullPath: fullpath,
}
if err := entry.DecodeAttributesAndChunks(data); err != nil {
return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err)
}
return entry, nil
}
func (store *AbstractSqlStore) DeleteEntry(fullpath filer2.FullPath) (error) {
dir, name := fullpath.DirAndName()
res, err := store.DB.Exec(store.SqlDelete, hashToLong(dir), name, dir)
if err != nil {
return fmt.Errorf("delete %s: %s", fullpath, err)
}
_, err = res.RowsAffected()
if err != nil {
return fmt.Errorf("delete %s but no rows affected: %s", fullpath, err)
}
return nil
}
func (store *AbstractSqlStore) ListDirectoryEntries(fullpath filer2.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) {
sqlText := store.SqlListExclusive
if inclusive {
sqlText = store.SqlListInclusive
}
rows, err := store.DB.Query(sqlText, hashToLong(string(fullpath)), startFileName, string(fullpath), limit)
if err != nil {
return nil, fmt.Errorf("list %s : %v", fullpath, err)
}
defer rows.Close()
for rows.Next() {
var name string
var data []byte
if err = rows.Scan(&name, &data); err != nil {
glog.V(0).Infof("scan %s : %v", fullpath, err)
return nil, fmt.Errorf("scan %s: %v", fullpath, err)
}
entry := &filer2.Entry{
FullPath: filer2.NewFullPath(string(fullpath), name),
}
if err = entry.DecodeAttributesAndChunks(data); err != nil {
glog.V(0).Infof("scan decode %s : %v", entry.FullPath, err)
return nil, fmt.Errorf("scan decode %s : %v", entry.FullPath, err)
}
entries = append(entries, entry)
}
return entries, nil
}


@ -0,0 +1,32 @@
package abstract_sql
import (
"crypto/md5"
"io"
)
// hashToLong returns the first 8 bytes of the MD5 of dir, read big-endian, as a signed 64-bit integer (the "dirhash" value used by the SQL stores)
func hashToLong(dir string) (v int64) {
h := md5.New()
io.WriteString(h, dir)
b := h.Sum(nil)
v += int64(b[0])
v <<= 8
v += int64(b[1])
v <<= 8
v += int64(b[2])
v <<= 8
v += int64(b[3])
v <<= 8
v += int64(b[4])
v <<= 8
v += int64(b[5])
v <<= 8
v += int64(b[6])
v <<= 8
v += int64(b[7])
return
}
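// Equivalently (a sketch, not part of this change), the same value is the
// first 8 bytes of the md5 digest interpreted big-endian:
//
//	import (
//		"crypto/md5"
//		"encoding/binary"
//	)
//
//	func hashToLongAlt(dir string) int64 {
//		sum := md5.Sum([]byte(dir))
//		return int64(binary.BigEndian.Uint64(sum[:8]))
//	}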


@ -0,0 +1,14 @@
1. create a keyspace
CREATE KEYSPACE seaweedfs WITH replication = {'class':'SimpleStrategy', 'replication_factor' : 1};
2. create filemeta table
USE seaweedfs;
CREATE TABLE filemeta (
directory varchar,
name varchar,
meta blob,
PRIMARY KEY (directory, name)
) WITH CLUSTERING ORDER BY (name ASC);
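3. configure the filer to use it, matching the [cassandra] section of the sample filer.toml
[cassandra]
enabled = true
keyspace="seaweedfs"
hosts=[
"localhost:9042",
]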


@ -0,0 +1,131 @@
package cassandra
import (
"fmt"
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/gocql/gocql"
"github.com/spf13/viper"
)
func init() {
filer2.Stores = append(filer2.Stores, &CassandraStore{})
}
type CassandraStore struct {
cluster *gocql.ClusterConfig
session *gocql.Session
}
func (store *CassandraStore) GetName() string {
return "cassandra"
}
func (store *CassandraStore) Initialize(viper *viper.Viper) (err error) {
return store.initialize(
viper.GetString("keyspace"),
viper.GetStringSlice("hosts"),
)
}
func (store *CassandraStore) initialize(keyspace string, hosts []string) (err error) {
store.cluster = gocql.NewCluster(hosts...)
store.cluster.Keyspace = keyspace
store.cluster.Consistency = gocql.LocalQuorum
store.session, err = store.cluster.CreateSession()
if err != nil {
glog.V(0).Infof("Failed to open cassandra store, hosts %v, keyspace %s", hosts, keyspace)
}
return
}
func (store *CassandraStore) InsertEntry(entry *filer2.Entry) (err error) {
dir, name := entry.FullPath.DirAndName()
meta, err := entry.EncodeAttributesAndChunks()
if err != nil {
return fmt.Errorf("encode %s: %s", entry.FullPath, err)
}
if err := store.session.Query(
"INSERT INTO filemeta (directory,name,meta) VALUES(?,?,?)",
dir, name, meta).Exec(); err != nil {
return fmt.Errorf("insert %s: %s", entry.FullPath, err)
}
return nil
}
func (store *CassandraStore) UpdateEntry(entry *filer2.Entry) (err error) {
return store.InsertEntry(entry)
}
func (store *CassandraStore) FindEntry(fullpath filer2.FullPath) (entry *filer2.Entry, err error) {
dir, name := fullpath.DirAndName()
var data []byte
if err := store.session.Query(
"SELECT meta FROM filemeta WHERE directory=? AND name=?",
dir, name).Consistency(gocql.One).Scan(&data); err != nil {
if err != gocql.ErrNotFound {
return nil, fmt.Errorf("read entry %s: %v", fullpath, err)
}
}
if len(data) == 0 {
return nil, fmt.Errorf("not found: %s", fullpath)
}
entry = &filer2.Entry{
FullPath: fullpath,
}
err = entry.DecodeAttributesAndChunks(data)
if err != nil {
return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err)
}
return entry, nil
}
func (store *CassandraStore) DeleteEntry(fullpath filer2.FullPath) error {
dir, name := fullpath.DirAndName()
if err := store.session.Query(
"DELETE FROM filemeta WHERE directory=? AND name=?",
dir, name).Exec(); err != nil {
return fmt.Errorf("delete %s : %v", fullpath, err)
}
return nil
}
func (store *CassandraStore) ListDirectoryEntries(fullpath filer2.FullPath, startFileName string, inclusive bool,
limit int) (entries []*filer2.Entry, err error) {
cqlStr := "SELECT NAME, meta FROM filemeta WHERE directory=? AND name>? ORDER BY NAME ASC LIMIT ?"
if inclusive {
cqlStr = "SELECT NAME, meta FROM filemeta WHERE directory=? AND name>=? ORDER BY NAME ASC LIMIT ?"
}
var data []byte
var name string
iter := store.session.Query(cqlStr, string(fullpath), startFileName, limit).Iter()
for iter.Scan(&name, &data) {
entry := &filer2.Entry{
FullPath: filer2.NewFullPath(string(fullpath), name),
}
if decodeErr := entry.DecodeAttributesAndChunks(data); decodeErr != nil {
err = decodeErr
glog.V(0).Infof("list %s : %v", entry.FullPath, err)
break
}
entries = append(entries, entry)
}
if err := iter.Close(); err != nil {
glog.V(0).Infof("list iterator close: %v", err)
}
return entries, err
}


@ -0,0 +1,126 @@
package filer2
import (
"os"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/spf13/viper"
)
const (
FILER_TOML_EXAMPLE = `
# A sample TOML config file for SeaweedFS filer store
[memory]
# local in memory, mostly for testing purpose
enabled = false
[leveldb]
# local on disk, mostly for simple single-machine setup, fairly scalable
enabled = false
dir = "." # directory to store level db files
####################################################
# multiple filers on shared storage, fairly scalable
####################################################
[mysql]
# CREATE TABLE IF NOT EXISTS filemeta (
# dirhash BIGINT COMMENT 'first 64 bits of MD5 hash value of directory field',
# name VARCHAR(1000) COMMENT 'directory or file name',
# directory VARCHAR(4096) COMMENT 'full path to parent directory',
# meta BLOB,
# PRIMARY KEY (dirhash, name)
# ) DEFAULT CHARSET=utf8;
enabled = true
hostname = "localhost"
port = 3306
username = "root"
password = ""
database = "" # create or use an existing database
connection_max_idle = 2
connection_max_open = 100
[postgres]
# CREATE TABLE IF NOT EXISTS filemeta (
# dirhash BIGINT,
# name VARCHAR(1000),
# directory VARCHAR(4096),
# meta bytea,
# PRIMARY KEY (dirhash, name)
# );
enabled = false
hostname = "localhost"
port = 5432
username = "postgres"
password = ""
database = "" # create or use an existing database
sslmode = "disable"
connection_max_idle = 100
connection_max_open = 100
[cassandra]
# CREATE TABLE filemeta (
# directory varchar,
# name varchar,
# meta blob,
# PRIMARY KEY (directory, name)
# ) WITH CLUSTERING ORDER BY (name ASC);
enabled = false
keyspace="seaweedfs"
hosts=[
"localhost:9042",
]
[redis]
enabled = true
address = "localhost:6379"
password = ""
db = 0
`
)
var (
Stores []FilerStore
)
func (f *Filer) LoadConfiguration() {
// find a filer store
viper.SetConfigName("filer") // name of config file (without extension)
viper.AddConfigPath(".") // optionally look for config in the working directory
viper.AddConfigPath("$HOME/.seaweedfs") // call multiple times to add many search paths
viper.AddConfigPath("/etc/seaweedfs/") // path to look for the config file in
if err := viper.ReadInConfig(); err != nil { // Handle errors reading the config file
glog.Fatalf("Failed to load filer.toml file from current directory, or $HOME/.seaweedfs/, or /etc/seaweedfs/" +
"\n\nPlease follow this example and add a filer.toml file to " +
"current directory, or $HOME/.seaweedfs/, or /etc/seaweedfs/:\n" + FILER_TOML_EXAMPLE)
}
glog.V(0).Infof("Reading filer configuration from %s", viper.ConfigFileUsed())
for _, store := range Stores {
if viper.GetBool(store.GetName() + ".enabled") {
viperSub := viper.Sub(store.GetName())
if err := store.Initialize(viperSub); err != nil {
glog.Fatalf("Failed to initialize store for %s: %+v",
store.GetName(), err)
}
f.SetStore(store)
glog.V(0).Infof("Configure filer for %s from %s", store.GetName(), viper.ConfigFileUsed())
return
}
}
println()
println("Supported filer stores are:")
for _, store := range Stores {
println(" " + store.GetName())
}
println()
println("Please configure a supported filer store in", viper.ConfigFileUsed())
println()
os.Exit(-1)
}


@ -1,42 +0,0 @@
package embedded
import (
"github.com/syndtr/goleveldb/leveldb"
"github.com/chrislusf/seaweedfs/weed/filer2"
)
type EmbeddedStore struct {
db *leveldb.DB
}
func NewEmbeddedStore(dir string) (filer *EmbeddedStore, err error) {
filer = &EmbeddedStore{}
if filer.db, err = leveldb.OpenFile(dir, nil); err != nil {
return
}
return
}
func (filer *EmbeddedStore) InsertEntry(entry *filer2.Entry) (err error) {
return nil
}
func (filer *EmbeddedStore) AddDirectoryLink(directory *filer2.Entry, delta int32) (err error) {
return nil
}
func (filer *EmbeddedStore) AppendFileChunk(fullpath filer2.FullPath, fileChunk filer2.FileChunk) (err error) {
return nil
}
func (filer *EmbeddedStore) FindEntry(fullpath filer2.FullPath) (found bool, entry *filer2.Entry, err error) {
return false, nil, nil
}
func (filer *EmbeddedStore) DeleteEntry(fullpath filer2.FullPath) (entry *filer2.Entry, err error) {
return nil, nil
}
func (filer *EmbeddedStore) ListDirectoryEntries(fullpath filer2.FullPath) (entries []*filer2.Entry, err error) {
return nil, nil
}

weed/filer2/entry.go (new file, 42 lines)

@ -0,0 +1,42 @@
package filer2
import (
"os"
"time"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)
type Attr struct {
Mtime time.Time // time of last modification
Crtime time.Time // time of creation (OS X only)
Mode os.FileMode // file mode
Uid uint32 // owner uid
Gid uint32 // group gid
Mime string
}
func (attr Attr) IsDirectory() bool {
return attr.Mode&os.ModeDir > 0
}
type Entry struct {
FullPath
Attr
// the following is for files
Chunks []*filer_pb.FileChunk `json:"chunks,omitempty"`
}
func (entry *Entry) Size() uint64 {
return TotalSize(entry.Chunks)
}
func (entry *Entry) Timestamp() time.Time {
if entry.IsDirectory() {
return entry.Crtime
} else {
return entry.Mtime
}
}


@ -0,0 +1,45 @@
package filer2
import (
"os"
"time"
"fmt"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/gogo/protobuf/proto"
)
func (entry *Entry) EncodeAttributesAndChunks() ([]byte, error) {
message := &filer_pb.Entry{
Attributes: &filer_pb.FuseAttributes{
Crtime: entry.Attr.Crtime.Unix(),
Mtime: entry.Attr.Mtime.Unix(),
FileMode: uint32(entry.Attr.Mode),
Uid: entry.Uid,
Gid: entry.Gid,
Mime: entry.Mime,
},
Chunks: entry.Chunks,
}
return proto.Marshal(message)
}
func (entry *Entry) DecodeAttributesAndChunks(blob []byte) error {
message := &filer_pb.Entry{}
if err := proto.UnmarshalMerge(blob, message); err != nil {
return fmt.Errorf("decoding value blob for %s: %v", entry.FullPath, err)
}
entry.Attr.Crtime = time.Unix(message.Attributes.Crtime, 0)
entry.Attr.Mtime = time.Unix(message.Attributes.Mtime, 0)
entry.Attr.Mode = os.FileMode(message.Attributes.FileMode)
entry.Attr.Uid = message.Attributes.Uid
entry.Attr.Gid = message.Attributes.Gid
entry.Attr.Mime = message.Attributes.Mime
entry.Chunks = message.Chunks
return nil
}


@ -1,8 +1,13 @@
package filer2
-type Chunks []FileChunk
+import (
+"math"
+"sort"
+"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+)
-func (chunks Chunks) TotalSize() (size uint64) {
+func TotalSize(chunks []*filer_pb.FileChunk) (size uint64) {
for _, c := range chunks {
t := uint64(c.Offset + int64(c.Size))
if size < t {
@ -12,12 +17,236 @@ func (chunks Chunks) TotalSize() (size uint64) {
return
}
-func (chunks Chunks) Len() int {
-return len(chunks)
-}
+func CompactFileChunks(chunks []*filer_pb.FileChunk) (compacted, garbage []*filer_pb.FileChunk) {
+visibles := nonOverlappingVisibleIntervals(chunks)
+fileIds := make(map[string]bool)
+for _, interval := range visibles {
+fileIds[interval.fileId] = true
+}
+for _, chunk := range chunks {
+if found := fileIds[chunk.FileId]; found {
+compacted = append(compacted, chunk)
+} else {
+garbage = append(garbage, chunk)
+}
+}
+return
+}
-func (chunks Chunks) Swap(i, j int) {
-chunks[i], chunks[j] = chunks[j], chunks[i]
-}
+func FindUnusedFileChunks(oldChunks, newChunks []*filer_pb.FileChunk) (unused []*filer_pb.FileChunk) {
+fileIds := make(map[string]bool)
+for _, interval := range newChunks {
+fileIds[interval.FileId] = true
+}
+for _, chunk := range oldChunks {
+if found := fileIds[chunk.FileId]; !found {
+unused = append(unused, chunk)
+}
+}
+return
+}
-func (chunks Chunks) Less(i, j int) bool {
-return chunks[i].Offset < chunks[j].Offset
-}
type ChunkView struct {
FileId string
Offset int64
Size uint64
LogicOffset int64
}
func ViewFromChunks(chunks []*filer_pb.FileChunk, offset int64, size int) (views []*ChunkView) {
visibles := nonOverlappingVisibleIntervals(chunks)
stop := offset + int64(size)
for _, chunk := range visibles {
if chunk.start <= offset && offset < chunk.stop && offset < stop {
views = append(views, &ChunkView{
FileId: chunk.fileId,
Offset: offset - chunk.start, // offset is the data starting location in this file id
Size: uint64(min(chunk.stop, stop) - offset),
LogicOffset: offset,
})
offset = min(chunk.stop, stop)
}
}
return views
}
func logPrintf(name string, visibles []*visibleInterval) {
/*
log.Printf("%s len %d", name, len(visibles))
for _, v := range visibles {
log.Printf("%s: => %+v", name, v)
}
*/
}
func nonOverlappingVisibleIntervals(chunks []*filer_pb.FileChunk) (visibles []*visibleInterval) {
sort.Slice(chunks, func(i, j int) bool {
if chunks[i].Offset < chunks[j].Offset {
return true
}
if chunks[i].Offset == chunks[j].Offset {
return chunks[i].Mtime < chunks[j].Mtime
}
return false
})
if len(chunks) == 0 {
return
}
var parallelIntervals, intervals []*visibleInterval
var minStopInterval, upToDateInterval *visibleInterval
watermarkStart := chunks[0].Offset
for _, chunk := range chunks {
// log.Printf("checking chunk: [%d,%d)", chunk.Offset, chunk.Offset+int64(chunk.Size))
logPrintf("parallelIntervals", parallelIntervals)
for len(parallelIntervals) > 0 && watermarkStart < chunk.Offset {
logPrintf("parallelIntervals loop 1", parallelIntervals)
logPrintf("parallelIntervals loop 1 intervals", intervals)
minStopInterval, upToDateInterval = findMinStopInterval(parallelIntervals)
nextStop := min(minStopInterval.stop, chunk.Offset)
intervals = append(intervals, newVisibleInterval(
max(watermarkStart, minStopInterval.start),
nextStop,
upToDateInterval.fileId,
upToDateInterval.modifiedTime,
))
watermarkStart = nextStop
logPrintf("parallelIntervals loop intervals =>", intervals)
// remove processed intervals, possibly multiple
var remaining []*visibleInterval
for _, interval := range parallelIntervals {
if interval.stop != watermarkStart {
remaining = append(remaining, interval)
}
}
parallelIntervals = remaining
logPrintf("parallelIntervals loop 2", parallelIntervals)
logPrintf("parallelIntervals loop 2 intervals", intervals)
}
parallelIntervals = append(parallelIntervals, newVisibleInterval(
chunk.Offset,
chunk.Offset+int64(chunk.Size),
chunk.FileId,
chunk.Mtime,
))
}
logPrintf("parallelIntervals loop 3", parallelIntervals)
logPrintf("parallelIntervals loop 3 intervals", intervals)
for len(parallelIntervals) > 0 {
minStopInterval, upToDateInterval = findMinStopInterval(parallelIntervals)
intervals = append(intervals, newVisibleInterval(
max(watermarkStart, minStopInterval.start),
minStopInterval.stop,
upToDateInterval.fileId,
upToDateInterval.modifiedTime,
))
watermarkStart = minStopInterval.stop
// remove processed intervals, possibly multiple
var remaining []*visibleInterval
for _, interval := range parallelIntervals {
if interval.stop != watermarkStart {
remaining = append(remaining, interval)
}
}
parallelIntervals = remaining
}
logPrintf("parallelIntervals loop 4", parallelIntervals)
logPrintf("intervals", intervals)
// merge connected intervals, now the intervals are non-intersecting
var lastIntervalIndex int
var prevIntervalIndex int
for i, interval := range intervals {
if i == 0 {
prevIntervalIndex = i
lastIntervalIndex = i
continue
}
if intervals[i-1].fileId != interval.fileId ||
intervals[i-1].stop < intervals[i].start {
visibles = append(visibles, newVisibleInterval(
intervals[prevIntervalIndex].start,
intervals[i-1].stop,
intervals[prevIntervalIndex].fileId,
intervals[prevIntervalIndex].modifiedTime,
))
prevIntervalIndex = i
}
lastIntervalIndex = i
logPrintf("intervals loop 1 visibles", visibles)
}
visibles = append(visibles, newVisibleInterval(
intervals[prevIntervalIndex].start,
intervals[lastIntervalIndex].stop,
intervals[prevIntervalIndex].fileId,
intervals[prevIntervalIndex].modifiedTime,
))
logPrintf("visibles", visibles)
return
}
func findMinStopInterval(intervals []*visibleInterval) (minStopInterval, upToDateInterval *visibleInterval) {
var latestMtime int64
latestIntervalIndex := 0
minStop := int64(math.MaxInt64)
minIntervalIndex := 0
for i, interval := range intervals {
if minStop > interval.stop {
minIntervalIndex = i
minStop = interval.stop
}
if latestMtime < interval.modifiedTime {
latestMtime = interval.modifiedTime
latestIntervalIndex = i
}
}
minStopInterval = intervals[minIntervalIndex]
upToDateInterval = intervals[latestIntervalIndex]
return
}
// find non-overlapping visible intervals
// visible interval map to one file chunk
type visibleInterval struct {
start int64
stop int64
modifiedTime int64
fileId string
}
func newVisibleInterval(start, stop int64, fileId string, modifiedTime int64) *visibleInterval {
return &visibleInterval{start: start, stop: stop, fileId: fileId, modifiedTime: modifiedTime}
}
func min(x, y int64) int64 {
if x <= y {
return x
}
return y
}
func max(x, y int64) int64 {
if x > y {
return x
}
return y
}


@ -0,0 +1,316 @@
package filer2
import (
"log"
"testing"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)
func TestCompactFileChunks(t *testing.T) {
chunks := []*filer_pb.FileChunk{
{Offset: 10, Size: 100, FileId: "abc", Mtime: 50},
{Offset: 100, Size: 100, FileId: "def", Mtime: 100},
{Offset: 200, Size: 100, FileId: "ghi", Mtime: 200},
{Offset: 110, Size: 200, FileId: "jkl", Mtime: 300},
}
compacted, garbage := CompactFileChunks(chunks)
log.Printf("Compacted: %+v", compacted)
log.Printf("Garbage : %+v", garbage)
if len(compacted) != 3 {
t.Fatalf("unexpected compacted: %d", len(compacted))
}
if len(garbage) != 1 {
t.Fatalf("unexpected garbage: %d", len(garbage))
}
}
func TestIntervalMerging(t *testing.T) {
testcases := []struct {
Chunks []*filer_pb.FileChunk
Expected []*visibleInterval
}{
// case 0: normal
{
Chunks: []*filer_pb.FileChunk{
{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
{Offset: 100, Size: 100, FileId: "asdf", Mtime: 134},
{Offset: 200, Size: 100, FileId: "fsad", Mtime: 353},
},
Expected: []*visibleInterval{
{start: 0, stop: 100, fileId: "abc"},
{start: 100, stop: 200, fileId: "asdf"},
{start: 200, stop: 300, fileId: "fsad"},
},
},
// case 1: updates overwrite full chunks
{
Chunks: []*filer_pb.FileChunk{
{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
{Offset: 0, Size: 200, FileId: "asdf", Mtime: 134},
},
Expected: []*visibleInterval{
{start: 0, stop: 200, fileId: "asdf"},
},
},
// case 2: updates overwrite part of previous chunks
{
Chunks: []*filer_pb.FileChunk{
{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
{Offset: 0, Size: 50, FileId: "asdf", Mtime: 134},
},
Expected: []*visibleInterval{
{start: 0, stop: 50, fileId: "asdf"},
{start: 50, stop: 100, fileId: "abc"},
},
},
// case 3: updates overwrite full chunks
{
Chunks: []*filer_pb.FileChunk{
{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
{Offset: 0, Size: 200, FileId: "asdf", Mtime: 134},
{Offset: 50, Size: 250, FileId: "xxxx", Mtime: 154},
},
Expected: []*visibleInterval{
{start: 0, stop: 50, fileId: "asdf"},
{start: 50, stop: 300, fileId: "xxxx"},
},
},
// case 4: updates far away from prev chunks
{
Chunks: []*filer_pb.FileChunk{
{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
{Offset: 0, Size: 200, FileId: "asdf", Mtime: 134},
{Offset: 250, Size: 250, FileId: "xxxx", Mtime: 154},
},
Expected: []*visibleInterval{
{start: 0, stop: 200, fileId: "asdf"},
{start: 250, stop: 500, fileId: "xxxx"},
},
},
// case 5: updates overwrite full chunks
{
Chunks: []*filer_pb.FileChunk{
{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
{Offset: 0, Size: 200, FileId: "asdf", Mtime: 184},
{Offset: 70, Size: 150, FileId: "abc", Mtime: 143},
{Offset: 80, Size: 100, FileId: "xxxx", Mtime: 134},
},
Expected: []*visibleInterval{
{start: 0, stop: 200, fileId: "asdf"},
{start: 200, stop: 220, fileId: "abc"},
},
},
// case 6: same updates
{
Chunks: []*filer_pb.FileChunk{
{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
},
Expected: []*visibleInterval{
{start: 0, stop: 100, fileId: "abc"},
},
},
// case 7: real updates
{
Chunks: []*filer_pb.FileChunk{
{Offset: 0, Size: 2097152, FileId: "7,0294cbb9892b", Mtime: 123},
{Offset: 0, Size: 3145728, FileId: "3,029565bf3092", Mtime: 130},
{Offset: 2097152, Size: 3145728, FileId: "6,029632f47ae2", Mtime: 140},
{Offset: 5242880, Size: 3145728, FileId: "2,029734c5aa10", Mtime: 150},
{Offset: 8388608, Size: 3145728, FileId: "5,02982f80de50", Mtime: 160},
{Offset: 11534336, Size: 2842193, FileId: "7,0299ad723803", Mtime: 170},
},
Expected: []*visibleInterval{
{start: 0, stop: 2097152, fileId: "3,029565bf3092"},
{start: 2097152, stop: 5242880, fileId: "6,029632f47ae2"},
{start: 5242880, stop: 8388608, fileId: "2,029734c5aa10"},
{start: 8388608, stop: 11534336, fileId: "5,02982f80de50"},
{start: 11534336, stop: 14376529, fileId: "7,0299ad723803"},
},
},
}
for i, testcase := range testcases {
log.Printf("++++++++++ merged test case %d ++++++++++++++++++++", i)
intervals := nonOverlappingVisibleIntervals(testcase.Chunks)
for x, interval := range intervals {
log.Printf("test case %d, interval %d, start=%d, stop=%d, fileId=%s",
i, x, interval.start, interval.stop, interval.fileId)
}
for x, interval := range intervals {
if interval.start != testcase.Expected[x].start {
t.Fatalf("failed on test case %d, interval %d, start %d, expect %d",
i, x, interval.start, testcase.Expected[x].start)
}
if interval.stop != testcase.Expected[x].stop {
t.Fatalf("failed on test case %d, interval %d, stop %d, expect %d",
i, x, interval.stop, testcase.Expected[x].stop)
}
if interval.fileId != testcase.Expected[x].fileId {
t.Fatalf("failed on test case %d, interval %d, chunkId %s, expect %s",
i, x, interval.fileId, testcase.Expected[x].fileId)
}
}
if len(intervals) != len(testcase.Expected) {
t.Fatalf("failed to compact test case %d, len %d expected %d", i, len(intervals), len(testcase.Expected))
}
}
}
func TestChunksReading(t *testing.T) {
testcases := []struct {
Chunks []*filer_pb.FileChunk
Offset int64
Size int
Expected []*ChunkView
}{
// case 0: normal
{
Chunks: []*filer_pb.FileChunk{
{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
{Offset: 100, Size: 100, FileId: "asdf", Mtime: 134},
{Offset: 200, Size: 100, FileId: "fsad", Mtime: 353},
},
Offset: 0,
Size: 250,
Expected: []*ChunkView{
{Offset: 0, Size: 100, FileId: "abc", LogicOffset: 0},
{Offset: 0, Size: 100, FileId: "asdf", LogicOffset: 100},
{Offset: 0, Size: 50, FileId: "fsad", LogicOffset: 200},
},
},
// case 1: updates overwrite full chunks
{
Chunks: []*filer_pb.FileChunk{
{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
{Offset: 0, Size: 200, FileId: "asdf", Mtime: 134},
},
Offset: 50,
Size: 100,
Expected: []*ChunkView{
{Offset: 50, Size: 100, FileId: "asdf", LogicOffset: 50},
},
},
// case 2: updates overwrite part of previous chunks
{
Chunks: []*filer_pb.FileChunk{
{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
{Offset: 0, Size: 50, FileId: "asdf", Mtime: 134},
},
Offset: 25,
Size: 50,
Expected: []*ChunkView{
{Offset: 25, Size: 25, FileId: "asdf", LogicOffset: 25},
{Offset: 0, Size: 25, FileId: "abc", LogicOffset: 50},
},
},
// case 3: updates overwrite full chunks
{
Chunks: []*filer_pb.FileChunk{
{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
{Offset: 0, Size: 200, FileId: "asdf", Mtime: 134},
{Offset: 50, Size: 250, FileId: "xxxx", Mtime: 154},
},
Offset: 0,
Size: 200,
Expected: []*ChunkView{
{Offset: 0, Size: 50, FileId: "asdf", LogicOffset: 0},
{Offset: 0, Size: 150, FileId: "xxxx", LogicOffset: 50},
},
},
// case 4: updates far away from prev chunks
{
Chunks: []*filer_pb.FileChunk{
{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
{Offset: 0, Size: 200, FileId: "asdf", Mtime: 134},
{Offset: 250, Size: 250, FileId: "xxxx", Mtime: 154},
},
Offset: 0,
Size: 400,
Expected: []*ChunkView{
{Offset: 0, Size: 200, FileId: "asdf", LogicOffset: 0},
// {Offset: 0, Size: 150, FileId: "xxxx"}, // missing intervals should not happen
},
},
// case 5: updates overwrite full chunks
{
Chunks: []*filer_pb.FileChunk{
{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
{Offset: 0, Size: 200, FileId: "asdf", Mtime: 184},
{Offset: 70, Size: 150, FileId: "abc", Mtime: 143},
{Offset: 80, Size: 100, FileId: "xxxx", Mtime: 134},
},
Offset: 0,
Size: 220,
Expected: []*ChunkView{
{Offset: 0, Size: 200, FileId: "asdf", LogicOffset: 0},
{Offset: 0, Size: 20, FileId: "abc", LogicOffset: 200},
},
},
// case 6: same updates
{
Chunks: []*filer_pb.FileChunk{
{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
},
Offset: 0,
Size: 100,
Expected: []*ChunkView{
{Offset: 0, Size: 100, FileId: "abc", LogicOffset: 0},
},
},
// case 7: edge cases
{
Chunks: []*filer_pb.FileChunk{
{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
{Offset: 100, Size: 100, FileId: "asdf", Mtime: 134},
{Offset: 200, Size: 100, FileId: "fsad", Mtime: 353},
},
Offset: 0,
Size: 200,
Expected: []*ChunkView{
{Offset: 0, Size: 100, FileId: "abc", LogicOffset: 0},
{Offset: 0, Size: 100, FileId: "asdf", LogicOffset: 100},
},
},
}
for i, testcase := range testcases {
log.Printf("++++++++++ read test case %d ++++++++++++++++++++", i)
chunks := ViewFromChunks(testcase.Chunks, testcase.Offset, testcase.Size)
for x, chunk := range chunks {
log.Printf("read case %d, chunk %d, offset=%d, size=%d, fileId=%s",
i, x, chunk.Offset, chunk.Size, chunk.FileId)
if chunk.Offset != testcase.Expected[x].Offset {
t.Fatalf("failed on read case %d, chunk %d, Offset %d, expect %d",
i, x, chunk.Offset, testcase.Expected[x].Offset)
}
if chunk.Size != testcase.Expected[x].Size {
t.Fatalf("failed on read case %d, chunk %d, Size %d, expect %d",
i, x, chunk.Size, testcase.Expected[x].Size)
}
if chunk.FileId != testcase.Expected[x].FileId {
t.Fatalf("failed on read case %d, chunk %d, FileId %s, expect %s",
i, x, chunk.FileId, testcase.Expected[x].FileId)
}
if chunk.LogicOffset != testcase.Expected[x].LogicOffset {
t.Fatalf("failed on read case %d, chunk %d, LogicOffset %d, expect %d",
i, x, chunk.LogicOffset, testcase.Expected[x].LogicOffset)
}
}
if len(chunks) != len(testcase.Expected) {
t.Fatalf("failed to read test case %d, len %d expected %d", i, len(chunks), len(testcase.Expected))
}
}
}


@ -3,35 +3,39 @@ package filer2
import (
"fmt"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/karlseguin/ccache"
-"strings"
-"path/filepath"
-"time"
+"os"
+"path/filepath"
+"strings"
+"time"
+"github.com/chrislusf/seaweedfs/weed/operation"
)
type Filer struct {
-master string
+masters []string
store FilerStore
directoryCache *ccache.Cache
+currentMaster string
}
-func NewFiler(master string) *Filer {
+func NewFiler(masters []string) *Filer {
return &Filer{
-master: master,
+masters: masters,
directoryCache: ccache.New(ccache.Configure().MaxSize(1000).ItemsToPrune(100)),
}
}
-func (f *Filer) SetStore(store FilerStore) () {
+func (f *Filer) SetStore(store FilerStore) {
f.store = store
}
-func (f *Filer) DisableDirectoryCache() () {
+func (f *Filer) DisableDirectoryCache() {
f.directoryCache = nil
}
-func (f *Filer) CreateEntry(entry *Entry) (error) {
+func (f *Filer) CreateEntry(entry *Entry) error {
dirParts := strings.Split(string(entry.FullPath), "/")
@ -43,22 +47,19 @@ func (f *Filer) CreateEntry(entry *Entry) (error) {
dirPath := "/" + filepath.Join(dirParts[:i]...)
// fmt.Printf("%d directory: %+v\n", i, dirPath)
-dirFound := false
// first check local cache
dirEntry := f.cacheGetDirectory(dirPath)
// not found, check the store directly
if dirEntry == nil {
-var dirFindErr error
-dirFound, dirEntry, dirFindErr = f.FindEntry(FullPath(dirPath))
-if dirFindErr != nil {
-return fmt.Errorf("findDirectory %s: %v", dirPath, dirFindErr)
-}
+glog.V(4).Infof("find uncached directory: %s", dirPath)
+dirEntry, _ = f.FindEntry(FullPath(dirPath))
} else {
glog.V(4).Infof("found cached directory: %s", dirPath)
}
// no such existing directory
-if !dirFound {
+if dirEntry == nil {
// create the directory
now := time.Now()
@ -68,12 +69,13 @@ func (f *Filer) CreateEntry(entry *Entry) (error) {
Attr: Attr{
Mtime: now,
Crtime: now,
-Mode: os.ModeDir | 0660,
+Mode: os.ModeDir | 0770,
Uid: entry.Uid,
Gid: entry.Gid,
},
}
glog.V(2).Infof("create directory: %s %v", dirPath, dirEntry.Mode)
mkdirErr := f.store.InsertEntry(dirEntry)
if mkdirErr != nil {
return fmt.Errorf("mkdir %s: %v", dirPath, mkdirErr)
@ -94,9 +96,17 @@ func (f *Filer) CreateEntry(entry *Entry) (error) {
return fmt.Errorf("parent folder not found: %v", entry.FullPath)
}
/*
if !hasWritePermission(lastDirectoryEntry, entry) {
glog.V(0).Infof("directory %s: %v, entry: uid=%d gid=%d",
lastDirectoryEntry.FullPath, lastDirectoryEntry.Attr, entry.Uid, entry.Gid)
return fmt.Errorf("no write permission in folder %v", lastDirectoryEntry.FullPath)
}
*/
if oldEntry, err := f.FindEntry(entry.FullPath); err == nil {
f.deleteChunks(oldEntry)
}
if err := f.store.InsertEntry(entry); err != nil {
return fmt.Errorf("insert entry %s: %v", entry.FullPath, err)
@ -105,26 +115,43 @@ func (f *Filer) CreateEntry(entry *Entry) (error) {
return nil
}
-func (f *Filer) AppendFileChunk(p FullPath, c FileChunk) (err error) {
-return f.store.AppendFileChunk(p, c)
+func (f *Filer) UpdateEntry(entry *Entry) (err error) {
+return f.store.UpdateEntry(entry)
}
-func (f *Filer) FindEntry(p FullPath) (found bool, entry *Entry, err error) {
+func (f *Filer) FindEntry(p FullPath) (entry *Entry, err error) {
return f.store.FindEntry(p)
}
-func (f *Filer) DeleteEntry(p FullPath) (fileEntry *Entry, err error) {
+func (f *Filer) DeleteEntryMetaAndData(p FullPath) (err error) {
entry, err := f.FindEntry(p)
if err != nil {
return err
}
if entry.IsDirectory() {
entries, err := f.ListDirectoryEntries(p, "", false, 1)
if err != nil {
return fmt.Errorf("list folder %s: %v", p, err)
}
if len(entries) > 0 {
return fmt.Errorf("folder %s is not empty", p)
}
}
f.deleteChunks(entry)
return f.store.DeleteEntry(p)
}
-func (f *Filer) ListDirectoryEntries(p FullPath) ([]*Entry, error) {
-if strings.HasSuffix(string(p), "/") {
-p = p[0:len(p)-1]
+func (f *Filer) ListDirectoryEntries(p FullPath, startFileName string, inclusive bool, limit int) ([]*Entry, error) {
+if strings.HasSuffix(string(p), "/") && len(p) > 1 {
+p = p[0 : len(p)-1]
}
-return f.store.ListDirectoryEntries(p)
+return f.store.ListDirectoryEntries(p, startFileName, inclusive, limit)
}
-func (f *Filer) cacheGetDirectory(dirpath string) (*Entry) {
+func (f *Filer) cacheGetDirectory(dirpath string) *Entry {
if f.directoryCache == nil {
return nil
}
@ -148,3 +175,15 @@ func (f *Filer) cacheSetDirectory(dirpath string, dirEntry *Entry, level int) {
f.directoryCache.Set(dirpath, dirEntry, time.Duration(minutes)*time.Minute)
}
func (f *Filer) deleteChunks(entry *Entry) {
if entry == nil {
return
}
for _, chunk := range entry.Chunks {
if err := operation.DeleteFile(f.GetMaster(), chunk.FileId, ""); err != nil {
glog.V(0).Infof("deleting file %s: %v", chunk.FileId, err)
}
}
}


@ -0,0 +1,60 @@
package filer2
import (
"fmt"
"context"
"time"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/glog"
"google.golang.org/grpc"
)
func (fs *Filer) GetMaster() string {
return fs.currentMaster
}
func (fs *Filer) KeepConnectedToMaster() {
glog.V(0).Infof("Filer bootstraps with masters %v", fs.masters)
for _, master := range fs.masters {
glog.V(0).Infof("Connecting to %v", master)
withMasterClient(master, func(client master_pb.SeaweedClient) error {
stream, err := client.KeepConnected(context.Background())
if err != nil {
glog.V(0).Infof("failed to keep connected to %s: %v", master, err)
return err
}
glog.V(0).Infof("Connected to %v", master)
fs.currentMaster = master
for {
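// heartbeat roughly every 2.5 seconds: float32(10*1e3)*0.25 = 2500 milliseconds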
time.Sleep(time.Duration(float32(10*1e3)*0.25) * time.Millisecond)
if err = stream.Send(&master_pb.Empty{}); err != nil {
glog.V(0).Infof("failed to send to %s: %v", master, err)
return err
}
if _, err = stream.Recv(); err != nil {
glog.V(0).Infof("failed to receive from %s: %v", master, err)
return err
}
}
})
fs.currentMaster = ""
}
}
func withMasterClient(master string, fn func(client master_pb.SeaweedClient) error) error {
grpcConnection, err := grpc.Dial(master, grpc.WithInsecure())
if err != nil {
return fmt.Errorf("fail to dial %s: %v", master, err)
}
defer grpcConnection.Close()
client := master_pb.NewSeaweedClient(grpcConnection)
return fn(client)
}


@ -1,66 +0,0 @@
package filer2
import (
"errors"
"os"
"time"
"path/filepath"
)
type FileId string //file id in SeaweedFS
type FullPath string
func (fp FullPath) DirAndName() (string, string) {
dir, name := filepath.Split(string(fp))
if dir == "/" {
return dir, name
}
if len(dir) < 1 {
return "/", ""
}
return dir[:len(dir)-1], name
}
type Attr struct {
Mtime time.Time // time of last modification
Crtime time.Time // time of creation (OS X only)
Mode os.FileMode // file mode
Uid uint32 // owner uid
Gid uint32 // group gid
}
type Entry struct {
FullPath
Attr
// the following is for files
Chunks []FileChunk `json:"chunks,omitempty"`
}
type FileChunk struct {
Fid FileId `json:"fid,omitempty"`
Offset int64 `json:"offset,omitempty"`
Size uint64 `json:"size,omitempty"` // size in bytes
}
type AbstractFiler interface {
CreateEntry(*Entry) (error)
AppendFileChunk(FullPath, FileChunk) (err error)
FindEntry(FullPath) (found bool, fileEntry *Entry, err error)
DeleteEntry(FullPath) (fileEntry *Entry, err error)
ListDirectoryEntries(dirPath FullPath) ([]*Entry, error)
UpdateEntry(*Entry) (error)
}
var ErrNotFound = errors.New("filer: no entry is found in filer store")
type FilerStore interface {
InsertEntry(*Entry) (error)
AppendFileChunk(FullPath, FileChunk) (err error)
FindEntry(FullPath) (found bool, entry *Entry, err error)
DeleteEntry(FullPath) (fileEntry *Entry, err error)
ListDirectoryEntries(dirPath FullPath) ([]*Entry, error)
}

weed/filer2/filerstore.go (new file, 18 lines)

@ -0,0 +1,18 @@
package filer2
import (
"errors"
"github.com/spf13/viper"
)
type FilerStore interface {
GetName() string
Initialize(viper *viper.Viper) error
InsertEntry(*Entry) error
UpdateEntry(*Entry) (err error)
FindEntry(FullPath) (entry *Entry, err error)
DeleteEntry(FullPath) (err error)
ListDirectoryEntries(dirPath FullPath, startFileName string, inclusive bool, limit int) ([]*Entry, error)
}
var ErrNotFound = errors.New("filer: no entry is found in filer store")

weed/filer2/fullpath.go (new file, 31 lines)

@ -0,0 +1,31 @@
package filer2
import (
"path/filepath"
"strings"
)
type FullPath string
func NewFullPath(dir, name string) FullPath {
if strings.HasSuffix(dir, "/") {
return FullPath(dir + name)
}
return FullPath(dir + "/" + name)
}
func (fp FullPath) DirAndName() (string, string) {
dir, name := filepath.Split(string(fp))
if dir == "/" {
return dir, name
}
if len(dir) < 1 {
return "/", ""
}
return dir[:len(dir)-1], name
}
func (fp FullPath) Name() string {
_, name := filepath.Split(string(fp))
return name
}
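// Example round trip:
//   NewFullPath("/home/chris", "file.jpg")        => FullPath("/home/chris/file.jpg")
//   FullPath("/home/chris/file.jpg").DirAndName() => "/home/chris", "file.jpg"
//   FullPath("/home/chris/file.jpg").Name()       => "file.jpg"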


@ -0,0 +1,169 @@
package leveldb
import (
"bytes"
"fmt"
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
weed_util "github.com/chrislusf/seaweedfs/weed/util"
"github.com/spf13/viper"
"github.com/syndtr/goleveldb/leveldb"
leveldb_util "github.com/syndtr/goleveldb/leveldb/util"
)
const (
DIR_FILE_SEPARATOR = byte(0x00)
)
func init() {
filer2.Stores = append(filer2.Stores, &LevelDBStore{})
}
type LevelDBStore struct {
db *leveldb.DB
}
func (store *LevelDBStore) GetName() string {
return "leveldb"
}
func (store *LevelDBStore) Initialize(viper *viper.Viper) (err error) {
dir := viper.GetString("dir")
return store.initialize(dir)
}
func (store *LevelDBStore) initialize(dir string) (err error) {
if err := weed_util.TestFolderWritable(dir); err != nil {
return fmt.Errorf("Check Level Folder %s Writable: %s", dir, err)
}
if store.db, err = leveldb.OpenFile(dir, nil); err != nil {
return
}
return
}
func (store *LevelDBStore) InsertEntry(entry *filer2.Entry) (err error) {
key := genKey(entry.DirAndName())
value, err := entry.EncodeAttributesAndChunks()
if err != nil {
return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
}
err = store.db.Put(key, value, nil)
if err != nil {
return fmt.Errorf("persisting %s : %v", entry.FullPath, err)
}
// println("saved", entry.FullPath, "chunks", len(entry.Chunks))
return nil
}
func (store *LevelDBStore) UpdateEntry(entry *filer2.Entry) (err error) {
return store.InsertEntry(entry)
}
func (store *LevelDBStore) FindEntry(fullpath filer2.FullPath) (entry *filer2.Entry, err error) {
key := genKey(fullpath.DirAndName())
data, err := store.db.Get(key, nil)
if err == leveldb.ErrNotFound {
return nil, filer2.ErrNotFound
}
if err != nil {
return nil, fmt.Errorf("get %s : %v", entry.FullPath, err)
}
entry = &filer2.Entry{
FullPath: fullpath,
}
err = entry.DecodeAttributesAndChunks(data)
if err != nil {
return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err)
}
// println("read", entry.FullPath, "chunks", len(entry.Chunks), "data", len(data), string(data))
return entry, nil
}
func (store *LevelDBStore) DeleteEntry(fullpath filer2.FullPath) (err error) {
key := genKey(fullpath.DirAndName())
err = store.db.Delete(key, nil)
if err != nil {
return fmt.Errorf("delete %s : %v", fullpath, err)
}
return nil
}
func (store *LevelDBStore) ListDirectoryEntries(fullpath filer2.FullPath, startFileName string, inclusive bool,
limit int) (entries []*filer2.Entry, err error) {
directoryPrefix := genDirectoryKeyPrefix(fullpath, "")
iter := store.db.NewIterator(&leveldb_util.Range{Start: genDirectoryKeyPrefix(fullpath, startFileName)}, nil)
for iter.Next() {
key := iter.Key()
if !bytes.HasPrefix(key, directoryPrefix) {
break
}
fileName := getNameFromKey(key)
if fileName == "" {
continue
}
if fileName == startFileName && !inclusive {
continue
}
limit--
if limit < 0 {
break
}
entry := &filer2.Entry{
FullPath: filer2.NewFullPath(string(fullpath), fileName),
}
if decodeErr := entry.DecodeAttributesAndChunks(iter.Value()); decodeErr != nil {
err = decodeErr
glog.V(0).Infof("list %s : %v", entry.FullPath, err)
break
}
entries = append(entries, entry)
}
iter.Release()
return entries, err
}
func genKey(dirPath, fileName string) (key []byte) {
key = []byte(dirPath)
key = append(key, DIR_FILE_SEPARATOR)
key = append(key, []byte(fileName)...)
return key
}
func genDirectoryKeyPrefix(fullpath filer2.FullPath, startFileName string) (keyPrefix []byte) {
keyPrefix = []byte(string(fullpath))
keyPrefix = append(keyPrefix, DIR_FILE_SEPARATOR)
if len(startFileName) > 0 {
keyPrefix = append(keyPrefix, []byte(startFileName)...)
}
return keyPrefix
}
func getNameFromKey(key []byte) string {
sepIndex := len(key) - 1
for sepIndex >= 0 && key[sepIndex] != DIR_FILE_SEPARATOR {
sepIndex--
}
return string(key[sepIndex+1:])
}
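// Example: genKey("/home/chris", "file.jpg") produces the bytes
// "/home/chris\x00file.jpg"; getNameFromKey scans backward to the
// 0x00 separator and recovers "file.jpg".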


@ -0,0 +1,61 @@
package leveldb
import (
"github.com/chrislusf/seaweedfs/weed/filer2"
"io/ioutil"
"os"
"testing"
)
func TestCreateAndFind(t *testing.T) {
filer := filer2.NewFiler(nil)
dir, _ := ioutil.TempDir("", "seaweedfs_filer_test")
defer os.RemoveAll(dir)
store := &LevelDBStore{}
store.initialize(dir)
filer.SetStore(store)
filer.DisableDirectoryCache()
fullpath := filer2.FullPath("/home/chris/this/is/one/file1.jpg")
entry1 := &filer2.Entry{
FullPath: fullpath,
Attr: filer2.Attr{
Mode: 0440,
Uid: 1234,
Gid: 5678,
},
}
if err := filer.CreateEntry(entry1); err != nil {
t.Errorf("create entry %v: %v", entry1.FullPath, err)
return
}
entry, err := filer.FindEntry(fullpath)
if err != nil {
t.Errorf("find entry: %v", err)
return
}
if entry.FullPath != entry1.FullPath {
t.Errorf("find wrong entry: %v", entry.FullPath)
return
}
// checking one upper directory
entries, _ := filer.ListDirectoryEntries(filer2.FullPath("/home/chris/this/is/one"), "", false, 100)
if len(entries) != 1 {
t.Errorf("list entries count: %v", len(entries))
return
}
// checking root directory
entries, _ = filer.ListDirectoryEntries(filer2.FullPath("/"), "", false, 100)
if len(entries) != 1 {
t.Errorf("list entries count: %v", len(entries))
return
}
}

View file

@@ -1,81 +1,102 @@
package memdb
import (
"fmt"
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/google/btree"
"github.com/spf13/viper"
"strings"
"fmt"
"time"
)
func init() {
filer2.Stores = append(filer2.Stores, &MemDbStore{})
}
type MemDbStore struct {
tree *btree.BTree
}
type Entry struct {
type entryItem struct {
*filer2.Entry
}
func (a Entry) Less(b btree.Item) bool {
return strings.Compare(string(a.FullPath), string(b.(Entry).FullPath)) < 0
func (a entryItem) Less(b btree.Item) bool {
return strings.Compare(string(a.FullPath), string(b.(entryItem).FullPath)) < 0
}
func NewMemDbStore() (filer *MemDbStore) {
filer = &MemDbStore{}
filer.tree = btree.New(8)
return
func (store *MemDbStore) GetName() string {
return "memory"
}
func (filer *MemDbStore) InsertEntry(entry *filer2.Entry) (err error) {
func (store *MemDbStore) Initialize(viper *viper.Viper) (err error) {
store.tree = btree.New(8)
return nil
}
func (store *MemDbStore) InsertEntry(entry *filer2.Entry) (err error) {
// println("inserting", entry.FullPath)
filer.tree.ReplaceOrInsert(Entry{entry})
store.tree.ReplaceOrInsert(entryItem{entry})
return nil
}
func (filer *MemDbStore) AppendFileChunk(fullpath filer2.FullPath, fileChunk filer2.FileChunk) (err error) {
found, entry, err := filer.FindEntry(fullpath)
if !found {
return fmt.Errorf("No such file: %s", fullpath)
func (store *MemDbStore) UpdateEntry(entry *filer2.Entry) (err error) {
if _, err = store.FindEntry(entry.FullPath); err != nil {
return fmt.Errorf("no such file %s : %v", entry.FullPath, err)
}
entry.Chunks = append(entry.Chunks, fileChunk)
entry.Mtime = time.Now()
store.tree.ReplaceOrInsert(entryItem{entry})
return nil
}
func (filer *MemDbStore) FindEntry(fullpath filer2.FullPath) (found bool, entry *filer2.Entry, err error) {
item := filer.tree.Get(Entry{&filer2.Entry{FullPath: fullpath}})
if item == nil {
return false, nil, nil
}
entry = item.(Entry).Entry
return true, entry, nil
}
func (filer *MemDbStore) DeleteEntry(fullpath filer2.FullPath) (entry *filer2.Entry, err error) {
item := filer.tree.Delete(Entry{&filer2.Entry{FullPath: fullpath}})
func (store *MemDbStore) FindEntry(fullpath filer2.FullPath) (entry *filer2.Entry, err error) {
item := store.tree.Get(entryItem{&filer2.Entry{FullPath: fullpath}})
if item == nil {
return nil, nil
}
entry = item.(Entry).Entry
entry = item.(entryItem).Entry
return entry, nil
}
func (filer *MemDbStore) ListDirectoryEntries(fullpath filer2.FullPath) (entries []*filer2.Entry, err error) {
filer.tree.AscendGreaterOrEqual(Entry{&filer2.Entry{FullPath: fullpath}},
func (store *MemDbStore) DeleteEntry(fullpath filer2.FullPath) (err error) {
store.tree.Delete(entryItem{&filer2.Entry{FullPath: fullpath}})
return nil
}
func (store *MemDbStore) ListDirectoryEntries(fullpath filer2.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) {
startFrom := string(fullpath)
if startFileName != "" {
startFrom = startFrom + "/" + startFileName
}
store.tree.AscendGreaterOrEqual(entryItem{&filer2.Entry{FullPath: filer2.FullPath(startFrom)}},
func(item btree.Item) bool {
entry := item.(Entry).Entry
if limit <= 0 {
return false
}
entry := item.(entryItem).Entry
// println("checking", entry.FullPath)
if entry.FullPath == fullpath {
// skipping the current directory
// println("skipping the folder", entry.FullPath)
return true
}
dir, _ := entry.FullPath.DirAndName()
if !strings.HasPrefix(dir, string(fullpath)) {
// println("directory is:", dir, "fullpath:", fullpath)
dir, name := entry.FullPath.DirAndName()
if name == startFileName {
if inclusive {
limit--
entries = append(entries, entry)
}
return true
}
// only iterate the same prefix
if !strings.HasPrefix(string(entry.FullPath), string(fullpath)) {
// println("breaking from", entry.FullPath)
return false
}
if dir != string(fullpath) {
// this could be items in deeper directories
// println("skipping deeper folder", entry.FullPath)
@@ -83,6 +104,7 @@ func (filer *MemDbStore) ListDirectoryEntries(fullpath filer2.FullPath) (entries
}
// now process the directory items
// println("adding entry", entry.FullPath)
limit--
entries = append(entries, entry)
return true
},
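Taken together, the new (startFileName, inclusive, limit) parameters turn listing into cursor-style pagination: each call resumes from the last name seen. A hypothetical paging loop in the style of the tests below, assuming an already initialized filer and store:

	lastName := ""
	for {
		entries, err := filer.ListDirectoryEntries(
			filer2.FullPath("/home/chris"), lastName, false, 100)
		if err != nil || len(entries) == 0 {
			break
		}
		for _, entry := range entries {
			// process entry ...
			_, lastName = entry.FullPath.DirAndName()
		}
		// with inclusive=false the next page resumes strictly after lastName
	}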

View file

@@ -1,13 +1,15 @@
package memdb
import (
"testing"
"github.com/chrislusf/seaweedfs/weed/filer2"
"testing"
)
func TestCreateAndFind(t *testing.T) {
filer := filer2.NewFiler("")
filer.SetStore(NewMemDbStore())
filer := filer2.NewFiler(nil)
store := &MemDbStore{}
store.Initialize(nil)
filer.SetStore(store)
filer.DisableDirectoryCache()
fullpath := filer2.FullPath("/home/chris/this/is/one/file1.jpg")
@@ -26,18 +28,13 @@ func TestCreateAndFind(t *testing.T) {
return
}
found, entry, err := filer.FindEntry(fullpath)
entry, err := filer.FindEntry(fullpath)
if err != nil {
t.Errorf("find entry: %v", err)
return
}
if !found {
t.Errorf("Failed to find newly created file")
return
}
if entry.FullPath != entry1.FullPath {
t.Errorf("find wrong entry: %v", entry.FullPath)
return
@@ -46,8 +43,10 @@ func TestCreateAndFind(t *testing.T) {
}
func TestCreateFileAndList(t *testing.T) {
filer := filer2.NewFiler("")
filer.SetStore(NewMemDbStore())
filer := filer2.NewFiler(nil)
store := &MemDbStore{}
store.Initialize(nil)
filer.SetStore(store)
filer.DisableDirectoryCache()
entry1 := &filer2.Entry{
@@ -72,7 +71,7 @@ func TestCreateFileAndList(t *testing.T) {
filer.CreateEntry(entry2)
// checking the 2 files
entries, err := filer.ListDirectoryEntries(filer2.FullPath("/home/chris/this/is/one/"))
entries, err := filer.ListDirectoryEntries(filer2.FullPath("/home/chris/this/is/one/"), "", false, 100)
if err != nil {
t.Errorf("list entries: %v", err)
@@ -94,13 +93,28 @@ func TestCreateFileAndList(t *testing.T) {
return
}
// checking one upper directory
entries, _ = filer.ListDirectoryEntries(filer2.FullPath("/home/chris/this/is"))
// checking the offset
entries, err = filer.ListDirectoryEntries(filer2.FullPath("/home/chris/this/is/one/"), "file1.jpg", false, 100)
if len(entries) != 1 {
t.Errorf("list entries count: %v", len(entries))
return
}
// checking one upper directory
entries, _ = filer.ListDirectoryEntries(filer2.FullPath("/home/chris/this/is"), "", false, 100)
if len(entries) != 1 {
t.Errorf("list entries count: %v", len(entries))
return
}
// checking root directory
entries, _ = filer.ListDirectoryEntries(filer2.FullPath("/"), "", false, 100)
if len(entries) != 1 {
t.Errorf("list entries count: %v", len(entries))
return
}
// add file3
file3Path := filer2.FullPath("/home/chris/this/is/file3.jpg")
entry3 := &filer2.Entry{
FullPath: file3Path,
@@ -113,15 +127,15 @@ func TestCreateFileAndList(t *testing.T) {
filer.CreateEntry(entry3)
// checking one upper directory
entries, _ = filer.ListDirectoryEntries(filer2.FullPath("/home/chris/this/is"))
entries, _ = filer.ListDirectoryEntries(filer2.FullPath("/home/chris/this/is"), "", false, 100)
if len(entries) != 2 {
t.Errorf("list entries count: %v", len(entries))
return
}
// delete file and count
filer.DeleteEntry(file3Path)
entries, _ = filer.ListDirectoryEntries(filer2.FullPath("/home/chris/this/is"))
filer.DeleteEntryMetaAndData(file3Path)
entries, _ = filer.ListDirectoryEntries(filer2.FullPath("/home/chris/this/is"), "", false, 100)
if len(entries) != 1 {
t.Errorf("list entries count: %v", len(entries))
return

View file

@@ -0,0 +1,67 @@
package mysql
import (
"database/sql"
"fmt"
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/filer2/abstract_sql"
_ "github.com/go-sql-driver/mysql"
"github.com/spf13/viper"
)
const (
CONNECTION_URL_PATTERN = "%s:%s@tcp(%s:%d)/%s?charset=utf8"
)
func init() {
filer2.Stores = append(filer2.Stores, &MysqlStore{})
}
type MysqlStore struct {
abstract_sql.AbstractSqlStore
}
func (store *MysqlStore) GetName() string {
return "mysql"
}
func (store *MysqlStore) Initialize(viper *viper.Viper) (err error) {
return store.initialize(
viper.GetString("username"),
viper.GetString("password"),
viper.GetString("hostname"),
viper.GetInt("port"),
viper.GetString("database"),
viper.GetInt("connection_max_idle"),
viper.GetInt("connection_max_open"),
)
}
func (store *MysqlStore) initialize(user, password, hostname string, port int, database string, maxIdle, maxOpen int) (err error) {
store.SqlInsert = "INSERT INTO filemeta (dirhash,name,directory,meta) VALUES(?,?,?,?)"
store.SqlUpdate = "UPDATE filemeta SET meta=? WHERE dirhash=? AND name=? AND directory=?"
store.SqlFind = "SELECT meta FROM filemeta WHERE dirhash=? AND name=? AND directory=?"
store.SqlDelete = "DELETE FROM filemeta WHERE dirhash=? AND name=? AND directory=?"
store.SqlListExclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=? AND name>? AND directory=? ORDER BY NAME ASC LIMIT ?"
store.SqlListInclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=? AND name>=? AND directory=? ORDER BY NAME ASC LIMIT ?"
sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, user, password, hostname, port, database)
var dbErr error
store.DB, dbErr = sql.Open("mysql", sqlUrl)
if dbErr != nil {
// sql.Open returns a nil DB on failure, so there is nothing to Close here
store.DB = nil
return fmt.Errorf("can not connect to %s error:%v", sqlUrl, dbErr)
}
store.DB.SetMaxIdleConns(maxIdle)
store.DB.SetMaxOpenConns(maxOpen)
if err = store.DB.Ping(); err != nil {
return fmt.Errorf("connect to %s error:%v", sqlUrl, err)
}
return nil
}
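For reference, a filemeta table the statements above can run against. This is a sketch that mirrors the Postgres notes further down, with MySQL types substituted (LONGBLOB for the serialized meta, utf8 to match the connection string's charset):

CREATE TABLE IF NOT EXISTS filemeta (
  dirhash   BIGINT,        -- hash of the parent directory path
  name      VARCHAR(1000),
  directory VARCHAR(4096),
  meta      LONGBLOB,
  PRIMARY KEY (dirhash, name)
) DEFAULT CHARSET=utf8;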

View file

@@ -0,0 +1,17 @@
1. create "seaweedfs" database
export PGHOME=/Library/PostgreSQL/10
$PGHOME/bin/createdb --username=postgres --password seaweedfs
2. create "filemeta" table
$PGHOME/bin/psql --username=postgres --password seaweedfs
CREATE TABLE IF NOT EXISTS filemeta (
dirhash BIGINT,
name VARCHAR(1000),
directory VARCHAR(4096),
meta bytea,
PRIMARY KEY (dirhash, name)
);

View file

@@ -0,0 +1,68 @@
package postgres
import (
"database/sql"
"fmt"
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/filer2/abstract_sql"
_ "github.com/lib/pq"
"github.com/spf13/viper"
)
const (
CONNECTION_URL_PATTERN = "host=%s port=%d user=%s password=%s dbname=%s sslmode=%s connect_timeout=30"
)
func init() {
filer2.Stores = append(filer2.Stores, &PostgresStore{})
}
type PostgresStore struct {
abstract_sql.AbstractSqlStore
}
func (store *PostgresStore) GetName() string {
return "postgres"
}
func (store *PostgresStore) Initialize(viper *viper.Viper) (err error) {
return store.initialize(
viper.GetString("username"),
viper.GetString("password"),
viper.GetString("hostname"),
viper.GetInt("port"),
viper.GetString("database"),
viper.GetString("sslmode"),
viper.GetInt("connection_max_idle"),
viper.GetInt("connection_max_open"),
)
}
func (store *PostgresStore) initialize(user, password, hostname string, port int, database, sslmode string, maxIdle, maxOpen int) (err error) {
store.SqlInsert = "INSERT INTO filemeta (dirhash,name,directory,meta) VALUES($1,$2,$3,$4)"
store.SqlUpdate = "UPDATE filemeta SET meta=$1 WHERE dirhash=$2 AND name=$3 AND directory=$4"
store.SqlFind = "SELECT meta FROM filemeta WHERE dirhash=$1 AND name=$2 AND directory=$3"
store.SqlDelete = "DELETE FROM filemeta WHERE dirhash=$1 AND name=$2 AND directory=$3"
store.SqlListExclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=$1 AND name>$2 AND directory=$3 ORDER BY NAME ASC LIMIT $4"
store.SqlListInclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=$1 AND name>=$2 AND directory=$3 ORDER BY NAME ASC LIMIT $4"
sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, hostname, port, user, password, database, sslmode)
var dbErr error
store.DB, dbErr = sql.Open("postgres", sqlUrl)
if dbErr != nil {
// sql.Open returns a nil DB on failure, so there is nothing to Close here
store.DB = nil
return fmt.Errorf("can not connect to %s error:%v", sqlUrl, dbErr)
}
store.DB.SetMaxIdleConns(maxIdle)
store.DB.SetMaxOpenConns(maxOpen)
if err = store.DB.Ping(); err != nil {
return fmt.Errorf("connect to %s error:%v", sqlUrl, err)
}
return nil
}

View file

@@ -0,0 +1,167 @@
package redis
import (
"fmt"
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/go-redis/redis"
"github.com/spf13/viper"
"sort"
"strings"
)
const (
DIR_LIST_MARKER = "\x00"
)
func init() {
filer2.Stores = append(filer2.Stores, &RedisStore{})
}
type RedisStore struct {
Client *redis.Client
}
func (store *RedisStore) GetName() string {
return "redis"
}
func (store *RedisStore) Initialize(viper *viper.Viper) (err error) {
return store.initialize(
viper.GetString("address"),
viper.GetString("password"),
viper.GetInt("database"),
)
}
func (store *RedisStore) initialize(hostPort string, password string, database int) (err error) {
store.Client = redis.NewClient(&redis.Options{
Addr: hostPort,
Password: password,
DB: database,
})
return
}
func (store *RedisStore) InsertEntry(entry *filer2.Entry) (err error) {
value, err := entry.EncodeAttributesAndChunks()
if err != nil {
return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
}
_, err = store.Client.Set(string(entry.FullPath), value, 0).Result()
if err != nil {
return fmt.Errorf("persisting %s : %v", entry.FullPath, err)
}
dir, name := entry.FullPath.DirAndName()
if name != "" {
_, err = store.Client.SAdd(genDirectoryListKey(dir), name).Result()
if err != nil {
return fmt.Errorf("persisting %s in parent dir: %v", entry.FullPath, err)
}
}
return nil
}
func (store *RedisStore) UpdateEntry(entry *filer2.Entry) (err error) {
return store.InsertEntry(entry)
}
func (store *RedisStore) FindEntry(fullpath filer2.FullPath) (entry *filer2.Entry, err error) {
data, err := store.Client.Get(string(fullpath)).Result()
if err == redis.Nil {
return nil, filer2.ErrNotFound
}
if err != nil {
return nil, fmt.Errorf("get %s : %v", entry.FullPath, err)
}
entry = &filer2.Entry{
FullPath: fullpath,
}
err = entry.DecodeAttributesAndChunks([]byte(data))
if err != nil {
return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err)
}
return entry, nil
}
func (store *RedisStore) DeleteEntry(fullpath filer2.FullPath) (err error) {
_, err = store.Client.Del(string(fullpath)).Result()
if err != nil {
return fmt.Errorf("delete %s : %v", fullpath, err)
}
dir, name := fullpath.DirAndName()
if name != "" {
_, err = store.Client.SRem(genDirectoryListKey(dir), name).Result()
if err != nil {
return fmt.Errorf("delete %s in parent dir: %v", fullpath, err)
}
}
return nil
}
func (store *RedisStore) ListDirectoryEntries(fullpath filer2.FullPath, startFileName string, inclusive bool,
limit int) (entries []*filer2.Entry, err error) {
members, err := store.Client.SMembers(genDirectoryListKey(string(fullpath))).Result()
if err != nil {
return nil, fmt.Errorf("list %s : %v", fullpath, err)
}
// skip
if startFileName != "" {
var t []string
for _, m := range members {
if strings.Compare(m, startFileName) >= 0 {
if m == startFileName {
if inclusive {
t = append(t, m)
}
} else {
t = append(t, m)
}
}
}
members = t
}
// sort
sort.Slice(members, func(i, j int) bool {
return strings.Compare(members[i], members[j]) < 0
})
// limit
if limit < len(members) {
members = members[:limit]
}
// fetch entry meta
for _, fileName := range members {
path := filer2.NewFullPath(string(fullpath), fileName)
entry, err := store.FindEntry(path)
if err != nil {
glog.V(0).Infof("list %s : %v", path, err)
} else {
entries = append(entries, entry)
}
}
return entries, err
}
func genDirectoryListKey(dir string) (dirList string) {
return dir + DIR_LIST_MARKER
}
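So the Redis layout uses two kinds of keys: the entry's full path maps to its serialized attributes and chunks, and dir + DIR_LIST_MARKER holds a set of child names for listing. A small sketch of what InsertEntry writes for one file, with the marker value taken from the constant above:

package main

import "fmt"

const dirListMarker = "\x00" // DIR_LIST_MARKER above

func main() {
	fullPath := "/home/chris/file1.jpg"
	dir, name := "/home/chris", "file1.jpg"

	// InsertEntry issues two commands:
	fmt.Printf("SET  %q -> serialized entry\n", fullPath)
	fmt.Printf("SADD %q -> member %q\n", dir+dirListMarker, name)

	// ListDirectoryEntries does the reverse: SMEMBERS on the
	// directory set, then a GET (FindEntry) per child name.
}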

View file

@@ -7,11 +7,12 @@ import (
"path"
"sync"
"bazil.org/fuse/fs"
"bazil.org/fuse"
"github.com/chrislusf/seaweedfs/weed/filer"
"bazil.org/fuse/fs"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"path/filepath"
"time"
)
type Dir struct {
@@ -21,21 +22,156 @@ type Dir struct {
wfs *WFS
}
var _ = fs.Node(&Dir{})
var _ = fs.NodeCreater(&Dir{})
var _ = fs.NodeMkdirer(&Dir{})
var _ = fs.NodeStringLookuper(&Dir{})
var _ = fs.HandleReadDirAller(&Dir{})
var _ = fs.NodeRemover(&Dir{})
func (dir *Dir) Attr(context context.Context, attr *fuse.Attr) error {
if dir.Path == "/" {
attr.Valid = time.Second
attr.Mode = os.ModeDir | 0777
return nil
}
parent, name := filepath.Split(dir.Path)
var attributes *filer_pb.FuseAttributes
err := dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.GetEntryAttributesRequest{
Name: name,
ParentDir: parent,
}
glog.V(1).Infof("read dir attr: %v", request)
resp, err := client.GetEntryAttributes(context, request)
if err != nil {
glog.V(0).Infof("read dir attr %v: %v", request, err)
return err
}
attributes = resp.Attributes
return nil
})
if err != nil {
return err
}
// glog.V(1).Infof("dir %s: %v", dir.Path, attributes)
// glog.V(1).Infof("dir %s permission: %v", dir.Path, os.FileMode(attributes.FileMode))
attr.Mode = os.FileMode(attributes.FileMode) | os.ModeDir
if dir.Path == "/" && attributes.FileMode == 0 {
attr.Valid = time.Second
}
attr.Mtime = time.Unix(attributes.Mtime, 0)
attr.Ctime = time.Unix(attributes.Mtime, 0)
attr.Gid = attributes.Gid
attr.Uid = attributes.Uid
return nil
}
func (dir *Dir) newFile(name string, chunks []*filer_pb.FileChunk) *File {
return &File{
Name: name,
dir: dir,
wfs: dir.wfs,
// attributes: &filer_pb.FuseAttributes{},
Chunks: chunks,
}
}
func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest,
resp *fuse.CreateResponse) (fs.Node, fs.Handle, error) {
err := dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.CreateEntryRequest{
Directory: dir.Path,
Entry: &filer_pb.Entry{
Name: req.Name,
IsDirectory: req.Mode&os.ModeDir > 0,
Attributes: &filer_pb.FuseAttributes{
Mtime: time.Now().Unix(),
Crtime: time.Now().Unix(),
FileMode: uint32(req.Mode),
Uid: req.Uid,
Gid: req.Gid,
},
},
}
glog.V(1).Infof("create: %v", request)
if _, err := client.CreateEntry(ctx, request); err != nil {
return fmt.Errorf("create file: %v", err)
}
return nil
})
if err == nil {
file := dir.newFile(req.Name, nil)
dir.NodeMap[req.Name] = file
file.isOpen = true
return file, &FileHandle{
f: file,
dirtyPages: newDirtyPages(file),
RequestId: req.Header.ID,
NodeId: req.Header.Node,
Uid: req.Uid,
Gid: req.Gid,
}, nil
}
return nil, nil, err
}
func (dir *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, error) {
dir.NodeMapLock.Lock()
defer dir.NodeMapLock.Unlock()
fmt.Printf("mkdir %+v\n", req)
err := dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.CreateEntryRequest{
Directory: dir.Path,
Entry: &filer_pb.Entry{
Name: req.Name,
IsDirectory: true,
Attributes: &filer_pb.FuseAttributes{
Mtime: time.Now().Unix(),
Crtime: time.Now().Unix(),
FileMode: uint32(req.Mode),
Uid: req.Uid,
Gid: req.Gid,
},
},
}
glog.V(1).Infof("mkdir: %v", request)
if _, err := client.CreateEntry(ctx, request); err != nil {
glog.V(0).Infof("mkdir %v: %v", request, err)
return fmt.Errorf("make dir: %v", err)
}
return nil
})
if err == nil {
node := &Dir{Path: path.Join(dir.Path, req.Name), wfs: dir.wfs}
dir.NodeMap[req.Name] = node
return node, nil
}
return nil, err
}
func (dir *Dir) Lookup(ctx context.Context, name string) (node fs.Node, err error) {
@@ -59,7 +195,7 @@ func (dir *Dir) Lookup(ctx context.Context, name string) (node fs.Node, err erro
Name: name,
}
glog.V(1).Infof("lookup directory entry: %v", request)
glog.V(4).Infof("lookup directory entry: %v", request)
resp, err := client.LookupDirectoryEntry(ctx, request)
if err != nil {
return err
@@ -74,13 +210,13 @@ func (dir *Dir) Lookup(ctx context.Context, name string) (node fs.Node, err erro
if entry.IsDirectory {
node = &Dir{Path: path.Join(dir.Path, name), wfs: dir.wfs}
} else {
node = &File{FileId: filer.FileId(entry.FileId), Name: name, wfs: dir.wfs}
node = dir.newFile(name, entry.Chunks)
}
dir.NodeMap[name] = node
return node, nil
}
return nil, err
return nil, fuse.ENOENT
}
func (dir *Dir) ReadDirAll(ctx context.Context) (ret []fuse.Dirent, err error) {
@@ -91,7 +227,7 @@ func (dir *Dir) ReadDirAll(ctx context.Context) (ret []fuse.Dirent, err error) {
Directory: dir.Path,
}
glog.V(1).Infof("read directory: %v", request)
glog.V(4).Infof("read directory: %v", request)
resp, err := client.ListEntries(ctx, request)
if err != nil {
return err
@@ -104,6 +240,7 @@ func (dir *Dir) ReadDirAll(ctx context.Context) (ret []fuse.Dirent, err error) {
} else {
dirent := fuse.Dirent{Name: entry.Name, Type: fuse.DT_File}
ret = append(ret, dirent)
dir.wfs.listDirectoryEntriesCache.Set(dir.Path+"/"+entry.Name, entry, 300*time.Millisecond)
}
}

weed/filesys/dirty_page.go Normal file
View file

@@ -0,0 +1,165 @@
package filesys
import (
"fmt"
"bytes"
"time"
"context"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/glog"
)
type ContinuousDirtyPages struct {
hasData bool
Offset int64
Size int64
Data []byte
f *File
}
func newDirtyPages(file *File) *ContinuousDirtyPages {
return &ContinuousDirtyPages{
Data: make([]byte, file.wfs.chunkSizeLimit),
f: file,
}
}
func (pages *ContinuousDirtyPages) AddPage(ctx context.Context, offset int64, data []byte) (chunks []*filer_pb.FileChunk, err error) {
var chunk *filer_pb.FileChunk
if len(data) > len(pages.Data) {
// this is more than what buffer can hold.
// flush existing
if chunk, err = pages.saveExistingPagesToStorage(ctx); err == nil {
if chunk != nil {
glog.V(4).Infof("%s/%s flush existing [%d,%d)", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size))
// only append a real chunk: flushing is a no-op when the buffer is empty
chunks = append(chunks, chunk)
}
} else {
// chunk is nil when the flush fails, so don't dereference it in the log
glog.V(0).Infof("%s/%s failed to flush existing pages: %v", pages.f.dir.Path, pages.f.Name, err)
return
}
pages.Size = 0
// flush the big page
if chunk, err = pages.saveToStorage(ctx, data, offset); err == nil {
if chunk != nil {
glog.V(4).Infof("%s/%s flush big request [%d,%d)", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size))
chunks = append(chunks, chunk)
}
} else {
glog.V(0).Infof("%s/%s failed to flush2 [%d,%d): %v", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size), err)
return
}
return
}
if offset < pages.Offset || offset >= pages.Offset+int64(len(pages.Data)) ||
pages.Offset+int64(len(pages.Data)) < offset+int64(len(data)) {
// if the data is out of range,
// or buffer is full if adding new data,
// flush current buffer and add new data
// println("offset", offset, "size", len(data), "existing offset", pages.Offset, "size", pages.Size)
if chunk, err = pages.saveExistingPagesToStorage(ctx); err == nil {
if chunk != nil {
glog.V(4).Infof("%s/%s add save [%d,%d)", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size))
chunks = append(chunks, chunk)
}
} else {
glog.V(0).Infof("%s/%s add save [%d,%d): %v", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size), err)
return
}
pages.Offset = offset
pages.Size = int64(len(data))
copy(pages.Data, data)
return
}
copy(pages.Data[offset-pages.Offset:], data)
pages.Size = max(pages.Size, offset+int64(len(data))-pages.Offset)
return
}
func (pages *ContinuousDirtyPages) FlushToStorage(ctx context.Context) (chunk *filer_pb.FileChunk, err error) {
if pages.Size == 0 {
return nil, nil
}
if chunk, err = pages.saveExistingPagesToStorage(ctx); err == nil {
pages.Size = 0
if chunk != nil {
glog.V(4).Infof("%s/%s flush [%d,%d)", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size))
}
}
return
}
func (pages *ContinuousDirtyPages) saveExistingPagesToStorage(ctx context.Context) (*filer_pb.FileChunk, error) {
return pages.saveToStorage(ctx, pages.Data[:pages.Size], pages.Offset)
}
func (pages *ContinuousDirtyPages) saveToStorage(ctx context.Context, buf []byte, offset int64) (*filer_pb.FileChunk, error) {
if len(buf) == 0 {
return nil, nil
}
var fileId, host string
if err := pages.f.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.AssignVolumeRequest{
Count: 1,
Replication: pages.f.wfs.replication,
Collection: pages.f.wfs.collection,
}
resp, err := client.AssignVolume(ctx, request)
if err != nil {
glog.V(0).Infof("assign volume failure %v: %v", request, err)
return err
}
fileId, host = resp.FileId, resp.Url
return nil
}); err != nil {
return nil, fmt.Errorf("filer assign volume: %v", err)
}
fileUrl := fmt.Sprintf("http://%s/%s", host, fileId)
// upload the buffer passed in, which is also what the chunk Size below is based on
bufReader := bytes.NewReader(buf)
uploadResult, err := operation.Upload(fileUrl, pages.f.Name, bufReader, false, "application/octet-stream", nil, "")
if err != nil {
glog.V(0).Infof("upload data %v to %s: %v", pages.f.Name, fileUrl, err)
return nil, fmt.Errorf("upload data: %v", err)
}
if uploadResult.Error != "" {
glog.V(0).Infof("upload failure %v to %s: %v", pages.f.Name, fileUrl, err)
return nil, fmt.Errorf("upload result: %v", uploadResult.Error)
}
return &filer_pb.FileChunk{
FileId: fileId,
Offset: offset,
Size: uint64(len(buf)),
Mtime: time.Now().UnixNano(),
}, nil
}
func max(x, y int64) int64 {
if x > y {
return x
}
return y
}
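The three branches in AddPage are easier to see with concrete numbers. A minimal model of the decision logic, assuming an 8-byte chunkSizeLimit purely for illustration:

package main

import "fmt"

// classify mirrors the branch structure of AddPage: a write larger than
// the whole buffer is flushed as its own chunk; a write that does not fit
// the current contiguous range flushes the buffer first; anything else is
// merged in place.
func classify(pagesOffset, pagesDataLen, offset, dataLen int64) string {
	switch {
	case dataLen > pagesDataLen:
		return "flush existing, then upload this write directly"
	case offset < pagesOffset || offset >= pagesOffset+pagesDataLen ||
		pagesOffset+pagesDataLen < offset+dataLen:
		return "flush existing, then restart the buffer at this offset"
	default:
		return "copy into the buffer in place"
	}
}

func main() {
	const bufOffset, bufLen = 0, 8
	fmt.Println(classify(bufOffset, bufLen, 4, 4))  // in-place merge
	fmt.Println(classify(bufOffset, bufLen, 6, 4))  // would overflow: flush first
	fmt.Println(classify(bufOffset, bufLen, 0, 16)) // larger than the buffer
}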

View file

@@ -1,75 +1,136 @@
package filesys
import (
"context"
"fmt"
"bazil.org/fuse"
"github.com/chrislusf/seaweedfs/weed/filer"
"bazil.org/fuse/fs"
"context"
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"os"
"path/filepath"
"time"
)
var _ = fs.Node(&File{})
// var _ = fs.NodeOpener(&File{})
// var _ = fs.NodeFsyncer(&File{})
var _ = fs.Handle(&File{})
var _ = fs.HandleReadAller(&File{})
// var _ = fs.HandleReader(&File{})
var _ = fs.HandleWriter(&File{})
var _ = fs.NodeOpener(&File{})
var _ = fs.NodeFsyncer(&File{})
var _ = fs.NodeSetattrer(&File{})
type File struct {
FileId filer.FileId
Chunks []*filer_pb.FileChunk
Name string
dir *Dir
wfs *WFS
attributes *filer_pb.FuseAttributes
isOpen bool
}
func (file *File) Attr(context context.Context, attr *fuse.Attr) error {
attr.Mode = 0444
return file.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.GetFileAttributesRequest{
fullPath := filepath.Join(file.dir.Path, file.Name)
if file.attributes == nil || !file.isOpen {
item := file.wfs.listDirectoryEntriesCache.Get(fullPath)
if item != nil {
entry := item.Value().(*filer_pb.Entry)
file.Chunks = entry.Chunks
file.attributes = entry.Attributes
glog.V(1).Infof("file attr read cached %v attributes", file.Name)
} else {
err := file.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.GetEntryAttributesRequest{
Name: file.Name,
ParentDir: "", //TODO add parent folder
FileId: string(file.FileId),
ParentDir: file.dir.Path,
}
glog.V(1).Infof("read file size: %v", request)
resp, err := client.GetFileAttributes(context, request)
resp, err := client.GetEntryAttributes(context, request)
if err != nil {
glog.V(0).Infof("file attr read file %v: %v", request, err)
return err
}
attr.Size = resp.Attributes.FileSize
file.attributes = resp.Attributes
file.Chunks = resp.Chunks
return nil
})
}
func (file *File) ReadAll(ctx context.Context) (content []byte, err error) {
err = file.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.GetFileContentRequest{
FileId: string(file.FileId),
}
glog.V(1).Infof("read file content: %v", request)
resp, err := client.GetFileContent(ctx, request)
if err != nil {
return err
}
content = resp.Content
glog.V(1).Infof("file attr %v %+v: %d", fullPath, file.attributes, filer2.TotalSize(file.Chunks))
return nil
})
return content, err
if err != nil {
return err
}
}
}
attr.Mode = os.FileMode(file.attributes.FileMode)
attr.Size = filer2.TotalSize(file.Chunks)
attr.Mtime = time.Unix(file.attributes.Mtime, 0)
attr.Gid = file.attributes.Gid
attr.Uid = file.attributes.Uid
return nil
}
func (file *File) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) error {
fmt.Printf("write file %+v\n", req)
func (file *File) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.OpenResponse) (fs.Handle, error) {
fullPath := filepath.Join(file.dir.Path, file.Name)
glog.V(3).Infof("%v file open %+v", fullPath, req)
file.isOpen = true
return &FileHandle{
f: file,
dirtyPages: newDirtyPages(file),
RequestId: req.Header.ID,
NodeId: req.Header.Node,
Uid: req.Uid,
Gid: req.Gid,
}, nil
}
func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error {
fullPath := filepath.Join(file.dir.Path, file.Name)
glog.V(3).Infof("%v file setattr %+v", fullPath, req)
if req.Valid.Size() {
glog.V(3).Infof("%v file setattr set size=%v", fullPath, req.Size)
if req.Size == 0 {
// fmt.Printf("truncate %v \n", fullPath)
file.Chunks = nil
}
file.attributes.FileSize = req.Size
}
if req.Valid.Mode() {
file.attributes.FileMode = uint32(req.Mode)
}
if req.Valid.Uid() {
file.attributes.Uid = req.Uid
}
if req.Valid.Gid() {
file.attributes.Gid = req.Gid
}
if req.Valid.Mtime() {
file.attributes.Mtime = req.Mtime.Unix()
}
return nil
}
func (file *File) Fsync(ctx context.Context, req *fuse.FsyncRequest) error {
// fsync works at OS level
// write the file chunks to the filer
glog.V(3).Infof("%s/%s fsync file %+v", file.dir.Path, file.Name, req)
return nil
}

weed/filesys/filehandle.go Normal file
View file

@@ -0,0 +1,219 @@
package filesys
import (
"bazil.org/fuse"
"bazil.org/fuse/fs"
"context"
"fmt"
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
"strings"
"sync"
"net/http"
)
type FileHandle struct {
// cache file has been written to
dirtyPages *ContinuousDirtyPages
dirtyMetadata bool
f *File
RequestId fuse.RequestID // unique ID for request
NodeId fuse.NodeID // file or directory the request is about
Uid uint32 // user ID of process making request
Gid uint32 // group ID of process making request
}
var _ = fs.Handle(&FileHandle{})
// var _ = fs.HandleReadAller(&FileHandle{})
var _ = fs.HandleReader(&FileHandle{})
var _ = fs.HandleFlusher(&FileHandle{})
var _ = fs.HandleWriter(&FileHandle{})
var _ = fs.HandleReleaser(&FileHandle{})
func (fh *FileHandle) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error {
glog.V(4).Infof("%v/%v read fh: [%d,%d)", fh.f.dir.Path, fh.f.Name, req.Offset, req.Offset+int64(req.Size))
if len(fh.f.Chunks) == 0 {
glog.V(0).Infof("empty fh %v/%v", fh.f.dir.Path, fh.f.Name)
return fmt.Errorf("empty file %v/%v", fh.f.dir.Path, fh.f.Name)
}
buff := make([]byte, req.Size)
chunkViews := filer2.ViewFromChunks(fh.f.Chunks, req.Offset, req.Size)
var vids []string
for _, chunkView := range chunkViews {
vids = append(vids, volumeId(chunkView.FileId))
}
vid2Locations := make(map[string]*filer_pb.Locations)
err := fh.f.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
glog.V(4).Infof("read fh lookup volume id locations: %v", vids)
resp, err := client.LookupVolume(ctx, &filer_pb.LookupVolumeRequest{
VolumeIds: vids,
})
if err != nil {
return err
}
vid2Locations = resp.LocationsMap
return nil
})
if err != nil {
glog.V(4).Infof("%v/%v read fh lookup volume ids: %v", fh.f.dir.Path, fh.f.Name, err)
return fmt.Errorf("failed to lookup volume ids %v: %v", vids, err)
}
var totalRead int64
var wg sync.WaitGroup
for _, chunkView := range chunkViews {
wg.Add(1)
go func(chunkView *filer2.ChunkView) {
defer wg.Done()
glog.V(4).Infof("read fh reading chunk: %+v", chunkView)
locations := vid2Locations[volumeId(chunkView.FileId)]
if locations == nil || len(locations.Locations) == 0 {
glog.V(0).Infof("failed to locate %s", chunkView.FileId)
err = fmt.Errorf("failed to locate %s", chunkView.FileId)
return
}
var n int64
n, err = util.ReadUrl(
fmt.Sprintf("http://%s/%s", locations.Locations[0].Url, chunkView.FileId),
chunkView.Offset,
int(chunkView.Size),
buff[chunkView.LogicOffset-req.Offset:chunkView.LogicOffset-req.Offset+int64(chunkView.Size)])
if err != nil {
glog.V(0).Infof("%v/%v read http://%s/%v %v bytes: %v", fh.f.dir.Path, fh.f.Name, locations.Locations[0].Url, chunkView.FileId, n, err)
err = fmt.Errorf("failed to read http://%s/%s: %v",
locations.Locations[0].Url, chunkView.FileId, err)
return
}
glog.V(4).Infof("read fh read %d bytes: %+v", n, chunkView)
atomic.AddInt64(&totalRead, n) // chunk fetches run concurrently
}(chunkView)
}
wg.Wait()
resp.Data = buff[:totalRead]
return err
}
// Write to the file handle
func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) error {
// write the request to volume servers
glog.V(4).Infof("%+v/%v write fh: [%d,%d)", fh.f.dir.Path, fh.f.Name, req.Offset, req.Offset+int64(len(req.Data)))
chunks, err := fh.dirtyPages.AddPage(ctx, req.Offset, req.Data)
if err != nil {
return fmt.Errorf("write %s/%s at [%d,%d): %v", fh.f.dir.Path, fh.f.Name, req.Offset, req.Offset+int64(len(req.Data)), err)
}
resp.Size = len(req.Data)
if req.Offset == 0 {
fh.f.attributes.Mime = http.DetectContentType(req.Data)
fh.dirtyMetadata = true
}
for _, chunk := range chunks {
fh.f.Chunks = append(fh.f.Chunks, chunk)
glog.V(1).Infof("uploaded %s/%s to %s [%d,%d)", fh.f.dir.Path, fh.f.Name, chunk.FileId, chunk.Offset, chunk.Offset+int64(chunk.Size))
fh.dirtyMetadata = true
}
return nil
}
func (fh *FileHandle) Release(ctx context.Context, req *fuse.ReleaseRequest) error {
glog.V(4).Infof("%+v/%v release fh", fh.f.dir.Path, fh.f.Name)
fh.f.isOpen = false
return nil
}
// Flush - experimenting with uploading at flush time; this slows operations
// down until the data has been completely flushed
func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error {
// fflush works at fh level
// send the data to the OS
glog.V(4).Infof("%s/%s fh flush %v", fh.f.dir.Path, fh.f.Name, req)
chunk, err := fh.dirtyPages.FlushToStorage(ctx)
if err != nil {
glog.V(0).Infof("flush %s/%s to %s [%d,%d): %v", fh.f.dir.Path, fh.f.Name, chunk.FileId, chunk.Offset, chunk.Offset+int64(chunk.Size), err)
return fmt.Errorf("flush %s/%s to %s [%d,%d): %v", fh.f.dir.Path, fh.f.Name, chunk.FileId, chunk.Offset, chunk.Offset+int64(chunk.Size), err)
}
if chunk != nil {
fh.f.Chunks = append(fh.f.Chunks, chunk)
fh.dirtyMetadata = true
}
if !fh.dirtyMetadata {
return nil
}
if len(fh.f.Chunks) == 0 {
glog.V(2).Infof("fh %s/%s flush skipping empty: %v", fh.f.dir.Path, fh.f.Name, req)
return nil
}
err = fh.f.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.UpdateEntryRequest{
Directory: fh.f.dir.Path,
Entry: &filer_pb.Entry{
Name: fh.f.Name,
Attributes: fh.f.attributes,
Chunks: fh.f.Chunks,
},
}
glog.V(1).Infof("%s/%s set chunks: %v", fh.f.dir.Path, fh.f.Name, len(fh.f.Chunks))
for i, chunk := range fh.f.Chunks {
glog.V(1).Infof("%s/%s chunks %d: %v [%d,%d)", fh.f.dir.Path, fh.f.Name, i, chunk.FileId, chunk.Offset, chunk.Offset+int64(chunk.Size))
}
if _, err := client.UpdateEntry(ctx, request); err != nil {
return fmt.Errorf("update fh: %v", err)
}
return nil
})
if err == nil {
fh.dirtyMetadata = false
}
return err
}
func volumeId(fileId string) string {
lastCommaIndex := strings.LastIndex(fileId, ",")
if lastCommaIndex > 0 {
return fileId[:lastCommaIndex]
}
return fileId
}
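For example, a file id such as "3,01637037d6" is the volume id followed by the needle id and cookie, so volumeId returns "3", which is the key used to group chunk lookups for the LookupVolume call in Read above.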

View file

@@ -3,17 +3,26 @@ package filesys
import (
"bazil.org/fuse/fs"
"fmt"
"google.golang.org/grpc"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/karlseguin/ccache"
"google.golang.org/grpc"
)
type WFS struct {
filer string
listDirectoryEntriesCache *ccache.Cache
collection string
replication string
chunkSizeLimit int64
}
func NewSeaweedFileSystem(filer string) *WFS {
func NewSeaweedFileSystem(filer string, collection string, replication string, chunkSizeLimitMB int) *WFS {
return &WFS{
filer: filer,
listDirectoryEntriesCache: ccache.New(ccache.Configure().MaxSize(6000).ItemsToPrune(100)),
collection: collection,
replication: replication,
chunkSizeLimit: int64(chunkSizeLimitMB) * 1024 * 1024,
}
}
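As a hypothetical example, a mount helper could call NewSeaweedFileSystem("localhost:8888", "", "000", 4) to get a filesystem whose file handles each buffer up to 4 MB of contiguous writes in ContinuousDirtyPages before uploading a chunk; the collection and replication values are passed through to AssignVolume on every flush.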

View file

@@ -181,6 +181,7 @@ type bintree struct {
Func func() (*asset, error)
Children map[string]*bintree
}
var _bintree = &bintree{nil, map[string]*bintree{
"favicon": &bintree{nil, map[string]*bintree{
"favicon.ico": &bintree{favicon, map[string]*bintree{}},
@@ -233,4 +234,3 @@ func _filePath(dir, name string) string {
cannonicalName := strings.Replace(name, "\\", "/", -1)
return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...)
}

View file

@@ -52,7 +52,7 @@ func Assign(server string, r *VolumeAssignRequest) (*AssignResult, error) {
}
jsonBlob, err := util.Post("http://"+server+"/dir/assign", values)
glog.V(2).Info("assign result :", string(jsonBlob))
glog.V(2).Infof("assign result from %s : %s", server, string(jsonBlob))
if err != nil {
return nil, err
}

View file

@@ -1,31 +0,0 @@
package filer
import (
"fmt"
"net/url"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/util"
)
type SubmitResult struct {
FileName string `json:"fileName,omitempty"`
FileUrl string `json:"fileUrl,omitempty"`
Fid string `json:"fid,omitempty"`
Size uint32 `json:"size,omitempty"`
Error string `json:"error,omitempty"`
}
func RegisterFile(filer string, path string, fileId string, secret security.Secret) error {
// TODO: jwt need to be used
_ = security.GenJwt(secret, fileId)
values := make(url.Values)
values.Add("path", path)
values.Add("fileId", fileId)
_, err := util.Post("http://"+filer+"/admin/register", values)
if err != nil {
return fmt.Errorf("Failed to register path %s on filer %s to file id %s : %v", path, filer, fileId, err)
}
return nil
}

View file

@@ -12,15 +12,24 @@ service SeaweedFiler {
rpc ListEntries (ListEntriesRequest) returns (ListEntriesResponse) {
}
rpc GetFileAttributes (GetFileAttributesRequest) returns (GetFileAttributesResponse) {
rpc GetEntryAttributes (GetEntryAttributesRequest) returns (GetEntryAttributesResponse) {
}
rpc GetFileContent (GetFileContentRequest) returns (GetFileContentResponse) {
rpc CreateEntry (CreateEntryRequest) returns (CreateEntryResponse) {
}
rpc UpdateEntry (UpdateEntryRequest) returns (UpdateEntryResponse) {
}
rpc DeleteEntry (DeleteEntryRequest) returns (DeleteEntryResponse) {
}
rpc AssignVolume (AssignVolumeRequest) returns (AssignVolumeResponse) {
}
rpc LookupVolume (LookupVolumeRequest) returns (LookupVolumeResponse) {
}
}
//////////////////////////////////////////////////
@@ -45,26 +54,36 @@ message ListEntriesResponse {
message Entry {
string name = 1;
bool is_directory = 2;
string file_id = 3;
repeated FileChunk chunks = 3;
FuseAttributes attributes = 4;
}
message FileChunk {
string file_id = 1;
int64 offset = 2;
uint64 size = 3;
int64 mtime = 4;
}
message FuseAttributes {
uint64 file_size = 1;
int64 mtime = 2;
uint32 file_mode = 3;
uint32 uid = 4;
uint32 gid = 5;
int64 crtime = 6;
string mime = 7;
}
message GetFileAttributesRequest {
message GetEntryAttributesRequest {
string name = 1;
string parent_dir = 2;
string file_id = 3;
}
message GetFileAttributesResponse {
message GetEntryAttributesResponse {
FuseAttributes attributes = 1;
repeated FileChunk chunks = 2;
}
message GetFileContentRequest {
@@ -75,6 +94,21 @@ message GetFileContentResponse {
bytes content = 1;
}
message CreateEntryRequest {
string directory = 1;
Entry entry = 2;
}
message CreateEntryResponse {
}
message UpdateEntryRequest {
string directory = 1;
Entry entry = 2;
}
message UpdateEntryResponse {
}
message DeleteEntryRequest {
string directory = 1;
string name = 2;
@@ -83,3 +117,32 @@ message DeleteEntryRequest {
message DeleteEntryResponse {
}
message AssignVolumeRequest {
int32 count = 1;
string collection = 2;
string replication = 3;
}
message AssignVolumeResponse {
string file_id = 1;
string url = 2;
string public_url = 3;
int32 count = 4;
}
message LookupVolumeRequest {
repeated string volume_ids = 1;
}
message Locations {
repeated Location locations = 1;
}
message Location {
string url = 1;
string public_url = 2;
}
message LookupVolumeResponse {
map<string, Locations> locations_map = 1;
}
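Together the new RPCs cover the FUSE write path end to end: AssignVolume returns a file id plus a volume server URL, the bytes go to that server over HTTP (as dirty_page.go does), and CreateEntry or UpdateEntry records the resulting chunks. A rough client sketch, assuming the standard protoc-generated NewSeaweedFilerClient constructor and a reachable filer gRPC endpoint:

package main

import (
	"context"
	"log"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"google.golang.org/grpc"
)

func main() {
	conn, err := grpc.Dial("localhost:8888", grpc.WithInsecure()) // assumed filer address
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	client := filer_pb.NewSeaweedFilerClient(conn)
	ctx := context.Background()

	assigned, err := client.AssignVolume(ctx, &filer_pb.AssignVolumeRequest{Count: 1})
	if err != nil {
		log.Fatal(err)
	}

	// upload the bytes with an HTTP POST to http://<assigned.Url>/<assigned.FileId>,
	// then record the chunk against a directory entry:
	_, err = client.CreateEntry(ctx, &filer_pb.CreateEntryRequest{
		Directory: "/some/dir", // hypothetical path
		Entry: &filer_pb.Entry{
			Name:   "file1.jpg",
			Chunks: []*filer_pb.FileChunk{{FileId: assigned.FileId, Offset: 0, Size: 1024}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}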

View file

@@ -14,13 +14,24 @@ It has these top-level messages:
ListEntriesRequest
ListEntriesResponse
Entry
FileChunk
FuseAttributes
GetFileAttributesRequest
GetFileAttributesResponse
GetEntryAttributesRequest
GetEntryAttributesResponse
GetFileContentRequest
GetFileContentResponse
CreateEntryRequest
CreateEntryResponse
UpdateEntryRequest
UpdateEntryResponse
DeleteEntryRequest
DeleteEntryResponse
AssignVolumeRequest
AssignVolumeResponse
LookupVolumeRequest
Locations
Location
LookupVolumeResponse
*/
package filer_pb
@@ -119,7 +130,7 @@ func (m *ListEntriesResponse) GetEntries() []*Entry {
type Entry struct {
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
IsDirectory bool `protobuf:"varint,2,opt,name=is_directory,json=isDirectory" json:"is_directory,omitempty"`
FileId string `protobuf:"bytes,3,opt,name=file_id,json=fileId" json:"file_id,omitempty"`
Chunks []*FileChunk `protobuf:"bytes,3,rep,name=chunks" json:"chunks,omitempty"`
Attributes *FuseAttributes `protobuf:"bytes,4,opt,name=attributes" json:"attributes,omitempty"`
}
@@ -142,11 +153,11 @@ func (m *Entry) GetIsDirectory() bool {
return false
}
func (m *Entry) GetFileId() string {
func (m *Entry) GetChunks() []*FileChunk {
if m != nil {
return m.FileId
return m.Chunks
}
return ""
return nil
}
func (m *Entry) GetAttributes() *FuseAttributes {
@@ -156,18 +167,60 @@ func (m *Entry) GetAttributes() *FuseAttributes {
return nil
}
type FileChunk struct {
FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId" json:"file_id,omitempty"`
Offset int64 `protobuf:"varint,2,opt,name=offset" json:"offset,omitempty"`
Size uint64 `protobuf:"varint,3,opt,name=size" json:"size,omitempty"`
Mtime int64 `protobuf:"varint,4,opt,name=mtime" json:"mtime,omitempty"`
}
func (m *FileChunk) Reset() { *m = FileChunk{} }
func (m *FileChunk) String() string { return proto.CompactTextString(m) }
func (*FileChunk) ProtoMessage() {}
func (*FileChunk) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
func (m *FileChunk) GetFileId() string {
if m != nil {
return m.FileId
}
return ""
}
func (m *FileChunk) GetOffset() int64 {
if m != nil {
return m.Offset
}
return 0
}
func (m *FileChunk) GetSize() uint64 {
if m != nil {
return m.Size
}
return 0
}
func (m *FileChunk) GetMtime() int64 {
if m != nil {
return m.Mtime
}
return 0
}
type FuseAttributes struct {
FileSize uint64 `protobuf:"varint,1,opt,name=file_size,json=fileSize" json:"file_size,omitempty"`
Mtime int64 `protobuf:"varint,2,opt,name=mtime" json:"mtime,omitempty"`
FileMode uint32 `protobuf:"varint,3,opt,name=file_mode,json=fileMode" json:"file_mode,omitempty"`
Uid uint32 `protobuf:"varint,4,opt,name=uid" json:"uid,omitempty"`
Gid uint32 `protobuf:"varint,5,opt,name=gid" json:"gid,omitempty"`
Crtime int64 `protobuf:"varint,6,opt,name=crtime" json:"crtime,omitempty"`
Mime string `protobuf:"bytes,7,opt,name=mime" json:"mime,omitempty"`
}
func (m *FuseAttributes) Reset() { *m = FuseAttributes{} }
func (m *FuseAttributes) String() string { return proto.CompactTextString(m) }
func (*FuseAttributes) ProtoMessage() {}
func (*FuseAttributes) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
func (*FuseAttributes) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
func (m *FuseAttributes) GetFileSize() uint64 {
if m != nil {
@@ -204,54 +257,76 @@ func (m *FuseAttributes) GetGid() uint32 {
return 0
}
type GetFileAttributesRequest struct {
func (m *FuseAttributes) GetCrtime() int64 {
if m != nil {
return m.Crtime
}
return 0
}
func (m *FuseAttributes) GetMime() string {
if m != nil {
return m.Mime
}
return ""
}
type GetEntryAttributesRequest struct {
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
ParentDir string `protobuf:"bytes,2,opt,name=parent_dir,json=parentDir" json:"parent_dir,omitempty"`
FileId string `protobuf:"bytes,3,opt,name=file_id,json=fileId" json:"file_id,omitempty"`
}
func (m *GetFileAttributesRequest) Reset() { *m = GetFileAttributesRequest{} }
func (m *GetFileAttributesRequest) String() string { return proto.CompactTextString(m) }
func (*GetFileAttributesRequest) ProtoMessage() {}
func (*GetFileAttributesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
func (m *GetEntryAttributesRequest) Reset() { *m = GetEntryAttributesRequest{} }
func (m *GetEntryAttributesRequest) String() string { return proto.CompactTextString(m) }
func (*GetEntryAttributesRequest) ProtoMessage() {}
func (*GetEntryAttributesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
func (m *GetFileAttributesRequest) GetName() string {
func (m *GetEntryAttributesRequest) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *GetFileAttributesRequest) GetParentDir() string {
func (m *GetEntryAttributesRequest) GetParentDir() string {
if m != nil {
return m.ParentDir
}
return ""
}
func (m *GetFileAttributesRequest) GetFileId() string {
func (m *GetEntryAttributesRequest) GetFileId() string {
if m != nil {
return m.FileId
}
return ""
}
type GetFileAttributesResponse struct {
type GetEntryAttributesResponse struct {
Attributes *FuseAttributes `protobuf:"bytes,1,opt,name=attributes" json:"attributes,omitempty"`
Chunks []*FileChunk `protobuf:"bytes,2,rep,name=chunks" json:"chunks,omitempty"`
}
func (m *GetFileAttributesResponse) Reset() { *m = GetFileAttributesResponse{} }
func (m *GetFileAttributesResponse) String() string { return proto.CompactTextString(m) }
func (*GetFileAttributesResponse) ProtoMessage() {}
func (*GetFileAttributesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
func (m *GetEntryAttributesResponse) Reset() { *m = GetEntryAttributesResponse{} }
func (m *GetEntryAttributesResponse) String() string { return proto.CompactTextString(m) }
func (*GetEntryAttributesResponse) ProtoMessage() {}
func (*GetEntryAttributesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
func (m *GetFileAttributesResponse) GetAttributes() *FuseAttributes {
func (m *GetEntryAttributesResponse) GetAttributes() *FuseAttributes {
if m != nil {
return m.Attributes
}
return nil
}
func (m *GetEntryAttributesResponse) GetChunks() []*FileChunk {
if m != nil {
return m.Chunks
}
return nil
}
type GetFileContentRequest struct {
FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId" json:"file_id,omitempty"`
}
@@ -259,7 +334,7 @@ type GetFileContentRequest struct {
func (m *GetFileContentRequest) Reset() { *m = GetFileContentRequest{} }
func (m *GetFileContentRequest) String() string { return proto.CompactTextString(m) }
func (*GetFileContentRequest) ProtoMessage() {}
func (*GetFileContentRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
func (*GetFileContentRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
func (m *GetFileContentRequest) GetFileId() string {
if m != nil {
@@ -275,7 +350,7 @@ type GetFileContentResponse struct {
func (m *GetFileContentResponse) Reset() { *m = GetFileContentResponse{} }
func (m *GetFileContentResponse) String() string { return proto.CompactTextString(m) }
func (*GetFileContentResponse) ProtoMessage() {}
func (*GetFileContentResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
func (*GetFileContentResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
func (m *GetFileContentResponse) GetContent() []byte {
if m != nil {
@@ -284,6 +359,70 @@ func (m *GetFileContentResponse) GetContent() []byte {
return nil
}
type CreateEntryRequest struct {
Directory string `protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"`
Entry *Entry `protobuf:"bytes,2,opt,name=entry" json:"entry,omitempty"`
}
func (m *CreateEntryRequest) Reset() { *m = CreateEntryRequest{} }
func (m *CreateEntryRequest) String() string { return proto.CompactTextString(m) }
func (*CreateEntryRequest) ProtoMessage() {}
func (*CreateEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
func (m *CreateEntryRequest) GetDirectory() string {
if m != nil {
return m.Directory
}
return ""
}
func (m *CreateEntryRequest) GetEntry() *Entry {
if m != nil {
return m.Entry
}
return nil
}
type CreateEntryResponse struct {
}
func (m *CreateEntryResponse) Reset() { *m = CreateEntryResponse{} }
func (m *CreateEntryResponse) String() string { return proto.CompactTextString(m) }
func (*CreateEntryResponse) ProtoMessage() {}
func (*CreateEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} }
type UpdateEntryRequest struct {
Directory string `protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"`
Entry *Entry `protobuf:"bytes,2,opt,name=entry" json:"entry,omitempty"`
}
func (m *UpdateEntryRequest) Reset() { *m = UpdateEntryRequest{} }
func (m *UpdateEntryRequest) String() string { return proto.CompactTextString(m) }
func (*UpdateEntryRequest) ProtoMessage() {}
func (*UpdateEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }
func (m *UpdateEntryRequest) GetDirectory() string {
if m != nil {
return m.Directory
}
return ""
}
func (m *UpdateEntryRequest) GetEntry() *Entry {
if m != nil {
return m.Entry
}
return nil
}
type UpdateEntryResponse struct {
}
func (m *UpdateEntryResponse) Reset() { *m = UpdateEntryResponse{} }
func (m *UpdateEntryResponse) String() string { return proto.CompactTextString(m) }
func (*UpdateEntryResponse) ProtoMessage() {}
func (*UpdateEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} }
type DeleteEntryRequest struct {
Directory string `protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"`
Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"`
@@ -293,7 +432,7 @@ type DeleteEntryRequest struct {
func (m *DeleteEntryRequest) Reset() { *m = DeleteEntryRequest{} }
func (m *DeleteEntryRequest) String() string { return proto.CompactTextString(m) }
func (*DeleteEntryRequest) ProtoMessage() {}
func (*DeleteEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
func (*DeleteEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} }
func (m *DeleteEntryRequest) GetDirectory() string {
if m != nil {
@@ -322,7 +461,151 @@ type DeleteEntryResponse struct {
func (m *DeleteEntryResponse) Reset() { *m = DeleteEntryResponse{} }
func (m *DeleteEntryResponse) String() string { return proto.CompactTextString(m) }
func (*DeleteEntryResponse) ProtoMessage() {}
func (*DeleteEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
func (*DeleteEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} }
type AssignVolumeRequest struct {
Count int32 `protobuf:"varint,1,opt,name=count" json:"count,omitempty"`
Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"`
Replication string `protobuf:"bytes,3,opt,name=replication" json:"replication,omitempty"`
}
func (m *AssignVolumeRequest) Reset() { *m = AssignVolumeRequest{} }
func (m *AssignVolumeRequest) String() string { return proto.CompactTextString(m) }
func (*AssignVolumeRequest) ProtoMessage() {}
func (*AssignVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} }
func (m *AssignVolumeRequest) GetCount() int32 {
if m != nil {
return m.Count
}
return 0
}
func (m *AssignVolumeRequest) GetCollection() string {
if m != nil {
return m.Collection
}
return ""
}
func (m *AssignVolumeRequest) GetReplication() string {
if m != nil {
return m.Replication
}
return ""
}
type AssignVolumeResponse struct {
FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId" json:"file_id,omitempty"`
Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"`
PublicUrl string `protobuf:"bytes,3,opt,name=public_url,json=publicUrl" json:"public_url,omitempty"`
Count int32 `protobuf:"varint,4,opt,name=count" json:"count,omitempty"`
}
func (m *AssignVolumeResponse) Reset() { *m = AssignVolumeResponse{} }
func (m *AssignVolumeResponse) String() string { return proto.CompactTextString(m) }
func (*AssignVolumeResponse) ProtoMessage() {}
func (*AssignVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} }
func (m *AssignVolumeResponse) GetFileId() string {
if m != nil {
return m.FileId
}
return ""
}
func (m *AssignVolumeResponse) GetUrl() string {
if m != nil {
return m.Url
}
return ""
}
func (m *AssignVolumeResponse) GetPublicUrl() string {
if m != nil {
return m.PublicUrl
}
return ""
}
func (m *AssignVolumeResponse) GetCount() int32 {
if m != nil {
return m.Count
}
return 0
}
type LookupVolumeRequest struct {
VolumeIds []string `protobuf:"bytes,1,rep,name=volume_ids,json=volumeIds" json:"volume_ids,omitempty"`
}
func (m *LookupVolumeRequest) Reset() { *m = LookupVolumeRequest{} }
func (m *LookupVolumeRequest) String() string { return proto.CompactTextString(m) }
func (*LookupVolumeRequest) ProtoMessage() {}
func (*LookupVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} }
func (m *LookupVolumeRequest) GetVolumeIds() []string {
if m != nil {
return m.VolumeIds
}
return nil
}
type Locations struct {
Locations []*Location `protobuf:"bytes,1,rep,name=locations" json:"locations,omitempty"`
}
func (m *Locations) Reset() { *m = Locations{} }
func (m *Locations) String() string { return proto.CompactTextString(m) }
func (*Locations) ProtoMessage() {}
func (*Locations) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} }
func (m *Locations) GetLocations() []*Location {
if m != nil {
return m.Locations
}
return nil
}
type Location struct {
Url string `protobuf:"bytes,1,opt,name=url" json:"url,omitempty"`
PublicUrl string `protobuf:"bytes,2,opt,name=public_url,json=publicUrl" json:"public_url,omitempty"`
}
func (m *Location) Reset() { *m = Location{} }
func (m *Location) String() string { return proto.CompactTextString(m) }
func (*Location) ProtoMessage() {}
func (*Location) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} }
func (m *Location) GetUrl() string {
if m != nil {
return m.Url
}
return ""
}
func (m *Location) GetPublicUrl() string {
if m != nil {
return m.PublicUrl
}
return ""
}
type LookupVolumeResponse struct {
LocationsMap map[string]*Locations `protobuf:"bytes,1,rep,name=locations_map,json=locationsMap" json:"locations_map,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
}
func (m *LookupVolumeResponse) Reset() { *m = LookupVolumeResponse{} }
func (m *LookupVolumeResponse) String() string { return proto.CompactTextString(m) }
func (*LookupVolumeResponse) ProtoMessage() {}
func (*LookupVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} }
func (m *LookupVolumeResponse) GetLocationsMap() map[string]*Locations {
if m != nil {
return m.LocationsMap
}
return nil
}
func init() {
proto.RegisterType((*LookupDirectoryEntryRequest)(nil), "filer_pb.LookupDirectoryEntryRequest")
@@ -330,13 +613,24 @@ func init() {
proto.RegisterType((*ListEntriesRequest)(nil), "filer_pb.ListEntriesRequest")
proto.RegisterType((*ListEntriesResponse)(nil), "filer_pb.ListEntriesResponse")
proto.RegisterType((*Entry)(nil), "filer_pb.Entry")
proto.RegisterType((*FileChunk)(nil), "filer_pb.FileChunk")
proto.RegisterType((*FuseAttributes)(nil), "filer_pb.FuseAttributes")
proto.RegisterType((*GetFileAttributesRequest)(nil), "filer_pb.GetFileAttributesRequest")
proto.RegisterType((*GetFileAttributesResponse)(nil), "filer_pb.GetFileAttributesResponse")
proto.RegisterType((*GetEntryAttributesRequest)(nil), "filer_pb.GetEntryAttributesRequest")
proto.RegisterType((*GetEntryAttributesResponse)(nil), "filer_pb.GetEntryAttributesResponse")
proto.RegisterType((*GetFileContentRequest)(nil), "filer_pb.GetFileContentRequest")
proto.RegisterType((*GetFileContentResponse)(nil), "filer_pb.GetFileContentResponse")
proto.RegisterType((*CreateEntryRequest)(nil), "filer_pb.CreateEntryRequest")
proto.RegisterType((*CreateEntryResponse)(nil), "filer_pb.CreateEntryResponse")
proto.RegisterType((*UpdateEntryRequest)(nil), "filer_pb.UpdateEntryRequest")
proto.RegisterType((*UpdateEntryResponse)(nil), "filer_pb.UpdateEntryResponse")
proto.RegisterType((*DeleteEntryRequest)(nil), "filer_pb.DeleteEntryRequest")
proto.RegisterType((*DeleteEntryResponse)(nil), "filer_pb.DeleteEntryResponse")
proto.RegisterType((*AssignVolumeRequest)(nil), "filer_pb.AssignVolumeRequest")
proto.RegisterType((*AssignVolumeResponse)(nil), "filer_pb.AssignVolumeResponse")
proto.RegisterType((*LookupVolumeRequest)(nil), "filer_pb.LookupVolumeRequest")
proto.RegisterType((*Locations)(nil), "filer_pb.Locations")
proto.RegisterType((*Location)(nil), "filer_pb.Location")
proto.RegisterType((*LookupVolumeResponse)(nil), "filer_pb.LookupVolumeResponse")
}
// Reference imports to suppress errors if they are not otherwise used.
@@ -352,9 +646,12 @@ const _ = grpc.SupportPackageIsVersion4
type SeaweedFilerClient interface {
LookupDirectoryEntry(ctx context.Context, in *LookupDirectoryEntryRequest, opts ...grpc.CallOption) (*LookupDirectoryEntryResponse, error)
ListEntries(ctx context.Context, in *ListEntriesRequest, opts ...grpc.CallOption) (*ListEntriesResponse, error)
GetFileAttributes(ctx context.Context, in *GetFileAttributesRequest, opts ...grpc.CallOption) (*GetFileAttributesResponse, error)
GetFileContent(ctx context.Context, in *GetFileContentRequest, opts ...grpc.CallOption) (*GetFileContentResponse, error)
GetEntryAttributes(ctx context.Context, in *GetEntryAttributesRequest, opts ...grpc.CallOption) (*GetEntryAttributesResponse, error)
CreateEntry(ctx context.Context, in *CreateEntryRequest, opts ...grpc.CallOption) (*CreateEntryResponse, error)
UpdateEntry(ctx context.Context, in *UpdateEntryRequest, opts ...grpc.CallOption) (*UpdateEntryResponse, error)
DeleteEntry(ctx context.Context, in *DeleteEntryRequest, opts ...grpc.CallOption) (*DeleteEntryResponse, error)
AssignVolume(ctx context.Context, in *AssignVolumeRequest, opts ...grpc.CallOption) (*AssignVolumeResponse, error)
LookupVolume(ctx context.Context, in *LookupVolumeRequest, opts ...grpc.CallOption) (*LookupVolumeResponse, error)
}
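
For orientation, a minimal sketch of driving this generated client from another process; the address, directory, and file name are assumptions for illustration:

package main

import (
	"fmt"
	"log"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

func main() {
	// Dial the filer's gRPC endpoint (address assumed).
	conn, err := grpc.Dial("localhost:8888", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("dial filer: %v", err)
	}
	defer conn.Close()

	client := filer_pb.NewSeaweedFilerClient(conn)

	// Look up one entry under a directory.
	resp, err := client.LookupDirectoryEntry(context.Background(), &filer_pb.LookupDirectoryEntryRequest{
		Directory: "/some/dir",
		Name:      "file.txt",
	})
	if err != nil {
		log.Fatalf("lookup: %v", err)
	}
	fmt.Printf("entry: %+v\n", resp.Entry)
}
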
type seaweedFilerClient struct {
@@ -383,18 +680,27 @@ func (c *seaweedFilerClient) ListEntries(ctx context.Context, in *ListEntriesReq
return out, nil
}
func (c *seaweedFilerClient) GetFileAttributes(ctx context.Context, in *GetFileAttributesRequest, opts ...grpc.CallOption) (*GetFileAttributesResponse, error) {
out := new(GetFileAttributesResponse)
err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/GetFileAttributes", in, out, c.cc, opts...)
func (c *seaweedFilerClient) GetEntryAttributes(ctx context.Context, in *GetEntryAttributesRequest, opts ...grpc.CallOption) (*GetEntryAttributesResponse, error) {
out := new(GetEntryAttributesResponse)
err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/GetEntryAttributes", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *seaweedFilerClient) GetFileContent(ctx context.Context, in *GetFileContentRequest, opts ...grpc.CallOption) (*GetFileContentResponse, error) {
out := new(GetFileContentResponse)
err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/GetFileContent", in, out, c.cc, opts...)
func (c *seaweedFilerClient) CreateEntry(ctx context.Context, in *CreateEntryRequest, opts ...grpc.CallOption) (*CreateEntryResponse, error) {
out := new(CreateEntryResponse)
err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/CreateEntry", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *seaweedFilerClient) UpdateEntry(ctx context.Context, in *UpdateEntryRequest, opts ...grpc.CallOption) (*UpdateEntryResponse, error) {
out := new(UpdateEntryResponse)
err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/UpdateEntry", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
@@ -410,14 +716,35 @@ func (c *seaweedFilerClient) DeleteEntry(ctx context.Context, in *DeleteEntryReq
return out, nil
}
func (c *seaweedFilerClient) AssignVolume(ctx context.Context, in *AssignVolumeRequest, opts ...grpc.CallOption) (*AssignVolumeResponse, error) {
out := new(AssignVolumeResponse)
err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/AssignVolume", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *seaweedFilerClient) LookupVolume(ctx context.Context, in *LookupVolumeRequest, opts ...grpc.CallOption) (*LookupVolumeResponse, error) {
out := new(LookupVolumeResponse)
err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/LookupVolume", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for SeaweedFiler service
type SeaweedFilerServer interface {
LookupDirectoryEntry(context.Context, *LookupDirectoryEntryRequest) (*LookupDirectoryEntryResponse, error)
ListEntries(context.Context, *ListEntriesRequest) (*ListEntriesResponse, error)
GetFileAttributes(context.Context, *GetFileAttributesRequest) (*GetFileAttributesResponse, error)
GetFileContent(context.Context, *GetFileContentRequest) (*GetFileContentResponse, error)
GetEntryAttributes(context.Context, *GetEntryAttributesRequest) (*GetEntryAttributesResponse, error)
CreateEntry(context.Context, *CreateEntryRequest) (*CreateEntryResponse, error)
UpdateEntry(context.Context, *UpdateEntryRequest) (*UpdateEntryResponse, error)
DeleteEntry(context.Context, *DeleteEntryRequest) (*DeleteEntryResponse, error)
AssignVolume(context.Context, *AssignVolumeRequest) (*AssignVolumeResponse, error)
LookupVolume(context.Context, *LookupVolumeRequest) (*LookupVolumeResponse, error)
}
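
The server side mirrors this: any type implementing the interface above can be registered on a grpc.Server. A sketch, where exampleFiler is a hypothetical implementation and the port is assumed:

// Sketch: serving SeaweedFiler over TCP from outside this package.
// exampleFiler is hypothetical and must implement every SeaweedFilerServer
// method; imports of net, log, grpc, reflection, and filer_pb are assumed.
func serveFilerGrpc(addr string) {
	lis, err := net.Listen("tcp", addr) // e.g. ":8888"
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	grpcServer := grpc.NewServer()
	filer_pb.RegisterSeaweedFilerServer(grpcServer, &exampleFiler{})
	reflection.Register(grpcServer) // optional gRPC reflection
	grpcServer.Serve(lis)
}
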
func RegisterSeaweedFilerServer(s *grpc.Server, srv SeaweedFilerServer) {
@@ -460,38 +787,56 @@ func _SeaweedFiler_ListEntries_Handler(srv interface{}, ctx context.Context, dec
return interceptor(ctx, in, info, handler)
}
func _SeaweedFiler_GetFileAttributes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetFileAttributesRequest)
func _SeaweedFiler_GetEntryAttributes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetEntryAttributesRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SeaweedFilerServer).GetFileAttributes(ctx, in)
return srv.(SeaweedFilerServer).GetEntryAttributes(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/filer_pb.SeaweedFiler/GetFileAttributes",
FullMethod: "/filer_pb.SeaweedFiler/GetEntryAttributes",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SeaweedFilerServer).GetFileAttributes(ctx, req.(*GetFileAttributesRequest))
return srv.(SeaweedFilerServer).GetEntryAttributes(ctx, req.(*GetEntryAttributesRequest))
}
return interceptor(ctx, in, info, handler)
}
func _SeaweedFiler_GetFileContent_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetFileContentRequest)
func _SeaweedFiler_CreateEntry_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(CreateEntryRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SeaweedFilerServer).GetFileContent(ctx, in)
return srv.(SeaweedFilerServer).CreateEntry(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/filer_pb.SeaweedFiler/GetFileContent",
FullMethod: "/filer_pb.SeaweedFiler/CreateEntry",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SeaweedFilerServer).GetFileContent(ctx, req.(*GetFileContentRequest))
return srv.(SeaweedFilerServer).CreateEntry(ctx, req.(*CreateEntryRequest))
}
return interceptor(ctx, in, info, handler)
}
func _SeaweedFiler_UpdateEntry_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(UpdateEntryRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SeaweedFilerServer).UpdateEntry(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/filer_pb.SeaweedFiler/UpdateEntry",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SeaweedFilerServer).UpdateEntry(ctx, req.(*UpdateEntryRequest))
}
return interceptor(ctx, in, info, handler)
}
@@ -514,6 +859,42 @@ func _SeaweedFiler_DeleteEntry_Handler(srv interface{}, ctx context.Context, dec
return interceptor(ctx, in, info, handler)
}
func _SeaweedFiler_AssignVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(AssignVolumeRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SeaweedFilerServer).AssignVolume(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/filer_pb.SeaweedFiler/AssignVolume",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SeaweedFilerServer).AssignVolume(ctx, req.(*AssignVolumeRequest))
}
return interceptor(ctx, in, info, handler)
}
func _SeaweedFiler_LookupVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(LookupVolumeRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SeaweedFilerServer).LookupVolume(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/filer_pb.SeaweedFiler/LookupVolume",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SeaweedFilerServer).LookupVolume(ctx, req.(*LookupVolumeRequest))
}
return interceptor(ctx, in, info, handler)
}
var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{
ServiceName: "filer_pb.SeaweedFiler",
HandlerType: (*SeaweedFilerServer)(nil),
@@ -527,17 +908,29 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{
Handler: _SeaweedFiler_ListEntries_Handler,
},
{
MethodName: "GetFileAttributes",
Handler: _SeaweedFiler_GetFileAttributes_Handler,
MethodName: "GetEntryAttributes",
Handler: _SeaweedFiler_GetEntryAttributes_Handler,
},
{
MethodName: "GetFileContent",
Handler: _SeaweedFiler_GetFileContent_Handler,
MethodName: "CreateEntry",
Handler: _SeaweedFiler_CreateEntry_Handler,
},
{
MethodName: "UpdateEntry",
Handler: _SeaweedFiler_UpdateEntry_Handler,
},
{
MethodName: "DeleteEntry",
Handler: _SeaweedFiler_DeleteEntry_Handler,
},
{
MethodName: "AssignVolume",
Handler: _SeaweedFiler_AssignVolume_Handler,
},
{
MethodName: "LookupVolume",
Handler: _SeaweedFiler_LookupVolume_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "filer.proto",
@@ -546,39 +939,62 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{
func init() { proto.RegisterFile("filer.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 532 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xa4, 0x54, 0x4d, 0x6f, 0xd3, 0x40,
0x10, 0xad, 0x71, 0xd2, 0x34, 0x93, 0xb4, 0xc0, 0xb4, 0x05, 0x93, 0xa6, 0x22, 0x2c, 0x2a, 0x82,
0x4b, 0x84, 0xc2, 0x85, 0x23, 0x88, 0xb4, 0x08, 0x29, 0x08, 0xc9, 0x55, 0xaf, 0x44, 0x49, 0x3d,
0x8d, 0x56, 0x24, 0x76, 0xf0, 0xae, 0x85, 0xda, 0x33, 0x7f, 0x80, 0xbf, 0xc5, 0xaf, 0x42, 0xbb,
0xeb, 0x8f, 0x35, 0x76, 0x0a, 0x88, 0x9b, 0xf7, 0xcd, 0xbe, 0x99, 0x37, 0x6f, 0x66, 0x0d, 0x9d,
0x2b, 0xbe, 0xa4, 0x78, 0xb8, 0x8e, 0x23, 0x19, 0xe1, 0x8e, 0x3e, 0x4c, 0xd7, 0x73, 0xf6, 0x09,
0x8e, 0x26, 0x51, 0xf4, 0x25, 0x59, 0x8f, 0x79, 0x4c, 0x97, 0x32, 0x8a, 0xaf, 0x4f, 0x43, 0x19,
0x5f, 0xfb, 0xf4, 0x35, 0x21, 0x21, 0xb1, 0x0f, 0xed, 0x20, 0x0b, 0x78, 0xce, 0xc0, 0x79, 0xde,
0xf6, 0x0b, 0x00, 0x11, 0x1a, 0xe1, 0x6c, 0x45, 0xde, 0x1d, 0x1d, 0xd0, 0xdf, 0xec, 0x14, 0xfa,
0xf5, 0x09, 0xc5, 0x3a, 0x0a, 0x05, 0xe1, 0x09, 0x34, 0x49, 0x01, 0x3a, 0x5b, 0x67, 0x74, 0x77,
0x98, 0x49, 0x19, 0x9a, 0x7b, 0x26, 0xca, 0x46, 0x80, 0x13, 0x2e, 0xa4, 0xc2, 0x38, 0x89, 0xbf,
0x92, 0xc3, 0xde, 0xc0, 0x7e, 0x89, 0x93, 0x56, 0x7c, 0x01, 0x2d, 0x32, 0x90, 0xe7, 0x0c, 0xdc,
0xba, 0x9a, 0x59, 0x9c, 0xfd, 0x70, 0xa0, 0xa9, 0xa1, 0xbc, 0x35, 0xa7, 0x68, 0x0d, 0x9f, 0x40,
0x97, 0x8b, 0x69, 0x21, 0x40, 0xb5, 0xbd, 0xe3, 0x77, 0xb8, 0xc8, 0x5b, 0xc5, 0x87, 0xd0, 0x52,
0xb9, 0xa7, 0x3c, 0xf0, 0x5c, 0xcd, 0xdc, 0x56, 0xc7, 0x0f, 0x01, 0xbe, 0x06, 0x98, 0x49, 0x19,
0xf3, 0x79, 0x22, 0x49, 0x78, 0x0d, 0xdd, 0xbb, 0x57, 0xe8, 0x38, 0x4b, 0x04, 0xbd, 0xcd, 0xe3,
0xbe, 0x75, 0x97, 0x7d, 0x77, 0x60, 0xaf, 0x1c, 0xc6, 0x23, 0x68, 0xeb, 0x2a, 0x82, 0xdf, 0x18,
0x85, 0x0d, 0x5f, 0x4f, 0xf4, 0x9c, 0xdf, 0x10, 0x1e, 0x40, 0x73, 0x25, 0x79, 0x3a, 0x15, 0xd7,
0x37, 0x87, 0x9c, 0xb2, 0x8a, 0x02, 0xd2, 0xd2, 0x76, 0x0d, 0xe5, 0x63, 0x14, 0x10, 0xde, 0x03,
0x37, 0xe1, 0x81, 0x56, 0xb5, 0xeb, 0xab, 0x4f, 0x85, 0x2c, 0x78, 0xe0, 0x35, 0x0d, 0xb2, 0xe0,
0x01, 0xbb, 0x02, 0xef, 0x3d, 0xc9, 0x33, 0xbe, 0xb4, 0x75, 0xa6, 0x63, 0xa9, 0x33, 0xeb, 0x18,
0x60, 0x3d, 0x8b, 0x29, 0x94, 0xca, 0xb0, 0x74, 0x43, 0xda, 0x06, 0x19, 0xf3, 0x78, 0xa3, 0x51,
0xec, 0x02, 0x1e, 0xd5, 0xd4, 0x49, 0x47, 0x59, 0x76, 0xd1, 0xf9, 0x07, 0x17, 0x5f, 0xc2, 0x61,
0x9a, 0xf6, 0x5d, 0x14, 0x4a, 0x0a, 0x65, 0xa6, 0xdd, 0x12, 0xe2, 0x94, 0x84, 0x8c, 0xe0, 0xc1,
0xef, 0x8c, 0x54, 0x85, 0x07, 0xad, 0x4b, 0x03, 0x69, 0x4a, 0xd7, 0xcf, 0x8e, 0x8c, 0x03, 0x8e,
0x69, 0x49, 0x92, 0xfe, 0xef, 0x11, 0x55, 0x36, 0xcd, 0xad, 0x6c, 0x1a, 0x3b, 0x84, 0xfd, 0x52,
0x29, 0xa3, 0x6d, 0xf4, 0xd3, 0x85, 0xee, 0x39, 0xcd, 0xbe, 0x11, 0x05, 0x4a, 0x7a, 0x8c, 0x0b,
0x38, 0xa8, 0x7b, 0x8f, 0x78, 0x52, 0xd8, 0x76, 0xcb, 0x0f, 0xa0, 0xf7, 0xec, 0x4f, 0xd7, 0x4c,
0x5d, 0xb6, 0x85, 0x13, 0xe8, 0x58, 0xaf, 0x0f, 0xfb, 0x16, 0xb1, 0xf2, 0x90, 0x7b, 0xc7, 0x1b,
0xa2, 0x79, 0xb6, 0xcf, 0x70, 0xbf, 0xb2, 0x06, 0xc8, 0x0a, 0xd6, 0xa6, 0x5d, 0xec, 0x3d, 0xbd,
0xf5, 0x4e, 0x9e, 0xff, 0x02, 0xf6, 0xca, 0xd3, 0xc5, 0xc7, 0x15, 0x62, 0x79, 0x53, 0x7a, 0x83,
0xcd, 0x17, 0x6c, 0x13, 0xac, 0xa9, 0xd8, 0x26, 0x54, 0xf7, 0xc2, 0x36, 0xa1, 0x66, 0x94, 0x6c,
0x6b, 0xbe, 0xad, 0xff, 0xd6, 0xaf, 0x7e, 0x05, 0x00, 0x00, 0xff, 0xff, 0x6a, 0x83, 0x8a, 0x32,
0xbc, 0x05, 0x00, 0x00,
// 906 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x56, 0x5b, 0x6f, 0xdc, 0x44,
0x14, 0x8e, 0xd7, 0xd9, 0x4d, 0x7c, 0x76, 0xc3, 0x65, 0x36, 0x2d, 0x66, 0x9b, 0x54, 0x61, 0xa0,
0xa8, 0x15, 0x52, 0x14, 0x05, 0x1e, 0x2a, 0x10, 0x12, 0x55, 0x53, 0xaa, 0x4a, 0xa9, 0x2a, 0xb9,
0x04, 0x89, 0xa7, 0x95, 0x63, 0x9f, 0x5d, 0x46, 0xf1, 0x0d, 0xcf, 0x38, 0x28, 0xbc, 0xf2, 0x5b,
0x78, 0xe7, 0x81, 0x7f, 0xc0, 0x1f, 0x43, 0x73, 0xb1, 0x3d, 0x8e, 0xbd, 0xbd, 0x3c, 0xf0, 0x36,
0x73, 0x2e, 0xdf, 0xf9, 0xce, 0xcc, 0x99, 0xcf, 0x86, 0xe9, 0x8a, 0x25, 0x58, 0x1e, 0x17, 0x65,
0x2e, 0x72, 0xb2, 0xab, 0x36, 0xcb, 0xe2, 0x92, 0xbe, 0x82, 0x7b, 0xe7, 0x79, 0x7e, 0x55, 0x15,
0x67, 0xac, 0xc4, 0x48, 0xe4, 0xe5, 0xcd, 0xb3, 0x4c, 0x94, 0x37, 0x01, 0xfe, 0x56, 0x21, 0x17,
0xe4, 0x00, 0xbc, 0xb8, 0x76, 0xf8, 0xce, 0x91, 0xf3, 0xd0, 0x0b, 0x5a, 0x03, 0x21, 0xb0, 0x9d,
0x85, 0x29, 0xfa, 0x23, 0xe5, 0x50, 0x6b, 0xfa, 0x0c, 0x0e, 0x86, 0x01, 0x79, 0x91, 0x67, 0x1c,
0xc9, 0x03, 0x18, 0xa3, 0x34, 0x28, 0xb4, 0xe9, 0xe9, 0x87, 0xc7, 0x35, 0x95, 0x63, 0x1d, 0xa7,
0xbd, 0xf4, 0x14, 0xc8, 0x39, 0xe3, 0x42, 0xda, 0x18, 0xf2, 0x77, 0xa2, 0x43, 0x7f, 0x80, 0x79,
0x27, 0xc7, 0x54, 0x7c, 0x04, 0x3b, 0xa8, 0x4d, 0xbe, 0x73, 0xe4, 0x0e, 0xd5, 0xac, 0xfd, 0xf4,
0x2f, 0x07, 0xc6, 0xca, 0xd4, 0xb4, 0xe6, 0xb4, 0xad, 0x91, 0xcf, 0x60, 0xc6, 0xf8, 0xb2, 0x25,
0x20, 0xdb, 0xde, 0x0d, 0xa6, 0x8c, 0x37, 0xad, 0x92, 0xaf, 0x60, 0x12, 0xfd, 0x5a, 0x65, 0x57,
0xdc, 0x77, 0x55, 0xa9, 0x79, 0x5b, 0xea, 0x47, 0x96, 0xe0, 0x53, 0xe9, 0x0b, 0x4c, 0x08, 0x79,
0x0c, 0x10, 0x0a, 0x51, 0xb2, 0xcb, 0x4a, 0x20, 0xf7, 0xb7, 0xd5, 0x79, 0xf8, 0x56, 0x42, 0xc5,
0xf1, 0x49, 0xe3, 0x0f, 0xac, 0x58, 0xba, 0x02, 0xaf, 0x81, 0x23, 0x9f, 0xc0, 0x8e, 0xcc, 0x59,
0xb2, 0xd8, 0xb0, 0x9d, 0xc8, 0xed, 0x8b, 0x98, 0xdc, 0x85, 0x49, 0xbe, 0x5a, 0x71, 0x14, 0x8a,
0xa9, 0x1b, 0x98, 0x9d, 0xec, 0x8d, 0xb3, 0x3f, 0xd0, 0x77, 0x8f, 0x9c, 0x87, 0xdb, 0x81, 0x5a,
0x93, 0x7d, 0x18, 0xa7, 0x82, 0xa5, 0xa8, 0x68, 0xb8, 0x81, 0xde, 0xd0, 0xbf, 0x1d, 0xf8, 0xa0,
0x4b, 0x83, 0xdc, 0x03, 0x4f, 0x55, 0x53, 0x08, 0x8e, 0x42, 0x50, 0xd3, 0xf4, 0xba, 0x83, 0x32,
0xb2, 0x50, 0x9a, 0x94, 0x34, 0x8f, 0x75, 0xd1, 0x3d, 0x9d, 0xf2, 0x32, 0x8f, 0x91, 0x7c, 0x04,
0x6e, 0xc5, 0x62, 0x55, 0x76, 0x2f, 0x90, 0x4b, 0x69, 0x59, 0xb3, 0xd8, 0x1f, 0x6b, 0xcb, 0x9a,
0xa9, 0x46, 0xa2, 0x52, 0xe1, 0x4e, 0x74, 0x23, 0x7a, 0x27, 0x1b, 0x49, 0xa5, 0x75, 0x47, 0x5f,
0x92, 0x5c, 0xd3, 0x35, 0x7c, 0xfa, 0x1c, 0xd5, 0x0c, 0xdc, 0x58, 0x87, 0x67, 0xe6, 0x67, 0xe8,
0x56, 0x0f, 0x01, 0x8a, 0xb0, 0xc4, 0x4c, 0xc8, 0x9b, 0x35, 0xa3, 0xec, 0x69, 0xcb, 0x19, 0x2b,
0xed, 0xd3, 0x75, 0xed, 0xd3, 0xa5, 0x7f, 0x3a, 0xb0, 0x18, 0xaa, 0x64, 0xa6, 0xae, 0x7b, 0xb9,
0xce, 0xbb, 0x5f, 0xae, 0x35, 0x43, 0xa3, 0xb7, 0xce, 0x10, 0x3d, 0x81, 0x3b, 0xcf, 0x51, 0x28,
0x7b, 0x9e, 0x09, 0xcc, 0x44, 0xdd, 0xea, 0xa6, 0xa9, 0xa0, 0xa7, 0x70, 0xf7, 0x76, 0x86, 0xa1,
0xec, 0xc3, 0x4e, 0xa4, 0x4d, 0x2a, 0x65, 0x16, 0xd4, 0x5b, 0xfa, 0x0b, 0x90, 0xa7, 0x25, 0x86,
0x02, 0xdf, 0x43, 0x1c, 0x9a, 0x87, 0x3e, 0x7a, 0xe3, 0x43, 0xbf, 0x03, 0xf3, 0x0e, 0xb4, 0xe6,
0x22, 0x2b, 0x5e, 0x14, 0xf1, 0xff, 0x55, 0xb1, 0x03, 0x6d, 0x2a, 0x32, 0x20, 0x67, 0x98, 0xe0,
0x7b, 0x55, 0x1c, 0x10, 0xc0, 0x9e, 0x4a, 0xb8, 0x3d, 0x95, 0x90, 0x0c, 0x3a, 0xa5, 0x0c, 0x83,
0x14, 0xe6, 0x4f, 0x38, 0x67, 0xeb, 0xec, 0xe7, 0x3c, 0xa9, 0x52, 0xac, 0x29, 0xec, 0xc3, 0x38,
0xca, 0x2b, 0x73, 0x29, 0xe3, 0x40, 0x6f, 0xc8, 0x7d, 0x80, 0x28, 0x4f, 0x12, 0x8c, 0x04, 0xcb,
0x33, 0x43, 0xc0, 0xb2, 0x90, 0x23, 0x98, 0x96, 0x58, 0x24, 0x2c, 0x0a, 0x55, 0x80, 0x9e, 0x5d,
0xdb, 0x44, 0xaf, 0x61, 0xbf, 0x5b, 0xce, 0x8c, 0xc1, 0x46, 0x3d, 0x91, 0x4f, 0xb5, 0x4c, 0x4c,
0x2d, 0xb9, 0x54, 0x6f, 0xa7, 0xba, 0x4c, 0x58, 0xb4, 0x94, 0x0e, 0xd7, 0xbc, 0x1d, 0x65, 0xb9,
0x28, 0x93, 0x96, 0xf9, 0xb6, 0xc5, 0x9c, 0x7e, 0x03, 0x73, 0xfd, 0x85, 0xe8, 0xb6, 0x79, 0x08,
0x70, 0xad, 0x0c, 0x4b, 0x16, 0x6b, 0xa5, 0xf6, 0x02, 0x4f, 0x5b, 0x5e, 0xc4, 0x9c, 0x7e, 0x0f,
0xde, 0x79, 0xae, 0x99, 0x73, 0x72, 0x02, 0x5e, 0x52, 0x6f, 0x8c, 0xa8, 0x93, 0xf6, 0xb6, 0xeb,
0xb8, 0xa0, 0x0d, 0xa2, 0xdf, 0xc1, 0x6e, 0x6d, 0xae, 0xfb, 0x70, 0x36, 0xf5, 0x31, 0xba, 0xd5,
0x07, 0xfd, 0xd7, 0x81, 0xfd, 0x2e, 0x65, 0x73, 0x54, 0x17, 0xb0, 0xd7, 0x94, 0x58, 0xa6, 0x61,
0x61, 0xb8, 0x9c, 0xd8, 0x5c, 0xfa, 0x69, 0x0d, 0x41, 0xfe, 0x32, 0x2c, 0xf4, 0x08, 0xcc, 0x12,
0xcb, 0xb4, 0xf8, 0x09, 0x3e, 0xee, 0x85, 0x48, 0xd6, 0x57, 0x58, 0xcf, 0xa0, 0x5c, 0x92, 0x47,
0x30, 0xbe, 0x0e, 0x93, 0x0a, 0xcd, 0xbc, 0xcf, 0xfb, 0x27, 0xc0, 0x03, 0x1d, 0xf1, 0xed, 0xe8,
0xb1, 0x73, 0xfa, 0xcf, 0x18, 0x66, 0xaf, 0x31, 0xfc, 0x1d, 0x31, 0x96, 0xaf, 0xbf, 0x24, 0xeb,
0xba, 0xab, 0xee, 0xa7, 0x9a, 0x3c, 0xb8, 0x4d, 0x7f, 0xf0, 0xdf, 0x60, 0xf1, 0xe5, 0xdb, 0xc2,
0xcc, 0x58, 0x6f, 0x91, 0x73, 0x98, 0x5a, 0x1f, 0x66, 0x72, 0x60, 0x25, 0xf6, 0xbe, 0xf1, 0x8b,
0xc3, 0x0d, 0xde, 0x06, 0x2d, 0x04, 0xd2, 0xd7, 0x5d, 0xf2, 0x79, 0x9b, 0xb6, 0x51, 0xff, 0x17,
0x5f, 0xbc, 0x39, 0xc8, 0x26, 0x6c, 0x89, 0x92, 0x4d, 0xb8, 0x2f, 0x83, 0x36, 0xe1, 0x21, 0x25,
0x53, 0x68, 0x96, 0xe0, 0xd8, 0x68, 0x7d, 0x89, 0xb3, 0xd1, 0x86, 0x54, 0x4a, 0xa1, 0x59, 0xe2,
0x61, 0xa3, 0xf5, 0xe5, 0xcb, 0x46, 0x1b, 0x52, 0x9c, 0x2d, 0xf2, 0x0a, 0x66, 0xb6, 0x08, 0x10,
0x2b, 0x61, 0x40, 0x8b, 0x16, 0xf7, 0x37, 0xb9, 0x6d, 0x40, 0x7b, 0xe6, 0x6d, 0xc0, 0x81, 0x57,
0x6f, 0x03, 0x0e, 0x3d, 0x15, 0xba, 0x75, 0x39, 0x51, 0xbf, 0xac, 0x5f, 0xff, 0x17, 0x00, 0x00,
0xff, 0xff, 0xf1, 0x42, 0x51, 0xbb, 0xc1, 0x0a, 0x00, 0x00,
}


@@ -12,6 +12,7 @@ It has these top-level messages:
Heartbeat
HeartbeatResponse
VolumeInformationMessage
Empty
*/
package master_pb
@@ -235,10 +236,19 @@ func (m *VolumeInformationMessage) GetTtl() uint32 {
return 0
}
type Empty struct {
}
func (m *Empty) Reset() { *m = Empty{} }
func (m *Empty) String() string { return proto.CompactTextString(m) }
func (*Empty) ProtoMessage() {}
func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
func init() {
proto.RegisterType((*Heartbeat)(nil), "master_pb.Heartbeat")
proto.RegisterType((*HeartbeatResponse)(nil), "master_pb.HeartbeatResponse")
proto.RegisterType((*VolumeInformationMessage)(nil), "master_pb.VolumeInformationMessage")
proto.RegisterType((*Empty)(nil), "master_pb.Empty")
}
// Reference imports to suppress errors if they are not otherwise used.
@@ -253,6 +263,7 @@ const _ = grpc.SupportPackageIsVersion4
type SeaweedClient interface {
SendHeartbeat(ctx context.Context, opts ...grpc.CallOption) (Seaweed_SendHeartbeatClient, error)
KeepConnected(ctx context.Context, opts ...grpc.CallOption) (Seaweed_KeepConnectedClient, error)
}
type seaweedClient struct {
@@ -294,10 +305,42 @@ func (x *seaweedSendHeartbeatClient) Recv() (*HeartbeatResponse, error) {
return m, nil
}
func (c *seaweedClient) KeepConnected(ctx context.Context, opts ...grpc.CallOption) (Seaweed_KeepConnectedClient, error) {
stream, err := grpc.NewClientStream(ctx, &_Seaweed_serviceDesc.Streams[1], c.cc, "/master_pb.Seaweed/KeepConnected", opts...)
if err != nil {
return nil, err
}
x := &seaweedKeepConnectedClient{stream}
return x, nil
}
type Seaweed_KeepConnectedClient interface {
Send(*Empty) error
Recv() (*Empty, error)
grpc.ClientStream
}
type seaweedKeepConnectedClient struct {
grpc.ClientStream
}
func (x *seaweedKeepConnectedClient) Send(m *Empty) error {
return x.ClientStream.SendMsg(m)
}
func (x *seaweedKeepConnectedClient) Recv() (*Empty, error) {
m := new(Empty)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
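
A client keeps its link to the master alive by ping-ponging Empty messages over this stream. A sketch, assuming client is an already-dialed SeaweedClient (imports of context, log, and time assumed) and with an arbitrary interval:

// Keep-alive loop sketch over the KeepConnected stream.
stream, err := client.KeepConnected(context.Background())
if err != nil {
	log.Fatalf("keep connected: %v", err)
}
for {
	if err := stream.Send(&master_pb.Empty{}); err != nil {
		log.Printf("send: %v", err)
		return
	}
	if _, err := stream.Recv(); err != nil {
		log.Printf("recv: %v", err)
		return
	}
	time.Sleep(5 * time.Second) // interval chosen arbitrarily
}
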
// Server API for Seaweed service
type SeaweedServer interface {
SendHeartbeat(Seaweed_SendHeartbeatServer) error
KeepConnected(Seaweed_KeepConnectedServer) error
}
func RegisterSeaweedServer(s *grpc.Server, srv SeaweedServer) {
@@ -330,6 +373,32 @@ func (x *seaweedSendHeartbeatServer) Recv() (*Heartbeat, error) {
return m, nil
}
func _Seaweed_KeepConnected_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(SeaweedServer).KeepConnected(&seaweedKeepConnectedServer{stream})
}
type Seaweed_KeepConnectedServer interface {
Send(*Empty) error
Recv() (*Empty, error)
grpc.ServerStream
}
type seaweedKeepConnectedServer struct {
grpc.ServerStream
}
func (x *seaweedKeepConnectedServer) Send(m *Empty) error {
return x.ServerStream.SendMsg(m)
}
func (x *seaweedKeepConnectedServer) Recv() (*Empty, error) {
m := new(Empty)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
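
The handler on the master is the mirror image: answer each Empty with an Empty until the peer disconnects. A sketch, with exampleMaster a hypothetical SeaweedServer implementation:

func (ms *exampleMaster) KeepConnected(stream master_pb.Seaweed_KeepConnectedServer) error {
	for {
		if _, err := stream.Recv(); err != nil {
			return err // peer went away or the stream broke
		}
		if err := stream.Send(&master_pb.Empty{}); err != nil {
			return err
		}
	}
}
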
var _Seaweed_serviceDesc = grpc.ServiceDesc{
ServiceName: "master_pb.Seaweed",
HandlerType: (*SeaweedServer)(nil),
@@ -341,6 +410,12 @@ var _Seaweed_serviceDesc = grpc.ServiceDesc{
ServerStreams: true,
ClientStreams: true,
},
{
StreamName: "KeepConnected",
Handler: _Seaweed_KeepConnected_Handler,
ServerStreams: true,
ClientStreams: true,
},
},
Metadata: "seaweed.proto",
}
@@ -348,37 +423,39 @@ var _Seaweed_serviceDesc = grpc.ServiceDesc{
func init() { proto.RegisterFile("seaweed.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 504 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x74, 0x93, 0xdf, 0x6e, 0xd3, 0x30,
0x14, 0xc6, 0x49, 0x16, 0xda, 0xfa, 0x74, 0x1d, 0x9d, 0x85, 0x90, 0x05, 0x03, 0x4a, 0xb9, 0x89,
0x04, 0xaa, 0xd0, 0xb8, 0xe6, 0x86, 0x49, 0x88, 0x69, 0x20, 0x26, 0x17, 0xb8, 0x8d, 0xdc, 0xe4,
0x0c, 0x59, 0x73, 0xfe, 0xc8, 0x76, 0x47, 0xb3, 0x77, 0xe2, 0x2d, 0x78, 0x30, 0xe4, 0x93, 0xa6,
0x9d, 0x10, 0xdc, 0x1d, 0xff, 0xce, 0xe7, 0xf8, 0xe4, 0xfb, 0x6c, 0x98, 0x38, 0x54, 0x3f, 0x11,
0x8b, 0x45, 0x63, 0x6b, 0x5f, 0x73, 0x56, 0x2a, 0xe7, 0xd1, 0x66, 0xcd, 0x6a, 0xfe, 0x2b, 0x06,
0xf6, 0x11, 0x95, 0xf5, 0x2b, 0x54, 0x9e, 0x1f, 0x41, 0xac, 0x1b, 0x11, 0xcd, 0xa2, 0x94, 0xc9,
0x58, 0x37, 0x9c, 0x43, 0xd2, 0xd4, 0xd6, 0x8b, 0x78, 0x16, 0xa5, 0x13, 0x49, 0x35, 0x7f, 0x0a,
0xd0, 0xac, 0x57, 0x46, 0xe7, 0xd9, 0xda, 0x1a, 0x71, 0x40, 0x5a, 0xd6, 0x91, 0x6f, 0xd6, 0xf0,
0x14, 0xa6, 0xa5, 0xda, 0x64, 0x37, 0xb5, 0x59, 0x97, 0x98, 0xe5, 0xf5, 0xba, 0xf2, 0x22, 0xa1,
0xed, 0x47, 0xa5, 0xda, 0x7c, 0x27, 0x7c, 0x16, 0x28, 0x9f, 0xc1, 0x61, 0x50, 0x5e, 0x69, 0x83,
0xd9, 0x35, 0xb6, 0xe2, 0xfe, 0x2c, 0x4a, 0x13, 0x09, 0xa5, 0xda, 0x7c, 0xd0, 0x06, 0x2f, 0xb0,
0xe5, 0xcf, 0x61, 0x5c, 0x28, 0xaf, 0xb2, 0x1c, 0x2b, 0x8f, 0x56, 0x0c, 0xe8, 0x2c, 0x08, 0xe8,
0x8c, 0x48, 0x98, 0xcf, 0xaa, 0xfc, 0x5a, 0x0c, 0xa9, 0x43, 0x75, 0x98, 0x4f, 0x15, 0xa5, 0xae,
0x32, 0x9a, 0x7c, 0x44, 0x47, 0x33, 0x22, 0x97, 0x61, 0xfc, 0x77, 0x30, 0xec, 0x66, 0x73, 0x82,
0xcd, 0x0e, 0xd2, 0xf1, 0xe9, 0xcb, 0xc5, 0xce, 0x8d, 0x45, 0x37, 0xde, 0x79, 0x75, 0x55, 0xdb,
0x52, 0x79, 0x5d, 0x57, 0x9f, 0xd1, 0x39, 0xf5, 0x03, 0x65, 0xbf, 0x67, 0xee, 0xe0, 0x78, 0x67,
0x97, 0x44, 0xd7, 0xd4, 0x95, 0x43, 0x9e, 0xc2, 0x83, 0xae, 0xbf, 0xd4, 0xb7, 0xf8, 0x49, 0x97,
0xda, 0x93, 0x87, 0x89, 0xfc, 0x1b, 0xf3, 0x13, 0x60, 0x0e, 0x73, 0x8b, 0xfe, 0x02, 0x5b, 0x72,
0x95, 0xc9, 0x3d, 0xe0, 0x8f, 0x60, 0x60, 0x50, 0x15, 0x68, 0xb7, 0xb6, 0x6e, 0x57, 0xf3, 0xdf,
0x31, 0x88, 0xff, 0x8d, 0x46, 0x99, 0x15, 0x74, 0xde, 0x44, 0xc6, 0xba, 0x08, 0x9e, 0x38, 0x7d,
0x8b, 0xf4, 0xf5, 0x44, 0x52, 0xcd, 0x9f, 0x01, 0xe4, 0xb5, 0x31, 0x98, 0x87, 0x8d, 0xdb, 0x8f,
0xdf, 0x21, 0xc1, 0x33, 0x8a, 0x61, 0x1f, 0x57, 0x22, 0x59, 0x20, 0x5d, 0x52, 0x2f, 0xe0, 0xb0,
0x40, 0x83, 0xbe, 0x17, 0x74, 0x49, 0x8d, 0x3b, 0xd6, 0x49, 0x5e, 0x03, 0xef, 0x96, 0x45, 0xb6,
0x6a, 0x77, 0xc2, 0x01, 0x09, 0xa7, 0xdb, 0xce, 0xfb, 0xb6, 0x57, 0x3f, 0x01, 0x66, 0x51, 0x15,
0x59, 0x5d, 0x99, 0x96, 0xc2, 0x1b, 0xc9, 0x51, 0x00, 0x5f, 0x2a, 0xd3, 0xf2, 0x57, 0x70, 0x6c,
0xb1, 0x31, 0x3a, 0x57, 0x59, 0x63, 0x54, 0x8e, 0x25, 0x56, 0x7d, 0x8e, 0xd3, 0x6d, 0xe3, 0xb2,
0xe7, 0x5c, 0xc0, 0xf0, 0x06, 0xad, 0x0b, 0xbf, 0xc5, 0x48, 0xd2, 0x2f, 0xf9, 0x14, 0x0e, 0xbc,
0x37, 0x02, 0x88, 0x86, 0xf2, 0xf4, 0x2b, 0x0c, 0x97, 0xdd, 0x3b, 0xe0, 0xe7, 0x30, 0x59, 0x62,
0x55, 0xec, 0x6f, 0xfe, 0xc3, 0x3b, 0xb7, 0x60, 0x47, 0x1f, 0x9f, 0xfc, 0x8b, 0xf6, 0xb1, 0xcf,
0xef, 0xa5, 0xd1, 0x9b, 0x68, 0x35, 0xa0, 0x37, 0xf5, 0xf6, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff,
0x01, 0x14, 0xbb, 0x3a, 0x64, 0x03, 0x00, 0x00,
// 540 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x74, 0x93, 0xcf, 0x6e, 0xd3, 0x4e,
0x10, 0xc7, 0x7f, 0x76, 0xdd, 0xa4, 0x9e, 0x34, 0xfd, 0xa5, 0x2b, 0x84, 0xac, 0x52, 0x20, 0x84,
0x8b, 0x25, 0x50, 0x84, 0xca, 0x89, 0x03, 0x17, 0x22, 0x10, 0x55, 0x40, 0x54, 0x8e, 0xe0, 0x6a,
0x6d, 0xec, 0x29, 0x5a, 0x75, 0xbd, 0xb6, 0x76, 0x37, 0x25, 0xee, 0x4b, 0xf0, 0x24, 0xbc, 0x05,
0x0f, 0x86, 0x76, 0x36, 0x4e, 0x22, 0xfe, 0xdc, 0x66, 0x3f, 0xf3, 0x1d, 0xcf, 0x78, 0xbe, 0xbb,
0x30, 0x34, 0xc8, 0xbf, 0x21, 0x96, 0xd3, 0x46, 0xd7, 0xb6, 0x66, 0x71, 0xc5, 0x8d, 0x45, 0x9d,
0x37, 0xcb, 0xc9, 0x8f, 0x10, 0xe2, 0xf7, 0xc8, 0xb5, 0x5d, 0x22, 0xb7, 0xec, 0x04, 0x42, 0xd1,
0x24, 0xc1, 0x38, 0x48, 0xe3, 0x2c, 0x14, 0x0d, 0x63, 0x10, 0x35, 0xb5, 0xb6, 0x49, 0x38, 0x0e,
0xd2, 0x61, 0x46, 0x31, 0x7b, 0x08, 0xd0, 0xac, 0x96, 0x52, 0x14, 0xf9, 0x4a, 0xcb, 0xe4, 0x80,
0xb4, 0xb1, 0x27, 0x9f, 0xb5, 0x64, 0x29, 0x8c, 0x2a, 0xbe, 0xce, 0x6f, 0x6b, 0xb9, 0xaa, 0x30,
0x2f, 0xea, 0x95, 0xb2, 0x49, 0x44, 0xe5, 0x27, 0x15, 0x5f, 0x7f, 0x21, 0x3c, 0x73, 0x94, 0x8d,
0xe1, 0xd8, 0x29, 0xaf, 0x85, 0xc4, 0xfc, 0x06, 0xdb, 0xe4, 0x70, 0x1c, 0xa4, 0x51, 0x06, 0x15,
0x5f, 0xbf, 0x13, 0x12, 0xe7, 0xd8, 0xb2, 0xc7, 0x30, 0x28, 0xb9, 0xe5, 0x79, 0x81, 0xca, 0xa2,
0x4e, 0x7a, 0xd4, 0x0b, 0x1c, 0x9a, 0x11, 0x71, 0xf3, 0x69, 0x5e, 0xdc, 0x24, 0x7d, 0xca, 0x50,
0xec, 0xe6, 0xe3, 0x65, 0x25, 0x54, 0x4e, 0x93, 0x1f, 0x51, 0xeb, 0x98, 0xc8, 0x95, 0x1b, 0xff,
0x35, 0xf4, 0xfd, 0x6c, 0x26, 0x89, 0xc7, 0x07, 0xe9, 0xe0, 0xe2, 0xe9, 0x74, 0xbb, 0x8d, 0xa9,
0x1f, 0xef, 0x52, 0x5d, 0xd7, 0xba, 0xe2, 0x56, 0xd4, 0xea, 0x23, 0x1a, 0xc3, 0xbf, 0x62, 0xd6,
0xd5, 0x4c, 0x0c, 0x9c, 0x6e, 0xd7, 0x95, 0xa1, 0x69, 0x6a, 0x65, 0x90, 0xa5, 0xf0, 0xbf, 0xcf,
0x2f, 0xc4, 0x1d, 0x7e, 0x10, 0x95, 0xb0, 0xb4, 0xc3, 0x28, 0xfb, 0x1d, 0xb3, 0x73, 0x88, 0x0d,
0x16, 0x1a, 0xed, 0x1c, 0x5b, 0xda, 0x6a, 0x9c, 0xed, 0x00, 0xbb, 0x0f, 0x3d, 0x89, 0xbc, 0x44,
0xbd, 0x59, 0xeb, 0xe6, 0x34, 0xf9, 0x19, 0x42, 0xf2, 0xaf, 0xd1, 0xc8, 0xb3, 0x92, 0xfa, 0x0d,
0xb3, 0x50, 0x94, 0x6e, 0x27, 0x46, 0xdc, 0x21, 0x7d, 0x3d, 0xca, 0x28, 0x66, 0x8f, 0x00, 0x8a,
0x5a, 0x4a, 0x2c, 0x5c, 0xe1, 0xe6, 0xe3, 0x7b, 0xc4, 0xed, 0x8c, 0x6c, 0xd8, 0xd9, 0x15, 0x65,
0xb1, 0x23, 0xde, 0xa9, 0x27, 0x70, 0x5c, 0xa2, 0x44, 0xdb, 0x09, 0xbc, 0x53, 0x03, 0xcf, 0xbc,
0xe4, 0x39, 0x30, 0x7f, 0x2c, 0xf3, 0x65, 0xbb, 0x15, 0xf6, 0x48, 0x38, 0xda, 0x64, 0xde, 0xb4,
0x9d, 0xfa, 0x01, 0xc4, 0x1a, 0x79, 0x99, 0xd7, 0x4a, 0xb6, 0x64, 0xde, 0x51, 0x76, 0xe4, 0xc0,
0x27, 0x25, 0x5b, 0xf6, 0x0c, 0x4e, 0x35, 0x36, 0x52, 0x14, 0x3c, 0x6f, 0x24, 0x2f, 0xb0, 0x42,
0xd5, 0xf9, 0x38, 0xda, 0x24, 0xae, 0x3a, 0xce, 0x12, 0xe8, 0xdf, 0xa2, 0x36, 0xee, 0xb7, 0x62,
0x92, 0x74, 0x47, 0x36, 0x82, 0x03, 0x6b, 0x65, 0x02, 0x44, 0x5d, 0x38, 0xe9, 0xc3, 0xe1, 0xdb,
0xaa, 0xb1, 0xed, 0xc5, 0xf7, 0x00, 0xfa, 0x0b, 0xff, 0x22, 0xd8, 0x25, 0x0c, 0x17, 0xa8, 0xca,
0xdd, 0x1b, 0xb8, 0xb7, 0x77, 0x1f, 0xb6, 0xf4, 0xec, 0xfc, 0x6f, 0xb4, 0xbb, 0x00, 0x93, 0xff,
0xd2, 0xe0, 0x45, 0xc0, 0x5e, 0xc1, 0x70, 0x8e, 0xd8, 0xcc, 0x6a, 0xa5, 0xb0, 0xb0, 0x58, 0xb2,
0xd1, 0x5e, 0x11, 0x75, 0x3e, 0xfb, 0x83, 0xf8, 0xd2, 0x65, 0x8f, 0x1e, 0xe6, 0xcb, 0x5f, 0x01,
0x00, 0x00, 0xff, 0xff, 0x41, 0x64, 0x8a, 0xd9, 0xa9, 0x03, 0x00, 0x00,
}


@@ -5,7 +5,10 @@ package master_pb;
//////////////////////////////////////////////////
service Seaweed {
rpc SendHeartbeat(stream Heartbeat) returns (stream HeartbeatResponse) {}
rpc SendHeartbeat (stream Heartbeat) returns (stream HeartbeatResponse) {
}
rpc KeepConnected (stream Empty) returns (stream Empty) {
}
}
//////////////////////////////////////////////////
@@ -21,6 +24,7 @@ message Heartbeat {
uint32 admin_port = 8;
repeated VolumeInformationMessage volumes = 9;
}
message HeartbeatResponse {
uint64 volumeSizeLimit = 1;
string secretKey = 2;
@@ -39,3 +43,6 @@ message VolumeInformationMessage {
uint32 version = 9;
uint32 ttl = 10;
}
message Empty {
}


@@ -12,6 +12,7 @@ import (
"time"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/images"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/stats"
@@ -188,3 +189,15 @@ func statsMemoryHandler(w http.ResponseWriter, r *http.Request) {
m["Memory"] = stats.MemStat()
writeJsonQuiet(w, r, http.StatusOK, m)
}
func faviconHandler(w http.ResponseWriter, r *http.Request) {
data, err := images.Asset("favicon/favicon.ico")
if err != nil {
glog.V(2).Infoln("favicon read error:", err)
return
}
if e := writeResponseContent("favicon.ico", "image/x-icon", bytes.NewReader(data), w, r); e != nil {
glog.V(2).Infoln("response write error:", e)
}
}


@@ -2,108 +2,210 @@ package weed_server
import (
"context"
"strconv"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"fmt"
"os"
"path/filepath"
"time"
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)
func (fs *FilerServer) LookupDirectoryEntry(ctx context.Context, req *filer_pb.LookupDirectoryEntryRequest) (*filer_pb.LookupDirectoryEntryResponse, error) {
found, fileId, err := fs.filer.LookupDirectoryEntry(req.Directory, req.Name)
entry, err := fs.filer.FindEntry(filer2.FullPath(filepath.Join(req.Directory, req.Name)))
if err != nil {
return nil, err
}
if !found {
return nil, fmt.Errorf("%s not found under %s", req.Name, req.Directory)
return nil, fmt.Errorf("%s not found under %s: %v", req.Name, req.Directory, err)
}
return &filer_pb.LookupDirectoryEntryResponse{
Entry: &filer_pb.Entry{
Name: req.Name,
IsDirectory: fileId == "",
FileId: fileId,
IsDirectory: entry.IsDirectory(),
Chunks: entry.Chunks,
},
}, nil
}
func (fs *FilerServer) ListEntries(ctx context.Context, req *filer_pb.ListEntriesRequest) (*filer_pb.ListEntriesResponse, error) {
directoryNames, err := fs.filer.ListDirectories(req.Directory)
if err != nil {
return nil, err
}
files, err := fs.filer.ListFiles(req.Directory, "", 1000)
entries, err := fs.filer.ListDirectoryEntries(filer2.FullPath(req.Directory), "", false, 1000)
if err != nil {
return nil, err
}
resp := &filer_pb.ListEntriesResponse{}
for _, dir := range directoryNames {
for _, entry := range entries {
resp.Entries = append(resp.Entries, &filer_pb.Entry{
Name: string(dir),
IsDirectory: true,
})
}
for _, fileEntry := range files {
resp.Entries = append(resp.Entries, &filer_pb.Entry{
Name: fileEntry.Name,
IsDirectory: false,
FileId: string(fileEntry.Id),
Name: entry.Name(),
IsDirectory: entry.IsDirectory(),
Chunks: entry.Chunks,
Attributes: &filer_pb.FuseAttributes{
FileSize: entry.Size(),
Mtime: entry.Mtime.Unix(),
Crtime: entry.Crtime.Unix(),
Gid: entry.Gid,
Uid: entry.Uid,
FileMode: uint32(entry.Mode),
Mime: entry.Mime,
},
})
}
return resp, nil
}
func (fs *FilerServer) GetFileAttributes(ctx context.Context, req *filer_pb.GetFileAttributesRequest) (*filer_pb.GetFileAttributesResponse, error) {
func (fs *FilerServer) GetEntryAttributes(ctx context.Context, req *filer_pb.GetEntryAttributesRequest) (*filer_pb.GetEntryAttributesResponse, error) {
attributes := &filer_pb.FuseAttributes{}
server, err := operation.LookupFileId(fs.getMasterNode(), req.FileId)
fullpath := filer2.NewFullPath(req.ParentDir, req.Name)
entry, err := fs.filer.FindEntry(fullpath)
if err != nil {
return nil, err
}
head, err := util.Head(server)
if err != nil {
return nil, err
}
attributes.FileSize, err = strconv.ParseUint(head.Get("Content-Length"), 10, 0)
if err != nil {
return nil, err
attributes.FileSize = 0
return nil, fmt.Errorf("FindEntry %s: %v", fullpath, err)
}
return &filer_pb.GetFileAttributesResponse{
attributes.FileSize = entry.Size()
attributes.FileMode = uint32(entry.Mode)
attributes.Uid = entry.Uid
attributes.Gid = entry.Gid
attributes.Mtime = entry.Mtime.Unix()
attributes.Crtime = entry.Crtime.Unix()
attributes.Mime = entry.Mime
glog.V(3).Infof("GetEntryAttributes %v size %d chunks %d: %+v", fullpath, attributes.FileSize, len(entry.Chunks), attributes)
return &filer_pb.GetEntryAttributesResponse{
Attributes: attributes,
Chunks: entry.Chunks,
}, nil
}
func (fs *FilerServer) GetFileContent(ctx context.Context, req *filer_pb.GetFileContentRequest) (*filer_pb.GetFileContentResponse, error) {
func (fs *FilerServer) LookupVolume(ctx context.Context, req *filer_pb.LookupVolumeRequest) (*filer_pb.LookupVolumeResponse, error) {
server, err := operation.LookupFileId(fs.getMasterNode(), req.FileId)
if err != nil {
return nil, err
}
content, err := util.Get(server)
lookupResult, err := operation.LookupVolumeIds(fs.filer.GetMaster(), req.VolumeIds)
if err != nil {
return nil, err
}
return &filer_pb.GetFileContentResponse{
Content: content,
}, nil
resp := &filer_pb.LookupVolumeResponse{
LocationsMap: make(map[string]*filer_pb.Locations),
}
for vid, locations := range lookupResult {
var locs []*filer_pb.Location
for _, loc := range locations.Locations {
locs = append(locs, &filer_pb.Location{
Url: loc.Url,
PublicUrl: loc.PublicUrl,
})
}
resp.LocationsMap[vid] = &filer_pb.Locations{
Locations: locs,
}
}
return resp, nil
}
func (fs *FilerServer) CreateEntry(ctx context.Context, req *filer_pb.CreateEntryRequest) (resp *filer_pb.CreateEntryResponse, err error) {
err = fs.filer.CreateEntry(&filer2.Entry{
FullPath: filer2.FullPath(filepath.Join(req.Directory, req.Entry.Name)),
Attr: filer2.Attr{
Mtime: time.Unix(req.Entry.Attributes.Mtime, 0),
Crtime: time.Unix(req.Entry.Attributes.Mtime, 0),
Mode: os.FileMode(req.Entry.Attributes.FileMode),
Uid: req.Entry.Attributes.Uid,
Gid: req.Entry.Attributes.Gid,
Mime: req.Entry.Attributes.Mime,
},
Chunks: req.Entry.Chunks,
})
return &filer_pb.CreateEntryResponse{}, err
}
func (fs *FilerServer) UpdateEntry(ctx context.Context, req *filer_pb.UpdateEntryRequest) (*filer_pb.UpdateEntryResponse, error) {
fullpath := filepath.Join(req.Directory, req.Entry.Name)
entry, err := fs.filer.FindEntry(filer2.FullPath(fullpath))
if err != nil {
return &filer_pb.UpdateEntryResponse{}, fmt.Errorf("not found %s: %v", fullpath, err)
}
// remove old chunks if not included in the new ones
unusedChunks := filer2.FindUnusedFileChunks(entry.Chunks, req.Entry.Chunks)
chunks, garbages := filer2.CompactFileChunks(req.Entry.Chunks)
newEntry := &filer2.Entry{
FullPath: filer2.FullPath(filepath.Join(req.Directory, req.Entry.Name)),
Attr: entry.Attr,
Chunks: chunks,
}
glog.V(3).Infof("updating %s: %+v, chunks %d: %v => %+v, chunks %d: %v",
fullpath, entry.Attr, len(entry.Chunks), entry.Chunks,
req.Entry.Attributes, len(req.Entry.Chunks), req.Entry.Chunks)
if req.Entry.Attributes != nil {
if req.Entry.Attributes.Mtime != 0 {
newEntry.Attr.Mtime = time.Unix(req.Entry.Attributes.Mtime, 0)
}
if req.Entry.Attributes.FileMode != 0 {
newEntry.Attr.Mode = os.FileMode(req.Entry.Attributes.FileMode)
}
newEntry.Attr.Uid = req.Entry.Attributes.Uid
newEntry.Attr.Gid = req.Entry.Attributes.Gid
newEntry.Attr.Mime = req.Entry.Attributes.Mime
}
if err = fs.filer.UpdateEntry(newEntry); err == nil {
for _, garbage := range unusedChunks {
glog.V(0).Infof("deleting %s old chunk: %v, [%d, %d)", fullpath, garbage.FileId, garbage.Offset, garbage.Offset+int64(garbage.Size))
operation.DeleteFile(fs.filer.GetMaster(), garbage.FileId, fs.jwt(garbage.FileId))
}
for _, garbage := range garbages {
glog.V(0).Infof("deleting %s garbage chunk: %v, [%d, %d)", fullpath, garbage.FileId, garbage.Offset, garbage.Offset+int64(garbage.Size))
operation.DeleteFile(fs.filer.GetMaster(), garbage.FileId, fs.jwt(garbage.FileId))
}
}
return &filer_pb.UpdateEntryResponse{}, err
}
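
A note on the chunk bookkeeping above: FindUnusedFileChunks diffs the old entry's chunks against the incoming list, while CompactFileChunks flags chunks that the new list fully shadows; both sets are then deleted from the volume servers. A hedged re-implementation of the first step, to show the idea (not the actual filer2 code):

// Sketch of the "unused chunks" set difference: any chunk the old entry
// referenced that the incoming update no longer lists can be deleted.
func findUnusedChunks(oldChunks, newChunks []*filer_pb.FileChunk) (unused []*filer_pb.FileChunk) {
	keep := make(map[string]bool)
	for _, c := range newChunks {
		keep[c.FileId] = true
	}
	for _, c := range oldChunks {
		if !keep[c.FileId] {
			unused = append(unused, c)
		}
	}
	return
}
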
func (fs *FilerServer) DeleteEntry(ctx context.Context, req *filer_pb.DeleteEntryRequest) (resp *filer_pb.DeleteEntryResponse, err error) {
if req.IsDirectory {
err = fs.filer.DeleteDirectory(req.Directory+req.Name, false)
} else {
fid, err := fs.filer.DeleteFile(req.Directory + req.Name)
if err == nil && fid != "" {
err = operation.DeleteFile(fs.getMasterNode(), fid, fs.jwt(fid))
}
}
return nil, err
err = fs.filer.DeleteEntryMetaAndData(filer2.FullPath(filepath.Join(req.Directory, req.Name)))
return &filer_pb.DeleteEntryResponse{}, err
}
func (fs *FilerServer) AssignVolume(ctx context.Context, req *filer_pb.AssignVolumeRequest) (resp *filer_pb.AssignVolumeResponse, err error) {
assignResult, err := operation.Assign(fs.filer.GetMaster(), &operation.VolumeAssignRequest{
Count: uint64(req.Count),
Replication: req.Replication,
Collection: req.Collection,
})
if err != nil {
return nil, fmt.Errorf("assign volume: %v", err)
}
if assignResult.Error != "" {
return nil, fmt.Errorf("assign volume result: %v", assignResult.Error)
}
return &filer_pb.AssignVolumeResponse{
FileId: assignResult.Fid,
Count: int32(assignResult.Count),
Url: assignResult.Url,
PublicUrl: assignResult.PublicUrl,
}, err
}
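
Together with CreateEntry, this completes the write path: ask the filer for a file id, POST the bytes to the returned volume server, then record the chunk on the entry. A condensed sketch with error handling trimmed; uploadToVolumeServer is a hypothetical helper wrapping the multipart POST to http://<url>/<fileId> and returning the stored size:

// Write-path sketch (helper, paths, and values are assumptions).
assignResp, _ := client.AssignVolume(ctx, &filer_pb.AssignVolumeRequest{
	Count:       1,
	Replication: "000",
	Collection:  "",
})
size, _ := uploadToVolumeServer(assignResp.Url, assignResp.FileId, data) // hypothetical
_, _ = client.CreateEntry(ctx, &filer_pb.CreateEntryRequest{
	Directory: "/some/dir",
	Entry: &filer_pb.Entry{
		Name: "file.txt",
		Attributes: &filer_pb.FuseAttributes{
			Mtime:    time.Now().Unix(),
			FileMode: uint32(0644),
		},
		Chunks: []*filer_pb.FileChunk{
			{FileId: assignResp.FileId, Offset: 0, Size: size},
		},
	},
})
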


@@ -1,72 +1,38 @@
package weed_server
import (
"encoding/json"
"math/rand"
"net/http"
"os"
"strconv"
"sync"
"time"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/filer/cassandra_store"
"github.com/chrislusf/seaweedfs/weed/filer/embedded_filer"
"github.com/chrislusf/seaweedfs/weed/filer/flat_namespace"
"github.com/chrislusf/seaweedfs/weed/filer/mysql_store"
"github.com/chrislusf/seaweedfs/weed/filer/postgres_store"
"github.com/chrislusf/seaweedfs/weed/filer/redis_store"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/filer2"
_ "github.com/chrislusf/seaweedfs/weed/filer2/cassandra"
_ "github.com/chrislusf/seaweedfs/weed/filer2/leveldb"
_ "github.com/chrislusf/seaweedfs/weed/filer2/memdb"
_ "github.com/chrislusf/seaweedfs/weed/filer2/mysql"
_ "github.com/chrislusf/seaweedfs/weed/filer2/postgres"
_ "github.com/chrislusf/seaweedfs/weed/filer2/redis"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/storage"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/chrislusf/seaweedfs/weed/glog"
)
type filerConf struct {
MysqlConf []mysql_store.MySqlConf `json:"mysql"`
mysql_store.ShardingConf
PostgresConf *postgres_store.PostgresConf `json:"postgres"`
}
func parseConfFile(confPath string) (*filerConf, error) {
var setting filerConf
configFile, err := os.Open(confPath)
defer configFile.Close()
if err != nil {
return nil, err
}
jsonParser := json.NewDecoder(configFile)
if err = jsonParser.Decode(&setting); err != nil {
return nil, err
}
return &setting, nil
}
type FilerServer struct {
port string
master string
mnLock sync.RWMutex
masters []string
collection string
defaultReplication string
redirectOnRead bool
disableDirListing bool
secret security.Secret
filer filer.Filer
filer *filer2.Filer
maxMB int
masterNodes *storage.MasterNodes
}
func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, ip string, port int, master string, dir string, collection string,
func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, ip string, port int, masters []string, collection string,
replication string, redirectOnRead bool, disableDirListing bool,
confFile string,
maxMB int,
secret string,
cassandra_server string, cassandra_keyspace string,
redis_server string, redis_password string, redis_database int,
) (fs *FilerServer, err error) {
fs = &FilerServer{
master: master,
masters: masters,
collection: collection,
defaultReplication: replication,
redirectOnRead: redirectOnRead,
@@ -75,117 +41,25 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, ip string, port int,
port: ip + ":" + strconv.Itoa(port),
}
var setting *filerConf
if confFile != "" {
setting, err = parseConfFile(confFile)
if err != nil {
return nil, err
}
} else {
setting = new(filerConf)
if len(masters) == 0 {
glog.Fatal("master list is required!")
}
if setting.MysqlConf != nil && len(setting.MysqlConf) != 0 {
mysql_store := mysql_store.NewMysqlStore(setting.MysqlConf, setting.IsSharding, setting.ShardCount)
fs.filer = flat_namespace.NewFlatNamespaceFiler(master, mysql_store)
} else if setting.PostgresConf != nil {
fs.filer = postgres_store.NewPostgresStore(master, *setting.PostgresConf)
} else if cassandra_server != "" {
cassandra_store, err := cassandra_store.NewCassandraStore(cassandra_keyspace, cassandra_server)
if err != nil {
glog.Fatalf("Can not connect to cassandra server %s with keyspace %s: %v", cassandra_server, cassandra_keyspace, err)
}
fs.filer = flat_namespace.NewFlatNamespaceFiler(master, cassandra_store)
} else if redis_server != "" {
redis_store := redis_store.NewRedisStore(redis_server, redis_password, redis_database)
fs.filer = flat_namespace.NewFlatNamespaceFiler(master, redis_store)
} else {
if fs.filer, err = embedded_filer.NewFilerEmbedded(master, dir); err != nil {
glog.Fatalf("Can not start filer in dir %s : %v", dir, err)
return
}
fs.filer = filer2.NewFiler(masters)
defaultMux.HandleFunc("/admin/mv", fs.moveHandler)
}
go fs.filer.KeepConnectedToMaster()
defaultMux.HandleFunc("/admin/register", fs.registerHandler)
fs.filer.LoadConfiguration()
defaultMux.HandleFunc("/favicon.ico", faviconHandler)
defaultMux.HandleFunc("/", fs.filerHandler)
if defaultMux != readonlyMux {
readonlyMux.HandleFunc("/", fs.readonlyFilerHandler)
}
go func() {
connected := true
fs.masterNodes = storage.NewMasterNodes(fs.master)
glog.V(0).Infof("Filer server bootstraps with master %s", fs.getMasterNode())
for {
glog.V(4).Infof("Filer server sending to master %s", fs.getMasterNode())
master, err := fs.detectHealthyMaster(fs.getMasterNode())
if err == nil {
if !connected {
connected = true
if fs.getMasterNode() != master {
fs.setMasterNode(master)
}
glog.V(0).Infoln("Filer Server Connected with master at", master)
}
} else {
glog.V(1).Infof("Filer Server Failed to talk with master %s: %v", fs.getMasterNode(), err)
if connected {
connected = false
}
}
if connected {
time.Sleep(time.Duration(float32(10*1e3)*(1+rand.Float32())) * time.Millisecond)
} else {
time.Sleep(time.Duration(float32(10*1e3)*0.25) * time.Millisecond)
}
}
}()
return fs, nil
}
func (fs *FilerServer) jwt(fileId string) security.EncodedJwt {
return security.GenJwt(fs.secret, fileId)
}
func (fs *FilerServer) getMasterNode() string {
fs.mnLock.RLock()
defer fs.mnLock.RUnlock()
return fs.master
}
func (fs *FilerServer) setMasterNode(masterNode string) {
fs.mnLock.Lock()
defer fs.mnLock.Unlock()
fs.master = masterNode
}
func (fs *FilerServer) detectHealthyMaster(masterNode string) (master string, e error) {
if e = checkMaster(masterNode); e != nil {
fs.masterNodes.Reset()
for i := 0; i <= 3; i++ {
master, e = fs.masterNodes.FindMaster()
if e != nil {
continue
} else {
if e = checkMaster(master); e == nil {
break
}
}
}
} else {
master = masterNode
}
return
}
func checkMaster(masterNode string) error {
statUrl := "http://" + masterNode + "/stats/health"
glog.V(4).Infof("Connecting to %s ...", statUrl)
_, e := util.Get(statUrl)
return e
}


@@ -1,41 +0,0 @@
package weed_server
import (
"net/http"
"github.com/chrislusf/seaweedfs/weed/glog"
)
/*
Move a folder or a file, with 4 Use cases:
mv fromDir toNewDir
mv fromDir toOldDir
mv fromFile toDir
mv fromFile toFile
Wildcard is not supported.
*/
func (fs *FilerServer) moveHandler(w http.ResponseWriter, r *http.Request) {
from := r.FormValue("from")
to := r.FormValue("to")
err := fs.filer.Move(from, to)
if err != nil {
glog.V(4).Infoln("moving", from, "->", to, err.Error())
writeJsonError(w, r, http.StatusInternalServerError, err)
} else {
w.WriteHeader(http.StatusOK)
}
}
func (fs *FilerServer) registerHandler(w http.ResponseWriter, r *http.Request) {
path := r.FormValue("path")
fileId := r.FormValue("fileId")
err := fs.filer.CreateFile(path, fileId)
if err != nil {
glog.V(4).Infof("register %s to %s error: %v", fileId, path, err)
writeJsonError(w, r, http.StatusInternalServerError, err)
} else {
w.WriteHeader(http.StatusOK)
}
}


@@ -4,86 +4,32 @@ import (
"io"
"net/http"
"net/url"
"strconv"
"strings"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/operation"
ui "github.com/chrislusf/seaweedfs/weed/server/filer_ui"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/syndtr/goleveldb/leveldb"
"strconv"
"mime/multipart"
"mime"
"path"
)
// listDirectoryHandler lists directories and files under a directory
// files are sorted by name and paginated via "lastFileName" and "limit".
// sub directories are listed on the first page, when "lastFileName"
// is empty.
func (fs *FilerServer) listDirectoryHandler(w http.ResponseWriter, r *http.Request) {
if !strings.HasSuffix(r.URL.Path, "/") {
return
}
limit, limit_err := strconv.Atoi(r.FormValue("limit"))
if limit_err != nil {
limit = 100
}
lastFileName := r.FormValue("lastFileName")
files, err := fs.filer.ListFiles(r.URL.Path, lastFileName, limit)
if err == leveldb.ErrNotFound {
glog.V(0).Infof("Error %s", err)
w.WriteHeader(http.StatusNotFound)
return
}
directories, err2 := fs.filer.ListDirectories(r.URL.Path)
if err2 == leveldb.ErrNotFound {
glog.V(0).Infof("Error %s", err)
w.WriteHeader(http.StatusNotFound)
return
}
shouldDisplayLoadMore := len(files) > 0
lastFileName = ""
if len(files) > 0 {
lastFileName = files[len(files)-1].Name
files2, err3 := fs.filer.ListFiles(r.URL.Path, lastFileName, limit)
if err3 == leveldb.ErrNotFound {
glog.V(0).Infof("Error %s", err)
w.WriteHeader(http.StatusNotFound)
return
}
shouldDisplayLoadMore = len(files2) > 0
}
args := struct {
Path string
Files interface{}
Directories interface{}
Limit int
LastFileName string
ShouldDisplayLoadMore bool
}{
r.URL.Path,
files,
directories,
limit,
lastFileName,
shouldDisplayLoadMore,
}
if r.Header.Get("Accept") == "application/json" {
writeJsonQuiet(w, r, http.StatusOK, args)
} else {
ui.StatusTpl.Execute(w, args)
}
}
func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request, isGetMethod bool) {
if strings.HasSuffix(r.URL.Path, "/") {
path := r.URL.Path
if strings.HasSuffix(path, "/") && len(path) > 1 {
path = path[:len(path)-1]
}
entry, err := fs.filer.FindEntry(filer2.FullPath(path))
if err != nil {
glog.V(1).Infof("Not found %s: %v", path, err)
w.WriteHeader(http.StatusNotFound)
return
}
if entry.IsDirectory() {
if fs.disableDirListing {
w.WriteHeader(http.StatusMethodNotAllowed)
return
@@ -92,24 +38,43 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request,
return
}
fileId, err := fs.filer.FindFile(r.URL.Path)
if err == filer.ErrNotFound {
glog.V(3).Infoln("Not found in db", r.URL.Path)
if len(entry.Chunks) == 0 {
glog.V(1).Infof("no file chunks for %s, attr=%+v", path, entry.Attr)
w.WriteHeader(http.StatusNoContent)
return
}
w.Header().Set("Accept-Ranges", "bytes")
if r.Method == "HEAD" {
w.Header().Set("Content-Length", strconv.FormatInt(int64(filer2.TotalSize(entry.Chunks)), 10))
return
}
if len(entry.Chunks) == 1 {
fs.handleSingleChunk(w, r, entry)
return
}
fs.handleMultipleChunks(w, r, entry)
}
func (fs *FilerServer) handleSingleChunk(w http.ResponseWriter, r *http.Request, entry *filer2.Entry) {
fileId := entry.Chunks[0].FileId
urlString, err := operation.LookupFileId(fs.filer.GetMaster(), fileId)
if err != nil {
glog.V(1).Infof("operation LookupFileId %s failed, err: %v", fileId, err)
w.WriteHeader(http.StatusNotFound)
return
}
urlLocation, err := operation.LookupFileId(fs.getMasterNode(), fileId)
if err != nil {
glog.V(1).Infoln("operation LookupFileId %s failed, err is %s", fileId, err.Error())
w.WriteHeader(http.StatusNotFound)
return
}
urlString := urlLocation
if fs.redirectOnRead {
http.Redirect(w, r, urlString, http.StatusFound)
return
}
u, _ := url.Parse(urlString)
q := u.Query()
for key, values := range r.URL.Query() {
@@ -142,5 +107,143 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request,
}
w.WriteHeader(resp.StatusCode)
io.Copy(w, resp.Body)
}
func (fs *FilerServer) handleMultipleChunks(w http.ResponseWriter, r *http.Request, entry *filer2.Entry) {
mimeType := entry.Mime
if mimeType == "" {
if ext := path.Ext(entry.Name()); ext != "" {
mimeType = mime.TypeByExtension(ext)
}
}
if mimeType != "" {
w.Header().Set("Content-Type", mimeType)
}
println("mime type:", mimeType)
totalSize := int64(filer2.TotalSize(entry.Chunks))
rangeReq := r.Header.Get("Range")
if rangeReq == "" {
w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10))
if err := fs.writeContent(w, entry, 0, int(totalSize)); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
return
}
// the rest deals with partial content requests,
// mostly copied from src/pkg/net/http/fs.go
ranges, err := parseRange(rangeReq, totalSize)
if err != nil {
http.Error(w, err.Error(), http.StatusRequestedRangeNotSatisfiable)
return
}
if sumRangesSize(ranges) > totalSize {
// The total number of bytes in all the ranges
// is larger than the size of the file by
// itself, so this is probably an attack, or a
// dumb client. Ignore the range request.
return
}
if len(ranges) == 0 {
return
}
if len(ranges) == 1 {
// RFC 2616, Section 14.16:
// "When an HTTP message includes the content of a single
// range (for example, a response to a request for a
// single range, or to a request for a set of ranges
// that overlap without any holes), this content is
// transmitted with a Content-Range header, and a
// Content-Length header showing the number of bytes
// actually transferred.
// ...
// A response to a request for a single range MUST NOT
// be sent using the multipart/byteranges media type."
ra := ranges[0]
w.Header().Set("Content-Length", strconv.FormatInt(ra.length, 10))
w.Header().Set("Content-Range", ra.contentRange(totalSize))
w.WriteHeader(http.StatusPartialContent)
err = fs.writeContent(w, entry, ra.start, int(ra.length))
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
return
}
// process multiple ranges
for _, ra := range ranges {
if ra.start > totalSize {
http.Error(w, "Out of Range", http.StatusRequestedRangeNotSatisfiable)
return
}
}
sendSize := rangesMIMESize(ranges, mimeType, totalSize)
pr, pw := io.Pipe()
mw := multipart.NewWriter(pw)
w.Header().Set("Content-Type", "multipart/byteranges; boundary="+mw.Boundary())
sendContent := pr
defer pr.Close() // cause writing goroutine to fail and exit if CopyN doesn't finish.
go func() {
for _, ra := range ranges {
part, e := mw.CreatePart(ra.mimeHeader(mimeType, totalSize))
if e != nil {
pw.CloseWithError(e)
return
}
if e = fs.writeContent(part, entry, ra.start, int(ra.length)); e != nil {
pw.CloseWithError(e)
return
}
}
mw.Close()
pw.Close()
}()
if w.Header().Get("Content-Encoding") == "" {
w.Header().Set("Content-Length", strconv.FormatInt(sendSize, 10))
}
w.WriteHeader(http.StatusPartialContent)
if _, err := io.CopyN(w, sendContent, sendSize); err != nil {
http.Error(w, "Internal Error", http.StatusInternalServerError)
return
}
}
func (fs *FilerServer) writeContent(w io.Writer, entry *filer2.Entry, offset int64, size int) error {
chunkViews := filer2.ViewFromChunks(entry.Chunks, offset, size)
fileId2Url := make(map[string]string)
for _, chunkView := range chunkViews {
urlString, err := operation.LookupFileId(fs.filer.GetMaster(), chunkView.FileId)
if err != nil {
glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
return err
}
fileId2Url[chunkView.FileId] = urlString
}
for _, chunkView := range chunkViews {
urlString := fileId2Url[chunkView.FileId]
_, err := util.ReadUrlAsStream(urlString, chunkView.Offset, int(chunkView.Size), func(data []byte) {
w.Write(data)
})
if err != nil {
glog.V(1).Infof("read %s failed, err: %v", chunkView.FileId, err)
return err
}
}
return nil
}
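
writeContent leans on filer2.ViewFromChunks to turn the logical read [offset, offset+size) into per-chunk reads. As intuition only, a simplified version of that interval arithmetic, assuming non-overlapping chunks sorted by offset (the real implementation must also resolve overlaps from concurrent writes):

// Simplified sketch of resolving a read range into chunk views; not the
// actual filer2 implementation.
type exampleChunkView struct {
	FileId string
	Offset int64  // offset within the chunk's blob
	Size   uint64 // bytes to read from that chunk
}

func viewsFor(chunks []*filer_pb.FileChunk, offset int64, size int) (views []exampleChunkView) {
	stop := offset + int64(size)
	for _, c := range chunks {
		chunkStop := c.Offset + int64(c.Size)
		if c.Offset >= stop || chunkStop <= offset {
			continue // chunk lies outside the requested range
		}
		start, end := offset, stop
		if c.Offset > start {
			start = c.Offset
		}
		if chunkStop < end {
			end = chunkStop
		}
		views = append(views, exampleChunkView{
			FileId: c.FileId,
			Offset: start - c.Offset,
			Size:   uint64(end - start),
		})
	}
	return
}
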


@@ -0,0 +1,70 @@
package weed_server
import (
"net/http"
"strconv"
"strings"
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
ui "github.com/chrislusf/seaweedfs/weed/server/filer_ui"
)
// listDirectoryHandler lists directories and files under a directory
// files are sorted by name and paginated via "lastFileName" and "limit".
// sub directories are listed on the first page, when "lastFileName"
// is empty.
func (fs *FilerServer) listDirectoryHandler(w http.ResponseWriter, r *http.Request) {
path := r.URL.Path
if strings.HasSuffix(path, "/") && len(path) > 1 {
path = path[:len(path)-1]
}
limit, limit_err := strconv.Atoi(r.FormValue("limit"))
if limit_err != nil {
limit = 100
}
lastFileName := r.FormValue("lastFileName")
entries, err := fs.filer.ListDirectoryEntries(filer2.FullPath(path), lastFileName, false, limit)
if err != nil {
glog.V(0).Infof("listDirectory %s %s $d: %s", path, lastFileName, limit, err)
w.WriteHeader(http.StatusNotFound)
return
}
shouldDisplayLoadMore := len(entries) == limit
if path == "/" {
path = ""
}
if len(entries) > 0 {
lastFileName = entries[len(entries)-1].Name()
}
glog.V(4).Infof("listDirectory %s, last file %s, limit %d: %d items", path, lastFileName, limit, len(entries))
args := struct {
Path string
Breadcrumbs []ui.Breadcrumb
Entries interface{}
Limit int
LastFileName string
ShouldDisplayLoadMore bool
}{
path,
ui.ToBreadcrumb(path),
entries,
limit,
lastFileName,
shouldDisplayLoadMore,
}
if r.Header.Get("Accept") == "application/json" {
writeJsonQuiet(w, r, http.StatusOK, args)
} else {
ui.StatusTpl.Execute(w, args)
}
}
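
Usage note: the handler returns HTML by default and JSON when the Accept header asks for it, with limit and lastFileName driving pagination. A sketch of fetching one page programmatically (address and path assumed; imports of net/http, io/ioutil, fmt, log assumed):

// Fetch one page of a directory listing as JSON.
req, _ := http.NewRequest("GET", "http://localhost:8888/some/dir/?limit=100", nil)
req.Header.Set("Accept", "application/json")
resp, err := http.DefaultClient.Do(req)
if err != nil {
	log.Fatalf("list: %v", err)
}
defer resp.Body.Close()
body, _ := ioutil.ReadAll(resp.Body)
fmt.Println(string(body))
// To page further, repeat with &lastFileName=<Name of the last entry returned>.
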


@@ -1,26 +1,19 @@
package weed_server
import (
"bytes"
"crypto/md5"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"mime/multipart"
"net/http"
"net/textproto"
"net/url"
"path"
"strconv"
"strings"
"time"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/storage"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -32,58 +25,18 @@ type FilerPostResult struct {
Url string `json:"url,omitempty"`
}
var quoteEscaper = strings.NewReplacer("\\", "\\\\", `"`, "\\\"")
func escapeQuotes(s string) string {
return quoteEscaper.Replace(s)
}
func createFormFile(writer *multipart.Writer, fieldname, filename, mime string) (io.Writer, error) {
h := make(textproto.MIMEHeader)
h.Set("Content-Disposition",
fmt.Sprintf(`form-data; name="%s"; filename="%s"`,
escapeQuotes(fieldname), escapeQuotes(filename)))
if len(mime) == 0 {
mime = "application/octet-stream"
}
h.Set("Content-Type", mime)
return writer.CreatePart(h)
}
func makeFormData(filename, mimeType string, content io.Reader) (formData io.Reader, contentType string, err error) {
buf := new(bytes.Buffer)
writer := multipart.NewWriter(buf)
defer writer.Close()
part, err := createFormFile(writer, "file", filename, mimeType)
if err != nil {
glog.V(0).Infoln(err)
return
}
_, err = io.Copy(part, content)
if err != nil {
glog.V(0).Infoln(err)
return
}
formData = buf
contentType = writer.FormDataContentType()
return
}
func (fs *FilerServer) queryFileInfoByPath(w http.ResponseWriter, r *http.Request, path string) (fileId, urlLocation string, err error) {
if fileId, err = fs.filer.FindFile(path); err != nil && err != filer.ErrNotFound {
var entry *filer2.Entry
if entry, err = fs.filer.FindEntry(filer2.FullPath(path)); err != nil {
glog.V(0).Infoln("failing to find path in filer store", path, err.Error())
writeJsonError(w, r, http.StatusInternalServerError, err)
} else if fileId != "" && err == nil {
urlLocation, err = operation.LookupFileId(fs.getMasterNode(), fileId)
} else {
fileId = entry.Chunks[0].FileId
urlLocation, err = operation.LookupFileId(fs.filer.GetMaster(), fileId)
if err != nil {
glog.V(1).Infoln("operation LookupFileId %s failed, err is %s", fileId, err.Error())
glog.V(1).Infof("operation LookupFileId %s failed, err is %s", fileId, err.Error())
w.WriteHeader(http.StatusNotFound)
}
} else if fileId == "" && err == filer.ErrNotFound {
w.WriteHeader(http.StatusNotFound)
}
return
}
@ -95,7 +48,7 @@ func (fs *FilerServer) assignNewFileInfo(w http.ResponseWriter, r *http.Request,
Collection: collection,
Ttl: r.URL.Query().Get("ttl"),
}
assignResult, ae := operation.Assign(fs.getMasterNode(), ar)
assignResult, ae := operation.Assign(fs.filer.GetMaster(), ar)
if ae != nil {
glog.V(0).Infoln("failing to assign a file id", ae.Error())
writeJsonError(w, r, http.StatusInternalServerError, ae)
@ -107,117 +60,6 @@ func (fs *FilerServer) assignNewFileInfo(w http.ResponseWriter, r *http.Request,
return
}
func (fs *FilerServer) multipartUploadAnalyzer(w http.ResponseWriter, r *http.Request, replication, collection string) (fileId, urlLocation string, err error) {
//Default handle way for http multipart
if r.Method == "PUT" {
buf, _ := ioutil.ReadAll(r.Body)
r.Body = ioutil.NopCloser(bytes.NewBuffer(buf))
fileName, _, _, _, _, _, _, _, pe := storage.ParseUpload(r)
if pe != nil {
glog.V(0).Infoln("failing to parse post body", pe.Error())
writeJsonError(w, r, http.StatusInternalServerError, pe)
err = pe
return
}
//reconstruct http request body for following new request to volume server
r.Body = ioutil.NopCloser(bytes.NewBuffer(buf))
path := r.URL.Path
if strings.HasSuffix(path, "/") {
if fileName != "" {
path += fileName
}
}
fileId, urlLocation, err = fs.queryFileInfoByPath(w, r, path)
} else {
fileId, urlLocation, err = fs.assignNewFileInfo(w, r, replication, collection)
}
return
}
func multipartHttpBodyBuilder(w http.ResponseWriter, r *http.Request, fileName string) (err error) {
body, contentType, te := makeFormData(fileName, r.Header.Get("Content-Type"), r.Body)
if te != nil {
glog.V(0).Infoln("S3 protocol to raw seaweed protocol failed", te.Error())
writeJsonError(w, r, http.StatusInternalServerError, te)
err = te
return
}
if body != nil {
switch v := body.(type) {
case *bytes.Buffer:
r.ContentLength = int64(v.Len())
case *bytes.Reader:
r.ContentLength = int64(v.Len())
case *strings.Reader:
r.ContentLength = int64(v.Len())
}
}
r.Header.Set("Content-Type", contentType)
rc, ok := body.(io.ReadCloser)
if !ok && body != nil {
rc = ioutil.NopCloser(body)
}
r.Body = rc
return
}
func checkContentMD5(w http.ResponseWriter, r *http.Request) (err error) {
if contentMD5 := r.Header.Get("Content-MD5"); contentMD5 != "" {
buf, _ := ioutil.ReadAll(r.Body)
//checkMD5
sum := md5.Sum(buf)
fileDataMD5 := base64.StdEncoding.EncodeToString(sum[:])
if !strings.EqualFold(fileDataMD5, contentMD5) {
glog.V(0).Infof("fileDataMD5 [%s] is not equal to Content-MD5 [%s]", fileDataMD5, contentMD5)
err = fmt.Errorf("MD5 check failed")
writeJsonError(w, r, http.StatusNotAcceptable, err)
return
}
//reconstruct http request body for following new request to volume server
r.Body = ioutil.NopCloser(bytes.NewBuffer(buf))
}
return
}
func (fs *FilerServer) monolithicUploadAnalyzer(w http.ResponseWriter, r *http.Request, replication, collection string) (fileId, urlLocation string, err error) {
/*
Amazon S3 ref link: [http://docs.aws.amazon.com/AmazonS3/latest/API/Welcome.html]
There is a long way to go before providing complete compatibility with the Amazon S3 API; this is just
a simple data stream adapter between the S3 PUT API and SeaweedFS's volume storage Write API.
1. The request url format should be http://$host:$port/$bucketName/$objectName
2. bucketName will be mapped to SeaweedFS's collection name
3. You can customize and extend it.
*/
lastPos := strings.LastIndex(r.URL.Path, "/")
if lastPos == -1 || lastPos == 0 || lastPos == len(r.URL.Path)-1 {
glog.V(0).Infoln("URL Path [%s] is invalid, could not retrieve file name", r.URL.Path)
err = fmt.Errorf("URL Path is invalid")
writeJsonError(w, r, http.StatusInternalServerError, err)
return
}
if err = checkContentMD5(w, r); err != nil {
return
}
fileName := r.URL.Path[lastPos+1:]
if err = multipartHttpBodyBuilder(w, r, fileName); err != nil {
return
}
secondPos := strings.Index(r.URL.Path[1:], "/") + 1
collection = r.URL.Path[1:secondPos]
path := r.URL.Path
if fileId, urlLocation, err = fs.queryFileInfoByPath(w, r, path); err == nil && fileId == "" {
fileId, urlLocation, err = fs.assignNewFileInfo(w, r, replication, collection)
}
return
}
func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) {
query := r.URL.Query()
@ -303,7 +145,7 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) {
if ret.Name != "" {
path += ret.Name
} else {
operation.DeleteFile(fs.getMasterNode(), fileId, fs.jwt(fileId)) //clean up
operation.DeleteFile(fs.filer.GetMaster(), fileId, fs.jwt(fileId)) //clean up
glog.V(0).Infoln("Can not to write to folder", path, "without a file name!")
writeJsonError(w, r, http.StatusInternalServerError,
errors.New("Can not to write to folder "+path+" without a file name"))
@ -313,16 +155,28 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) {
// also delete the old fid unless PUT operation
if r.Method != "PUT" {
if oldFid, err := fs.filer.FindFile(path); err == nil {
operation.DeleteFile(fs.getMasterNode(), oldFid, fs.jwt(oldFid))
} else if err != nil && err != filer.ErrNotFound {
if entry, err := fs.filer.FindEntry(filer2.FullPath(path)); err == nil {
oldFid := entry.Chunks[0].FileId
operation.DeleteFile(fs.filer.GetMaster(), oldFid, fs.jwt(oldFid))
} else if err != nil && err != filer2.ErrNotFound {
glog.V(0).Infof("error %v occur when finding %s in filer store", err, path)
}
}
glog.V(4).Infoln("saving", path, "=>", fileId)
if db_err := fs.filer.CreateFile(path, fileId); db_err != nil {
operation.DeleteFile(fs.getMasterNode(), fileId, fs.jwt(fileId)) //clean up
entry := &filer2.Entry{
FullPath: filer2.FullPath(path),
Attr: filer2.Attr{
Mode: 0660,
},
Chunks: []*filer_pb.FileChunk{{
FileId: fileId,
Size: uint64(r.ContentLength),
Mtime: time.Now().UnixNano(),
}},
}
if db_err := fs.filer.CreateEntry(entry); db_err != nil {
operation.DeleteFile(fs.filer.GetMaster(), fileId, fs.jwt(fileId)) //clean up
glog.V(0).Infof("failing to write %s to filer server : %v", path, db_err)
writeJsonError(w, r, http.StatusInternalServerError, db_err)
return
@ -338,217 +192,15 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) {
writeJsonQuiet(w, r, http.StatusCreated, reply)
}
func (fs *FilerServer) autoChunk(w http.ResponseWriter, r *http.Request, replication string, collection string) bool {
if r.Method != "POST" {
glog.V(4).Infoln("AutoChunking not supported for method", r.Method)
return false
}
// autoChunking can be set at the command-line level or as a query param. Query param overrides command-line
query := r.URL.Query()
parsedMaxMB, _ := strconv.ParseInt(query.Get("maxMB"), 10, 32)
maxMB := int32(parsedMaxMB)
if maxMB <= 0 && fs.maxMB > 0 {
maxMB = int32(fs.maxMB)
}
if maxMB <= 0 {
glog.V(4).Infoln("AutoChunking not enabled")
return false
}
glog.V(4).Infoln("AutoChunking level set to", maxMB, "(MB)")
chunkSize := 1024 * 1024 * maxMB
contentLength := int64(0)
if contentLengthHeader := r.Header["Content-Length"]; len(contentLengthHeader) == 1 {
contentLength, _ = strconv.ParseInt(contentLengthHeader[0], 10, 64)
if contentLength <= int64(chunkSize) {
glog.V(4).Infoln("Content-Length of", contentLength, "is less than the chunk size of", chunkSize, "so autoChunking will be skipped.")
return false
}
}
if contentLength <= 0 {
glog.V(4).Infoln("Content-Length value is missing or unexpected so autoChunking will be skipped.")
return false
}
reply, err := fs.doAutoChunk(w, r, contentLength, chunkSize, replication, collection)
if err != nil {
writeJsonError(w, r, http.StatusInternalServerError, err)
} else if reply != nil {
writeJsonQuiet(w, r, http.StatusCreated, reply)
}
return true
}
func (fs *FilerServer) doAutoChunk(w http.ResponseWriter, r *http.Request, contentLength int64, chunkSize int32, replication string, collection string) (filerResult *FilerPostResult, replyerr error) {
multipartReader, multipartReaderErr := r.MultipartReader()
if multipartReaderErr != nil {
return nil, multipartReaderErr
}
part1, part1Err := multipartReader.NextPart()
if part1Err != nil {
return nil, part1Err
}
fileName := part1.FileName()
if fileName != "" {
fileName = path.Base(fileName)
}
chunks := (int64(contentLength) / int64(chunkSize)) + 1
cm := operation.ChunkManifest{
Name: fileName,
Size: 0, // don't know yet
Mime: "application/octet-stream",
Chunks: make([]*operation.ChunkInfo, 0, chunks),
}
totalBytesRead := int64(0)
tmpBufferSize := int32(1024 * 1024)
tmpBuffer := bytes.NewBuffer(make([]byte, 0, tmpBufferSize))
chunkBuf := make([]byte, chunkSize+tmpBufferSize) // chunk size plus a little overflow
chunkBufOffset := int32(0)
chunkOffset := int64(0)
writtenChunks := 0
filerResult = &FilerPostResult{
Name: fileName,
}
for totalBytesRead < contentLength {
tmpBuffer.Reset()
bytesRead, readErr := io.CopyN(tmpBuffer, part1, int64(tmpBufferSize))
readFully := readErr != nil && readErr == io.EOF
tmpBuf := tmpBuffer.Bytes()
bytesToCopy := tmpBuf[0:int(bytesRead)]
copy(chunkBuf[chunkBufOffset:chunkBufOffset+int32(bytesRead)], bytesToCopy)
chunkBufOffset = chunkBufOffset + int32(bytesRead)
if chunkBufOffset >= chunkSize || readFully || (chunkBufOffset > 0 && bytesRead == 0) {
writtenChunks = writtenChunks + 1
fileId, urlLocation, assignErr := fs.assignNewFileInfo(w, r, replication, collection)
if assignErr != nil {
return nil, assignErr
}
// upload the chunk to the volume server
chunkName := fileName + "_chunk_" + strconv.FormatInt(int64(cm.Chunks.Len()+1), 10)
uploadErr := fs.doUpload(urlLocation, w, r, chunkBuf[0:chunkBufOffset], chunkName, "application/octet-stream", fileId)
if uploadErr != nil {
return nil, uploadErr
}
// Save to chunk manifest structure
cm.Chunks = append(cm.Chunks,
&operation.ChunkInfo{
Offset: chunkOffset,
Size: int64(chunkBufOffset),
Fid: fileId,
},
)
// reset variables for the next chunk
chunkBufOffset = 0
chunkOffset = totalBytesRead + int64(bytesRead)
}
totalBytesRead = totalBytesRead + int64(bytesRead)
if bytesRead == 0 || readFully {
break
}
if readErr != nil {
return nil, readErr
}
}
cm.Size = totalBytesRead
manifestBuf, marshalErr := cm.Marshal()
if marshalErr != nil {
return nil, marshalErr
}
manifestStr := string(manifestBuf)
glog.V(4).Infoln("Generated chunk manifest: ", manifestStr)
manifestFileId, manifestUrlLocation, manifestAssignmentErr := fs.assignNewFileInfo(w, r, replication, collection)
if manifestAssignmentErr != nil {
return nil, manifestAssignmentErr
}
glog.V(4).Infoln("Manifest uploaded to:", manifestUrlLocation, "Fid:", manifestFileId)
filerResult.Fid = manifestFileId
u, _ := url.Parse(manifestUrlLocation)
q := u.Query()
q.Set("cm", "true")
u.RawQuery = q.Encode()
manifestUploadErr := fs.doUpload(u.String(), w, r, manifestBuf, fileName+"_manifest", "application/json", manifestFileId)
if manifestUploadErr != nil {
return nil, manifestUploadErr
}
path := r.URL.Path
// also delete the old fid unless PUT operation
if r.Method != "PUT" {
if oldFid, err := fs.filer.FindFile(path); err == nil {
operation.DeleteFile(fs.getMasterNode(), oldFid, fs.jwt(oldFid))
} else if err != nil && err != filer.ErrNotFound {
glog.V(0).Infof("error %v occur when finding %s in filer store", err, path)
}
}
glog.V(4).Infoln("saving", path, "=>", manifestFileId)
if db_err := fs.filer.CreateFile(path, manifestFileId); db_err != nil {
replyerr = db_err
filerResult.Error = db_err.Error()
operation.DeleteFile(fs.getMasterNode(), manifestFileId, fs.jwt(manifestFileId)) //clean up
glog.V(0).Infof("failing to write %s to filer server : %v", path, db_err)
return
}
return
}
func (fs *FilerServer) doUpload(urlLocation string, w http.ResponseWriter, r *http.Request, chunkBuf []byte, fileName string, contentType string, fileId string) (err error) {
ioReader := ioutil.NopCloser(bytes.NewBuffer(chunkBuf))
uploadResult, uploadError := operation.Upload(urlLocation, fileName, ioReader, false, contentType, nil, fs.jwt(fileId))
if uploadResult != nil {
glog.V(0).Infoln("Chunk upload result. Name:", uploadResult.Name, "Fid:", fileId, "Size:", uploadResult.Size)
}
if uploadError != nil {
err = uploadError
}
return
}
// curl -X DELETE http://localhost:8888/path/to
// curl -X DELETE http://localhost:8888/path/to/?recursive=true
func (fs *FilerServer) DeleteHandler(w http.ResponseWriter, r *http.Request) {
var err error
var fid string
if strings.HasSuffix(r.URL.Path, "/") {
isRecursive := r.FormValue("recursive") == "true"
err = fs.filer.DeleteDirectory(r.URL.Path, isRecursive)
} else {
fid, err = fs.filer.DeleteFile(r.URL.Path)
if err == nil && fid != "" {
err = operation.DeleteFile(fs.getMasterNode(), fid, fs.jwt(fid))
}
}
if err == nil {
writeJsonQuiet(w, r, http.StatusAccepted, map[string]string{"error": ""})
} else {
err := fs.filer.DeleteEntryMetaAndData(filer2.FullPath(r.URL.Path))
if err != nil {
glog.V(4).Infoln("deleting", r.URL.Path, ":", err.Error())
writeJsonError(w, r, http.StatusInternalServerError, err)
return
}
writeJsonQuiet(w, r, http.StatusAccepted, map[string]string{"error": ""})
}

View file

@ -0,0 +1,189 @@
package weed_server
import (
"bytes"
"io"
"io/ioutil"
"net/http"
"path"
"strconv"
"time"
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)
func (fs *FilerServer) autoChunk(w http.ResponseWriter, r *http.Request, replication string, collection string) bool {
if r.Method != "POST" {
glog.V(4).Infoln("AutoChunking not supported for method", r.Method)
return false
}
// autoChunking can be set at the command-line level or as a query param. Query param overrides command-line
query := r.URL.Query()
parsedMaxMB, _ := strconv.ParseInt(query.Get("maxMB"), 10, 32)
maxMB := int32(parsedMaxMB)
if maxMB <= 0 && fs.maxMB > 0 {
maxMB = int32(fs.maxMB)
}
if maxMB <= 0 {
glog.V(4).Infoln("AutoChunking not enabled")
return false
}
glog.V(4).Infoln("AutoChunking level set to", maxMB, "(MB)")
chunkSize := 1024 * 1024 * maxMB
contentLength := int64(0)
if contentLengthHeader := r.Header["Content-Length"]; len(contentLengthHeader) == 1 {
contentLength, _ = strconv.ParseInt(contentLengthHeader[0], 10, 64)
if contentLength <= int64(chunkSize) {
glog.V(4).Infoln("Content-Length of", contentLength, "is less than the chunk size of", chunkSize, "so autoChunking will be skipped.")
return false
}
}
if contentLength <= 0 {
glog.V(4).Infoln("Content-Length value is missing or unexpected so autoChunking will be skipped.")
return false
}
reply, err := fs.doAutoChunk(w, r, contentLength, chunkSize, replication, collection)
if err != nil {
writeJsonError(w, r, http.StatusInternalServerError, err)
} else if reply != nil {
writeJsonQuiet(w, r, http.StatusCreated, reply)
}
return true
}
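// A hedged example for the auto-chunking gate above (file name and size are
// illustrative): chunking only engages for POST requests whose Content-Length
// exceeds the chunk size; the maxMB query parameter overrides the -maxMB
// command-line default.
//
// curl -F "file=@big_video.mp4" "http://localhost:8888/videos/big_video.mp4?maxMB=16"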
func (fs *FilerServer) doAutoChunk(w http.ResponseWriter, r *http.Request, contentLength int64, chunkSize int32, replication string, collection string) (filerResult *FilerPostResult, replyerr error) {
multipartReader, multipartReaderErr := r.MultipartReader()
if multipartReaderErr != nil {
return nil, multipartReaderErr
}
part1, part1Err := multipartReader.NextPart()
if part1Err != nil {
return nil, part1Err
}
fileName := part1.FileName()
if fileName != "" {
fileName = path.Base(fileName)
}
var fileChunks []*filer_pb.FileChunk
totalBytesRead := int64(0)
tmpBufferSize := int32(1024 * 1024)
tmpBuffer := bytes.NewBuffer(make([]byte, 0, tmpBufferSize))
chunkBuf := make([]byte, chunkSize+tmpBufferSize) // chunk size plus a little overflow
chunkBufOffset := int32(0)
chunkOffset := int64(0)
writtenChunks := 0
filerResult = &FilerPostResult{
Name: fileName,
}
for totalBytesRead < contentLength {
tmpBuffer.Reset()
bytesRead, readErr := io.CopyN(tmpBuffer, part1, int64(tmpBufferSize))
readFully := readErr != nil && readErr == io.EOF
tmpBuf := tmpBuffer.Bytes()
bytesToCopy := tmpBuf[0:int(bytesRead)]
copy(chunkBuf[chunkBufOffset:chunkBufOffset+int32(bytesRead)], bytesToCopy)
chunkBufOffset = chunkBufOffset + int32(bytesRead)
if chunkBufOffset >= chunkSize || readFully || (chunkBufOffset > 0 && bytesRead == 0) {
writtenChunks = writtenChunks + 1
fileId, urlLocation, assignErr := fs.assignNewFileInfo(w, r, replication, collection)
if assignErr != nil {
return nil, assignErr
}
// upload the chunk to the volume server
chunkName := fileName + "_chunk_" + strconv.FormatInt(int64(len(fileChunks)+1), 10)
uploadErr := fs.doUpload(urlLocation, w, r, chunkBuf[0:chunkBufOffset], chunkName, "application/octet-stream", fileId)
if uploadErr != nil {
return nil, uploadErr
}
// Save to chunk manifest structure
fileChunks = append(fileChunks,
&filer_pb.FileChunk{
FileId: fileId,
Offset: chunkOffset,
Size: uint64(chunkBufOffset),
Mtime: time.Now().UnixNano(),
},
)
// reset variables for the next chunk
chunkBufOffset = 0
chunkOffset = totalBytesRead + int64(bytesRead)
}
totalBytesRead = totalBytesRead + int64(bytesRead)
if bytesRead == 0 || readFully {
break
}
if readErr != nil {
return nil, readErr
}
}
path := r.URL.Path
// also delete the old fid unless PUT operation
if r.Method != "PUT" {
if entry, err := fs.filer.FindEntry(filer2.FullPath(path)); err == nil {
for _, chunk := range entry.Chunks {
oldFid := chunk.FileId
operation.DeleteFile(fs.filer.GetMaster(), oldFid, fs.jwt(oldFid))
}
} else if err != nil {
glog.V(0).Infof("error %v occur when finding %s in filer store", err, path)
}
}
glog.V(4).Infoln("saving", path)
entry := &filer2.Entry{
FullPath: filer2.FullPath(path),
Attr: filer2.Attr{
Mtime: time.Now(),
Crtime: time.Now(),
Mode: 0660,
},
Chunks: fileChunks,
}
if db_err := fs.filer.CreateEntry(entry); db_err != nil {
replyerr = db_err
filerResult.Error = db_err.Error()
glog.V(0).Infof("failing to write %s to filer server : %v", path, db_err)
return
}
return
}
func (fs *FilerServer) doUpload(urlLocation string, w http.ResponseWriter, r *http.Request, chunkBuf []byte, fileName string, contentType string, fileId string) (err error) {
ioReader := ioutil.NopCloser(bytes.NewBuffer(chunkBuf))
uploadResult, uploadError := operation.Upload(urlLocation, fileName, ioReader, false, contentType, nil, fs.jwt(fileId))
if uploadResult != nil {
glog.V(0).Infoln("Chunk upload result. Name:", uploadResult.Name, "Fid:", fileId, "Size:", uploadResult.Size)
}
if uploadError != nil {
err = uploadError
}
return
}

View file

@ -0,0 +1,139 @@
package weed_server
import (
"bytes"
"crypto/md5"
"encoding/base64"
"fmt"
"io"
"io/ioutil"
"mime/multipart"
"net/http"
"net/textproto"
"strings"
"github.com/chrislusf/seaweedfs/weed/glog"
)
var quoteEscaper = strings.NewReplacer("\\", "\\\\", `"`, "\\\"")
func escapeQuotes(s string) string {
return quoteEscaper.Replace(s)
}
func createFormFile(writer *multipart.Writer, fieldname, filename, mime string) (io.Writer, error) {
h := make(textproto.MIMEHeader)
h.Set("Content-Disposition",
fmt.Sprintf(`form-data; name="%s"; filename="%s"`,
escapeQuotes(fieldname), escapeQuotes(filename)))
if len(mime) == 0 {
mime = "application/octet-stream"
}
h.Set("Content-Type", mime)
return writer.CreatePart(h)
}
func makeFormData(filename, mimeType string, content io.Reader) (formData io.Reader, contentType string, err error) {
buf := new(bytes.Buffer)
writer := multipart.NewWriter(buf)
defer writer.Close()
part, err := createFormFile(writer, "file", filename, mimeType)
if err != nil {
glog.V(0).Infoln(err)
return
}
_, err = io.Copy(part, content)
if err != nil {
glog.V(0).Infoln(err)
return
}
formData = buf
contentType = writer.FormDataContentType()
return
}
func checkContentMD5(w http.ResponseWriter, r *http.Request) (err error) {
if contentMD5 := r.Header.Get("Content-MD5"); contentMD5 != "" {
buf, _ := ioutil.ReadAll(r.Body)
//checkMD5
sum := md5.Sum(buf)
fileDataMD5 := base64.StdEncoding.EncodeToString(sum[:])
if !strings.EqualFold(fileDataMD5, contentMD5) {
glog.V(0).Infof("fileDataMD5 [%s] is not equal to Content-MD5 [%s]", fileDataMD5, contentMD5)
err = fmt.Errorf("MD5 check failed")
writeJsonError(w, r, http.StatusNotAcceptable, err)
return
}
//reconstruct http request body for following new request to volume server
r.Body = ioutil.NopCloser(bytes.NewBuffer(buf))
}
return
}
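// A minimal client-side sketch matching the check above, assuming the header
// carries the base64 encoding of the raw MD5 digest; names and paths here are
// illustrative.
//
// data, _ := ioutil.ReadFile("local.txt")
// sum := md5.Sum(data)
// req, _ := http.NewRequest("PUT", "http://localhost:8888/path/to/local.txt", bytes.NewReader(data))
// req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(sum[:]))
// resp, _ := http.DefaultClient.Do(req)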
func (fs *FilerServer) monolithicUploadAnalyzer(w http.ResponseWriter, r *http.Request, replication, collection string) (fileId, urlLocation string, err error) {
/*
Amazon S3 ref link: [http://docs.aws.amazon.com/AmazonS3/latest/API/Welcome.html]
There is a long way to go before providing complete compatibility with the Amazon S3 API; this is just
a simple data stream adapter between the S3 PUT API and SeaweedFS's volume storage Write API.
1. The request url format should be http://$host:$port/$bucketName/$objectName
2. bucketName will be mapped to SeaweedFS's collection name
3. You can customize and extend it.
*/
lastPos := strings.LastIndex(r.URL.Path, "/")
if lastPos == -1 || lastPos == 0 || lastPos == len(r.URL.Path)-1 {
glog.V(0).Infoln("URL Path [%s] is invalid, could not retrieve file name", r.URL.Path)
err = fmt.Errorf("URL Path is invalid")
writeJsonError(w, r, http.StatusInternalServerError, err)
return
}
if err = checkContentMD5(w, r); err != nil {
return
}
fileName := r.URL.Path[lastPos+1:]
if err = multipartHttpBodyBuilder(w, r, fileName); err != nil {
return
}
secondPos := strings.Index(r.URL.Path[1:], "/") + 1
collection = r.URL.Path[1:secondPos]
path := r.URL.Path
if fileId, urlLocation, err = fs.queryFileInfoByPath(w, r, path); err == nil && fileId == "" {
fileId, urlLocation, err = fs.assignNewFileInfo(w, r, replication, collection)
}
return
}
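// A hedged example of the S3-style PUT described in the comment above (bucket
// and object names are illustrative): the first path segment becomes the
// collection, the last segment becomes the file name.
//
// curl -X PUT -T object.txt "http://localhost:8888/mybucket/dir/object.txt"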
func multipartHttpBodyBuilder(w http.ResponseWriter, r *http.Request, fileName string) (err error) {
body, contentType, te := makeFormData(fileName, r.Header.Get("Content-Type"), r.Body)
if te != nil {
glog.V(0).Infoln("S3 protocol to raw seaweed protocol failed", te.Error())
writeJsonError(w, r, http.StatusInternalServerError, te)
err = te
return
}
if body != nil {
switch v := body.(type) {
case *bytes.Buffer:
r.ContentLength = int64(v.Len())
case *bytes.Reader:
r.ContentLength = int64(v.Len())
case *strings.Reader:
r.ContentLength = int64(v.Len())
}
}
r.Header.Set("Content-Type", contentType)
rc, ok := body.(io.ReadCloser)
if !ok && body != nil {
rc = ioutil.NopCloser(body)
}
r.Body = rc
return
}

View file

@ -0,0 +1,39 @@
package weed_server
import (
"bytes"
"io/ioutil"
"net/http"
"strings"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/storage"
)
func (fs *FilerServer) multipartUploadAnalyzer(w http.ResponseWriter, r *http.Request, replication, collection string) (fileId, urlLocation string, err error) {
//Default handle way for http multipart
if r.Method == "PUT" {
buf, _ := ioutil.ReadAll(r.Body)
r.Body = ioutil.NopCloser(bytes.NewBuffer(buf))
fileName, _, _, _, _, _, _, _, pe := storage.ParseUpload(r)
if pe != nil {
glog.V(0).Infoln("failing to parse post body", pe.Error())
writeJsonError(w, r, http.StatusInternalServerError, pe)
err = pe
return
}
//reconstruct http request body for following new request to volume server
r.Body = ioutil.NopCloser(bytes.NewBuffer(buf))
path := r.URL.Path
if strings.HasSuffix(path, "/") {
if fileName != "" {
path += fileName
}
}
fileId, urlLocation, err = fs.queryFileInfoByPath(w, r, path)
} else {
fileId, urlLocation, err = fs.assignNewFileInfo(w, r, replication, collection)
}
return
}
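// A hedged sketch of the two branches above (paths are illustrative): a PUT
// reuses the file id already mapped to the path, so the content is overwritten
// in place, while a POST is assigned a fresh file id.
//
// curl -X PUT -F "file=@updated.txt" http://localhost:8888/docs/updated.txt
// curl -X POST -F "file=@new.txt" http://localhost:8888/docs/new.txt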

View file

@ -0,0 +1,24 @@
package master_ui
import (
"path/filepath"
"strings"
)
type Breadcrumb struct {
Name string
Link string
}
func ToBreadcrumb(fullpath string) (crumbs []Breadcrumb) {
parts := strings.Split(fullpath, "/")
for i := 0; i < len(parts); i++ {
crumbs = append(crumbs, Breadcrumb{
Name: parts[i] + "/",
Link: "/" + filepath.Join(parts[0:i+1]...),
})
}
return
}
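// A hedged illustration (path is arbitrary): ToBreadcrumb("/path/to") yields
// {Name: "/", Link: "/"}, {Name: "path/", Link: "/path"},
// {Name: "to/", Link: "/path/to"}, which the template renders as clickable
// path segments.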

View file

@ -20,29 +20,51 @@ var StatusTpl = template.Must(template.New("status").Parse(`<!DOCTYPE html>
</h1>
</div>
<div class="row">
{{.Path}}
{{ range $entry := .Breadcrumbs }}
<a href={{ $entry.Link }} >
{{ $entry.Name }}
</a>
{{ end }}
</div>
<div class="row">
<ul>
<table width="90%">
{{$path := .Path }}
{{ range $dirs_index, $dir := .Directories }}
<li>
{{ range $entry_index, $entry := .Entries }}
<tr>
<td>
{{if $entry.IsDirectory}}
<img src="https://www.w3.org/TR/WWWicn/folder.gif" width="20" height="23">
<a href={{ print $path $dir "/"}} >
{{ $dir }}
<a href={{ print $path "/" $entry.Name "/"}} >
{{ $entry.Name }}
</a>
</li>
{{else}}
<a href={{ print $path "/" $entry.Name }} >
{{ $entry.Name }}
</a>
{{end}}
</td>
<td align="right">
{{if $entry.IsDirectory}}
{{else}}
{{ $entry.Mime }}
{{end}}
</td>
<td align="right">
{{if $entry.IsDirectory}}
{{else}}
{{ $entry.Size }} bytes
&nbsp;&nbsp;&nbsp;
{{end}}
</td>
<td>
{{ $entry.Timestamp.Format "2006-01-02 15:04" }}
</td>
</tr>
{{ end }}
{{ range $file_index, $file := .Files }}
<li>
<a href={{ print $path $file.Name}} >
{{ $file.Name }}
</a>
</li>
{{ end }}
</ul>
</table>
</div>
{{if .ShouldDisplayLoadMore}}

View file

@ -11,7 +11,7 @@ import (
"google.golang.org/grpc/peer"
)
func (ms MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServer) error {
func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServer) error {
var dn *topology.DataNode
t := ms.Topo
for {
@ -77,3 +77,15 @@ func (ms MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServe
}
}
}
func (ms *MasterServer) KeepConnected(stream master_pb.Seaweed_KeepConnectedServer) error {
for {
_, err := stream.Recv()
if err != nil {
return err
}
if err := stream.Send(&master_pb.Empty{}); err != nil {
return err
}
}
}
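// A minimal client-side sketch of the echo loop above, assuming the request
// message is also master_pb.Empty and conn is an established *grpc.ClientConn;
// each Send is answered by one Empty, so it doubles as a liveness probe.
//
// client := master_pb.NewSeaweedClient(conn)
// stream, _ := client.KeepConnected(context.Background())
// _ = stream.Send(&master_pb.Empty{})
// _, _ = stream.Recv() // one Empty back per message sent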

View file

@ -198,7 +198,7 @@ func postFollowingOneRedirect(target string, contentType string, b bytes.Buffer)
reply := string(data)
if strings.HasPrefix(reply, "\"http") {
urlStr := reply[1: len(reply)-1]
urlStr := reply[1 : len(reply)-1]
glog.V(0).Infoln("Post redirected to ", urlStr)
resp2, err2 := http.Post(urlStr, contentType, backupReader)

View file

@ -7,49 +7,51 @@ import (
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/storage"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
func (vs *VolumeServer) GetMaster() string {
return vs.currentMaster
}
func (vs *VolumeServer) heartbeat() {
glog.V(0).Infof("Volume server bootstraps with master %s", vs.GetMasterNode())
vs.masterNodes = storage.NewMasterNodes(vs.masterNode)
glog.V(0).Infof("Volume server start with masters: %v", vs.MasterNodes)
vs.store.SetDataCenter(vs.dataCenter)
vs.store.SetRack(vs.rack)
var err error
var newLeader string
for {
err := vs.doHeartbeat(time.Duration(vs.pulseSeconds) * time.Second)
for _, master := range vs.MasterNodes {
if newLeader != "" {
master = newLeader
}
newLeader, err = vs.doHeartbeat(master, time.Duration(vs.pulseSeconds)*time.Second)
if err != nil {
glog.V(0).Infof("heartbeat error: %v", err)
time.Sleep(time.Duration(vs.pulseSeconds) * time.Second)
}
}
}
}
func (vs *VolumeServer) doHeartbeat(sleepInterval time.Duration) error {
vs.masterNodes.Reset()
masterNode, err := vs.masterNodes.FindMaster()
if err != nil {
return fmt.Errorf("No master found: %v", err)
}
func (vs *VolumeServer) doHeartbeat(masterNode string, sleepInterval time.Duration) (newLeader string, err error) {
grpcConnection, err := grpc.Dial(masterNode, grpc.WithInsecure())
if err != nil {
return fmt.Errorf("fail to dial: %v", err)
return "", fmt.Errorf("fail to dial: %v", err)
}
defer grpcConnection.Close()
client := master_pb.NewSeaweedClient(grpcConnection)
stream, err := client.SendHeartbeat(context.Background())
if err != nil {
glog.V(0).Infof("%v.SendHeartbeat(_) = _, %v", client, err)
return err
glog.V(0).Infof("SendHeartbeat to %s: %v", masterNode, err)
return "", err
}
vs.SetMasterNode(masterNode)
glog.V(0).Infof("Heartbeat to %s", masterNode)
glog.V(0).Infof("Heartbeat to: %v", masterNode)
vs.currentMaster = masterNode
vs.store.Client = stream
defer func() { vs.store.Client = nil }()
@ -70,7 +72,8 @@ func (vs *VolumeServer) doHeartbeat(sleepInterval time.Duration) error {
vs.guard.SecretKey = security.Secret(in.GetSecretKey())
}
if in.GetLeader() != "" && masterNode != in.GetLeader() {
vs.masterNodes.SetPossibleLeader(in.GetLeader())
glog.V(0).Infof("Volume Server found a new master newLeader: %v instead of %v", in.GetLeader(), masterNode)
newLeader = in.GetLeader()
doneChan <- nil
return
}
@ -79,7 +82,7 @@ func (vs *VolumeServer) doHeartbeat(sleepInterval time.Duration) error {
if err = stream.Send(vs.store.CollectHeartbeat()); err != nil {
glog.V(0).Infof("Volume Server Failed to talk with master %s: %v", masterNode, err)
return err
return "", err
}
tickChan := time.Tick(sleepInterval)
@ -89,11 +92,10 @@ func (vs *VolumeServer) doHeartbeat(sleepInterval time.Duration) error {
case <-tickChan:
if err = stream.Send(vs.store.CollectHeartbeat()); err != nil {
glog.V(0).Infof("Volume Server Failed to talk with master %s: %v", masterNode, err)
return err
return "", err
}
case err := <-doneChan:
glog.V(0).Infof("Volume Server heart beat stops with %v", err)
return err
case <-doneChan:
return
}
}
}

View file

@ -2,22 +2,19 @@ package weed_server
import (
"net/http"
"sync"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/storage"
)
type VolumeServer struct {
masterNode string
mnLock sync.RWMutex
MasterNodes []string
currentMaster string
pulseSeconds int
dataCenter string
rack string
store *storage.Store
guard *security.Guard
masterNodes *storage.MasterNodes
needleMapKind storage.NeedleMapType
FixJpgOrientation bool
@ -28,7 +25,7 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string,
port int, publicUrl string,
folders []string, maxCounts []int,
needleMapKind storage.NeedleMapType,
masterNode string, pulseSeconds int,
masterNodes []string, pulseSeconds int,
dataCenter string, rack string,
whiteList []string,
fixJpgOrientation bool,
@ -41,7 +38,7 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string,
FixJpgOrientation: fixJpgOrientation,
ReadRedirect: readRedirect,
}
vs.SetMasterNode(masterNode)
vs.MasterNodes = masterNodes
vs.store = storage.NewStore(port, ip, publicUrl, folders, maxCounts, vs.needleMapKind)
vs.guard = security.NewGuard(whiteList, "")
@ -67,7 +64,7 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string,
adminMux.HandleFunc("/", vs.privateStoreHandler)
if publicMux != adminMux {
// separated admin and public port
publicMux.HandleFunc("/favicon.ico", vs.faviconHandler)
publicMux.HandleFunc("/favicon.ico", faviconHandler)
publicMux.HandleFunc("/", vs.publicReadOnlyHandler)
}
@ -76,18 +73,6 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string,
return vs
}
func (vs *VolumeServer) GetMasterNode() string {
vs.mnLock.RLock()
defer vs.mnLock.RUnlock()
return vs.masterNode
}
func (vs *VolumeServer) SetMasterNode(masterNode string) {
vs.mnLock.Lock()
defer vs.mnLock.Unlock()
vs.masterNode = masterNode
}
func (vs *VolumeServer) Shutdown() {
glog.V(0).Infoln("Shutting down volume server...")
vs.store.Close()

View file

@ -51,7 +51,3 @@ func (vs *VolumeServer) publicReadOnlyHandler(w http.ResponseWriter, r *http.Req
vs.GetOrHeadHandler(w, r)
}
}
func (vs *VolumeServer) faviconHandler(w http.ResponseWriter, r *http.Request) {
vs.FaviconHandler(w, r)
}

View file

@ -8,8 +8,8 @@ import (
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/stats"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/chrislusf/seaweedfs/weed/storage"
"github.com/chrislusf/seaweedfs/weed/util"
)
func (vs *VolumeServer) statusHandler(w http.ResponseWriter, r *http.Request) {
@ -25,7 +25,7 @@ func (vs *VolumeServer) assignVolumeHandler(w http.ResponseWriter, r *http.Reque
if r.FormValue("preallocate") != "" {
preallocate, err = strconv.ParseInt(r.FormValue("preallocate"), 10, 64)
if err != nil {
glog.V(0).Infoln("ignoring invalid int64 value for preallocate = %v", r.FormValue("preallocate"))
glog.V(0).Infof("ignoring invalid int64 value for preallocate = %v", r.FormValue("preallocate"))
}
}
err = vs.store.AddVolume(
@ -41,7 +41,7 @@ func (vs *VolumeServer) assignVolumeHandler(w http.ResponseWriter, r *http.Reque
} else {
writeJsonError(w, r, http.StatusNotAcceptable, err)
}
glog.V(2).Infoln("assign volume = %s, collection = %s , replication = %s, error = %v",
glog.V(2).Infof("assign volume = %s, collection = %s , replication = %s, error = %v",
r.FormValue("volume"), r.FormValue("collection"), r.FormValue("replication"), err)
}

View file

@ -46,7 +46,7 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
w.WriteHeader(http.StatusNotFound)
return
}
lookupResult, err := operation.Lookup(vs.GetMasterNode(), volumeId.String())
lookupResult, err := operation.Lookup(vs.GetMaster(), volumeId.String())
glog.V(2).Infoln("volume", volumeId, "found on", lookupResult, "error", err)
if err == nil && len(lookupResult.Locations) > 0 {
u, _ := url.Parse(util.NormalizeUrl(lookupResult.Locations[0].PublicUrl))
@ -151,18 +151,6 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
}
}
func (vs *VolumeServer) FaviconHandler(w http.ResponseWriter, r *http.Request) {
data, err := images.Asset("favicon/favicon.ico")
if err != nil {
glog.V(2).Infoln("favicon read error:", err)
return
}
if e := writeResponseContent("favicon.ico", "image/x-icon", bytes.NewReader(data), w, r); e != nil {
glog.V(2).Infoln("response write error:", e)
}
}
func (vs *VolumeServer) tryHandleChunkedFile(n *storage.Needle, fileName string, w http.ResponseWriter, r *http.Request) (processed bool) {
if !n.IsChunkedManifest() || r.URL.Query().Get("cm") == "false" {
return false
@ -188,7 +176,7 @@ func (vs *VolumeServer) tryHandleChunkedFile(n *storage.Needle, fileName string,
chunkedFileReader := &operation.ChunkedFileReader{
Manifest: chunkManifest,
Master: vs.GetMasterNode(),
Master: vs.GetMaster(),
}
defer chunkedFileReader.Close()
if e := writeResponseContent(fileName, mType, chunkedFileReader, w, r); e != nil {

View file

@ -5,9 +5,9 @@ import (
"path/filepath"
"time"
ui "github.com/chrislusf/seaweedfs/weed/server/volume_server_ui"
"github.com/chrislusf/seaweedfs/weed/stats"
"github.com/chrislusf/seaweedfs/weed/util"
ui "github.com/chrislusf/seaweedfs/weed/server/volume_server_ui"
)
func (vs *VolumeServer) uiStatusHandler(w http.ResponseWriter, r *http.Request) {
@ -21,14 +21,14 @@ func (vs *VolumeServer) uiStatusHandler(w http.ResponseWriter, r *http.Request)
}
args := struct {
Version string
Master string
Masters []string
Volumes interface{}
DiskStatuses interface{}
Stats interface{}
Counters *stats.ServerStats
}{
util.VERSION,
vs.masterNode,
vs.MasterNodes,
vs.store.Status(),
ds,
infos,

View file

@ -22,7 +22,7 @@ func (vs *VolumeServer) vacuumVolumeCompactHandler(w http.ResponseWriter, r *htt
if r.FormValue("preallocate") != "" {
preallocate, err = strconv.ParseInt(r.FormValue("preallocate"), 10, 64)
if err != nil {
glog.V(0).Infoln("Failed to parse int64 preallocate = %s: %v", r.FormValue("preallocate"), err)
glog.V(0).Infof("Failed to parse int64 preallocate = %s: %v", r.FormValue("preallocate"), err)
}
}
err = vs.store.CompactVolume(r.FormValue("volume"), preallocate)

View file

@ -31,7 +31,7 @@ func (vs *VolumeServer) PostHandler(w http.ResponseWriter, r *http.Request) {
}
ret := operation.UploadResult{}
size, errorStatus := topology.ReplicatedWrite(vs.GetMasterNode(),
size, errorStatus := topology.ReplicatedWrite(vs.GetMaster(),
vs.store, volumeId, needle, r)
httpStatus := http.StatusCreated
if errorStatus != "" {
@ -80,14 +80,14 @@ func (vs *VolumeServer) DeleteHandler(w http.ResponseWriter, r *http.Request) {
return
}
// make sure all chunks had deleted before delete manifest
if e := chunkManifest.DeleteChunks(vs.GetMasterNode()); e != nil {
if e := chunkManifest.DeleteChunks(vs.GetMaster()); e != nil {
writeJsonError(w, r, http.StatusInternalServerError, fmt.Errorf("Delete chunks error: %v", e))
return
}
count = chunkManifest.Size
}
_, err := topology.ReplicatedDelete(vs.GetMasterNode(), vs.store, volumeId, n, r)
_, err := topology.ReplicatedDelete(vs.GetMaster(), vs.store, volumeId, n, r)
if err == nil {
m := make(map[string]int64)

View file

@ -72,8 +72,8 @@ var StatusTpl = template.Must(template.New("status").Funcs(funcMap).Parse(`<!DOC
<h2>System Stats</h2>
<table class="table table-condensed table-striped">
<tr>
<th>Master</th>
<td><a href="http://{{.Master}}/ui/index.html">{{.Master}}</a></td>
<th>Masters</th>
<td>{{.Masters}}</td>
</tr>
<tr>
<th>Weekly # ReadRequests</th>

View file

@ -53,14 +53,14 @@ func doLoading(file *os.File, nm *NeedleMap) (*NeedleMap, error) {
nm.FileCounter++
nm.FileByteCounter = nm.FileByteCounter + uint64(size)
oldOffset, oldSize := nm.m.Set(needle.Key(key), offset, size)
glog.V(3).Infoln("reading key", key, "offset", offset*NeedlePaddingSize, "size", size, "oldSize", oldSize)
// glog.V(3).Infoln("reading key", key, "offset", offset*NeedlePaddingSize, "size", size, "oldSize", oldSize)
if oldOffset > 0 && oldSize != TombstoneFileSize {
nm.DeletionCounter++
nm.DeletionByteCounter = nm.DeletionByteCounter + uint64(oldSize)
}
} else {
oldSize := nm.m.Delete(needle.Key(key))
glog.V(3).Infoln("removing key", key, "offset", offset*NeedlePaddingSize, "size", size, "oldSize", oldSize)
// glog.V(3).Infoln("removing key", key, "offset", offset*NeedlePaddingSize, "size", size, "oldSize", oldSize)
nm.DeletionCounter++
nm.DeletionByteCounter = nm.DeletionByteCounter + uint64(oldSize)
}

View file

@ -1,13 +1,11 @@
package storage
import (
"errors"
"fmt"
"strconv"
"strings"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
)
@ -15,55 +13,6 @@ const (
MAX_TTL_VOLUME_REMOVAL_DELAY = 10 // 10 minutes
)
type MasterNodes struct {
nodes []string
leader string
possibleLeader string
}
func (mn *MasterNodes) String() string {
return fmt.Sprintf("nodes:%v, leader:%s", mn.nodes, mn.leader)
}
func NewMasterNodes(bootstrapNode string) (mn *MasterNodes) {
mn = &MasterNodes{nodes: []string{bootstrapNode}, leader: ""}
return
}
func (mn *MasterNodes) Reset() {
if mn.leader != "" {
mn.leader = ""
glog.V(0).Infof("Resetting master nodes: %v", mn)
}
}
func (mn *MasterNodes) SetPossibleLeader(possibleLeader string) {
// TODO try to check this leader first
mn.possibleLeader = possibleLeader
}
func (mn *MasterNodes) FindMaster() (leader string, err error) {
if len(mn.nodes) == 0 {
return "", errors.New("No master node found!")
}
if mn.leader == "" {
for _, m := range mn.nodes {
glog.V(4).Infof("Listing masters on %s", m)
if leader, masters, e := operation.ListMasters(m); e == nil {
if leader != "" {
mn.nodes = append(masters, m)
mn.leader = leader
glog.V(2).Infof("current master nodes is %v", mn)
break
}
} else {
glog.V(4).Infof("Failed listing masters on %s: %v", m, e)
}
}
}
if mn.leader == "" {
return "", errors.New("No master node available!")
}
return mn.leader, nil
}
/*
* A VolumeServer contains one Store
*/

View file

@ -12,7 +12,7 @@ func getActualSize(size uint32) int64 {
return NeedleHeaderSize + int64(size) + NeedleChecksumSize + int64(padding)
}
func CheckVolumeDataIntegrity(v *Volume, indexFile *os.File) (error) {
func CheckVolumeDataIntegrity(v *Volume, indexFile *os.File) error {
var indexSize int64
var e error
if indexSize, e = verifyIndexFileIntegrity(indexFile); e != nil {

View file

@ -159,5 +159,4 @@ func distributedOperation(masterNode string, store *storage.Store, volumeId stor
glog.V(0).Infoln()
return fmt.Errorf("Failed to lookup for %d: %v", volumeId, lookupErr)
}
return nil
}

View file

@ -1,5 +1,5 @@
package util
const (
VERSION = "0.77"
VERSION = "0.90 beta"
)

View file

@ -141,7 +141,6 @@ func GetBufferStream(url string, values url.Values, allocatedBytes []byte, eachB
return err
}
}
return nil
}
func GetUrlStream(url string, values url.Values, readFn func(io.Reader) error) error {
@ -183,3 +182,70 @@ func NormalizeUrl(url string) string {
}
return "http://" + url
}
func ReadUrl(fileUrl string, offset int64, size int, buf []byte) (n int64, e error) {
req, _ := http.NewRequest("GET", fileUrl, nil)
req.Header.Add("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+int64(size)))
r, err := client.Do(req)
if err != nil {
return 0, err
}
defer r.Body.Close()
if r.StatusCode >= 400 {
return 0, fmt.Errorf("%s: %s", fileUrl, r.Status)
}
var i, m int
for {
m, err = r.Body.Read(buf[i:])
if m == 0 {
return
}
i += m
n += int64(m)
if err == io.EOF {
return n, nil
}
if err != nil {
return n, err
}
}
}
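// A hedged usage sketch (URL, offset, and size are illustrative): read 1 KB
// starting at byte 4096 of a stored file into a caller-owned buffer.
//
// buf := make([]byte, 1024)
// n, err := ReadUrl("http://127.0.0.1:8080/3,01637037d6", 4096, 1024, buf)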
func ReadUrlAsStream(fileUrl string, offset int64, size int, fn func(data []byte)) (n int64, e error) {
req, _ := http.NewRequest("GET", fileUrl, nil)
req.Header.Add("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+int64(size)))
r, err := client.Do(req)
if err != nil {
return 0, err
}
defer r.Body.Close()
if r.StatusCode >= 400 {
return 0, fmt.Errorf("%s: %s", fileUrl, r.Status)
}
var m int
buf := make([]byte, 64*1024)
for {
m, err = r.Body.Read(buf)
if m == 0 {
return
}
fn(buf[:m])
n += int64(m)
if err == io.EOF {
return n, nil
}
if err != nil {
return n, err
}
}
}