Mirror of https://github.com/seaweedfs/seaweedfs.git
add leveldb store
1. switch to viper for filer store configuration
2. simplify FindEntry() return values, removing "found"
3. add leveldb store
This commit is contained in:
parent c34feca59c
commit 9e77563c99
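
The second item above — dropping the boolean "found" from FindEntry() — changes every caller: "not found" is now reported through the error value instead. A minimal caller sketch (not part of this commit; it only assumes the filer2 package at this revision), showing the new convention:

package lookupexample

import (
    "fmt"

    "github.com/chrislusf/seaweedfs/weed/filer2"
)

// lookupAndPrint resolves one path with the new two-value FindEntry signature.
// The leveldb store returns filer2.ErrNotFound for a missing key; the memory
// store currently returns (nil, nil), so both cases are checked here.
func lookupAndPrint(f *filer2.Filer, path string) {
    entry, err := f.FindEntry(filer2.FullPath(path))
    if err == filer2.ErrNotFound || (err == nil && entry == nil) {
        fmt.Println("not found:", path)
        return
    }
    if err != nil {
        fmt.Println("lookup failed:", err)
        return
    }
    fmt.Println("found:", entry.FullPath)
}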

@@ -2,7 +2,6 @@ package command
import (
"net/http"
"os"
"strconv"
"time"

@@ -13,6 +12,7 @@ import (
"google.golang.org/grpc/reflection"
"google.golang.org/grpc"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/filer2"
)

var (

@@ -26,17 +26,10 @@ type FilerOptions struct {
publicPort *int
collection *string
defaultReplicaPlacement *string
dir *string
redirectOnRead *bool
disableDirListing *bool
confFile *string
maxMB *int
secretKey *string
cassandra_server *string
cassandra_keyspace *string
redis_server *string
redis_password *string
redis_database *int
}

func init() {

@@ -46,23 +39,15 @@ func init() {
f.ip = cmdFiler.Flag.String("ip", "", "filer server http listen ip address")
f.port = cmdFiler.Flag.Int("port", 8888, "filer server http listen port")
f.publicPort = cmdFiler.Flag.Int("port.public", 0, "port opened to public")
f.dir = cmdFiler.Flag.String("dir", os.TempDir(), "directory to store meta data")
f.defaultReplicaPlacement = cmdFiler.Flag.String("defaultReplicaPlacement", "000", "default replication type if not specified")
f.redirectOnRead = cmdFiler.Flag.Bool("redirectOnRead", false, "whether proxy or redirect to volume server during file GET request")
f.disableDirListing = cmdFiler.Flag.Bool("disableDirListing", false, "turn off directory listing")
f.confFile = cmdFiler.Flag.String("confFile", "", "json encoded filer conf file")
f.maxMB = cmdFiler.Flag.Int("maxMB", 32, "split files larger than the limit")
f.cassandra_server = cmdFiler.Flag.String("cassandra.server", "", "host[:port] of the cassandra server")
f.cassandra_keyspace = cmdFiler.Flag.String("cassandra.keyspace", "seaweed", "keyspace of the cassandra server")
f.redis_server = cmdFiler.Flag.String("redis.server", "", "comma separated host:port[,host2:port2]* of the redis server, e.g., 127.0.0.1:6379")
f.redis_password = cmdFiler.Flag.String("redis.password", "", "password in clear text")
f.redis_database = cmdFiler.Flag.Int("redis.database", 0, "the database on the redis server")
f.secretKey = cmdFiler.Flag.String("secure.secret", "", "secret to encrypt Json Web Token(JWT)")

}

var cmdFiler = &Command{
UsageLine: "filer -port=8888 -dir=/tmp -master=<ip:port>",
UsageLine: "filer -port=8888 -master=<ip:port>",
Short: "start a file server that points to a master server",
Long: `start a file server which accepts REST operation for any files.

@@ -75,20 +60,15 @@ var cmdFiler = &Command{
//return a json format subdirectory and files listing
GET /path/to/

Current <fullpath~fileid> mapping metadata store is local embedded leveldb.
It should be highly scalable to hundreds of millions of files on a modest machine.
The configuration file "filer.toml" is read from ".", "$HOME/.seaweedfs/", or "/etc/seaweedfs/", in that order.

Future we will ensure it can avoid of being SPOF.
The following are example filer.toml configuration file.

`,
` + filer2.FILER_TOML_EXAMPLE + "\n",
}

func runFiler(cmd *Command, args []string) bool {

if err := util.TestFolderWritable(*f.dir); err != nil {
glog.Fatalf("Check Meta Folder (-dir) Writable %s : %s", *f.dir, err)
}

f.start()

return true

@@ -104,13 +84,10 @@ func (fo *FilerOptions) start() {
}

fs, nfs_err := weed_server.NewFilerServer(defaultMux, publicVolumeMux,
*fo.ip, *fo.port, *fo.master, *fo.dir, *fo.collection,
*fo.ip, *fo.port, *fo.master, *fo.collection,
*fo.defaultReplicaPlacement, *fo.redirectOnRead, *fo.disableDirListing,
*fo.confFile,
*fo.maxMB,
*fo.secretKey,
*fo.cassandra_server, *fo.cassandra_keyspace,
*fo.redis_server, *fo.redis_password, *fo.redis_database,
)
if nfs_err != nil {
glog.Fatalf("Filer startup error: %v", nfs_err)

@@ -87,17 +87,10 @@ func init() {
filerOptions.collection = cmdServer.Flag.String("filer.collection", "", "all data will be stored in this collection")
filerOptions.port = cmdServer.Flag.Int("filer.port", 8888, "filer server http listen port")
filerOptions.publicPort = cmdServer.Flag.Int("filer.port.public", 0, "filer server public http listen port")
filerOptions.dir = cmdServer.Flag.String("filer.dir", "", "directory to store meta data, default to a 'filer' sub directory of what -dir is specified")
filerOptions.defaultReplicaPlacement = cmdServer.Flag.String("filer.defaultReplicaPlacement", "", "Default replication type if not specified during runtime.")
filerOptions.redirectOnRead = cmdServer.Flag.Bool("filer.redirectOnRead", false, "whether proxy or redirect to volume server during file GET request")
filerOptions.disableDirListing = cmdServer.Flag.Bool("filer.disableDirListing", false, "turn off directory listing")
filerOptions.confFile = cmdServer.Flag.String("filer.confFile", "", "json encoded filer conf file")
filerOptions.maxMB = cmdServer.Flag.Int("filer.maxMB", 32, "split files larger than the limit")
filerOptions.cassandra_server = cmdServer.Flag.String("filer.cassandra.server", "", "host[:port] of the cassandra server")
filerOptions.cassandra_keyspace = cmdServer.Flag.String("filer.cassandra.keyspace", "seaweed", "keyspace of the cassandra server")
filerOptions.redis_server = cmdServer.Flag.String("filer.redis.server", "", "host:port of the redis server, e.g., 127.0.0.1:6379")
filerOptions.redis_password = cmdServer.Flag.String("filer.redis.password", "", "redis password in clear text")
filerOptions.redis_database = cmdServer.Flag.Int("filer.redis.database", 0, "the database on the redis server")
}

func runServer(cmd *Command, args []string) bool {

@@ -157,15 +150,6 @@ func runServer(cmd *Command, args []string) bool {
if *masterMetaFolder == "" {
*masterMetaFolder = folders[0]
}
if *isStartingFiler {
if *filerOptions.dir == "" {
*filerOptions.dir = *masterMetaFolder + "/filer"
os.MkdirAll(*filerOptions.dir, 0700)
}
if err := util.TestFolderWritable(*filerOptions.dir); err != nil {
glog.Fatalf("Check Mapping Meta Folder (-filer.dir=\"%s\") Writable: %s", *filerOptions.dir, err)
}
}
if err := util.TestFolderWritable(*masterMetaFolder); err != nil {
glog.Fatalf("Check Meta Folder (-mdir=\"%s\") Writable: %s", *masterMetaFolder, err)
}

weed/filer2/configuration.go (new file, 87 lines)

@@ -0,0 +1,87 @@
package filer2

import (
"os"

"github.com/spf13/viper"
"github.com/chrislusf/seaweedfs/weed/glog"
)

const (
FILER_TOML_EXAMPLE = `
# A sample TOML config file for SeaweedFS filer store

# local in memory, mostly for testing purpose
[memory]
enabled = false

[leveldb]
enabled = false
dir = "." # directory to store level db files

[mysql]
enabled = true
server = "192.168.1.1"
port = 8080
username = ""
password = ""
database = ""
connection_max_idle = 100
connection_max_open = 100

[postgres]
enabled = false
server = "192.168.1.1"
port = 8080
username = ""
password = ""
database = ""
connection_max_idle = 100
connection_max_open = 100

`
)

var (
Stores []FilerStore
)

func (f *Filer) LoadConfiguration() {

// find a filer store
viper.SetConfigName("filer") // name of config file (without extension)
viper.AddConfigPath(".") // optionally look for config in the working directory
viper.AddConfigPath("$HOME/.seaweedfs") // call multiple times to add many search paths
viper.AddConfigPath("/etc/seaweedfs/") // path to look for the config file in
if err := viper.ReadInConfig(); err != nil { // Handle errors reading the config file
glog.Fatalf("Failed to load filer.toml file from current directory, or $HOME/.seaweedfs/, or /etc/seaweedfs/" +
"\n\nPlease follow this example and add a filer.toml file to " +
"current directory, or $HOME/.seaweedfs/, or /etc/seaweedfs/:\n" + FILER_TOML_EXAMPLE)
}

glog.V(0).Infof("Reading filer configuration from %s", viper.ConfigFileUsed())
for _, store := range Stores {
if viper.GetBool(store.GetName() + ".enabled") {
viperSub := viper.Sub(store.GetName())
if err := store.Initialize(viperSub); err != nil {
glog.Fatalf("Failed to initialize store for %s: %+v",
store.GetName(), err)
}
f.SetStore(store)
glog.V(0).Infof("Configure filer for %s from %s", store.GetName(), viper.ConfigFileUsed())
return
}
}

println()
println("Supported filer stores are:")
for _, store := range Stores {
println(" " + store.GetName())
}

println()
println("Please configure a supported filer store in", viper.ConfigFileUsed())
println()

os.Exit(-1)
}
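
With this file, store selection moves out of the server code: every store package registers itself into filer2.Stores from its init(), and LoadConfiguration installs the first store whose "<name>.enabled" key is true in filer.toml. A hedged sketch of the wiring from a caller's point of view (not part of the commit; the master address is a placeholder):

package main

import (
    "github.com/chrislusf/seaweedfs/weed/filer2"
    // Blank imports matter only for their init() side effects: each one appends
    // its store to filer2.Stores, the same pattern the new leveldb store uses.
    _ "github.com/chrislusf/seaweedfs/weed/filer2/leveldb"
    _ "github.com/chrislusf/seaweedfs/weed/filer2/memdb"
)

func main() {
    f := filer2.NewFiler("localhost:9333") // master address is illustrative
    // Reads filer.toml from ".", "$HOME/.seaweedfs", or "/etc/seaweedfs" and
    // installs the first enabled store via SetStore(), or exits if none is enabled.
    f.LoadConfiguration()
}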

@@ -1,38 +0,0 @@
package embedded

import (
"github.com/syndtr/goleveldb/leveldb"
"github.com/chrislusf/seaweedfs/weed/filer2"
)

type EmbeddedStore struct {
db *leveldb.DB
}

func NewEmbeddedStore(dir string) (filer *EmbeddedStore, err error) {
filer = &EmbeddedStore{}
if filer.db, err = leveldb.OpenFile(dir, nil); err != nil {
return
}
return
}

func (filer *EmbeddedStore) InsertEntry(entry *filer2.Entry) (err error) {
return nil
}

func (filer *EmbeddedStore) UpdateEntry(entry *filer2.Entry) (err error) {
return nil
}

func (filer *EmbeddedStore) FindEntry(fullpath filer2.FullPath) (found bool, entry *filer2.Entry, err error) {
return false, nil, nil
}

func (filer *EmbeddedStore) DeleteEntry(fullpath filer2.FullPath) (entry *filer2.Entry, err error) {
return nil, nil
}

func (filer *EmbeddedStore) ListDirectoryEntries(fullpath filer2.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) {
return nil, nil
}

@@ -28,15 +28,14 @@ type Entry struct {
Chunks []*filer_pb.FileChunk `json:"chunks,omitempty"`
}

func (entry Entry) Size() uint64 {
func (entry *Entry) Size() uint64 {
return TotalSize(entry.Chunks)
}

func (entry Entry) Timestamp() time.Time {
func (entry *Entry) Timestamp() time.Time {
if entry.IsDirectory() {
return entry.Crtime
} else {
return entry.Mtime
}
}

@@ -9,7 +9,7 @@ import (
"fmt"
)

func (entry Entry) EncodeAttributesAndChunks() ([]byte, error) {
func (entry *Entry) EncodeAttributesAndChunks() ([]byte, error) {
message := &filer_pb.Entry{
Attributes: &filer_pb.FuseAttributes{
Crtime: entry.Attr.Crtime.Unix(),

@@ -23,7 +23,7 @@ func (entry Entry) EncodeAttributesAndChunks() ([]byte, error) {
return proto.Marshal(message)
}

func (entry Entry) DecodeAttributesAndChunks(blob []byte) (error) {
func (entry *Entry) DecodeAttributesAndChunks(blob []byte) (error) {

message := &filer_pb.Entry{}

@@ -50,11 +50,7 @@ func (f *Filer) CreateEntry(entry *Entry) (error) {
// not found, check the store directly
if dirEntry == nil {
glog.V(4).Infof("find uncached directory: %s", dirPath)
var dirFindErr error
_, dirEntry, dirFindErr = f.FindEntry(FullPath(dirPath))
if dirFindErr != nil {
return fmt.Errorf("findDirectory %s: %v", dirPath, dirFindErr)
}
dirEntry, _ = f.FindEntry(FullPath(dirPath))
} else {
glog.V(4).Infof("found cached directory: %s", dirPath)
}

@@ -116,13 +112,13 @@ func (f *Filer) UpdateEntry(entry *Entry) (err error) {
return f.store.UpdateEntry(entry)
}

func (f *Filer) FindEntry(p FullPath) (found bool, entry *Entry, err error) {
func (f *Filer) FindEntry(p FullPath) (entry *Entry, err error) {
return f.store.FindEntry(p)
}

func (f *Filer) DeleteEntry(p FullPath) (fileEntry *Entry, err error) {
found, entry, err := f.FindEntry(p)
if err != nil || !found {
entry, err := f.FindEntry(p)
if err != nil {
return nil, err
}
if entry.IsDirectory() {

@@ -1,11 +1,16 @@
package filer2

import "errors"
import (
"errors"
"github.com/spf13/viper"
)

type FilerStore interface {
GetName() string
Initialize(viper *viper.Viper) (error)
InsertEntry(*Entry) (error)
UpdateEntry(*Entry) (err error)
FindEntry(FullPath) (found bool, entry *Entry, err error)
FindEntry(FullPath) (entry *Entry, err error)
DeleteEntry(FullPath) (fileEntry *Entry, err error)
ListDirectoryEntries(dirPath FullPath, startFileName string, inclusive bool, limit int) ([]*Entry, error)
}
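
This interface plus the Stores slice is the entire plugin surface: a backend implements these methods and appends itself to filer2.Stores in an init(), after which it can be switched on from filer.toml. A hypothetical, do-nothing store, only to show the shape (the "null" name and package are invented, not part of the commit):

package nullstore

import (
    "github.com/spf13/viper"

    "github.com/chrislusf/seaweedfs/weed/filer2"
)

func init() {
    // Same registration pattern as the memdb and leveldb stores in this commit.
    filer2.Stores = append(filer2.Stores, &NullStore{})
}

// NullStore accepts every write and never finds anything; it exists purely to
// illustrate the FilerStore contract.
type NullStore struct{}

func (s *NullStore) GetName() string                       { return "null" }
func (s *NullStore) Initialize(config *viper.Viper) error  { return nil }
func (s *NullStore) InsertEntry(entry *filer2.Entry) error { return nil }
func (s *NullStore) UpdateEntry(entry *filer2.Entry) error { return nil }

func (s *NullStore) FindEntry(fullpath filer2.FullPath) (*filer2.Entry, error) {
    return nil, filer2.ErrNotFound
}

func (s *NullStore) DeleteEntry(fullpath filer2.FullPath) (*filer2.Entry, error) {
    return nil, nil
}

func (s *NullStore) ListDirectoryEntries(dirPath filer2.FullPath, startFileName string, inclusive bool, limit int) ([]*filer2.Entry, error) {
    return nil, nil
}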

weed/filer2/leveldb/leveldb_store.go (new file, 171 lines)

@@ -0,0 +1,171 @@
package leveldb

import (
"fmt"
"bytes"

"github.com/syndtr/goleveldb/leveldb"
"github.com/chrislusf/seaweedfs/weed/filer2"
leveldb_util "github.com/syndtr/goleveldb/leveldb/util"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/spf13/viper"
weed_util "github.com/chrislusf/seaweedfs/weed/util"
)

const (
DIR_FILE_SEPARATOR = byte(0x00)
)

func init() {
filer2.Stores = append(filer2.Stores, &LevelDBStore{})
}

type LevelDBStore struct {
db *leveldb.DB
}

func (filer *LevelDBStore) GetName() string {
return "leveldb"
}

func (filer *LevelDBStore) Initialize(viper *viper.Viper) (err error) {
dir := viper.GetString("dir")
return filer.initialize(dir)
}

func (filer *LevelDBStore) initialize(dir string) (err error) {
if err := weed_util.TestFolderWritable(dir); err != nil {
return fmt.Errorf("Check Level Folder %s Writable: %s", dir, err)
}

if filer.db, err = leveldb.OpenFile(dir, nil); err != nil {
return
}
return
}

func (store *LevelDBStore) InsertEntry(entry *filer2.Entry) (err error) {
key := genKey(entry.DirAndName())

value, err := entry.EncodeAttributesAndChunks()
if err != nil {
return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
}

err = store.db.Put(key, value, nil)

if err != nil {
return fmt.Errorf("persisting %s : %v", entry.FullPath, err)
}

// println("saved", entry.FullPath, "chunks", len(entry.Chunks))

return nil
}

func (store *LevelDBStore) UpdateEntry(entry *filer2.Entry) (err error) {

return store.InsertEntry(entry)
}

func (store *LevelDBStore) FindEntry(fullpath filer2.FullPath) (entry *filer2.Entry, err error) {
key := genKey(fullpath.DirAndName())

data, err := store.db.Get(key, nil)

if err == leveldb.ErrNotFound {
return nil, filer2.ErrNotFound
}
if err != nil {
return nil, fmt.Errorf("get %s : %v", entry.FullPath, err)
}

entry = &filer2.Entry{
FullPath: fullpath,
}
err = entry.DecodeAttributesAndChunks(data)
if err != nil {
return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err)
}

// println("read", entry.FullPath, "chunks", len(entry.Chunks), "data", len(data), string(data))

return entry, nil
}

func (store *LevelDBStore) DeleteEntry(fullpath filer2.FullPath) (entry *filer2.Entry, err error) {
key := genKey(fullpath.DirAndName())

entry, _ = store.FindEntry(fullpath)

err = store.db.Delete(key, nil)
if err != nil {
return entry, fmt.Errorf("delete %s : %v", entry.FullPath, err)
}

return entry, nil
}

func (store *LevelDBStore) ListDirectoryEntries(fullpath filer2.FullPath, startFileName string, inclusive bool,
limit int) (entries []*filer2.Entry, err error) {

directoryPrefix := genDirectoryKeyPrefix(fullpath, "")

iter := store.db.NewIterator(&leveldb_util.Range{Start: genDirectoryKeyPrefix(fullpath, startFileName)}, nil)
for iter.Next() {
key := iter.Key()
if !bytes.HasPrefix(key, directoryPrefix) {
break
}
fileName := getNameFromKey(key)
if fileName == "" {
continue
}
if fileName == startFileName && !inclusive {
continue
}
limit--
if limit < 0 {
break
}
entry := &filer2.Entry{
FullPath: filer2.NewFullPath(string(fullpath), fileName),
}
if decodeErr := entry.DecodeAttributesAndChunks(iter.Value()); decodeErr != nil {
err = decodeErr
glog.V(0).Infof("list %s : %v", entry.FullPath, err)
break
}
entries = append(entries, entry)
}
iter.Release()

return entries, err
}

func genKey(dirPath, fileName string) (key []byte) {
key = []byte(dirPath)
key = append(key, DIR_FILE_SEPARATOR)
key = append(key, []byte(fileName)...)
return key
}

func genDirectoryKeyPrefix(fullpath filer2.FullPath, startFileName string) (keyPrefix []byte) {
keyPrefix = []byte(string(fullpath))
keyPrefix = append(keyPrefix, DIR_FILE_SEPARATOR)
if len(startFileName) > 0 {
keyPrefix = append(keyPrefix, []byte(startFileName)...)
}
return keyPrefix
}

func getNameFromKey(key []byte) (string) {

sepIndex := len(key) - 1
for sepIndex >= 0 && key[sepIndex] != DIR_FILE_SEPARATOR {
sepIndex--
}

return string(key[sepIndex+1:])

}
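
genKey above determines the on-disk layout: parent directory, a 0x00 separator byte, then the file name, so all direct children of a directory share one byte prefix, which is what genDirectoryKeyPrefix produces and what ListDirectoryEntries range-scans. A standalone illustration of that layout (assuming DirAndName splits the path at the last slash; the snippet re-declares the helper only so it runs on its own):

package main

import "fmt"

const dirFileSeparator = byte(0x00) // mirrors DIR_FILE_SEPARATOR above

// genKey re-creates the key layout used by the leveldb store: dir + 0x00 + name.
func genKey(dirPath, fileName string) []byte {
    key := []byte(dirPath)
    key = append(key, dirFileSeparator)
    key = append(key, []byte(fileName)...)
    return key
}

func main() {
    // For the test path /home/chris/this/is/one/file1.jpg the parent directory is
    // "/home/chris/this/is/one" and the name is "file1.jpg".
    fmt.Printf("%q\n", genKey("/home/chris/this/is/one", "file1.jpg"))
    // Output: "/home/chris/this/is/one\x00file1.jpg"
}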

weed/filer2/leveldb/leveldb_store_test.go (new file, 61 lines)

@@ -0,0 +1,61 @@
package leveldb

import (
"testing"
"github.com/chrislusf/seaweedfs/weed/filer2"
"io/ioutil"
"os"
)

func TestCreateAndFind(t *testing.T) {
filer := filer2.NewFiler("")
dir, _ := ioutil.TempDir("", "seaweedfs_filer_test")
defer os.RemoveAll(dir)
store := &LevelDBStore{}
store.initialize(dir)
filer.SetStore(store)
filer.DisableDirectoryCache()

fullpath := filer2.FullPath("/home/chris/this/is/one/file1.jpg")

entry1 := &filer2.Entry{
FullPath: fullpath,
Attr: filer2.Attr{
Mode: 0440,
Uid: 1234,
Gid: 5678,
},
}

if err := filer.CreateEntry(entry1); err != nil {
t.Errorf("create entry %v: %v", entry1.FullPath, err)
return
}

entry, err := filer.FindEntry(fullpath)

if err != nil {
t.Errorf("find entry: %v", err)
return
}

if entry.FullPath != entry1.FullPath {
t.Errorf("find wrong entry: %v", entry.FullPath)
return
}

// checking one upper directory
entries, _ := filer.ListDirectoryEntries(filer2.FullPath("/home/chris/this/is/one"), "", false, 100)
if len(entries) != 1 {
t.Errorf("list entries count: %v", len(entries))
return
}

// checking one upper directory
entries, _ = filer.ListDirectoryEntries(filer2.FullPath("/"), "", false, 100)
if len(entries) != 1 {
t.Errorf("list entries count: %v", len(entries))
return
}

}

@@ -6,8 +6,13 @@ import (
"strings"
"fmt"
"time"
"github.com/spf13/viper"
)

func init() {
filer2.Stores = append(filer2.Stores, &MemDbStore{})
}

type MemDbStore struct {
tree *btree.BTree
}

@@ -20,10 +25,13 @@ func (a Entry) Less(b btree.Item) bool {
return strings.Compare(string(a.FullPath), string(b.(Entry).FullPath)) < 0
}

func NewMemDbStore() (filer *MemDbStore) {
filer = &MemDbStore{}
func (filer *MemDbStore) GetName() string {
return "memory"
}

func (filer *MemDbStore) Initialize(viper *viper.Viper) (err error) {
filer.tree = btree.New(8)
return
return nil
}

func (filer *MemDbStore) InsertEntry(entry *filer2.Entry) (err error) {

@@ -34,22 +42,21 @@ func (filer *MemDbStore) InsertEntry(entry *filer2.Entry) (err error) {
}

func (filer *MemDbStore) UpdateEntry(entry *filer2.Entry) (err error) {
found, _, err := filer.FindEntry(entry.FullPath)
if !found {
return fmt.Errorf("No such file: %s", entry.FullPath)
if _, err = filer.FindEntry(entry.FullPath); err != nil {
return fmt.Errorf("no such file %s : %v", entry.FullPath, err)
}
entry.Mtime = time.Now()
filer.tree.ReplaceOrInsert(Entry{entry})
return nil
}

func (filer *MemDbStore) FindEntry(fullpath filer2.FullPath) (found bool, entry *filer2.Entry, err error) {
func (filer *MemDbStore) FindEntry(fullpath filer2.FullPath) (entry *filer2.Entry, err error) {
item := filer.tree.Get(Entry{&filer2.Entry{FullPath: fullpath}})
if item == nil {
return false, nil, nil
return nil, nil
}
entry = item.(Entry).Entry
return true, entry, nil
return entry, nil
}

func (filer *MemDbStore) DeleteEntry(fullpath filer2.FullPath) (entry *filer2.Entry, err error) {

@@ -7,7 +7,9 @@ import (

func TestCreateAndFind(t *testing.T) {
filer := filer2.NewFiler("")
filer.SetStore(NewMemDbStore())
store := &MemDbStore{}
store.Initialize(nil)
filer.SetStore(store)
filer.DisableDirectoryCache()

fullpath := filer2.FullPath("/home/chris/this/is/one/file1.jpg")

@@ -47,7 +49,9 @@ func TestCreateAndFind(t *testing.T) {

func TestCreateFileAndList(t *testing.T) {
filer := filer2.NewFiler("")
filer.SetStore(NewMemDbStore())
store := &MemDbStore{}
store.Initialize(nil)
filer.SetStore(store)
filer.DisableDirectoryCache()

entry1 := &filer2.Entry{

@@ -14,12 +14,9 @@ import (

func (fs *FilerServer) LookupDirectoryEntry(ctx context.Context, req *filer_pb.LookupDirectoryEntryRequest) (*filer_pb.LookupDirectoryEntryResponse, error) {

found, entry, err := fs.filer.FindEntry(filer2.FullPath(filepath.Join(req.Directory, req.Name)))
entry, err := fs.filer.FindEntry(filer2.FullPath(filepath.Join(req.Directory, req.Name)))
if err != nil {
return nil, err
}
if !found {
return nil, fmt.Errorf("%s not found under %s", req.Name, req.Directory)
return nil, fmt.Errorf("%s not found under %s: %v", req.Name, req.Directory, err)
}

return &filer_pb.LookupDirectoryEntryResponse{

@@ -65,13 +62,10 @@ func (fs *FilerServer) GetEntryAttributes(ctx context.Context, req *filer_pb.Get

fullpath := filer2.NewFullPath(req.ParentDir, req.Name)

found, entry, err := fs.filer.FindEntry(fullpath)
entry, err := fs.filer.FindEntry(fullpath)
if err != nil {
return nil, err
}
if !found {
attributes.FileSize = 0
return nil, fmt.Errorf("file %s not found", fullpath)
return nil, fmt.Errorf("FindEntry %s: %v", fullpath, err)
}

attributes.FileSize = entry.Size()

@@ -138,12 +132,9 @@ func (fs *FilerServer) CreateEntry(ctx context.Context, req *filer_pb.CreateEntr
func (fs *FilerServer) UpdateEntry(ctx context.Context, req *filer_pb.UpdateEntryRequest) (*filer_pb.UpdateEntryResponse, error) {

fullpath := filepath.Join(req.Directory, req.Entry.Name)
found, entry, err := fs.filer.FindEntry(filer2.FullPath(fullpath))
entry, err := fs.filer.FindEntry(filer2.FullPath(fullpath))
if err != nil {
return &filer_pb.UpdateEntryResponse{}, err
}
if !found {
return &filer_pb.UpdateEntryResponse{}, fmt.Errorf("file not found: %s", fullpath)
return &filer_pb.UpdateEntryResponse{}, fmt.Errorf("not found %s: %v", fullpath, err)
}

// remove old chunks if not included in the new ones

@@ -1,10 +1,8 @@
package weed_server

import (
"encoding/json"
"math/rand"
"net/http"
"os"
"strconv"
"sync"
"time"

@@ -16,7 +14,8 @@ import (
"github.com/chrislusf/seaweedfs/weed/storage"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/filer2/memdb"
_ "github.com/chrislusf/seaweedfs/weed/filer2/memdb"
_ "github.com/chrislusf/seaweedfs/weed/filer2/leveldb"
)

type filerConf struct {

@@ -25,21 +24,6 @@ type filerConf struct {
PostgresConf *postgres_store.PostgresConf `json:"postgres"`
}

func parseConfFile(confPath string) (*filerConf, error) {
var setting filerConf
configFile, err := os.Open(confPath)
defer configFile.Close()
if err != nil {
return nil, err
}

jsonParser := json.NewDecoder(configFile)
if err = jsonParser.Decode(&setting); err != nil {
return nil, err
}
return &setting, nil
}

type FilerServer struct {
port string
master string

@@ -54,13 +38,10 @@ type FilerServer struct {
masterNodes *storage.MasterNodes
}

func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, ip string, port int, master string, dir string, collection string,
func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, ip string, port int, master string, collection string,
replication string, redirectOnRead bool, disableDirListing bool,
confFile string,
maxMB int,
secret string,
cassandra_server string, cassandra_keyspace string,
redis_server string, redis_password string, redis_database int,
) (fs *FilerServer, err error) {
fs = &FilerServer{
master: master,

@@ -71,42 +52,9 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, ip string, port int,
maxMB: maxMB,
port: ip + ":" + strconv.Itoa(port),
}

var setting *filerConf
if confFile != "" {
setting, err = parseConfFile(confFile)
if err != nil {
return nil, err
}
} else {
setting = new(filerConf)
}

if setting.MysqlConf != nil && len(setting.MysqlConf) != 0 {
// mysql_store := mysql_store.NewMysqlStore(setting.MysqlConf, setting.IsSharding, setting.ShardCount)
// fs.filer = flat_namespace.NewFlatNamespaceFiler(master, mysql_store)
} else if setting.PostgresConf != nil {
// fs.filer = postgres_store.NewPostgresStore(master, *setting.PostgresConf)
} else if cassandra_server != "" {
// cassandra_store, err := cassandra_store.NewCassandraStore(cassandra_keyspace, cassandra_server)
// if err != nil {
// glog.Fatalf("Can not connect to cassandra server %s with keyspace %s: %v", cassandra_server, cassandra_keyspace, err)
// }
// fs.filer = flat_namespace.NewFlatNamespaceFiler(master, cassandra_store)
} else if redis_server != "" {
// redis_store := redis_store.NewRedisStore(redis_server, redis_password, redis_database)
// fs.filer = flat_namespace.NewFlatNamespaceFiler(master, redis_store)
} else {
/*
if fs.filer, err = embedded_filer.NewFilerEmbedded(master, dir); err != nil {
glog.Fatalf("Can not start filer in dir %s : %v", dir, err)
return
}
*/
}

fs.filer = filer2.NewFiler(master)
fs.filer.SetStore(memdb.NewMemDbStore())

fs.filer.LoadConfiguration()

defaultMux.HandleFunc("/admin/register", fs.registerHandler)
defaultMux.HandleFunc("/", fs.filerHandler)

@@ -49,7 +49,7 @@ func (fs *FilerServer) registerHandler(w http.ResponseWriter, r *http.Request) {
glog.V(2).Infof("register %s to %s parse fileSize %s", fileId, path, r.FormValue("fileSize"))
err = fs.filer.CreateEntry(entry)
if err != nil {
glog.V(4).Infof("register %s to %s error: %v", fileId, path, err)
glog.V(0).Infof("register %s to %s error: %v", fileId, path, err)
writeJsonError(w, r, http.StatusInternalServerError, err)
} else {
w.WriteHeader(http.StatusOK)

@@ -79,9 +79,9 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request,
path = path[:len(path)-1]
}

found, entry, err := fs.filer.FindEntry(filer2.FullPath(path))
if !found || err != nil {
glog.V(3).Infof("Not found %s: %v", path, err)
entry, err := fs.filer.FindEntry(filer2.FullPath(path))
if err != nil {
glog.V(1).Infof("Not found %s: %v", path, err)
w.WriteHeader(http.StatusNotFound)
return
}

@@ -96,7 +96,7 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request,
}

if len(entry.Chunks) == 0 {
glog.V(3).Infof("Empty %s: %v", path)
glog.V(1).Infof("no file chunks for %s, attr=%+v", path, entry.Attr)
w.WriteHeader(http.StatusNoContent)
return
}

@@ -76,20 +76,17 @@ func makeFormData(filename, mimeType string, content io.Reader) (formData io.Rea
}

func (fs *FilerServer) queryFileInfoByPath(w http.ResponseWriter, r *http.Request, path string) (fileId, urlLocation string, err error) {
var found bool
var entry *filer2.Entry
if found, entry, err = fs.filer.FindEntry(filer2.FullPath(path)); err != nil {
if entry, err = fs.filer.FindEntry(filer2.FullPath(path)); err != nil {
glog.V(0).Infoln("failing to find path in filer store", path, err.Error())
writeJsonError(w, r, http.StatusInternalServerError, err)
} else if found {
} else {
fileId = entry.Chunks[0].FileId
urlLocation, err = operation.LookupFileId(fs.getMasterNode(), fileId)
if err != nil {
glog.V(1).Infoln("operation LookupFileId %s failed, err is %s", fileId, err.Error())
w.WriteHeader(http.StatusNotFound)
}
} else {
w.WriteHeader(http.StatusNotFound)
}
return
}

@@ -319,7 +316,7 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) {

// also delete the old fid unless PUT operation
if r.Method != "PUT" {
if found, entry, err := fs.filer.FindEntry(filer2.FullPath(path)); err == nil && found {
if entry, err := fs.filer.FindEntry(filer2.FullPath(path)); err == nil {
oldFid := entry.Chunks[0].FileId
operation.DeleteFile(fs.getMasterNode(), oldFid, fs.jwt(oldFid))
} else if err != nil && err != filer.ErrNotFound {

@@ -485,7 +482,7 @@ func (fs *FilerServer) doAutoChunk(w http.ResponseWriter, r *http.Request, conte
path := r.URL.Path
// also delete the old fid unless PUT operation
if r.Method != "PUT" {
if found, entry, err := fs.filer.FindEntry(filer2.FullPath(path)); found && err == nil {
if entry, err := fs.filer.FindEntry(filer2.FullPath(path)); err == nil {
for _, chunk := range entry.Chunks {
oldFid := chunk.FileId
operation.DeleteFile(fs.getMasterNode(), oldFid, fs.jwt(oldFid))