Mirror of https://github.com/seaweedfs/seaweedfs.git (synced 2024-01-19 02:48:24 +00:00)

Commit 290c6b7f01: Merge remote-tracking branch 'origin/master' into filer_mongodb

# Conflicts:
#	go.mod
#	go.sum
#	weed/server/filer_server.go
@@ -450,6 +450,12 @@ go get github.com/chrislusf/seaweedfs/weed

Once this is done, you will find the executable "weed" in your `$GOPATH/bin` directory

Note:
* If you run into the following panic, try `rm -Rf $GOPATH/src/go.etcd.io/etcd/vendor/golang.org/x/net/trace` and build again.

```
panic: /debug/requests is already registered. You may have two independent copies of golang.org/x/net/trace in your binary, trying to maintain separate state. This may involve a vendored copy of golang.org/x/net/trace.
```

Step 4: after you modify your code locally, you can start a local build by calling `go install` under
go.mod (12 lines changed)

@@ -15,7 +15,7 @@ require (
	github.com/coreos/go-semver v0.3.0 // indirect
	github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f // indirect
	github.com/dgrijalva/jwt-go v3.2.0+incompatible
	github.com/disintegration/imaging v1.6.1
	github.com/disintegration/imaging v1.6.2
	github.com/dustin/go-humanize v1.0.0
	github.com/eapache/go-resiliency v1.2.0 // indirect
	github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a
@@ -25,7 +25,7 @@ require (
	github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 // indirect
	github.com/frankban/quicktest v1.7.2 // indirect
	github.com/gabriel-vasile/mimetype v1.0.0
	github.com/go-redis/redis v6.15.2+incompatible
	github.com/go-redis/redis v6.15.7+incompatible
	github.com/go-sql-driver/mysql v1.4.1
	github.com/gocql/gocql v0.0.0-20190829130954-e163eff7a8c6
	github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48 // indirect
@@ -38,7 +38,6 @@ require (
	github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4 // indirect
	github.com/grpc-ecosystem/grpc-gateway v1.11.0 // indirect
	github.com/hashicorp/golang-lru v0.5.3 // indirect
	github.com/jacobsa/daemonize v0.0.0-20160101105449-e460293e890f
	github.com/jcmturner/gofork v1.0.0 // indirect
	github.com/karlseguin/ccache v2.0.3+incompatible
	github.com/karlseguin/expect v1.0.1 // indirect
@@ -57,10 +56,11 @@ require (
	github.com/pierrec/lz4 v2.2.7+incompatible // indirect
	github.com/prometheus/client_golang v1.1.0
	github.com/prometheus/procfs v0.0.4 // indirect
	github.com/rakyll/statik v0.1.6
	github.com/rakyll/statik v0.1.7
	github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 // indirect
	github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd
	github.com/seaweedfs/fuse v0.0.0-20190510212405-310228904eff
	github.com/seaweedfs/goexif v1.0.2
	github.com/sirupsen/logrus v1.4.2 // indirect
	github.com/spaolacci/murmur3 v1.1.0 // indirect
	github.com/spf13/afero v1.2.2 // indirect
	github.com/spf13/jwalterweatherman v1.1.0 // indirect
@@ -81,7 +81,7 @@ require (
	gocloud.dev/pubsub/natspubsub v0.16.0
	gocloud.dev/pubsub/rabbitpubsub v0.16.0
	golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7 // indirect
	golang.org/x/image v0.0.0-20190829233526-b3c06291d021 // indirect
	golang.org/x/image v0.0.0-20200119044424-58c23975cae1 // indirect
	golang.org/x/net v0.0.0-20190909003024-a7b16738d86b
	golang.org/x/sys v0.0.0-20190910064555-bbd175535a8b
	golang.org/x/tools v0.0.0-20190911022129-16c5e0f7d110
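The dependency changes above are ordinary `go.mod` edits. For orientation only (not part of this commit), reproducing one of these bumps locally rather than through a merge would use the standard module commands:

```
go get github.com/disintegration/imaging@v1.6.2
go get github.com/go-redis/redis@v6.15.7
go get github.com/rakyll/statik@v0.1.7
go mod tidy
```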
go.sum (17 lines changed)

@@ -59,6 +59,7 @@ github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4Yn
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
github.com/census-instrumentation/opencensus-proto v0.2.0 h1:LzQXZOgg4CQfE6bFvXGM30YZL1WW/M337pXml+GrcZ4=
github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/chrislusf/raft v0.0.0-20190225081310-10d6e2182d92 h1:lM9SFsh0EPXkyJyrTJqLZPAIJBtNFP6LNkYXu2MnSZI=
@@ -92,6 +93,8 @@ github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8
github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
github.com/disintegration/imaging v1.6.1 h1:JnBbK6ECIZb1NsWIikP9pd8gIlTIRx7fuDNpU9fsxOE=
github.com/disintegration/imaging v1.6.1/go.mod h1:xuIt+sRxDFrHS0drzXUlCJthkJ8k7lkkUojDSR247MQ=
github.com/disintegration/imaging v1.6.2 h1:w1LecBlG2Lnp8B3jk5zSuNqd7b4DXhcjwek1ei82L+c=
github.com/disintegration/imaging v1.6.2/go.mod h1:44/5580QXChDfwIclfc/PCwrr44amcmDAg8hxG0Ewe4=
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/eapache/go-resiliency v1.1.0 h1:1NtRmCAqadE2FN4ZcN6g90TP3uk8cg9rn9eNK2197aU=
@@ -129,8 +132,8 @@ github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3I
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-redis/redis v6.15.2+incompatible h1:9SpNVG76gr6InJGxoZ6IuuxaCOQwDAhzyXg+Bs+0Sb4=
github.com/go-redis/redis v6.15.2+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
github.com/go-redis/redis v6.15.7+incompatible h1:3skhDh95XQMpnqeqNftPkQD9jL9e5e36z/1SUm6dy1U=
github.com/go-redis/redis v6.15.7+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
@@ -388,18 +391,25 @@ github.com/prometheus/procfs v0.0.4/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDa
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rakyll/statik v0.1.6 h1:uICcfUXpgqtw2VopbIncslhAmE5hwc4g20TEyEENBNs=
github.com/rakyll/statik v0.1.6/go.mod h1:OEi9wJV/fMUAGx1eNjq75DKDsJVuEv1U0oYdX6GX8Zs=
github.com/rakyll/statik v0.1.7 h1:OF3QCZUuyPxuGEP7B4ypUa7sB/iHtqOTDYZXGM8KOdQ=
github.com/rakyll/statik v0.1.7/go.mod h1:AlZONWzMtEnMs7W4e/1LURLiI49pIMmp6V9Unghqrcc=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 h1:dY6ETXrvDG7Sa4vE8ZQG4yqWg6UnOcbqTAahkV813vQ=
github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
<<<<<<< HEAD
github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd h1:CmH9+J6ZSsIjUK3dcGsnCnO41eRBOnY12zwkn5qVwgc=
github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk=
=======
>>>>>>> origin/master
github.com/seaweedfs/fuse v0.0.0-20190510212405-310228904eff h1:uLd5zBvf5OA67wcVRePHrFt60bR4LSskaVhgVwyk0Jg=
github.com/seaweedfs/fuse v0.0.0-20190510212405-310228904eff/go.mod h1:cubdLmQFqEUZ9vNJrznhgc3m3VMAJi/nY2Ix2axXkG0=
github.com/seaweedfs/goexif v1.0.2 h1:p+rTXYdQ2mgxd+1JaTrQ9N8DvYuw9UH9xgYmJ+Bb29E=
github.com/seaweedfs/goexif v1.0.2/go.mod h1:MrKs5LK0HXdffrdCZrW3OIMegL2xXpC6ThLyXMyjdrk=
github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
@@ -508,6 +518,9 @@ golang.org/x/image v0.0.0-20190227222117-0694c2d4d067 h1:KYGJGHOQy8oSi1fDlSpcZF0
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190829233526-b3c06291d021 h1:j6QOxNFMpEL1wIQX6TUdBPNfGZKmBOJS/vfSm8a7tdM=
golang.org/x/image v0.0.0-20190829233526-b3c06291d021/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/image v0.0.0-20200119044424-58c23975cae1 h1:5h3ngYt7+vXCDZCup/HkCQgW5XwmSvR/nA2JmJ0RErg=
golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -1,4 +1,4 @@
apiVersion: v1
description: SeaweedFS
name: seaweedfs
version: 1.71
version: 1.74
@@ -4,7 +4,7 @@ global:
  registry: ""
  repository: ""
  imageName: chrislusf/seaweedfs
  imageTag: "1.71"
  imageTag: "1.74"
  imagePullPolicy: IfNotPresent
  imagePullSecrets: imagepullsecret
  restartPolicy: Always
@@ -21,6 +21,9 @@ service SeaweedFiler {
    rpc UpdateEntry (UpdateEntryRequest) returns (UpdateEntryResponse) {
    }

    rpc AppendToEntry (AppendToEntryRequest) returns (AppendToEntryResponse) {
    }

    rpc DeleteEntry (DeleteEntryRequest) returns (DeleteEntryResponse) {
    }

@@ -42,7 +45,7 @@ service SeaweedFiler {
    rpc GetFilerConfiguration (GetFilerConfigurationRequest) returns (GetFilerConfigurationResponse) {
    }

    rpc ListenForEvents (ListenForEventsRequest) returns (stream FullEventNotification) {
    rpc SubscribeMetadata (SubscribeMetadataRequest) returns (stream SubscribeMetadataResponse) {
    }

}

@@ -123,6 +126,7 @@ message FuseAttributes {
    string user_name = 11; // for hdfs
    repeated string group_name = 12; // for hdfs
    string symlink_target = 13;
    bytes md5 = 14;
}

message CreateEntryRequest {

@@ -142,6 +146,14 @@ message UpdateEntryRequest {
message UpdateEntryResponse {
}

message AppendToEntryRequest {
    string directory = 1;
    string entry_name = 2;
    repeated FileChunk chunks = 3;
}
message AppendToEntryResponse {
}

message DeleteEntryRequest {
    string directory = 1;
    string name = 2;

@@ -230,16 +242,15 @@ message GetFilerConfigurationResponse {
    string collection = 3;
    uint32 max_mb = 4;
    string dir_buckets = 5;
    string dir_queues = 6;
    bool cipher = 7;
}

message ListenForEventsRequest {
message SubscribeMetadataRequest {
    string client_name = 1;
    string path_prefix = 2;
    int64 since_ns = 3;
}
message FullEventNotification {
message SubscribeMetadataResponse {
    string directory = 1;
    EventNotification event_notification = 2;
}
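For context, the new SubscribeMetadata stream is consumed from Go through the generated filer_pb client; the `weed watch` change later in this commit does exactly that. A minimal sketch (client setup and error handling trimmed; the `client` variable is assumed to be a connected filer_pb.SeaweedFilerClient):

```go
stream, err := client.SubscribeMetadata(context.Background(), &filer_pb.SubscribeMetadataRequest{
	ClientName: "watch",
	PathPrefix: "/buckets/",
	SinceNs:    0,
})
if err != nil {
	return err
}
for {
	// each message is a filer_pb.SubscribeMetadataResponse: directory + event notification
	resp, err := stream.Recv()
	if err != nil {
		return err
	}
	fmt.Printf("%s: %+v\n", resp.Directory, resp.EventNotification)
}
```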
unmaintained/see_log_entry/see_log_entry.go (new file, 75 lines)

@@ -0,0 +1,75 @@
package main

import (
	"flag"
	"fmt"
	"io"
	"log"
	"os"

	"github.com/golang/protobuf/proto"

	"github.com/chrislusf/seaweedfs/weed/filer2"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/util"
)

var (
	logdataFile = flag.String("logdata", "", "log data file saved under "+ filer2.SystemLogDir)
)

func main() {
	flag.Parse()

	dst, err := os.OpenFile(*logdataFile, os.O_RDONLY, 0644)
	if err != nil {
		log.Fatalf("failed to open %s: %v", *logdataFile, err)
	}
	defer dst.Close()

	err = walkLogEntryFile(dst)
	if err != nil {
		log.Fatalf("failed to visit %s: %v", *logdataFile, err)
	}

}

func walkLogEntryFile(dst *os.File) error {

	sizeBuf := make([]byte, 4)

	for {
		if n, err := dst.Read(sizeBuf); n != 4 {
			if err == io.EOF {
				return nil
			}
			return err
		}

		size := util.BytesToUint32(sizeBuf)

		data := make([]byte, int(size))

		if n, err := dst.Read(data); n != len(data) {
			return err
		}

		logEntry := &filer_pb.LogEntry{}
		err := proto.Unmarshal(data, logEntry)
		if err != nil {
			log.Printf("unexpected unmarshal filer_pb.LogEntry: %v", err)
			return nil
		}

		event := &filer_pb.SubscribeMetadataResponse{}
		err = proto.Unmarshal(logEntry.Data, event)
		if err != nil {
			log.Printf("unexpected unmarshal filer_pb.SubscribeMetadataResponse: %v", err)
			return nil
		}

		fmt.Printf("event: %+v\n", event)

	}

}
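This helper reads a metadata log segment as a sequence of length-prefixed records (a 4-byte size followed by a protobuf-encoded filer_pb.LogEntry whose Data holds a SubscribeMetadataResponse) and prints the decoded events. A typical invocation from a source checkout would look something like the following (the segment path is a placeholder):

```
go run ./unmaintained/see_log_entry -logdata /path/to/segment/file
```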
@@ -121,7 +121,6 @@ func runFilerReplicate(cmd *Command, args []string) bool {
		}
	}

	return true
}

func validateOneEnabledInput(config *viper.Viper) {
@@ -47,7 +47,7 @@ func init() {
	m.ip = cmdMaster.Flag.String("ip", "localhost", "master <ip>|<server> address")
	m.ipBind = cmdMaster.Flag.String("ip.bind", "0.0.0.0", "ip address to bind to")
	m.metaFolder = cmdMaster.Flag.String("mdir", os.TempDir(), "data directory to store meta data")
	m.peers = cmdMaster.Flag.String("peers", "", "all master nodes in comma separated ip:port list, example: 127.0.0.1:9093,127.0.0.1:9094")
	m.peers = cmdMaster.Flag.String("peers", "", "all master nodes in comma separated ip:port list, example: 127.0.0.1:9093,127.0.0.1:9094,127.0.0.1:9095")
	m.volumeSizeLimitMB = cmdMaster.Flag.Uint("volumeSizeLimitMB", 30*1000, "Master stops directing writes to oversized volumes.")
	m.volumePreallocate = cmdMaster.Flag.Bool("volumePreallocate", false, "Preallocate disk space for volumes.")
	m.pulseSeconds = cmdMaster.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats")
@@ -147,6 +147,7 @@ func startMaster(masterOption MasterOptions, masterWhiteList []string) {
}

func checkPeers(masterIp string, masterPort int, peers string) (masterAddress string, cleanedPeers []string) {
	glog.V(0).Infof("current: %s:%d peers:%s", masterIp, masterPort, peers)
	masterAddress = masterIp + ":" + strconv.Itoa(masterPort)
	if peers != "" {
		cleanedPeers = strings.Split(peers, ",")
@@ -1,5 +1,9 @@
package command

import (
	"os"
)

type MountOptions struct {
	filer *string
	filerMountRootPath *string
@@ -9,7 +13,8 @@ type MountOptions struct {
	replication *string
	ttlSec *int
	chunkSizeLimitMB *int
	chunkCacheCountLimit *int64
	cacheDir *string
	cacheSizeMB *int64
	dataCenter *string
	allowOthers *bool
	umaskString *string
@@ -32,8 +37,9 @@ func init() {
	mountOptions.collection = cmdMount.Flag.String("collection", "", "collection to create the files")
	mountOptions.replication = cmdMount.Flag.String("replication", "", "replication(e.g. 000, 001) to create to files. If empty, let filer decide.")
	mountOptions.ttlSec = cmdMount.Flag.Int("ttl", 0, "file ttl in seconds")
	mountOptions.chunkSizeLimitMB = cmdMount.Flag.Int("chunkSizeLimitMB", 4, "local write buffer size, also chunk large files")
	mountOptions.chunkCacheCountLimit = cmdMount.Flag.Int64("chunkCacheCountLimit", 1000, "number of file chunks to cache in memory")
	mountOptions.chunkSizeLimitMB = cmdMount.Flag.Int("chunkSizeLimitMB", 16, "local write buffer size, also chunk large files")
	mountOptions.cacheDir = cmdMount.Flag.String("cacheDir", os.TempDir(), "local cache directory for file chunks")
	mountOptions.cacheSizeMB = cmdMount.Flag.Int64("cacheCapacityMB", 1000, "local cache capacity in MB (0 will disable cache)")
	mountOptions.dataCenter = cmdMount.Flag.String("dataCenter", "", "prefer to write to the data center")
	mountOptions.allowOthers = cmdMount.Flag.Bool("allowOthers", true, "allows other users to access the file system")
	mountOptions.umaskString = cmdMount.Flag.String("umask", "022", "octal umask, e.g., 022, 0111")
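The mount options above replace the in-memory chunk-count cache with an on-disk cache sized in MB. Purely as an illustration (flag values are assumptions, not taken from this commit), a mount exercising the new knobs might look like:

```
weed mount -filer=localhost:8888 -dir=/mnt/seaweedfs -cacheDir=/tmp/seaweedfs_cache -cacheCapacityMB=1000
```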
@@ -129,7 +129,6 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
	}

	options = append(options, osSpecificMountOptions()...)

	if *option.allowOthers {
		options = append(options, fuse.AllowOther())
	}
@@ -137,12 +136,12 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
		options = append(options, fuse.AllowNonEmptyMount())
	}

	// mount
	c, err := fuse.Mount(dir, options...)
	if err != nil {
		glog.V(0).Infof("mount: %v", err)
		return true
	}

	defer fuse.Unmount(dir)

	util.OnInterrupt(func() {
@@ -164,7 +163,8 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
		Replication: *option.replication,
		TtlSec: int32(*option.ttlSec),
		ChunkSizeLimit: int64(chunkSizeLimitMB) * 1024 * 1024,
		ChunkCacheCountLimit: *option.chunkCacheCountLimit,
		CacheDir: *option.cacheDir,
		CacheSizeMB: *option.cacheSizeMB,
		DataCenter: *option.dataCenter,
		DirListCacheLimit: *option.dirListCacheLimit,
		EntryCacheTtl: 3 * time.Second,
@@ -8,13 +8,12 @@ import (

	"google.golang.org/grpc/reflection"

	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/messaging/broker"
	"github.com/chrislusf/seaweedfs/weed/pb"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/pb/queue_pb"
	"github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
	"github.com/chrislusf/seaweedfs/weed/security"
	weed_server "github.com/chrislusf/seaweedfs/weed/server"

	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/util"
)

@@ -23,16 +22,14 @@ var (
)

type QueueOptions struct {
	filer *string
	port *int
	defaultTtl *string
	filer *string
	port *int
}

func init() {
	cmdMsgBroker.Run = runMsgBroker // break init cycle
	messageBrokerStandaloneOptions.filer = cmdMsgBroker.Flag.String("filer", "localhost:8888", "filer server address")
	messageBrokerStandaloneOptions.port = cmdMsgBroker.Flag.Int("port", 17777, "queue server gRPC listen port")
	messageBrokerStandaloneOptions.defaultTtl = cmdMsgBroker.Flag.String("ttl", "1h", "time to live, e.g.: 1m, 1h, 1d, 1M, 1y")
}

var cmdMsgBroker = &Command{

@@ -62,9 +59,8 @@ func (msgBrokerOpt *QueueOptions) startQueueServer() bool {
		return false
	}

	filerQueuesPath := "/queues"

	grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
	grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.msg_broker")
	cipher := false

	for {
		err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {

@@ -72,8 +68,7 @@ func (msgBrokerOpt *QueueOptions) startQueueServer() bool {
			if err != nil {
				return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err)
			}
			filerQueuesPath = resp.DirQueues
			glog.V(0).Infof("Queue read filer queues dir: %s", filerQueuesPath)
			cipher = resp.Cipher
			return nil
		})
		if err != nil {

@@ -85,12 +80,13 @@ func (msgBrokerOpt *QueueOptions) startQueueServer() bool {
		}
	}

	qs, err := weed_server.NewMessageBroker(&weed_server.MessageBrokerOption{
	qs, err := broker.NewMessageBroker(&broker.MessageBrokerOption{
		Filers: []string{*msgBrokerOpt.filer},
		DefaultReplication: "",
		MaxMB: 0,
		Port: *msgBrokerOpt.port,
	})
		Cipher: cipher,
	}, grpcDialOption)

	// start grpc listener
	grpcL, err := util.NewListener(":"+strconv.Itoa(*msgBrokerOpt.port), 0)

@@ -98,7 +94,7 @@ func (msgBrokerOpt *QueueOptions) startQueueServer() bool {
		glog.Fatalf("failed to listen on grpc port %d: %v", *msgBrokerOpt.port, err)
	}
	grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.msg_broker"))
	queue_pb.RegisterSeaweedQueueServer(grpcS, qs)
	messaging_pb.RegisterSeaweedMessagingServer(grpcS, qs)
	reflection.Register(grpcS)
	grpcS.Serve(grpcL)
@@ -18,7 +18,7 @@ var cmdScaffold = &Command{
	For example, the filer.toml mysql password can be overwritten by environment variable
		export WEED_MYSQL_PASSWORD=some_password
	Environment variable rules:
		* Prefix fix with "WEED_"
		* Prefix the variable name with "WEED_"
		* Upppercase the reset of variable name.
		* Replace '.' with '_'

@@ -76,8 +76,10 @@ const (
recursive_delete = false
# directories under this folder will be automatically creating a separate bucket
buckets_folder = "/buckets"
# directories under this folder will be store message queue data
queues_folder = "/queues"
buckets_fsync = [          # a list of buckets with all write requests fsync=true
	"important_bucket",
	"should_always_fsync",
]

####################################################
# The following are filer store options

@@ -139,13 +141,13 @@ hosts=[
	"localhost:9042",
]

[redis]
[redis2]
enabled = false
address  = "localhost:6379"
password = ""
database = 0

[redis_cluster]
[redis_cluster2]
enabled = false
addresses = [
    "localhost:30001",

@@ -260,6 +262,7 @@ aws_secret_access_key = ""     # if empty, loads from the shared credentials fil
region = "us-east-2"
bucket = "your_bucket_name"    # an existing bucket
directory = "/"                # destination directory
endpoint = ""

[sink.google_cloud_storage]
# read credentials doc at https://cloud.google.com/docs/authentication/getting-started

@@ -358,11 +361,13 @@ scripts = """
	ec.rebuild -force
	ec.balance -force
	volume.balance -force
	volume.fix.replication
"""
sleep_minutes = 17          # sleep minutes between each script execution

[master.filer]
default_filer_url = "http://localhost:8888/"
default = "localhost:8888"    # used by maintenance scripts if the scripts needs to use fs related commands


[master.sequencer]
type = "memory"     # Choose [memory|etcd] type for storing the file id sequence
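Taken together, the environment-variable rules above mean a `filer.toml` key such as `redis2.address` can be overridden without editing the file. For example (values are illustrative, following the same pattern as the WEED_MYSQL_PASSWORD example in the help text):

```
export WEED_REDIS2_ENABLED=true
export WEED_REDIS2_ADDRESS=localhost:6379
```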
@@ -9,14 +9,14 @@ import (
)

var (
	shellOptions shell.ShellOptions
	shellInitialFilerUrl *string
	shellOptions shell.ShellOptions
	shellInitialFiler *string
)

func init() {
	cmdShell.Run = runShell // break init cycle
	shellOptions.Masters = cmdShell.Flag.String("master", "localhost:9333", "comma-separated master servers")
	shellInitialFilerUrl = cmdShell.Flag.String("filer.url", "http://localhost:8888/", "initial filer url")
	shellInitialFiler = cmdShell.Flag.String("filer", "localhost:8888", "filer host and port")
}

var cmdShell = &Command{

@@ -32,12 +32,13 @@ func runShell(command *Command, args []string) bool {
	util.LoadConfiguration("security", false)
	shellOptions.GrpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client")

	var filerPwdErr error
	shellOptions.FilerHost, shellOptions.FilerPort, shellOptions.Directory, filerPwdErr = util.ParseFilerUrl(*shellInitialFilerUrl)
	if filerPwdErr != nil {
		fmt.Printf("failed to parse url filer.url=%s : %v\n", *shellInitialFilerUrl, filerPwdErr)
	var err error
	shellOptions.FilerHost, shellOptions.FilerPort, err = util.ParseHostPort(*shellInitialFiler)
	if err != nil {
		fmt.Printf("failed to parse filer %s: %v\n", *shellInitialFiler, err)
		return false
	}
	shellOptions.Directory = "/"

	shell.RunShell(shellOptions)
@@ -127,7 +127,8 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
	}

	if *v.ip == "" {
		*v.ip = "127.0.0.1"
		*v.ip = util.DetectedHostAddress()
		glog.V(0).Infof("detected volume server ip address: %v", *v.ip)
	}

	if *v.publicPort == 0 {
@@ -34,7 +34,7 @@ func runWatch(cmd *Command, args []string) bool {

	watchErr := pb.WithFilerClient(*watchFiler, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {

		stream, err := client.ListenForEvents(context.Background(), &filer_pb.ListenForEventsRequest{
		stream, err := client.SubscribeMetadata(context.Background(), &filer_pb.SubscribeMetadataRequest{
			ClientName: "watch",
			PathPrefix: *watchTarget,
			SinceNs: 0,
@@ -4,6 +4,7 @@ import (
	"context"
	"fmt"
	"net/http"
	"os"
	"os/user"
	"strconv"
	"time"

@@ -26,6 +27,8 @@ type WebDavOption struct {
	collection *string
	tlsPrivateKey *string
	tlsCertificate *string
	cacheDir *string
	cacheSizeMB *int64
}

func init() {

@@ -35,6 +38,8 @@ func init() {
	webDavStandaloneOptions.collection = cmdWebDav.Flag.String("collection", "", "collection to create the files")
	webDavStandaloneOptions.tlsPrivateKey = cmdWebDav.Flag.String("key.file", "", "path to the TLS private key file")
	webDavStandaloneOptions.tlsCertificate = cmdWebDav.Flag.String("cert.file", "", "path to the TLS certificate file")
	webDavStandaloneOptions.cacheDir = cmdWebDav.Flag.String("cacheDir", os.TempDir(), "local cache directory for file chunks")
	webDavStandaloneOptions.cacheSizeMB = cmdWebDav.Flag.Int64("cacheCapacityMB", 1000, "local cache capacity in MB")
}

var cmdWebDav = &Command{

@@ -105,6 +110,8 @@ func (wo *WebDavOption) startWebDav() bool {
		Uid: uid,
		Gid: gid,
		Cipher: cipher,
		CacheDir: *wo.cacheDir,
		CacheSizeMB: *wo.cacheSizeMB,
	})
	if webdavServer_err != nil {
		glog.Fatalf("WebDav Server startup error: %v", webdavServer_err)
@@ -21,6 +21,7 @@ type Attr struct {
	UserName string
	GroupNames []string
	SymlinkTarget string
	Md5 []byte
}

func (attr Attr) IsDirectory() bool {
@@ -52,6 +52,7 @@ func EntryAttributeToPb(entry *Entry) *filer_pb.FuseAttributes {
		UserName: entry.Attr.UserName,
		GroupName: entry.Attr.GroupNames,
		SymlinkTarget: entry.Attr.SymlinkTarget,
		Md5: entry.Attr.Md5,
	}
}

@@ -71,6 +72,7 @@ func PbToEntryAttribute(attr *filer_pb.FuseAttributes) Attr {
	t.UserName = attr.UserName
	t.GroupNames = attr.GroupName
	t.SymlinkTarget = attr.SymlinkTarget
	t.Md5 = attr.Md5

	return t
}

@@ -93,6 +95,10 @@ func EqualEntry(a, b *Entry) bool {
		return false
	}

	if !bytes.Equal(a.Md5, b.Md5) {
		return false
	}

	for i := 0; i < len(a.Chunks); i++ {
		if !proto.Equal(a.Chunks[i], b.Chunks[i]) {
			return false
@@ -20,7 +20,21 @@ func TotalSize(chunks []*filer_pb.FileChunk) (size uint64) {
	return
}

func ETag(chunks []*filer_pb.FileChunk) (etag string) {
func ETag(entry *filer_pb.Entry) (etag string) {
	if entry.Attributes == nil || entry.Attributes.Md5 == nil {
		return ETagChunks(entry.Chunks)
	}
	return fmt.Sprintf("%x", entry.Attributes.Md5)
}

func ETagEntry(entry *Entry) (etag string) {
	if entry.Attr.Md5 == nil {
		return ETagChunks(entry.Chunks)
	}
	return fmt.Sprintf("%x", entry.Attr.Md5)
}

func ETagChunks(chunks []*filer_pb.FileChunk) (etag string) {
	if len(chunks) == 1 {
		return chunks[0].ETag
	}

@@ -71,11 +85,15 @@ type ChunkView struct {
	Offset int64
	Size uint64
	LogicOffset int64
	IsFullChunk bool
	ChunkSize uint64
	CipherKey []byte
	IsGzipped bool
}

func (cv *ChunkView) IsFullChunk() bool {
	return cv.Size == cv.ChunkSize
}

func ViewFromChunks(chunks []*filer_pb.FileChunk, offset int64, size int64) (views []*ChunkView) {

	visibles := NonOverlappingVisibleIntervals(chunks)

@@ -97,13 +115,12 @@ func ViewFromVisibleIntervals(visibles []VisibleInterval, offset int64, size int
	for _, chunk := range visibles {

		if chunk.start <= offset && offset < chunk.stop && offset < stop {
			isFullChunk := chunk.isFullChunk && chunk.start == offset && chunk.stop <= stop
			views = append(views, &ChunkView{
				FileId: chunk.fileId,
				Offset: offset - chunk.start, // offset is the data starting location in this file id
				Size: uint64(min(chunk.stop, stop) - offset),
				LogicOffset: offset,
				IsFullChunk: isFullChunk,
				ChunkSize: chunk.chunkSize,
				CipherKey: chunk.cipherKey,
				IsGzipped: chunk.isGzipped,
			})

@@ -132,7 +149,7 @@ var bufPool = sync.Pool{

func MergeIntoVisibles(visibles, newVisibles []VisibleInterval, chunk *filer_pb.FileChunk) []VisibleInterval {

	newV := newVisibleInterval(chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.GetFileIdString(), chunk.Mtime, true, chunk.CipherKey, chunk.IsGzipped)
	newV := newVisibleInterval(chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.GetFileIdString(), chunk.Mtime, chunk.Size, chunk.CipherKey, chunk.IsGzipped)

	length := len(visibles)
	if length == 0 {

@@ -146,11 +163,11 @@ func MergeIntoVisibles(visibles, newVisibles []VisibleInterval, chunk *filer_pb.
	logPrintf("  before", visibles)
	for _, v := range visibles {
		if v.start < chunk.Offset && chunk.Offset < v.stop {
			newVisibles = append(newVisibles, newVisibleInterval(v.start, chunk.Offset, v.fileId, v.modifiedTime, false, v.cipherKey, v.isGzipped))
			newVisibles = append(newVisibles, newVisibleInterval(v.start, chunk.Offset, v.fileId, v.modifiedTime, chunk.Size, v.cipherKey, v.isGzipped))
		}
		chunkStop := chunk.Offset + int64(chunk.Size)
		if v.start < chunkStop && chunkStop < v.stop {
			newVisibles = append(newVisibles, newVisibleInterval(chunkStop, v.stop, v.fileId, v.modifiedTime, false, v.cipherKey, v.isGzipped))
			newVisibles = append(newVisibles, newVisibleInterval(chunkStop, v.stop, v.fileId, v.modifiedTime, chunk.Size, v.cipherKey, v.isGzipped))
		}
		if chunkStop <= v.start || v.stop <= chunk.Offset {
			newVisibles = append(newVisibles, v)

@@ -202,18 +219,18 @@ type VisibleInterval struct {
	stop int64
	modifiedTime int64
	fileId string
	isFullChunk bool
	chunkSize uint64
	cipherKey []byte
	isGzipped bool
}

func newVisibleInterval(start, stop int64, fileId string, modifiedTime int64, isFullChunk bool, cipherKey []byte, isGzipped bool) VisibleInterval {
func newVisibleInterval(start, stop int64, fileId string, modifiedTime int64, chunkSize uint64, cipherKey []byte, isGzipped bool) VisibleInterval {
	return VisibleInterval{
		start: start,
		stop: stop,
		fileId: fileId,
		modifiedTime: modifiedTime,
		isFullChunk: isFullChunk,
		chunkSize: chunkSize,
		cipherKey: cipherKey,
		isGzipped: isGzipped,
	}
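A small sketch of how the reworked ETag helpers behave, using the functions declared above (the `md5Sum` and `chunks` variables are assumed placeholders):

```go
// If the entry carries an MD5 (now stored as FuseAttributes.md5 / Attr.Md5),
// ETag reports it as hex; otherwise it falls back to the per-chunk ETags.
etag := filer2.ETag(&filer_pb.Entry{
	Attributes: &filer_pb.FuseAttributes{Md5: md5Sum},
	Chunks:     chunks,
}) // equivalent to fmt.Sprintf("%x", md5Sum)
```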
@@ -13,8 +13,8 @@ import (

	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/queue"
	"github.com/chrislusf/seaweedfs/weed/util"
	"github.com/chrislusf/seaweedfs/weed/util/log_buffer"
	"github.com/chrislusf/seaweedfs/weed/wdclient"
)

@@ -32,20 +32,24 @@ type Filer struct {
	fileIdDeletionQueue *util.UnboundedQueue
	GrpcDialOption grpc.DialOption
	DirBucketsPath string
	DirQueuesPath string
	FsyncBuckets []string
	buckets *FilerBuckets
	Cipher bool
	metaLogBuffer *queue.LogBuffer
	metaLogBuffer *log_buffer.LogBuffer
	metaLogCollection string
	metaLogReplication string
}

func NewFiler(masters []string, grpcDialOption grpc.DialOption, filerGrpcPort uint32, notifyFn func()) *Filer {
func NewFiler(masters []string, grpcDialOption grpc.DialOption, filerGrpcPort uint32, collection string, replication string, notifyFn func()) *Filer {
	f := &Filer{
		directoryCache: ccache.New(ccache.Configure().MaxSize(1000).ItemsToPrune(100)),
		MasterClient: wdclient.NewMasterClient(grpcDialOption, "filer", filerGrpcPort, masters),
		fileIdDeletionQueue: util.NewUnboundedQueue(),
		GrpcDialOption: grpcDialOption,
	}
	f.metaLogBuffer = queue.NewLogBuffer(time.Minute, f.logFlushFunc, notifyFn)
	f.metaLogBuffer = log_buffer.NewLogBuffer(time.Minute, f.logFlushFunc, notifyFn)
	f.metaLogCollection = collection
	f.metaLogReplication = replication

	go f.loopProcessingDeletion()
@@ -13,6 +13,7 @@ type BucketName string
type BucketOption struct {
	Name BucketName
	Replication string
	fsync bool
}
type FilerBuckets struct {
	dirBucketsPath string

@@ -20,36 +21,42 @@ type FilerBuckets struct {
	sync.RWMutex
}

func (f *Filer) LoadBuckets(dirBucketsPath string) {
func (f *Filer) LoadBuckets() {

	f.buckets = &FilerBuckets{
		buckets: make(map[BucketName]*BucketOption),
	}
	f.DirBucketsPath = dirBucketsPath

	limit := math.MaxInt32

	entries, err := f.ListDirectoryEntries(context.Background(), util.FullPath(dirBucketsPath), "", false, limit)
	entries, err := f.ListDirectoryEntries(context.Background(), util.FullPath(f.DirBucketsPath), "", false, limit)

	if err != nil {
		glog.V(1).Infof("no buckets found: %v", err)
		return
	}

	shouldFsyncMap := make(map[string]bool)
	for _, bucket := range f.FsyncBuckets {
		shouldFsyncMap[bucket] = true
	}

	glog.V(1).Infof("buckets found: %d", len(entries))

	f.buckets.Lock()
	for _, entry := range entries {
		_, shouldFsnyc := shouldFsyncMap[entry.Name()]
		f.buckets.buckets[BucketName(entry.Name())] = &BucketOption{
			Name: BucketName(entry.Name()),
			Replication: entry.Replication,
			fsync: shouldFsnyc,
		}
	}
	f.buckets.Unlock()

}

func (f *Filer) ReadBucketOption(buketName string) (replication string) {
func (f *Filer) ReadBucketOption(buketName string) (replication string, fsync bool) {

	f.buckets.RLock()
	defer f.buckets.RUnlock()

@@ -57,9 +64,9 @@ func (f *Filer) ReadBucketOption(buketName string) (replication string) {
	option, found := f.buckets.buckets[BucketName(buketName)]

	if !found {
		return ""
		return "", false
	}
	return option.Replication
	return option.Replication, option.fsync

}
@@ -25,7 +25,7 @@ func (f *Filer) NotifyUpdateEvent(oldEntry, newEntry *Entry, deleteChunks bool)

	// println("fullpath:", fullpath)

	if strings.HasPrefix(fullpath, "/.meta") {
	if strings.HasPrefix(fullpath, SystemLogDir) {
		return
	}

@@ -45,32 +45,34 @@ func (f *Filer) NotifyUpdateEvent(oldEntry, newEntry *Entry, deleteChunks bool)
		notification.Queue.SendMessage(fullpath, eventNotification)
	}

	f.logMetaEvent(time.Now(), fullpath, eventNotification)
	f.logMetaEvent(fullpath, eventNotification)

}

func (f *Filer) logMetaEvent(ts time.Time, fullpath string, eventNotification *filer_pb.EventNotification) {
func (f *Filer) logMetaEvent(fullpath string, eventNotification *filer_pb.EventNotification) {

	dir, _ := util.FullPath(fullpath).DirAndName()

	event := &filer_pb.FullEventNotification{
	event := &filer_pb.SubscribeMetadataResponse{
		Directory: dir,
		EventNotification: eventNotification,
	}
	data, err := proto.Marshal(event)
	if err != nil {
		glog.Errorf("failed to marshal filer_pb.FullEventNotification %+v: %v", event, err)
		glog.Errorf("failed to marshal filer_pb.SubscribeMetadataResponse %+v: %v", event, err)
		return
	}

	f.metaLogBuffer.AddToBuffer(ts, []byte(dir), data)
	f.metaLogBuffer.AddToBuffer([]byte(dir), data)

}

func (f *Filer) logFlushFunc(startTime, stopTime time.Time, buf []byte) {
	targetFile := fmt.Sprintf("/.meta/log/%04d/%02d/%02d/%02d/%02d/%02d.%09d.log",

	targetFile := fmt.Sprintf("%s/%04d-%02d-%02d/%02d-%02d.segment", SystemLogDir,
		startTime.Year(), startTime.Month(), startTime.Day(), startTime.Hour(), startTime.Minute(),
		startTime.Second(), startTime.Nanosecond())
		// startTime.Second(), startTime.Nanosecond(),
	)

	if err := f.appendToFile(targetFile, buf); err != nil {
		glog.V(0).Infof("log write failed %s: %v", targetFile, err)

@@ -95,11 +97,11 @@ func (f *Filer) ReadLogBuffer(lastReadTime time.Time, eachEventFn func(fullpath
		return lastReadTime, fmt.Errorf("unexpected unmarshal filer_pb.LogEntry: %v", err)
	}

	event := &filer_pb.FullEventNotification{}
	event := &filer_pb.SubscribeMetadataResponse{}
	err = proto.Unmarshal(logEntry.Data, event)
	if err != nil {
		glog.Errorf("unexpected unmarshal filer_pb.FullEventNotification: %v", err)
		return lastReadTime, fmt.Errorf("unexpected unmarshal filer_pb.FullEventNotification: %v", err)
		glog.Errorf("unexpected unmarshal filer_pb.SubscribeMetadataResponse: %v", err)
		return lastReadTime, fmt.Errorf("unexpected unmarshal filer_pb.SubscribeMetadataResponse: %v", err)
	}

	err = eachEventFn(event.Directory, event.EventNotification)
@@ -13,25 +13,10 @@ import (

func (f *Filer) appendToFile(targetFile string, data []byte) error {

	// assign a volume location
	assignRequest := &operation.VolumeAssignRequest{
		Count: 1,
	assignResult, uploadResult, err2 := f.assignAndUpload(data)
	if err2 != nil {
		return err2
	}
	assignResult, err := operation.Assign(f.GetMaster(), f.GrpcDialOption, assignRequest)
	if err != nil {
		return fmt.Errorf("AssignVolume: %v", err)
	}
	if assignResult.Error != "" {
		return fmt.Errorf("AssignVolume error: %v", assignResult.Error)
	}

	// upload data
	targetUrl := "http://" + assignResult.Url + "/" + assignResult.Fid
	uploadResult, err := operation.UploadData(targetUrl, "", false, data, false, "", nil, assignResult.Auth)
	if err != nil {
		return fmt.Errorf("upload data %s: %v", targetUrl, err)
	}
	// println("uploaded to", targetUrl)

	// find out existing entry
	fullpath := util.FullPath(targetFile)

@@ -68,3 +53,29 @@ func (f *Filer) appendToFile(targetFile string, data []byte) error {

	return err
}

func (f *Filer) assignAndUpload(data []byte) (*operation.AssignResult, *operation.UploadResult, error) {
	// assign a volume location
	assignRequest := &operation.VolumeAssignRequest{
		Count: 1,
		Collection: f.metaLogCollection,
		Replication: f.metaLogReplication,
		WritableVolumeCount: 1,
	}
	assignResult, err := operation.Assign(f.GetMaster(), f.GrpcDialOption, assignRequest)
	if err != nil {
		return nil, nil, fmt.Errorf("AssignVolume: %v", err)
	}
	if assignResult.Error != "" {
		return nil, nil, fmt.Errorf("AssignVolume error: %v", assignResult.Error)
	}

	// upload data
	targetUrl := "http://" + assignResult.Url + "/" + assignResult.Fid
	uploadResult, err := operation.UploadData(targetUrl, "", f.Cipher, data, false, "", nil, assignResult.Auth)
	if err != nil {
		return nil, nil, fmt.Errorf("upload data %s: %v", targetUrl, err)
	}
	// println("uploaded to", targetUrl)
	return assignResult, uploadResult, nil
}
@@ -11,7 +11,7 @@ import (
)

func TestCreateAndFind(t *testing.T) {
	filer := filer2.NewFiler(nil, nil, 0, nil)
	filer := filer2.NewFiler(nil, nil, 0, "", "", nil)
	dir, _ := ioutil.TempDir("", "seaweedfs_filer_test")
	defer os.RemoveAll(dir)
	store := &LevelDBStore{}

@@ -66,7 +66,7 @@ func TestCreateAndFind(t *testing.T) {
}

func TestEmptyRoot(t *testing.T) {
	filer := filer2.NewFiler(nil, nil, 0, nil)
	filer := filer2.NewFiler(nil, nil, 0, "", "", nil)
	dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2")
	defer os.RemoveAll(dir)
	store := &LevelDBStore{}
@@ -11,7 +11,7 @@ import (
)

func TestCreateAndFind(t *testing.T) {
	filer := filer2.NewFiler(nil, nil, 0, nil)
	filer := filer2.NewFiler(nil, nil, 0, "", "", nil)
	dir, _ := ioutil.TempDir("", "seaweedfs_filer_test")
	defer os.RemoveAll(dir)
	store := &LevelDB2Store{}

@@ -66,7 +66,7 @@ func TestCreateAndFind(t *testing.T) {
}

func TestEmptyRoot(t *testing.T) {
	filer := filer2.NewFiler(nil, nil, 0, nil)
	filer := filer2.NewFiler(nil, nil, 0, "", "", nil)
	dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2")
	defer os.RemoveAll(dir)
	store := &LevelDB2Store{}
@@ -9,8 +9,8 @@ import (

	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/pb/pb_cache"
	"github.com/chrislusf/seaweedfs/weed/util"
	"github.com/chrislusf/seaweedfs/weed/util/chunk_cache"
	"github.com/chrislusf/seaweedfs/weed/wdclient"
)

@@ -22,12 +22,12 @@ type ChunkReadAt struct {
	lookupFileId func(fileId string) (targetUrl string, err error)
	readerLock sync.Mutex

	chunkCache *pb_cache.ChunkCache
	chunkCache *chunk_cache.ChunkCache
}

// var _ = io.ReaderAt(&ChunkReadAt{})

func NewChunkReaderAtFromClient(filerClient filer_pb.FilerClient, chunkViews []*ChunkView, chunkCache *pb_cache.ChunkCache) *ChunkReadAt {
func NewChunkReaderAtFromClient(filerClient filer_pb.FilerClient, chunkViews []*ChunkView, chunkCache *chunk_cache.ChunkCache) *ChunkReadAt {

	return &ChunkReadAt{
		chunkViews: chunkViews,

@@ -105,9 +105,11 @@ func (c *ChunkReadAt) fetchChunkData(chunkView *ChunkView) (data []byte, err err

	// fmt.Printf("fetching %s [%d,%d)\n", chunkView.FileId, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size))

	chunkData := c.chunkCache.GetChunk(chunkView.FileId)
	hasDataInCache := false
	chunkData := c.chunkCache.GetChunk(chunkView.FileId, chunkView.ChunkSize)
	if chunkData != nil {
		glog.V(3).Infof("cache hit %s [%d,%d)", chunkView.FileId, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size))
		hasDataInCache = true
	} else {
		chunkData, err = c.doFetchFullChunkData(chunkView.FileId, chunkView.CipherKey, chunkView.IsGzipped)
		if err != nil {

@@ -121,7 +123,9 @@ func (c *ChunkReadAt) fetchChunkData(chunkView *ChunkView) (data []byte, err err

	data = chunkData[chunkView.Offset : chunkView.Offset+int64(chunkView.Size)]

	c.chunkCache.SetChunk(chunkView.FileId, chunkData)
	if !hasDataInCache {
		c.chunkCache.SetChunk(chunkView.FileId, chunkData)
	}

	return data, nil
}
weed/filer2/redis2/redis_cluster_store.go (new file, 42 lines)

@@ -0,0 +1,42 @@
package redis2

import (
	"github.com/chrislusf/seaweedfs/weed/filer2"
	"github.com/chrislusf/seaweedfs/weed/util"
	"github.com/go-redis/redis"
)

func init() {
	filer2.Stores = append(filer2.Stores, &RedisCluster2Store{})
}

type RedisCluster2Store struct {
	UniversalRedis2Store
}

func (store *RedisCluster2Store) GetName() string {
	return "redis_cluster2"
}

func (store *RedisCluster2Store) Initialize(configuration util.Configuration, prefix string) (err error) {

	configuration.SetDefault(prefix+"useReadOnly", true)
	configuration.SetDefault(prefix+"routeByLatency", true)

	return store.initialize(
		configuration.GetStringSlice(prefix+"addresses"),
		configuration.GetString(prefix+"password"),
		configuration.GetBool(prefix+"useReadOnly"),
		configuration.GetBool(prefix+"routeByLatency"),
	)
}

func (store *RedisCluster2Store) initialize(addresses []string, password string, readOnly, routeByLatency bool) (err error) {
	store.Client = redis.NewClusterClient(&redis.ClusterOptions{
		Addrs: addresses,
		Password: password,
		ReadOnly: readOnly,
		RouteByLatency: routeByLatency,
	})
	return
}
weed/filer2/redis2/redis_store.go (new file, 36 lines)

@@ -0,0 +1,36 @@
package redis2

import (
	"github.com/chrislusf/seaweedfs/weed/filer2"
	"github.com/chrislusf/seaweedfs/weed/util"
	"github.com/go-redis/redis"
)

func init() {
	filer2.Stores = append(filer2.Stores, &Redis2Store{})
}

type Redis2Store struct {
	UniversalRedis2Store
}

func (store *Redis2Store) GetName() string {
	return "redis2"
}

func (store *Redis2Store) Initialize(configuration util.Configuration, prefix string) (err error) {
	return store.initialize(
		configuration.GetString(prefix+"address"),
		configuration.GetString(prefix+"password"),
		configuration.GetInt(prefix+"database"),
	)
}

func (store *Redis2Store) initialize(hostPort string, password string, database int) (err error) {
	store.Client = redis.NewClient(&redis.Options{
		Addr: hostPort,
		Password: password,
		DB: database,
	})
	return
}
weed/filer2/redis2/universal_redis_store.go (new file, 162 lines)

@@ -0,0 +1,162 @@
package redis2

import (
	"context"
	"fmt"
	"time"

	"github.com/go-redis/redis"

	"github.com/chrislusf/seaweedfs/weed/filer2"
	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/util"
)

const (
	DIR_LIST_MARKER = "\x00"
)

type UniversalRedis2Store struct {
	Client redis.UniversalClient
}

func (store *UniversalRedis2Store) BeginTransaction(ctx context.Context) (context.Context, error) {
	return ctx, nil
}
func (store *UniversalRedis2Store) CommitTransaction(ctx context.Context) error {
	return nil
}
func (store *UniversalRedis2Store) RollbackTransaction(ctx context.Context) error {
	return nil
}

func (store *UniversalRedis2Store) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) {

	value, err := entry.EncodeAttributesAndChunks()
	if err != nil {
		return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
	}

	if err = store.Client.Set(string(entry.FullPath), value, time.Duration(entry.TtlSec)*time.Second).Err(); err != nil {
		return fmt.Errorf("persisting %s : %v", entry.FullPath, err)
	}

	dir, name := entry.FullPath.DirAndName()
	if name != "" {
		if err = store.Client.ZAddNX(genDirectoryListKey(dir), redis.Z{Score: 0, Member: name}).Err(); err != nil {
			return fmt.Errorf("persisting %s in parent dir: %v", entry.FullPath, err)
		}
	}

	return nil
}

func (store *UniversalRedis2Store) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) {

	return store.InsertEntry(ctx, entry)
}

func (store *UniversalRedis2Store) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer2.Entry, err error) {

	data, err := store.Client.Get(string(fullpath)).Result()
	if err == redis.Nil {
		return nil, filer_pb.ErrNotFound
	}

	if err != nil {
		return nil, fmt.Errorf("get %s : %v", fullpath, err)
	}

	entry = &filer2.Entry{
		FullPath: fullpath,
	}
	err = entry.DecodeAttributesAndChunks([]byte(data))
	if err != nil {
		return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err)
	}

	return entry, nil
}

func (store *UniversalRedis2Store) DeleteEntry(ctx context.Context, fullpath util.FullPath) (err error) {

	_, err = store.Client.Del(string(fullpath)).Result()

	if err != nil {
		return fmt.Errorf("delete %s : %v", fullpath, err)
	}

	dir, name := fullpath.DirAndName()
	if name != "" {
		_, err = store.Client.ZRem(genDirectoryListKey(dir), name).Result()
		if err != nil {
			return fmt.Errorf("delete %s in parent dir: %v", fullpath, err)
		}
	}

	return nil
}

func (store *UniversalRedis2Store) DeleteFolderChildren(ctx context.Context, fullpath util.FullPath) (err error) {

	members, err := store.Client.ZRange(genDirectoryListKey(string(fullpath)), 0, -1).Result()
	if err != nil {
		return fmt.Errorf("delete folder %s : %v", fullpath, err)
	}

	for _, fileName := range members {
		path := util.NewFullPath(string(fullpath), fileName)
		_, err = store.Client.Del(string(path)).Result()
		if err != nil {
			return fmt.Errorf("delete %s in parent dir: %v", fullpath, err)
		}
	}

	return nil
}

func (store *UniversalRedis2Store) ListDirectoryEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool,
	limit int) (entries []*filer2.Entry, err error) {

	dirListKey := genDirectoryListKey(string(fullpath))
	start := int64(0)
	if startFileName != "" {
		start, _ = store.Client.ZRank(dirListKey, startFileName).Result()
		if !inclusive {
			start++
		}
	}
	members, err := store.Client.ZRange(dirListKey, start, start+int64(limit)-1).Result()
	if err != nil {
		return nil, fmt.Errorf("list %s : %v", fullpath, err)
	}

	// fetch entry meta
	for _, fileName := range members {
		path := util.NewFullPath(string(fullpath), fileName)
		entry, err := store.FindEntry(ctx, path)
		if err != nil {
			glog.V(0).Infof("list %s : %v", path, err)
		} else {
			if entry.TtlSec > 0 {
				if entry.Attr.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) {
					store.Client.Del(string(path)).Result()
					store.Client.ZRem(dirListKey, fileName).Result()
					continue
				}
			}
			entries = append(entries, entry)
		}
	}

	return entries, err
}

func genDirectoryListKey(dir string) (dirList string) {
	return dir + DIR_LIST_MARKER
}

func (store *UniversalRedis2Store) Shutdown() {
	store.Client.Close()
}
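The redis2 store keeps one Redis string per entry and one sorted set per directory, which is what lets ListDirectoryEntries page through children with plain ZRANGE/ZRANK calls. Roughly, the key layout implied by the code above is:

```
<full path>          -> encoded attributes + chunks (string value, TTL = entry TtlSec)
<dir path> + "\x00"  -> sorted set of child names, all with score 0 (lexicographic order)
```

It is enabled through the `[redis2]` / `[redis_cluster2]` sections added to the scaffold `filer.toml` earlier in this commit.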
@@ -31,7 +31,7 @@ func StreamContent(masterClient *wdclient.MasterClient, w io.Writer, chunks []*f
	for _, chunkView := range chunkViews {

		urlString := fileId2Url[chunkView.FileId]
		err := util.ReadUrlAsStream(urlString, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk, chunkView.Offset, int(chunkView.Size), func(data []byte) {
		err := util.ReadUrlAsStream(urlString, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.Offset, int(chunkView.Size), func(data []byte) {
			w.Write(data)
		})
		if err != nil {

@@ -128,7 +128,7 @@ func (c *ChunkStreamReader) fetchChunkToBuffer(chunkView *ChunkView) error {
		return err
	}
	var buffer bytes.Buffer
	err = util.ReadUrlAsStream(urlString, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk, chunkView.Offset, int(chunkView.Size), func(data []byte) {
	err = util.ReadUrlAsStream(urlString, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.Offset, int(chunkView.Size), func(data []byte) {
		buffer.Write(data)
	})
	if err != nil {
weed/filer2/topics.go (new file, 6 lines)

@@ -0,0 +1,6 @@
package filer2

const (
	TopicsDir = "/topics"
	SystemLogDir = TopicsDir + "/.system/log"
)
@@ -58,7 +58,7 @@ func (dir *Dir) Attr(ctx context.Context, attr *fuse.Attr) error {
	attr.Gid = dir.entry.Attributes.Gid
	attr.Uid = dir.entry.Attributes.Uid

	glog.V(3).Infof("dir Attr %s, attr: %+v", dir.FullPath(), attr)
	glog.V(4).Infof("dir Attr %s, attr: %+v", dir.FullPath(), attr)

	return nil
}

@@ -200,7 +200,7 @@ func (dir *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, err

func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.LookupResponse) (node fs.Node, err error) {

	glog.V(4).Infof("dir Lookup %s: %s", dir.FullPath(), req.Name)
	glog.V(4).Infof("dir Lookup %s: %s by %s", dir.FullPath(), req.Name, req.Header.String())

	fullFilePath := util.NewFullPath(dir.FullPath(), req.Name)
	entry := dir.wfs.cacheGet(fullFilePath)
@ -125,16 +125,18 @@ func (pages *ContinuousDirtyPages) saveExistingLargestPageToStorage() (chunk *fi
|
|||
return nil, false, nil
|
||||
}
|
||||
|
||||
chunk, err = pages.saveToStorage(maxList.ToReader(), maxList.Offset(), maxList.Size())
|
||||
if err == nil {
|
||||
hasSavedData = true
|
||||
glog.V(3).Infof("%s saveToStorage [%d,%d) %s", pages.f.fullpath(), maxList.Offset(), maxList.Offset()+maxList.Size(), chunk.FileId)
|
||||
} else {
|
||||
glog.V(0).Infof("%s saveToStorage [%d,%d): %v", pages.f.fullpath(), maxList.Offset(), maxList.Offset()+maxList.Size(), err)
|
||||
return
|
||||
for {
|
||||
chunk, err = pages.saveToStorage(maxList.ToReader(), maxList.Offset(), maxList.Size())
|
||||
if err == nil {
|
||||
hasSavedData = true
|
||||
glog.V(3).Infof("%s saveToStorage [%d,%d) %s", pages.f.fullpath(), maxList.Offset(), maxList.Offset()+maxList.Size(), chunk.FileId)
|
||||
return
|
||||
} else {
|
||||
glog.V(0).Infof("%s saveToStorage [%d,%d): %v", pages.f.fullpath(), maxList.Offset(), maxList.Offset()+maxList.Size(), err)
|
||||
time.Sleep(5 * time.Second)
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (pages *ContinuousDirtyPages) saveToStorage(reader io.Reader, offset int64, size int64) (*filer_pb.FileChunk, error) {
|
||||
|
|
|
@ -4,12 +4,9 @@ import (
|
|||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"mime"
|
||||
"path"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/gabriel-vasile/mimetype"
|
||||
|
||||
"github.com/chrislusf/seaweedfs/weed/filer2"
|
||||
"github.com/chrislusf/seaweedfs/weed/glog"
|
||||
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
||||
|
@ -33,12 +30,16 @@ type FileHandle struct {
|
|||
}
|
||||
|
||||
func newFileHandle(file *File, uid, gid uint32) *FileHandle {
|
||||
return &FileHandle{
|
||||
fh := &FileHandle{
|
||||
f: file,
|
||||
dirtyPages: newDirtyPages(file),
|
||||
Uid: uid,
|
||||
Gid: gid,
|
||||
}
|
||||
if fh.f.entry != nil {
|
||||
fh.f.entry.Attributes.FileSize = filer2.TotalSize(fh.f.entry.Chunks)
|
||||
}
|
||||
return fh
|
||||
}
|
||||
|
||||
var _ = fs.Handle(&FileHandle{})
|
||||
|
@ -110,26 +111,23 @@ func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) {
|
|||
func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) error {
|
||||
|
||||
// write the request to volume servers
|
||||
data := make([]byte, len(req.Data))
|
||||
copy(data, req.Data)
|
||||
|
||||
fh.f.entry.Attributes.FileSize = uint64(max(req.Offset+int64(len(req.Data)), int64(fh.f.entry.Attributes.FileSize)))
|
||||
fh.f.entry.Attributes.FileSize = uint64(max(req.Offset+int64(len(data)), int64(fh.f.entry.Attributes.FileSize)))
|
||||
// glog.V(0).Infof("%v write [%d,%d)", fh.f.fullpath(), req.Offset, req.Offset+int64(len(req.Data)))
|
||||
|
||||
chunks, err := fh.dirtyPages.AddPage(req.Offset, req.Data)
|
||||
chunks, err := fh.dirtyPages.AddPage(req.Offset, data)
|
||||
if err != nil {
|
||||
glog.Errorf("%v write fh %d: [%d,%d): %v", fh.f.fullpath(), fh.handle, req.Offset, req.Offset+int64(len(req.Data)), err)
|
||||
glog.Errorf("%v write fh %d: [%d,%d): %v", fh.f.fullpath(), fh.handle, req.Offset, req.Offset+int64(len(data)), err)
|
||||
return fuse.EIO
|
||||
}
|
||||
|
||||
resp.Size = len(req.Data)
|
||||
resp.Size = len(data)
|
||||
|
||||
if req.Offset == 0 {
|
||||
// detect mime type
|
||||
detectedMIME := mimetype.Detect(req.Data)
|
||||
fh.contentType = detectedMIME.String()
|
||||
if ext := path.Ext(fh.f.Name); ext != detectedMIME.Extension() {
|
||||
fh.contentType = mime.TypeByExtension(ext)
|
||||
}
|
||||
|
||||
fh.contentType = http.DetectContentType(data)
|
||||
fh.dirtyMetadata = true
|
||||
}
|
||||
|
||||
|
@ -187,7 +185,7 @@ func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error {
|
|||
fh.f.entry.Attributes.Gid = req.Gid
|
||||
fh.f.entry.Attributes.Mtime = time.Now().Unix()
|
||||
fh.f.entry.Attributes.Crtime = time.Now().Unix()
|
||||
fh.f.entry.Attributes.FileMode = uint32(0777 &^ fh.f.wfs.option.Umask)
|
||||
fh.f.entry.Attributes.FileMode = uint32(0666 &^ fh.f.wfs.option.Umask)
|
||||
fh.f.entry.Attributes.Collection = fh.dirtyPages.collection
|
||||
fh.f.entry.Attributes.Replication = fh.dirtyPages.replication
|
||||
}
|
||||
|
|
|
@ -9,6 +9,7 @@ import (
|
|||
|
||||
type FsCache struct {
|
||||
root *FsNode
|
||||
sync.RWMutex
|
||||
}
|
||||
type FsNode struct {
|
||||
parent *FsNode
|
||||
|
@ -27,6 +28,14 @@ func newFsCache(root fs.Node) *FsCache {
|
|||
}
|
||||
|
||||
func (c *FsCache) GetFsNode(path util.FullPath) fs.Node {
|
||||
|
||||
c.RLock()
|
||||
defer c.RUnlock()
|
||||
|
||||
return c.doGetFsNode(path)
|
||||
}
|
||||
|
||||
func (c *FsCache) doGetFsNode(path util.FullPath) fs.Node {
|
||||
t := c.root
|
||||
for _, p := range path.Split() {
|
||||
t = t.findChild(p)
|
||||
|
@ -38,6 +47,14 @@ func (c *FsCache) GetFsNode(path util.FullPath) fs.Node {
|
|||
}
|
||||
|
||||
func (c *FsCache) SetFsNode(path util.FullPath, node fs.Node) {
|
||||
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
|
||||
c.doSetFsNode(path, node)
|
||||
}
|
||||
|
||||
func (c *FsCache) doSetFsNode(path util.FullPath, node fs.Node) {
|
||||
t := c.root
|
||||
for _, p := range path.Split() {
|
||||
t = t.ensureChild(p)
|
||||
|
@ -46,16 +63,24 @@ func (c *FsCache) SetFsNode(path util.FullPath, node fs.Node) {
|
|||
}
|
||||
|
||||
func (c *FsCache) EnsureFsNode(path util.FullPath, genNodeFn func() fs.Node) fs.Node {
|
||||
t := c.GetFsNode(path)
|
||||
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
|
||||
t := c.doGetFsNode(path)
|
||||
if t != nil {
|
||||
return t
|
||||
}
|
||||
t = genNodeFn()
|
||||
c.SetFsNode(path, t)
|
||||
c.doSetFsNode(path, t)
|
||||
return t
|
||||
}
|
||||
|
||||
func (c *FsCache) DeleteFsNode(path util.FullPath) {
|
||||
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
|
||||
t := c.root
|
||||
for _, p := range path.Split() {
|
||||
t = t.findChild(p)
|
||||
|
@ -72,6 +97,9 @@ func (c *FsCache) DeleteFsNode(path util.FullPath) {
|
|||
// oldPath and newPath are full path including the new name
|
||||
func (c *FsCache) Move(oldPath util.FullPath, newPath util.FullPath) *FsNode {
|
||||
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
|
||||
// find old node
|
||||
src := c.root
|
||||
for _, p := range oldPath.Split() {
|
||||
|
|
|
@ -15,25 +15,26 @@ import (
|
|||
"github.com/chrislusf/seaweedfs/weed/glog"
|
||||
"github.com/chrislusf/seaweedfs/weed/pb"
|
||||
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
||||
"github.com/chrislusf/seaweedfs/weed/pb/pb_cache"
|
||||
"github.com/chrislusf/seaweedfs/weed/util"
|
||||
"github.com/chrislusf/seaweedfs/weed/util/chunk_cache"
|
||||
"github.com/seaweedfs/fuse"
|
||||
"github.com/seaweedfs/fuse/fs"
|
||||
)
|
||||
|
||||
type Option struct {
|
||||
FilerGrpcAddress string
|
||||
GrpcDialOption grpc.DialOption
|
||||
FilerMountRootPath string
|
||||
Collection string
|
||||
Replication string
|
||||
TtlSec int32
|
||||
ChunkSizeLimit int64
|
||||
ChunkCacheCountLimit int64
|
||||
DataCenter string
|
||||
DirListCacheLimit int64
|
||||
EntryCacheTtl time.Duration
|
||||
Umask os.FileMode
|
||||
FilerGrpcAddress string
|
||||
GrpcDialOption grpc.DialOption
|
||||
FilerMountRootPath string
|
||||
Collection string
|
||||
Replication string
|
||||
TtlSec int32
|
||||
ChunkSizeLimit int64
|
||||
CacheDir string
|
||||
CacheSizeMB int64
|
||||
DataCenter string
|
||||
DirListCacheLimit int64
|
||||
EntryCacheTtl time.Duration
|
||||
Umask os.FileMode
|
||||
|
||||
MountUid uint32
|
||||
MountGid uint32
|
||||
|
@ -54,9 +55,8 @@ type WFS struct {
|
|||
listDirectoryEntriesCache *ccache.Cache
|
||||
|
||||
// contains all open handles, protected by handlesLock
|
||||
handlesLock sync.Mutex
|
||||
handles []*FileHandle
|
||||
pathToHandleIndex map[util.FullPath]int
|
||||
handlesLock sync.Mutex
|
||||
handles map[uint64]*FileHandle
|
||||
|
||||
bufPool sync.Pool
|
||||
|
||||
|
@ -65,7 +65,7 @@ type WFS struct {
|
|||
root fs.Node
|
||||
fsNodeCache *FsCache
|
||||
|
||||
chunkCache *pb_cache.ChunkCache
|
||||
chunkCache *chunk_cache.ChunkCache
|
||||
}
|
||||
type statsCache struct {
|
||||
filer_pb.StatisticsResponse
|
||||
|
@ -76,13 +76,18 @@ func NewSeaweedFileSystem(option *Option) *WFS {
|
|||
wfs := &WFS{
|
||||
option: option,
|
||||
listDirectoryEntriesCache: ccache.New(ccache.Configure().MaxSize(option.DirListCacheLimit * 3).ItemsToPrune(100)),
|
||||
pathToHandleIndex: make(map[util.FullPath]int),
|
||||
handles: make(map[uint64]*FileHandle),
|
||||
bufPool: sync.Pool{
|
||||
New: func() interface{} {
|
||||
return make([]byte, option.ChunkSizeLimit)
|
||||
},
|
||||
},
|
||||
chunkCache: pb_cache.NewChunkCache(option.ChunkCacheCountLimit),
|
||||
}
|
||||
if option.CacheSizeMB > 0 {
|
||||
wfs.chunkCache = chunk_cache.NewChunkCache(256, option.CacheDir, option.CacheSizeMB)
|
||||
util.OnInterrupt(func() {
|
||||
wfs.chunkCache.Shutdown()
|
||||
})
|
||||
}
|
||||
|
||||
wfs.root = &Dir{name: wfs.option.FilerMountRootPath, wfs: wfs}
|
||||
|
@ -117,26 +122,15 @@ func (wfs *WFS) AcquireHandle(file *File, uid, gid uint32) (fileHandle *FileHand
|
|||
wfs.handlesLock.Lock()
|
||||
defer wfs.handlesLock.Unlock()
|
||||
|
||||
index, found := wfs.pathToHandleIndex[fullpath]
|
||||
if found && wfs.handles[index] != nil {
|
||||
glog.V(2).Infoln(fullpath, "found fileHandle id", index)
|
||||
return wfs.handles[index]
|
||||
inodeId := file.fullpath().AsInode()
|
||||
existingHandle, found := wfs.handles[inodeId]
|
||||
if found && existingHandle != nil {
|
||||
return existingHandle
|
||||
}
|
||||
|
||||
fileHandle = newFileHandle(file, uid, gid)
|
||||
for i, h := range wfs.handles {
|
||||
if h == nil {
|
||||
wfs.handles[i] = fileHandle
|
||||
fileHandle.handle = uint64(i)
|
||||
wfs.pathToHandleIndex[fullpath] = i
|
||||
glog.V(4).Infof("%s reuse fh %d", fullpath, fileHandle.handle)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
wfs.handles = append(wfs.handles, fileHandle)
|
||||
fileHandle.handle = uint64(len(wfs.handles) - 1)
|
||||
wfs.pathToHandleIndex[fullpath] = int(fileHandle.handle)
|
||||
wfs.handles[inodeId] = fileHandle
|
||||
fileHandle.handle = inodeId
|
||||
glog.V(4).Infof("%s new fh %d", fullpath, fileHandle.handle)
|
||||
|
||||
return
|
||||
|
@ -147,10 +141,8 @@ func (wfs *WFS) ReleaseHandle(fullpath util.FullPath, handleId fuse.HandleID) {
|
|||
defer wfs.handlesLock.Unlock()
|
||||
|
||||
glog.V(4).Infof("%s ReleaseHandle id %d current handles length %d", fullpath, handleId, len(wfs.handles))
|
||||
delete(wfs.pathToHandleIndex, fullpath)
|
||||
if int(handleId) < len(wfs.handles) {
|
||||
wfs.handles[int(handleId)] = nil
|
||||
}
|
||||
|
||||
delete(wfs.handles, fullpath.AsInode())
|
||||
|
||||
return
|
||||
}
|
||||
|
|
|
@ -7,7 +7,7 @@ import (
|
|||
"image/jpeg"
|
||||
"log"
|
||||
|
||||
"github.com/rwcarlsen/goexif/exif"
|
||||
"github.com/seaweedfs/goexif/exif"
|
||||
)
|
||||
|
||||
//many code is copied from http://camlistore.org/pkg/images/images.go
|
||||
|
|
113
weed/messaging/broker/broker_append.go
Normal file
113
weed/messaging/broker/broker_append.go
Normal file
|
@ -0,0 +1,113 @@
|
|||
package broker
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/chrislusf/seaweedfs/weed/glog"
|
||||
"github.com/chrislusf/seaweedfs/weed/operation"
|
||||
"github.com/chrislusf/seaweedfs/weed/pb"
|
||||
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
||||
"github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
|
||||
"github.com/chrislusf/seaweedfs/weed/security"
|
||||
"github.com/chrislusf/seaweedfs/weed/util"
|
||||
)
|
||||
|
||||
func (broker *MessageBroker) appendToFile(targetFile string, topicConfig *messaging_pb.TopicConfiguration, data []byte) error {
|
||||
|
||||
assignResult, uploadResult, err2 := broker.assignAndUpload(topicConfig, data)
|
||||
if err2 != nil {
|
||||
return err2
|
||||
}
|
||||
|
||||
dir, name := util.FullPath(targetFile).DirAndName()
|
||||
|
||||
chunk := &filer_pb.FileChunk{
|
||||
FileId: assignResult.Fid,
|
||||
Offset: 0, // needs to be fixed during appending
|
||||
Size: uint64(uploadResult.Size),
|
||||
Mtime: time.Now().UnixNano(),
|
||||
ETag: uploadResult.ETag,
|
||||
IsGzipped: uploadResult.Gzip > 0,
|
||||
}
|
||||
|
||||
// append the chunk
|
||||
if err := broker.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
|
||||
|
||||
request := &filer_pb.AppendToEntryRequest{
|
||||
Directory: dir,
|
||||
EntryName: name,
|
||||
Chunks: []*filer_pb.FileChunk{chunk},
|
||||
}
|
||||
|
||||
_, err := client.AppendToEntry(context.Background(), request)
|
||||
if err != nil {
|
||||
glog.V(0).Infof("append to file %v: %v", request, err)
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
return fmt.Errorf("append to file %v: %v", targetFile, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (broker *MessageBroker) assignAndUpload(topicConfig *messaging_pb.TopicConfiguration, data []byte) (*operation.AssignResult, *operation.UploadResult, error) {
|
||||
|
||||
var assignResult = &operation.AssignResult{}
|
||||
|
||||
// assign a volume location
|
||||
if err := broker.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
|
||||
|
||||
request := &filer_pb.AssignVolumeRequest{
|
||||
Count: 1,
|
||||
Replication: topicConfig.Replication,
|
||||
Collection: topicConfig.Collection,
|
||||
}
|
||||
|
||||
resp, err := client.AssignVolume(context.Background(), request)
|
||||
if err != nil {
|
||||
glog.V(0).Infof("assign volume failure %v: %v", request, err)
|
||||
return err
|
||||
}
|
||||
if resp.Error != "" {
|
||||
return fmt.Errorf("assign volume failure %v: %v", request, resp.Error)
|
||||
}
|
||||
|
||||
assignResult.Auth = security.EncodedJwt(resp.Auth)
|
||||
assignResult.Fid = resp.FileId
|
||||
assignResult.Url = resp.Url
|
||||
assignResult.PublicUrl = resp.PublicUrl
|
||||
assignResult.Count = uint64(resp.Count)
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// upload data
|
||||
targetUrl := fmt.Sprintf("http://%s/%s", assignResult.Url, assignResult.Fid)
|
||||
uploadResult, err := operation.UploadData(targetUrl, "", broker.option.Cipher, data, false, "", nil, assignResult.Auth)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("upload data %s: %v", targetUrl, err)
|
||||
}
|
||||
// println("uploaded to", targetUrl)
|
||||
return assignResult, uploadResult, nil
|
||||
}
|
||||
|
||||
func (broker *MessageBroker) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) (err error) {
|
||||
|
||||
for _, filer := range broker.option.Filers {
|
||||
if err = pb.WithFilerClient(filer, broker.grpcDialOption, fn); err != nil {
|
||||
glog.V(0).Infof("fail to connect to %s: %v", filer, err)
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
|
||||
}
|
15
weed/messaging/broker/broker_grpc_server.go
Normal file
15
weed/messaging/broker/broker_grpc_server.go
Normal file
|
@ -0,0 +1,15 @@
|
|||
package broker
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
|
||||
)
|
||||
|
||||
func (broker *MessageBroker) ConfigureTopic(c context.Context, request *messaging_pb.ConfigureTopicRequest) (*messaging_pb.ConfigureTopicResponse, error) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (broker *MessageBroker) GetTopicConfiguration(c context.Context, request *messaging_pb.GetTopicConfigurationRequest) (*messaging_pb.GetTopicConfigurationResponse, error) {
|
||||
panic("implement me")
|
||||
}
|
99
weed/messaging/broker/broker_grpc_server_publish.go
Normal file
99
weed/messaging/broker/broker_grpc_server_publish.go
Normal file
|
@ -0,0 +1,99 @@
|
|||
package broker
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
|
||||
"github.com/chrislusf/seaweedfs/weed/filer2"
|
||||
"github.com/chrislusf/seaweedfs/weed/glog"
|
||||
"github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
|
||||
)
|
||||
|
||||
func (broker *MessageBroker) Publish(stream messaging_pb.SeaweedMessaging_PublishServer) error {
|
||||
|
||||
// process initial request
|
||||
in, err := stream.Recv()
|
||||
if err == io.EOF {
|
||||
return nil
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// TODO look it up
|
||||
topicConfig := &messaging_pb.TopicConfiguration{
|
||||
|
||||
}
|
||||
|
||||
// get lock
|
||||
tp := TopicPartition{
|
||||
Namespace: in.Init.Namespace,
|
||||
Topic: in.Init.Topic,
|
||||
Partition: in.Init.Partition,
|
||||
}
|
||||
logBuffer := broker.topicLocks.RequestPublisherLock(tp, func(startTime, stopTime time.Time, buf []byte) {
|
||||
|
||||
targetFile := fmt.Sprintf(
|
||||
"%s/%s/%s/%04d-%02d-%02d/%02d-%02d.part%02d",
|
||||
filer2.TopicsDir, tp.Namespace, tp.Topic,
|
||||
startTime.Year(), startTime.Month(), startTime.Day(), startTime.Hour(), startTime.Minute(),
|
||||
tp.Partition,
|
||||
)
|
||||
|
||||
if err := broker.appendToFile(targetFile, topicConfig, buf); err != nil {
|
||||
glog.V(0).Infof("log write failed %s: %v", targetFile, err)
|
||||
}
|
||||
|
||||
})
|
||||
defer broker.topicLocks.ReleaseLock(tp, true)
|
||||
|
||||
updatesChan := make(chan int32)
|
||||
|
||||
go func() {
|
||||
for update := range updatesChan {
|
||||
if err := stream.Send(&messaging_pb.PublishResponse{
|
||||
Config: &messaging_pb.PublishResponse_ConfigMessage{
|
||||
PartitionCount: update,
|
||||
},
|
||||
}); err != nil {
|
||||
glog.V(0).Infof("err sending publish response: %v", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
|
||||
// process each message
|
||||
for {
|
||||
in, err := stream.Recv()
|
||||
if err == io.EOF {
|
||||
return nil
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if in.Data == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
m := &messaging_pb.Message{
|
||||
Timestamp: time.Now().UnixNano(),
|
||||
Key: in.Data.Key,
|
||||
Value: in.Data.Value,
|
||||
Headers: in.Data.Headers,
|
||||
}
|
||||
|
||||
data, err := proto.Marshal(m)
|
||||
if err != nil {
|
||||
glog.Errorf("marshall error: %v\n", err)
|
||||
continue
|
||||
}
|
||||
|
||||
logBuffer.AddToBuffer(in.Data.Key, data)
|
||||
|
||||
}
|
||||
}
|
88
weed/messaging/broker/broker_grpc_server_subscribe.go
Normal file
88
weed/messaging/broker/broker_grpc_server_subscribe.go
Normal file
|
@ -0,0 +1,88 @@
|
|||
package broker
|
||||
|
||||
import (
|
||||
"io"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
|
||||
"github.com/chrislusf/seaweedfs/weed/glog"
|
||||
"github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
|
||||
"github.com/chrislusf/seaweedfs/weed/util"
|
||||
)
|
||||
|
||||
func (broker *MessageBroker) Subscribe(stream messaging_pb.SeaweedMessaging_SubscribeServer) error {
|
||||
|
||||
// process initial request
|
||||
in, err := stream.Recv()
|
||||
if err == io.EOF {
|
||||
return nil
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
subscriberId := in.Init.SubscriberId
|
||||
|
||||
// get lock
|
||||
tp := TopicPartition{
|
||||
Namespace: in.Init.Namespace,
|
||||
Topic: in.Init.Topic,
|
||||
Partition: in.Init.Partition,
|
||||
}
|
||||
lock := broker.topicLocks.RequestSubscriberLock(tp)
|
||||
defer broker.topicLocks.ReleaseLock(tp, false)
|
||||
cond := sync.NewCond(&lock.Mutex)
|
||||
|
||||
lastReadTime := time.Now()
|
||||
switch in.Init.StartPosition {
|
||||
case messaging_pb.SubscriberMessage_InitMessage_TIMESTAMP:
|
||||
lastReadTime = time.Unix(0, in.Init.TimestampNs)
|
||||
case messaging_pb.SubscriberMessage_InitMessage_LATEST:
|
||||
case messaging_pb.SubscriberMessage_InitMessage_EARLIEST:
|
||||
}
|
||||
|
||||
// how to process each message
|
||||
// an error returned will end the subscription
|
||||
eachMessageFn := func(m *messaging_pb.Message) error {
|
||||
err := stream.Send(&messaging_pb.BrokerMessage{
|
||||
Data: m,
|
||||
})
|
||||
if err != nil {
|
||||
glog.V(0).Infof("=> subscriber %v: %+v", subscriberId, err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// loop through all messages
|
||||
for {
|
||||
|
||||
_, buf := lock.logBuffer.ReadFromBuffer(lastReadTime)
|
||||
|
||||
for pos := 0; pos+4 < len(buf); {
|
||||
|
||||
size := util.BytesToUint32(buf[pos : pos+4])
|
||||
entryData := buf[pos+4 : pos+4+int(size)]
|
||||
|
||||
m := &messaging_pb.Message{}
|
||||
if err = proto.Unmarshal(entryData, m); err != nil {
|
||||
glog.Errorf("unexpected unmarshal messaging_pb.Message: %v", err)
|
||||
pos += 4 + int(size)
|
||||
continue
|
||||
}
|
||||
|
||||
if err = eachMessageFn(m); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
lastReadTime = time.Unix(0, m.Timestamp)
|
||||
pos += 4 + int(size)
|
||||
}
|
||||
|
||||
lock.Mutex.Lock()
|
||||
cond.Wait()
|
||||
lock.Mutex.Unlock()
|
||||
}
|
||||
|
||||
}
|
|
@ -1,4 +1,4 @@
|
|||
package weed_server
|
||||
package broker
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
@ -10,8 +10,6 @@ import (
|
|||
"github.com/chrislusf/seaweedfs/weed/pb"
|
||||
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
||||
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
|
||||
"github.com/chrislusf/seaweedfs/weed/security"
|
||||
"github.com/chrislusf/seaweedfs/weed/util"
|
||||
)
|
||||
|
||||
type MessageBrokerOption struct {
|
||||
|
@ -19,18 +17,21 @@ type MessageBrokerOption struct {
|
|||
DefaultReplication string
|
||||
MaxMB int
|
||||
Port int
|
||||
Cipher bool
|
||||
}
|
||||
|
||||
type MessageBroker struct {
|
||||
option *MessageBrokerOption
|
||||
grpcDialOption grpc.DialOption
|
||||
topicLocks *TopicLocks
|
||||
}
|
||||
|
||||
func NewMessageBroker(option *MessageBrokerOption) (messageBroker *MessageBroker, err error) {
|
||||
func NewMessageBroker(option *MessageBrokerOption, grpcDialOption grpc.DialOption) (messageBroker *MessageBroker, err error) {
|
||||
|
||||
messageBroker = &MessageBroker{
|
||||
option: option,
|
||||
grpcDialOption: security.LoadClientTLS(util.GetViper(), "grpc.msg_broker"),
|
||||
grpcDialOption: grpcDialOption,
|
||||
topicLocks: NewTopicLocks(),
|
||||
}
|
||||
|
||||
go messageBroker.loopForEver()
|
80
weed/messaging/broker/topic_lock.go
Normal file
80
weed/messaging/broker/topic_lock.go
Normal file
|
@ -0,0 +1,80 @@
|
|||
package broker
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/chrislusf/seaweedfs/weed/util/log_buffer"
|
||||
)
|
||||
|
||||
type TopicPartition struct {
|
||||
Namespace string
|
||||
Topic string
|
||||
Partition int32
|
||||
}
|
||||
type TopicLock struct {
|
||||
sync.Mutex
|
||||
subscriberCount int
|
||||
publisherCount int
|
||||
logBuffer *log_buffer.LogBuffer
|
||||
}
|
||||
|
||||
type TopicLocks struct {
|
||||
sync.Mutex
|
||||
locks map[TopicPartition]*TopicLock
|
||||
}
|
||||
|
||||
func NewTopicLocks() *TopicLocks {
|
||||
return &TopicLocks{
|
||||
locks: make(map[TopicPartition]*TopicLock),
|
||||
}
|
||||
}
|
||||
|
||||
func (tl *TopicLocks) RequestSubscriberLock(partition TopicPartition) *TopicLock {
|
||||
tl.Lock()
|
||||
defer tl.Unlock()
|
||||
|
||||
lock, found := tl.locks[partition]
|
||||
if !found {
|
||||
lock = &TopicLock{}
|
||||
tl.locks[partition] = lock
|
||||
}
|
||||
lock.subscriberCount++
|
||||
|
||||
return lock
|
||||
}
|
||||
|
||||
func (tl *TopicLocks) RequestPublisherLock(partition TopicPartition, flushFn func(startTime, stopTime time.Time, buf []byte)) *log_buffer.LogBuffer {
|
||||
tl.Lock()
|
||||
defer tl.Unlock()
|
||||
|
||||
lock, found := tl.locks[partition]
|
||||
if !found {
|
||||
lock = &TopicLock{}
|
||||
tl.locks[partition] = lock
|
||||
}
|
||||
lock.publisherCount++
|
||||
cond := sync.NewCond(&lock.Mutex)
|
||||
lock.logBuffer = log_buffer.NewLogBuffer(time.Minute, flushFn, func() {
|
||||
cond.Broadcast()
|
||||
})
|
||||
return lock.logBuffer
|
||||
}
|
||||
|
||||
func (tl *TopicLocks) ReleaseLock(partition TopicPartition, isPublisher bool) {
|
||||
tl.Lock()
|
||||
defer tl.Unlock()
|
||||
|
||||
lock, found := tl.locks[partition]
|
||||
if !found {
|
||||
return
|
||||
}
|
||||
if isPublisher {
|
||||
lock.publisherCount--
|
||||
} else {
|
||||
lock.subscriberCount--
|
||||
}
|
||||
if lock.subscriberCount <= 0 && lock.publisherCount <= 0 {
|
||||
delete(tl.locks, partition)
|
||||
}
|
||||
}
|
|
@ -45,11 +45,9 @@ var fileNameEscaper = strings.NewReplacer("\\", "\\\\", "\"", "\\\"")
|
|||
|
||||
// Upload sends a POST request to a volume server to upload the content with adjustable compression level
|
||||
func UploadData(uploadUrl string, filename string, cipher bool, data []byte, isInputGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (uploadResult *UploadResult, err error) {
|
||||
hash := md5.New()
|
||||
hash.Write(data)
|
||||
uploadResult, err = doUploadData(uploadUrl, filename, cipher, data, isInputGzipped, mtype, pairMap, jwt)
|
||||
if uploadResult != nil {
|
||||
uploadResult.Md5 = fmt.Sprintf("%x", hash.Sum(nil))
|
||||
uploadResult.Md5 = util.Md5(data)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
@ -79,9 +77,15 @@ func doUploadData(uploadUrl string, filename string, cipher bool, data []byte, i
|
|||
contentIsGzipped := isInputGzipped
|
||||
shouldGzipNow := false
|
||||
if !isInputGzipped {
|
||||
if mtype == "" {
|
||||
mtype = http.DetectContentType(data)
|
||||
if mtype == "application/octet-stream" {
|
||||
mtype = ""
|
||||
}
|
||||
}
|
||||
if shouldBeZipped, iAmSure := util.IsGzippableFileType(filepath.Base(filename), mtype); iAmSure && shouldBeZipped {
|
||||
shouldGzipNow = true
|
||||
} else if len(data) > 128 {
|
||||
} else if !iAmSure && mtype == "" && len(data) > 128 {
|
||||
var compressed []byte
|
||||
compressed, err = util.GzipData(data[0:128])
|
||||
shouldGzipNow = len(compressed)*10 < 128*9 // can not compress to less than 90%
|
||||
|
|
|
@ -7,6 +7,6 @@ gen:
|
|||
protoc volume_server.proto --go_out=plugins=grpc:./volume_server_pb
|
||||
protoc filer.proto --go_out=plugins=grpc:./filer_pb
|
||||
protoc iam.proto --go_out=plugins=grpc:./iam_pb
|
||||
protoc queue.proto --go_out=plugins=grpc:./queue_pb
|
||||
protoc messaging.proto --go_out=plugins=grpc:./messaging_pb
|
||||
# protoc filer.proto --java_out=../../other/java/client/src/main/java
|
||||
cp filer.proto ../../other/java/client/src/main/proto
|
||||
|
|
|
@ -21,6 +21,9 @@ service SeaweedFiler {
|
|||
rpc UpdateEntry (UpdateEntryRequest) returns (UpdateEntryResponse) {
|
||||
}
|
||||
|
||||
rpc AppendToEntry (AppendToEntryRequest) returns (AppendToEntryResponse) {
|
||||
}
|
||||
|
||||
rpc DeleteEntry (DeleteEntryRequest) returns (DeleteEntryResponse) {
|
||||
}
|
||||
|
||||
|
@ -42,7 +45,7 @@ service SeaweedFiler {
|
|||
rpc GetFilerConfiguration (GetFilerConfigurationRequest) returns (GetFilerConfigurationResponse) {
|
||||
}
|
||||
|
||||
rpc ListenForEvents (ListenForEventsRequest) returns (stream FullEventNotification) {
|
||||
rpc SubscribeMetadata (SubscribeMetadataRequest) returns (stream SubscribeMetadataResponse) {
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -123,6 +126,7 @@ message FuseAttributes {
|
|||
string user_name = 11; // for hdfs
|
||||
repeated string group_name = 12; // for hdfs
|
||||
string symlink_target = 13;
|
||||
bytes md5 = 14;
|
||||
}
|
||||
|
||||
message CreateEntryRequest {
|
||||
|
@ -142,6 +146,14 @@ message UpdateEntryRequest {
|
|||
message UpdateEntryResponse {
|
||||
}
|
||||
|
||||
message AppendToEntryRequest {
|
||||
string directory = 1;
|
||||
string entry_name = 2;
|
||||
repeated FileChunk chunks = 3;
|
||||
}
|
||||
message AppendToEntryResponse {
|
||||
}
|
||||
|
||||
message DeleteEntryRequest {
|
||||
string directory = 1;
|
||||
string name = 2;
|
||||
|
@ -230,16 +242,15 @@ message GetFilerConfigurationResponse {
|
|||
string collection = 3;
|
||||
uint32 max_mb = 4;
|
||||
string dir_buckets = 5;
|
||||
string dir_queues = 6;
|
||||
bool cipher = 7;
|
||||
}
|
||||
|
||||
message ListenForEventsRequest {
|
||||
message SubscribeMetadataRequest {
|
||||
string client_name = 1;
|
||||
string path_prefix = 2;
|
||||
int64 since_ns = 3;
|
||||
}
|
||||
message FullEventNotification {
|
||||
message SubscribeMetadataResponse {
|
||||
string directory = 1;
|
||||
EventNotification event_notification = 2;
|
||||
}
|
||||
|
|
|
@ -23,6 +23,8 @@ It has these top-level messages:
|
|||
CreateEntryResponse
|
||||
UpdateEntryRequest
|
||||
UpdateEntryResponse
|
||||
AppendToEntryRequest
|
||||
AppendToEntryResponse
|
||||
DeleteEntryRequest
|
||||
DeleteEntryResponse
|
||||
AtomicRenameEntryRequest
|
||||
|
@ -39,8 +41,8 @@ It has these top-level messages:
|
|||
StatisticsResponse
|
||||
GetFilerConfigurationRequest
|
||||
GetFilerConfigurationResponse
|
||||
ListenForEventsRequest
|
||||
FullEventNotification
|
||||
SubscribeMetadataRequest
|
||||
SubscribeMetadataResponse
|
||||
LogEntry
|
||||
*/
|
||||
package filer_pb
|
||||
|
@ -415,6 +417,7 @@ type FuseAttributes struct {
|
|||
UserName string `protobuf:"bytes,11,opt,name=user_name,json=userName" json:"user_name,omitempty"`
|
||||
GroupName []string `protobuf:"bytes,12,rep,name=group_name,json=groupName" json:"group_name,omitempty"`
|
||||
SymlinkTarget string `protobuf:"bytes,13,opt,name=symlink_target,json=symlinkTarget" json:"symlink_target,omitempty"`
|
||||
Md5 []byte `protobuf:"bytes,14,opt,name=md5,proto3" json:"md5,omitempty"`
|
||||
}
|
||||
|
||||
func (m *FuseAttributes) Reset() { *m = FuseAttributes{} }
|
||||
|
@ -513,6 +516,13 @@ func (m *FuseAttributes) GetSymlinkTarget() string {
|
|||
return ""
|
||||
}
|
||||
|
||||
func (m *FuseAttributes) GetMd5() []byte {
|
||||
if m != nil {
|
||||
return m.Md5
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type CreateEntryRequest struct {
|
||||
Directory string `protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"`
|
||||
Entry *Entry `protobuf:"bytes,2,opt,name=entry" json:"entry,omitempty"`
|
||||
|
@ -593,6 +603,46 @@ func (m *UpdateEntryResponse) String() string { return proto.CompactT
|
|||
func (*UpdateEntryResponse) ProtoMessage() {}
|
||||
func (*UpdateEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }
|
||||
|
||||
type AppendToEntryRequest struct {
|
||||
Directory string `protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"`
|
||||
EntryName string `protobuf:"bytes,2,opt,name=entry_name,json=entryName" json:"entry_name,omitempty"`
|
||||
Chunks []*FileChunk `protobuf:"bytes,3,rep,name=chunks" json:"chunks,omitempty"`
|
||||
}
|
||||
|
||||
func (m *AppendToEntryRequest) Reset() { *m = AppendToEntryRequest{} }
|
||||
func (m *AppendToEntryRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*AppendToEntryRequest) ProtoMessage() {}
|
||||
func (*AppendToEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} }
|
||||
|
||||
func (m *AppendToEntryRequest) GetDirectory() string {
|
||||
if m != nil {
|
||||
return m.Directory
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *AppendToEntryRequest) GetEntryName() string {
|
||||
if m != nil {
|
||||
return m.EntryName
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *AppendToEntryRequest) GetChunks() []*FileChunk {
|
||||
if m != nil {
|
||||
return m.Chunks
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type AppendToEntryResponse struct {
|
||||
}
|
||||
|
||||
func (m *AppendToEntryResponse) Reset() { *m = AppendToEntryResponse{} }
|
||||
func (m *AppendToEntryResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*AppendToEntryResponse) ProtoMessage() {}
|
||||
func (*AppendToEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} }
|
||||
|
||||
type DeleteEntryRequest struct {
|
||||
Directory string `protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"`
|
||||
Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"`
|
||||
|
@ -605,7 +655,7 @@ type DeleteEntryRequest struct {
|
|||
func (m *DeleteEntryRequest) Reset() { *m = DeleteEntryRequest{} }
|
||||
func (m *DeleteEntryRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*DeleteEntryRequest) ProtoMessage() {}
|
||||
func (*DeleteEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} }
|
||||
func (*DeleteEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} }
|
||||
|
||||
func (m *DeleteEntryRequest) GetDirectory() string {
|
||||
if m != nil {
|
||||
|
@ -649,7 +699,7 @@ type DeleteEntryResponse struct {
|
|||
func (m *DeleteEntryResponse) Reset() { *m = DeleteEntryResponse{} }
|
||||
func (m *DeleteEntryResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*DeleteEntryResponse) ProtoMessage() {}
|
||||
func (*DeleteEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} }
|
||||
func (*DeleteEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} }
|
||||
|
||||
func (m *DeleteEntryResponse) GetError() string {
|
||||
if m != nil {
|
||||
|
@ -668,7 +718,7 @@ type AtomicRenameEntryRequest struct {
|
|||
func (m *AtomicRenameEntryRequest) Reset() { *m = AtomicRenameEntryRequest{} }
|
||||
func (m *AtomicRenameEntryRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*AtomicRenameEntryRequest) ProtoMessage() {}
|
||||
func (*AtomicRenameEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} }
|
||||
func (*AtomicRenameEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} }
|
||||
|
||||
func (m *AtomicRenameEntryRequest) GetOldDirectory() string {
|
||||
if m != nil {
|
||||
|
@ -704,7 +754,7 @@ type AtomicRenameEntryResponse struct {
|
|||
func (m *AtomicRenameEntryResponse) Reset() { *m = AtomicRenameEntryResponse{} }
|
||||
func (m *AtomicRenameEntryResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*AtomicRenameEntryResponse) ProtoMessage() {}
|
||||
func (*AtomicRenameEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} }
|
||||
func (*AtomicRenameEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} }
|
||||
|
||||
type AssignVolumeRequest struct {
|
||||
Count int32 `protobuf:"varint,1,opt,name=count" json:"count,omitempty"`
|
||||
|
@ -718,7 +768,7 @@ type AssignVolumeRequest struct {
|
|||
func (m *AssignVolumeRequest) Reset() { *m = AssignVolumeRequest{} }
|
||||
func (m *AssignVolumeRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*AssignVolumeRequest) ProtoMessage() {}
|
||||
func (*AssignVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} }
|
||||
func (*AssignVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} }
|
||||
|
||||
func (m *AssignVolumeRequest) GetCount() int32 {
|
||||
if m != nil {
|
||||
|
@ -776,7 +826,7 @@ type AssignVolumeResponse struct {
|
|||
func (m *AssignVolumeResponse) Reset() { *m = AssignVolumeResponse{} }
|
||||
func (m *AssignVolumeResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*AssignVolumeResponse) ProtoMessage() {}
|
||||
func (*AssignVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} }
|
||||
func (*AssignVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} }
|
||||
|
||||
func (m *AssignVolumeResponse) GetFileId() string {
|
||||
if m != nil {
|
||||
|
@ -841,7 +891,7 @@ type LookupVolumeRequest struct {
|
|||
func (m *LookupVolumeRequest) Reset() { *m = LookupVolumeRequest{} }
|
||||
func (m *LookupVolumeRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*LookupVolumeRequest) ProtoMessage() {}
|
||||
func (*LookupVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} }
|
||||
func (*LookupVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} }
|
||||
|
||||
func (m *LookupVolumeRequest) GetVolumeIds() []string {
|
||||
if m != nil {
|
||||
|
@ -857,7 +907,7 @@ type Locations struct {
|
|||
func (m *Locations) Reset() { *m = Locations{} }
|
||||
func (m *Locations) String() string { return proto.CompactTextString(m) }
|
||||
func (*Locations) ProtoMessage() {}
|
||||
func (*Locations) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} }
|
||||
func (*Locations) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} }
|
||||
|
||||
func (m *Locations) GetLocations() []*Location {
|
||||
if m != nil {
|
||||
|
@ -874,7 +924,7 @@ type Location struct {
|
|||
func (m *Location) Reset() { *m = Location{} }
|
||||
func (m *Location) String() string { return proto.CompactTextString(m) }
|
||||
func (*Location) ProtoMessage() {}
|
||||
func (*Location) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} }
|
||||
func (*Location) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} }
|
||||
|
||||
func (m *Location) GetUrl() string {
|
||||
if m != nil {
|
||||
|
@ -897,7 +947,7 @@ type LookupVolumeResponse struct {
|
|||
func (m *LookupVolumeResponse) Reset() { *m = LookupVolumeResponse{} }
|
||||
func (m *LookupVolumeResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*LookupVolumeResponse) ProtoMessage() {}
|
||||
func (*LookupVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} }
|
||||
func (*LookupVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} }
|
||||
|
||||
func (m *LookupVolumeResponse) GetLocationsMap() map[string]*Locations {
|
||||
if m != nil {
|
||||
|
@ -913,7 +963,7 @@ type DeleteCollectionRequest struct {
|
|||
func (m *DeleteCollectionRequest) Reset() { *m = DeleteCollectionRequest{} }
|
||||
func (m *DeleteCollectionRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*DeleteCollectionRequest) ProtoMessage() {}
|
||||
func (*DeleteCollectionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} }
|
||||
func (*DeleteCollectionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} }
|
||||
|
||||
func (m *DeleteCollectionRequest) GetCollection() string {
|
||||
if m != nil {
|
||||
|
@ -928,7 +978,7 @@ type DeleteCollectionResponse struct {
|
|||
func (m *DeleteCollectionResponse) Reset() { *m = DeleteCollectionResponse{} }
|
||||
func (m *DeleteCollectionResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*DeleteCollectionResponse) ProtoMessage() {}
|
||||
func (*DeleteCollectionResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} }
|
||||
func (*DeleteCollectionResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} }
|
||||
|
||||
type StatisticsRequest struct {
|
||||
Replication string `protobuf:"bytes,1,opt,name=replication" json:"replication,omitempty"`
|
||||
|
@ -939,7 +989,7 @@ type StatisticsRequest struct {
|
|||
func (m *StatisticsRequest) Reset() { *m = StatisticsRequest{} }
|
||||
func (m *StatisticsRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*StatisticsRequest) ProtoMessage() {}
|
||||
func (*StatisticsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} }
|
||||
func (*StatisticsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} }
|
||||
|
||||
func (m *StatisticsRequest) GetReplication() string {
|
||||
if m != nil {
|
||||
|
@ -974,7 +1024,7 @@ type StatisticsResponse struct {
|
|||
func (m *StatisticsResponse) Reset() { *m = StatisticsResponse{} }
|
||||
func (m *StatisticsResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*StatisticsResponse) ProtoMessage() {}
|
||||
func (*StatisticsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} }
|
||||
func (*StatisticsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} }
|
||||
|
||||
func (m *StatisticsResponse) GetReplication() string {
|
||||
if m != nil {
|
||||
|
@ -1024,7 +1074,7 @@ type GetFilerConfigurationRequest struct {
|
|||
func (m *GetFilerConfigurationRequest) Reset() { *m = GetFilerConfigurationRequest{} }
|
||||
func (m *GetFilerConfigurationRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*GetFilerConfigurationRequest) ProtoMessage() {}
|
||||
func (*GetFilerConfigurationRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} }
|
||||
func (*GetFilerConfigurationRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} }
|
||||
|
||||
type GetFilerConfigurationResponse struct {
|
||||
Masters []string `protobuf:"bytes,1,rep,name=masters" json:"masters,omitempty"`
|
||||
|
@ -1032,14 +1082,13 @@ type GetFilerConfigurationResponse struct {
|
|||
Collection string `protobuf:"bytes,3,opt,name=collection" json:"collection,omitempty"`
|
||||
MaxMb uint32 `protobuf:"varint,4,opt,name=max_mb,json=maxMb" json:"max_mb,omitempty"`
|
||||
DirBuckets string `protobuf:"bytes,5,opt,name=dir_buckets,json=dirBuckets" json:"dir_buckets,omitempty"`
|
||||
DirQueues string `protobuf:"bytes,6,opt,name=dir_queues,json=dirQueues" json:"dir_queues,omitempty"`
|
||||
Cipher bool `protobuf:"varint,7,opt,name=cipher" json:"cipher,omitempty"`
|
||||
}
|
||||
|
||||
func (m *GetFilerConfigurationResponse) Reset() { *m = GetFilerConfigurationResponse{} }
|
||||
func (m *GetFilerConfigurationResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*GetFilerConfigurationResponse) ProtoMessage() {}
|
||||
func (*GetFilerConfigurationResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} }
|
||||
func (*GetFilerConfigurationResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} }
|
||||
|
||||
func (m *GetFilerConfigurationResponse) GetMasters() []string {
|
||||
if m != nil {
|
||||
|
@ -1076,13 +1125,6 @@ func (m *GetFilerConfigurationResponse) GetDirBuckets() string {
|
|||
return ""
|
||||
}
|
||||
|
||||
func (m *GetFilerConfigurationResponse) GetDirQueues() string {
|
||||
if m != nil {
|
||||
return m.DirQueues
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *GetFilerConfigurationResponse) GetCipher() bool {
|
||||
if m != nil {
|
||||
return m.Cipher
|
||||
|
@ -1090,56 +1132,56 @@ func (m *GetFilerConfigurationResponse) GetCipher() bool {
|
|||
return false
|
||||
}
|
||||
|
||||
type ListenForEventsRequest struct {
|
||||
type SubscribeMetadataRequest struct {
|
||||
ClientName string `protobuf:"bytes,1,opt,name=client_name,json=clientName" json:"client_name,omitempty"`
|
||||
PathPrefix string `protobuf:"bytes,2,opt,name=path_prefix,json=pathPrefix" json:"path_prefix,omitempty"`
|
||||
SinceNs int64 `protobuf:"varint,3,opt,name=since_ns,json=sinceNs" json:"since_ns,omitempty"`
|
||||
}
|
||||
|
||||
func (m *ListenForEventsRequest) Reset() { *m = ListenForEventsRequest{} }
|
||||
func (m *ListenForEventsRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*ListenForEventsRequest) ProtoMessage() {}
|
||||
func (*ListenForEventsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} }
|
||||
func (m *SubscribeMetadataRequest) Reset() { *m = SubscribeMetadataRequest{} }
|
||||
func (m *SubscribeMetadataRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*SubscribeMetadataRequest) ProtoMessage() {}
|
||||
func (*SubscribeMetadataRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} }
|
||||
|
||||
func (m *ListenForEventsRequest) GetClientName() string {
|
||||
func (m *SubscribeMetadataRequest) GetClientName() string {
|
||||
if m != nil {
|
||||
return m.ClientName
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *ListenForEventsRequest) GetPathPrefix() string {
|
||||
func (m *SubscribeMetadataRequest) GetPathPrefix() string {
|
||||
if m != nil {
|
||||
return m.PathPrefix
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *ListenForEventsRequest) GetSinceNs() int64 {
|
||||
func (m *SubscribeMetadataRequest) GetSinceNs() int64 {
|
||||
if m != nil {
|
||||
return m.SinceNs
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type FullEventNotification struct {
|
||||
type SubscribeMetadataResponse struct {
|
||||
Directory string `protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"`
|
||||
EventNotification *EventNotification `protobuf:"bytes,2,opt,name=event_notification,json=eventNotification" json:"event_notification,omitempty"`
|
||||
}
|
||||
|
||||
func (m *FullEventNotification) Reset() { *m = FullEventNotification{} }
|
||||
func (m *FullEventNotification) String() string { return proto.CompactTextString(m) }
|
||||
func (*FullEventNotification) ProtoMessage() {}
|
||||
func (*FullEventNotification) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} }
|
||||
func (m *SubscribeMetadataResponse) Reset() { *m = SubscribeMetadataResponse{} }
|
||||
func (m *SubscribeMetadataResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*SubscribeMetadataResponse) ProtoMessage() {}
|
||||
func (*SubscribeMetadataResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} }
|
||||
|
||||
func (m *FullEventNotification) GetDirectory() string {
|
||||
func (m *SubscribeMetadataResponse) GetDirectory() string {
|
||||
if m != nil {
|
||||
return m.Directory
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *FullEventNotification) GetEventNotification() *EventNotification {
|
||||
func (m *SubscribeMetadataResponse) GetEventNotification() *EventNotification {
|
||||
if m != nil {
|
||||
return m.EventNotification
|
||||
}
|
||||
|
@ -1155,7 +1197,7 @@ type LogEntry struct {
|
|||
func (m *LogEntry) Reset() { *m = LogEntry{} }
|
||||
func (m *LogEntry) String() string { return proto.CompactTextString(m) }
|
||||
func (*LogEntry) ProtoMessage() {}
|
||||
func (*LogEntry) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} }
|
||||
func (*LogEntry) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} }
|
||||
|
||||
func (m *LogEntry) GetTsNs() int64 {
|
||||
if m != nil {
|
||||
|
@ -1193,6 +1235,8 @@ func init() {
|
|||
proto.RegisterType((*CreateEntryResponse)(nil), "filer_pb.CreateEntryResponse")
|
||||
proto.RegisterType((*UpdateEntryRequest)(nil), "filer_pb.UpdateEntryRequest")
|
||||
proto.RegisterType((*UpdateEntryResponse)(nil), "filer_pb.UpdateEntryResponse")
|
||||
proto.RegisterType((*AppendToEntryRequest)(nil), "filer_pb.AppendToEntryRequest")
|
||||
proto.RegisterType((*AppendToEntryResponse)(nil), "filer_pb.AppendToEntryResponse")
|
||||
proto.RegisterType((*DeleteEntryRequest)(nil), "filer_pb.DeleteEntryRequest")
|
||||
proto.RegisterType((*DeleteEntryResponse)(nil), "filer_pb.DeleteEntryResponse")
|
||||
proto.RegisterType((*AtomicRenameEntryRequest)(nil), "filer_pb.AtomicRenameEntryRequest")
|
||||
|
@ -1209,8 +1253,8 @@ func init() {
|
|||
proto.RegisterType((*StatisticsResponse)(nil), "filer_pb.StatisticsResponse")
|
||||
proto.RegisterType((*GetFilerConfigurationRequest)(nil), "filer_pb.GetFilerConfigurationRequest")
|
||||
proto.RegisterType((*GetFilerConfigurationResponse)(nil), "filer_pb.GetFilerConfigurationResponse")
|
||||
proto.RegisterType((*ListenForEventsRequest)(nil), "filer_pb.ListenForEventsRequest")
|
||||
proto.RegisterType((*FullEventNotification)(nil), "filer_pb.FullEventNotification")
|
||||
proto.RegisterType((*SubscribeMetadataRequest)(nil), "filer_pb.SubscribeMetadataRequest")
|
||||
proto.RegisterType((*SubscribeMetadataResponse)(nil), "filer_pb.SubscribeMetadataResponse")
|
||||
proto.RegisterType((*LogEntry)(nil), "filer_pb.LogEntry")
|
||||
}
|
||||
|
||||
|
@ -1229,6 +1273,7 @@ type SeaweedFilerClient interface {
|
|||
ListEntries(ctx context.Context, in *ListEntriesRequest, opts ...grpc.CallOption) (SeaweedFiler_ListEntriesClient, error)
|
||||
CreateEntry(ctx context.Context, in *CreateEntryRequest, opts ...grpc.CallOption) (*CreateEntryResponse, error)
|
||||
UpdateEntry(ctx context.Context, in *UpdateEntryRequest, opts ...grpc.CallOption) (*UpdateEntryResponse, error)
|
||||
AppendToEntry(ctx context.Context, in *AppendToEntryRequest, opts ...grpc.CallOption) (*AppendToEntryResponse, error)
|
||||
DeleteEntry(ctx context.Context, in *DeleteEntryRequest, opts ...grpc.CallOption) (*DeleteEntryResponse, error)
|
||||
AtomicRenameEntry(ctx context.Context, in *AtomicRenameEntryRequest, opts ...grpc.CallOption) (*AtomicRenameEntryResponse, error)
|
||||
AssignVolume(ctx context.Context, in *AssignVolumeRequest, opts ...grpc.CallOption) (*AssignVolumeResponse, error)
|
||||
|
@ -1236,7 +1281,7 @@ type SeaweedFilerClient interface {
|
|||
DeleteCollection(ctx context.Context, in *DeleteCollectionRequest, opts ...grpc.CallOption) (*DeleteCollectionResponse, error)
|
||||
Statistics(ctx context.Context, in *StatisticsRequest, opts ...grpc.CallOption) (*StatisticsResponse, error)
|
||||
GetFilerConfiguration(ctx context.Context, in *GetFilerConfigurationRequest, opts ...grpc.CallOption) (*GetFilerConfigurationResponse, error)
|
||||
ListenForEvents(ctx context.Context, in *ListenForEventsRequest, opts ...grpc.CallOption) (SeaweedFiler_ListenForEventsClient, error)
|
||||
SubscribeMetadata(ctx context.Context, in *SubscribeMetadataRequest, opts ...grpc.CallOption) (SeaweedFiler_SubscribeMetadataClient, error)
|
||||
}
|
||||
|
||||
type seaweedFilerClient struct {
|
||||
|
@ -1306,6 +1351,15 @@ func (c *seaweedFilerClient) UpdateEntry(ctx context.Context, in *UpdateEntryReq
|
|||
return out, nil
|
||||
}
|
||||
|
||||
func (c *seaweedFilerClient) AppendToEntry(ctx context.Context, in *AppendToEntryRequest, opts ...grpc.CallOption) (*AppendToEntryResponse, error) {
|
||||
out := new(AppendToEntryResponse)
|
||||
err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/AppendToEntry", in, out, c.cc, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *seaweedFilerClient) DeleteEntry(ctx context.Context, in *DeleteEntryRequest, opts ...grpc.CallOption) (*DeleteEntryResponse, error) {
|
||||
out := new(DeleteEntryResponse)
|
||||
err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/DeleteEntry", in, out, c.cc, opts...)
|
||||
|
@ -1369,12 +1423,12 @@ func (c *seaweedFilerClient) GetFilerConfiguration(ctx context.Context, in *GetF
|
|||
return out, nil
|
||||
}
|
||||
|
||||
func (c *seaweedFilerClient) ListenForEvents(ctx context.Context, in *ListenForEventsRequest, opts ...grpc.CallOption) (SeaweedFiler_ListenForEventsClient, error) {
|
||||
stream, err := grpc.NewClientStream(ctx, &_SeaweedFiler_serviceDesc.Streams[1], c.cc, "/filer_pb.SeaweedFiler/ListenForEvents", opts...)
|
||||
func (c *seaweedFilerClient) SubscribeMetadata(ctx context.Context, in *SubscribeMetadataRequest, opts ...grpc.CallOption) (SeaweedFiler_SubscribeMetadataClient, error) {
|
||||
stream, err := grpc.NewClientStream(ctx, &_SeaweedFiler_serviceDesc.Streams[1], c.cc, "/filer_pb.SeaweedFiler/SubscribeMetadata", opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
x := &seaweedFilerListenForEventsClient{stream}
|
||||
x := &seaweedFilerSubscribeMetadataClient{stream}
|
||||
if err := x.ClientStream.SendMsg(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -1384,17 +1438,17 @@ func (c *seaweedFilerClient) ListenForEvents(ctx context.Context, in *ListenForE
|
|||
return x, nil
|
||||
}
|
||||
|
||||
type SeaweedFiler_ListenForEventsClient interface {
|
||||
Recv() (*FullEventNotification, error)
|
||||
type SeaweedFiler_SubscribeMetadataClient interface {
|
||||
Recv() (*SubscribeMetadataResponse, error)
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
type seaweedFilerListenForEventsClient struct {
|
||||
type seaweedFilerSubscribeMetadataClient struct {
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
func (x *seaweedFilerListenForEventsClient) Recv() (*FullEventNotification, error) {
|
||||
m := new(FullEventNotification)
|
||||
func (x *seaweedFilerSubscribeMetadataClient) Recv() (*SubscribeMetadataResponse, error) {
|
||||
m := new(SubscribeMetadataResponse)
|
||||
if err := x.ClientStream.RecvMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -1408,6 +1462,7 @@ type SeaweedFilerServer interface {
|
|||
ListEntries(*ListEntriesRequest, SeaweedFiler_ListEntriesServer) error
|
||||
CreateEntry(context.Context, *CreateEntryRequest) (*CreateEntryResponse, error)
|
||||
UpdateEntry(context.Context, *UpdateEntryRequest) (*UpdateEntryResponse, error)
|
||||
AppendToEntry(context.Context, *AppendToEntryRequest) (*AppendToEntryResponse, error)
|
||||
DeleteEntry(context.Context, *DeleteEntryRequest) (*DeleteEntryResponse, error)
|
||||
AtomicRenameEntry(context.Context, *AtomicRenameEntryRequest) (*AtomicRenameEntryResponse, error)
|
||||
AssignVolume(context.Context, *AssignVolumeRequest) (*AssignVolumeResponse, error)
|
||||
|
@ -1415,7 +1470,7 @@ type SeaweedFilerServer interface {
|
|||
DeleteCollection(context.Context, *DeleteCollectionRequest) (*DeleteCollectionResponse, error)
|
||||
Statistics(context.Context, *StatisticsRequest) (*StatisticsResponse, error)
|
||||
GetFilerConfiguration(context.Context, *GetFilerConfigurationRequest) (*GetFilerConfigurationResponse, error)
|
||||
ListenForEvents(*ListenForEventsRequest, SeaweedFiler_ListenForEventsServer) error
|
||||
SubscribeMetadata(*SubscribeMetadataRequest, SeaweedFiler_SubscribeMetadataServer) error
|
||||
}
|
||||
|
||||
func RegisterSeaweedFilerServer(s *grpc.Server, srv SeaweedFilerServer) {
|
||||
|
@ -1497,6 +1552,24 @@ func _SeaweedFiler_UpdateEntry_Handler(srv interface{}, ctx context.Context, dec
|
|||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _SeaweedFiler_AppendToEntry_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(AppendToEntryRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(SeaweedFilerServer).AppendToEntry(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/filer_pb.SeaweedFiler/AppendToEntry",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(SeaweedFilerServer).AppendToEntry(ctx, req.(*AppendToEntryRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _SeaweedFiler_DeleteEntry_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(DeleteEntryRequest)
|
||||
if err := dec(in); err != nil {
|
||||
|
@ -1623,24 +1696,24 @@ func _SeaweedFiler_GetFilerConfiguration_Handler(srv interface{}, ctx context.Co
|
|||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _SeaweedFiler_ListenForEvents_Handler(srv interface{}, stream grpc.ServerStream) error {
|
||||
m := new(ListenForEventsRequest)
|
||||
func _SeaweedFiler_SubscribeMetadata_Handler(srv interface{}, stream grpc.ServerStream) error {
|
||||
m := new(SubscribeMetadataRequest)
|
||||
if err := stream.RecvMsg(m); err != nil {
|
||||
return err
|
||||
}
|
||||
return srv.(SeaweedFilerServer).ListenForEvents(m, &seaweedFilerListenForEventsServer{stream})
|
||||
return srv.(SeaweedFilerServer).SubscribeMetadata(m, &seaweedFilerSubscribeMetadataServer{stream})
|
||||
}
|
||||
|
||||
type SeaweedFiler_ListenForEventsServer interface {
|
||||
Send(*FullEventNotification) error
|
||||
type SeaweedFiler_SubscribeMetadataServer interface {
|
||||
Send(*SubscribeMetadataResponse) error
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
type seaweedFilerListenForEventsServer struct {
|
||||
type seaweedFilerSubscribeMetadataServer struct {
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
func (x *seaweedFilerListenForEventsServer) Send(m *FullEventNotification) error {
|
||||
func (x *seaweedFilerSubscribeMetadataServer) Send(m *SubscribeMetadataResponse) error {
|
||||
return x.ServerStream.SendMsg(m)
|
||||
}
|
||||
|
||||
|
@ -1660,6 +1733,10 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{
|
|||
MethodName: "UpdateEntry",
|
||||
Handler: _SeaweedFiler_UpdateEntry_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "AppendToEntry",
|
||||
Handler: _SeaweedFiler_AppendToEntry_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "DeleteEntry",
|
||||
Handler: _SeaweedFiler_DeleteEntry_Handler,
|
||||
|
@ -1696,8 +1773,8 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{
|
|||
ServerStreams: true,
|
||||
},
|
||||
{
|
||||
StreamName: "ListenForEvents",
|
||||
Handler: _SeaweedFiler_ListenForEvents_Handler,
|
||||
StreamName: "SubscribeMetadata",
|
||||
Handler: _SeaweedFiler_SubscribeMetadata_Handler,
|
||||
ServerStreams: true,
|
||||
},
|
||||
},
|
||||
|
@ -1707,125 +1784,128 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{
|
|||
func init() { proto.RegisterFile("filer.proto", fileDescriptor0) }
|
||||
|
||||
var fileDescriptor0 = []byte{
|
||||
// 1909 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x58, 0x5f, 0x6f, 0xdc, 0xc6,
|
||||
0x11, 0x37, 0xef, 0x74, 0x7f, 0x38, 0x77, 0x67, 0x4b, 0x7b, 0xb2, 0x73, 0x3e, 0x4b, 0xb6, 0x42,
|
||||
0xd7, 0xa9, 0x0b, 0x1b, 0xaa, 0xa1, 0xe6, 0x21, 0x69, 0xda, 0x07, 0x5b, 0x96, 0x52, 0x37, 0xb6,
|
||||
0xe2, 0x52, 0x76, 0x91, 0xa2, 0x40, 0x09, 0x8a, 0x5c, 0xdd, 0x6d, 0xc5, 0x23, 0x99, 0xdd, 0xa5,
|
||||
0xfe, 0xe4, 0xad, 0x5f, 0xa3, 0x40, 0x1f, 0xfa, 0x1d, 0xfa, 0x58, 0xf4, 0xa5, 0x28, 0xd0, 0xcf,
|
||||
0xd1, 0xc7, 0x3e, 0xf4, 0x33, 0x14, 0x3b, 0x4b, 0xf2, 0x96, 0xc7, 0x93, 0x94, 0x20, 0xc8, 0x1b,
|
||||
0x77, 0x66, 0x76, 0x76, 0x76, 0xfe, 0xfc, 0x66, 0x96, 0xd0, 0x3b, 0x66, 0x11, 0xe5, 0xdb, 0x29,
|
||||
0x4f, 0x64, 0x42, 0xba, 0xb8, 0xf0, 0xd2, 0x23, 0xe7, 0x4b, 0xb8, 0xf7, 0x3a, 0x49, 0x4e, 0xb2,
|
||||
0xf4, 0x25, 0xe3, 0x34, 0x90, 0x09, 0xbf, 0xd8, 0x8b, 0x25, 0xbf, 0x70, 0xe9, 0xd7, 0x19, 0x15,
|
||||
0x92, 0x6c, 0x80, 0x1d, 0x16, 0x8c, 0x91, 0xb5, 0x65, 0x3d, 0xb6, 0xdd, 0x39, 0x81, 0x10, 0x58,
|
||||
0x89, 0xfd, 0x19, 0x1d, 0x35, 0x90, 0x81, 0xdf, 0xce, 0x1e, 0x6c, 0x2c, 0x57, 0x28, 0xd2, 0x24,
|
||||
0x16, 0x94, 0x3c, 0x82, 0x16, 0x55, 0x04, 0xd4, 0xd6, 0xdb, 0xb9, 0xb5, 0x5d, 0x98, 0xb2, 0xad,
|
||||
0xe5, 0x34, 0xd7, 0xf9, 0x87, 0x05, 0xe4, 0x35, 0x13, 0x52, 0x11, 0x19, 0x15, 0xdf, 0xce, 0x9e,
|
||||
0x3b, 0xd0, 0x4e, 0x39, 0x3d, 0x66, 0xe7, 0xb9, 0x45, 0xf9, 0x8a, 0x3c, 0x85, 0x35, 0x21, 0x7d,
|
||||
0x2e, 0xf7, 0x79, 0x32, 0xdb, 0x67, 0x11, 0x3d, 0x50, 0x46, 0x37, 0x51, 0xa4, 0xce, 0x20, 0xdb,
|
||||
0x40, 0x58, 0x1c, 0x44, 0x99, 0x60, 0xa7, 0xf4, 0xb0, 0xe0, 0x8e, 0x56, 0xb6, 0xac, 0xc7, 0x5d,
|
||||
0x77, 0x09, 0x87, 0xac, 0x43, 0x2b, 0x62, 0x33, 0x26, 0x47, 0xad, 0x2d, 0xeb, 0xf1, 0xc0, 0xd5,
|
||||
0x0b, 0xe7, 0x17, 0x30, 0xac, 0xd8, 0xff, 0xdd, 0xae, 0xff, 0x97, 0x06, 0xb4, 0x90, 0x50, 0xfa,
|
||||
0xd8, 0x9a, 0xfb, 0x98, 0x7c, 0x08, 0x7d, 0x26, 0xbc, 0xb9, 0x23, 0x1a, 0x68, 0x5b, 0x8f, 0x89,
|
||||
0xd2, 0xe7, 0xe4, 0x09, 0xb4, 0x83, 0x69, 0x16, 0x9f, 0x88, 0x51, 0x73, 0xab, 0xf9, 0xb8, 0xb7,
|
||||
0x33, 0x9c, 0x1f, 0xa4, 0x2e, 0xba, 0xab, 0x78, 0x6e, 0x2e, 0x42, 0x3e, 0x01, 0xf0, 0xa5, 0xe4,
|
||||
0xec, 0x28, 0x93, 0x54, 0xe0, 0x4d, 0x7b, 0x3b, 0x23, 0x63, 0x43, 0x26, 0xe8, 0xf3, 0x92, 0xef,
|
||||
0x1a, 0xb2, 0xe4, 0x53, 0xe8, 0xd2, 0x73, 0x49, 0xe3, 0x90, 0x86, 0xa3, 0x16, 0x1e, 0xb4, 0xb9,
|
||||
0x70, 0xa3, 0xed, 0xbd, 0x9c, 0xaf, 0xef, 0x57, 0x8a, 0x8f, 0x3f, 0x83, 0x41, 0x85, 0x45, 0x56,
|
||||
0xa1, 0x79, 0x42, 0x8b, 0xa8, 0xaa, 0x4f, 0xe5, 0xd9, 0x53, 0x3f, 0xca, 0x74, 0x82, 0xf5, 0x5d,
|
||||
0xbd, 0xf8, 0x79, 0xe3, 0x13, 0xcb, 0x79, 0x09, 0xf6, 0x7e, 0x16, 0x45, 0xe5, 0xc6, 0x90, 0xf1,
|
||||
0x62, 0x63, 0xc8, 0xf8, 0xdc, 0xcb, 0x8d, 0x2b, 0xbd, 0xfc, 0x77, 0x0b, 0xd6, 0xf6, 0x4e, 0x69,
|
||||
0x2c, 0x0f, 0x12, 0xc9, 0x8e, 0x59, 0xe0, 0x4b, 0x96, 0xc4, 0xe4, 0x29, 0xd8, 0x49, 0x14, 0x7a,
|
||||
0x57, 0x86, 0xa9, 0x9b, 0x44, 0xb9, 0xd5, 0x4f, 0xc1, 0x8e, 0xe9, 0x99, 0x77, 0xe5, 0x71, 0xdd,
|
||||
0x98, 0x9e, 0x69, 0xe9, 0x87, 0x30, 0x08, 0x69, 0x44, 0x25, 0xf5, 0xca, 0xe8, 0xa8, 0xd0, 0xf5,
|
||||
0x35, 0x71, 0x57, 0x87, 0xe3, 0x23, 0xb8, 0xa5, 0x54, 0xa6, 0x3e, 0xa7, 0xb1, 0xf4, 0x52, 0x5f,
|
||||
0x4e, 0x31, 0x26, 0xb6, 0x3b, 0x88, 0xe9, 0xd9, 0x5b, 0xa4, 0xbe, 0xf5, 0xe5, 0xd4, 0xf9, 0x5b,
|
||||
0x03, 0xec, 0x32, 0x98, 0xe4, 0x03, 0xe8, 0xa8, 0x63, 0x3d, 0x16, 0xe6, 0x9e, 0x68, 0xab, 0xe5,
|
||||
0xab, 0x50, 0x55, 0x45, 0x72, 0x7c, 0x2c, 0xa8, 0x44, 0xf3, 0x9a, 0x6e, 0xbe, 0x52, 0x99, 0x25,
|
||||
0xd8, 0x37, 0xba, 0x10, 0x56, 0x5c, 0xfc, 0x56, 0x1e, 0x9f, 0x49, 0x36, 0xa3, 0x78, 0x60, 0xd3,
|
||||
0xd5, 0x0b, 0x32, 0x84, 0x16, 0xf5, 0xa4, 0x3f, 0xc1, 0x0c, 0xb7, 0xdd, 0x15, 0xfa, 0xce, 0x9f,
|
||||
0x90, 0x1f, 0xc1, 0x4d, 0x91, 0x64, 0x3c, 0xa0, 0x5e, 0x71, 0x6c, 0x1b, 0xb9, 0x7d, 0x4d, 0xdd,
|
||||
0xd7, 0x87, 0x3b, 0xd0, 0x3c, 0x66, 0xe1, 0xa8, 0x83, 0x8e, 0x59, 0xad, 0x26, 0xe1, 0xab, 0xd0,
|
||||
0x55, 0x4c, 0xf2, 0x53, 0x80, 0x52, 0x53, 0x38, 0xea, 0x5e, 0x22, 0x6a, 0x17, 0x7a, 0x43, 0xb2,
|
||||
0x09, 0x10, 0xb0, 0x74, 0x4a, 0xb9, 0xa7, 0x12, 0xc6, 0xc6, 0xe4, 0xb0, 0x35, 0xe5, 0x0b, 0x7a,
|
||||
0xa1, 0xd8, 0x4c, 0x78, 0x93, 0x6f, 0x58, 0x9a, 0xd2, 0x70, 0x04, 0xe8, 0x61, 0x9b, 0x89, 0xcf,
|
||||
0x35, 0xc1, 0xf9, 0x0a, 0xda, 0xb9, 0x71, 0xf7, 0xc0, 0x3e, 0x4d, 0xa2, 0x6c, 0x56, 0x3a, 0x6d,
|
||||
0xe0, 0x76, 0x35, 0xe1, 0x55, 0x48, 0xee, 0x02, 0xa2, 0x24, 0x1e, 0xd1, 0x40, 0x17, 0xa1, 0x7f,
|
||||
0xd5, 0x01, 0x77, 0xa0, 0x1d, 0x24, 0xc9, 0x09, 0xd3, 0xbe, 0xeb, 0xb8, 0xf9, 0xca, 0xf9, 0x5f,
|
||||
0x03, 0x6e, 0x56, 0x8b, 0x45, 0x1d, 0x81, 0x5a, 0xd0, 0xd3, 0x16, 0xaa, 0x41, 0xb5, 0x87, 0x15,
|
||||
0x6f, 0x37, 0x4c, 0x6f, 0x17, 0x5b, 0x66, 0x49, 0xa8, 0x0f, 0x18, 0xe8, 0x2d, 0x6f, 0x92, 0x90,
|
||||
0xaa, 0x5c, 0xcf, 0x58, 0x88, 0xe1, 0x19, 0xb8, 0xea, 0x53, 0x51, 0x26, 0x2c, 0xcc, 0xc1, 0x47,
|
||||
0x7d, 0xa2, 0x79, 0x1c, 0xf5, 0xb6, 0x75, 0xc0, 0xf5, 0x4a, 0x05, 0x7c, 0xa6, 0xa8, 0x1d, 0x1d,
|
||||
0x45, 0xf5, 0x4d, 0xb6, 0xa0, 0xc7, 0x69, 0x1a, 0xe5, 0xb9, 0x8f, 0xce, 0xb7, 0x5d, 0x93, 0x44,
|
||||
0xee, 0x03, 0x04, 0x49, 0x14, 0xd1, 0x00, 0x05, 0x6c, 0x14, 0x30, 0x28, 0x2a, 0xef, 0xa4, 0x8c,
|
||||
0x3c, 0x41, 0x03, 0x74, 0x75, 0xcb, 0x6d, 0x4b, 0x19, 0x1d, 0xd2, 0x40, 0xdd, 0x23, 0x13, 0x94,
|
||||
0x7b, 0x08, 0x5f, 0x3d, 0xdc, 0xd7, 0x55, 0x04, 0x04, 0xd9, 0x4d, 0x80, 0x09, 0x4f, 0xb2, 0x54,
|
||||
0x73, 0xfb, 0x5b, 0x4d, 0x85, 0xe4, 0x48, 0x41, 0xf6, 0x23, 0xb8, 0x29, 0x2e, 0x66, 0x11, 0x8b,
|
||||
0x4f, 0x3c, 0xe9, 0xf3, 0x09, 0x95, 0xa3, 0x81, 0xae, 0x80, 0x9c, 0xfa, 0x0e, 0x89, 0x4e, 0x0a,
|
||||
0x64, 0x97, 0x53, 0x5f, 0xd2, 0xef, 0xd0, 0xb4, 0xbe, 0x1d, 0x36, 0x90, 0xdb, 0xd0, 0x4e, 0x3c,
|
||||
0x7a, 0x1e, 0x44, 0x79, 0x89, 0xb6, 0x92, 0xbd, 0xf3, 0x20, 0x72, 0x9e, 0xc0, 0xb0, 0x72, 0x62,
|
||||
0x0e, 0xeb, 0xeb, 0xd0, 0xa2, 0x9c, 0x27, 0x05, 0x08, 0xe9, 0x85, 0xf3, 0x3b, 0x20, 0xef, 0xd3,
|
||||
0xf0, 0x87, 0x30, 0xcf, 0xb9, 0x0d, 0xc3, 0x8a, 0x6a, 0x6d, 0x87, 0xf3, 0x2f, 0x0b, 0xc8, 0x4b,
|
||||
0xc4, 0x92, 0xef, 0xd7, 0xc6, 0x55, 0x75, 0xab, 0x16, 0xa3, 0xb1, 0x2a, 0xf4, 0xa5, 0x9f, 0x37,
|
||||
0xc0, 0x3e, 0x13, 0x5a, 0xff, 0x4b, 0x5f, 0xfa, 0x79, 0x23, 0xe2, 0x34, 0xc8, 0xb8, 0xea, 0x89,
|
||||
0x98, 0x84, 0xd8, 0x88, 0xdc, 0x82, 0x44, 0x3e, 0x86, 0x3b, 0x6c, 0x12, 0x27, 0x9c, 0xce, 0xc5,
|
||||
0x3c, 0xed, 0xaa, 0x36, 0x0a, 0xaf, 0x6b, 0x6e, 0xb9, 0x61, 0x0f, 0x3d, 0xf7, 0x04, 0x86, 0x95,
|
||||
0x6b, 0x5c, 0xe9, 0xe6, 0x3f, 0x5b, 0x30, 0x7a, 0x2e, 0x93, 0x19, 0x0b, 0x5c, 0xaa, 0x8c, 0xaf,
|
||||
0x5c, 0xfd, 0x21, 0x0c, 0x14, 0x9a, 0x2f, 0x5e, 0xbf, 0x9f, 0x44, 0xe1, 0xbc, 0x5b, 0xde, 0x05,
|
||||
0x05, 0xe8, 0x9e, 0xe1, 0x85, 0x4e, 0x12, 0x85, 0x98, 0x89, 0x0f, 0x41, 0xa1, 0xae, 0xb1, 0x5f,
|
||||
0xcf, 0x0d, 0xfd, 0x98, 0x9e, 0x55, 0xf6, 0x2b, 0x21, 0xdc, 0xaf, 0xa1, 0xba, 0x13, 0xd3, 0x33,
|
||||
0xb5, 0xdf, 0xb9, 0x07, 0x77, 0x97, 0xd8, 0x96, 0x87, 0xeb, 0xdf, 0x16, 0x0c, 0x9f, 0x0b, 0xc1,
|
||||
0x26, 0xf1, 0x6f, 0x11, 0x76, 0x0a, 0xa3, 0xd7, 0xa1, 0x15, 0x24, 0x59, 0x2c, 0xd1, 0xd8, 0x96,
|
||||
0xab, 0x17, 0x0b, 0x95, 0xd8, 0xa8, 0x55, 0xe2, 0x42, 0x2d, 0x37, 0xeb, 0xb5, 0x6c, 0xd4, 0xea,
|
||||
0x4a, 0xa5, 0x56, 0x1f, 0x40, 0x4f, 0x05, 0xd9, 0x0b, 0x68, 0x2c, 0x29, 0xcf, 0x71, 0x1e, 0x14,
|
||||
0x69, 0x17, 0x29, 0x4a, 0xc0, 0xec, 0x47, 0x1a, 0xea, 0x21, 0x9d, 0x37, 0xa3, 0xff, 0x58, 0xb0,
|
||||
0x5e, 0xbd, 0x4a, 0x1e, 0xb3, 0x4b, 0xfb, 0x92, 0x82, 0x32, 0x1e, 0xe5, 0xf7, 0x50, 0x9f, 0x0a,
|
||||
0x14, 0xd2, 0xec, 0x28, 0x62, 0x81, 0xa7, 0x18, 0xda, 0x7e, 0x5b, 0x53, 0xde, 0xf3, 0x68, 0xee,
|
||||
0x95, 0x15, 0xd3, 0x2b, 0x04, 0x56, 0xfc, 0x4c, 0x4e, 0x8b, 0xde, 0xa4, 0xbe, 0x17, 0x3c, 0xd5,
|
||||
0xbe, 0xce, 0x53, 0x9d, 0xba, 0xa7, 0xca, 0x4c, 0xeb, 0x9a, 0x99, 0xf6, 0x31, 0x0c, 0xf5, 0x70,
|
||||
0x5b, 0x0d, 0xd7, 0x26, 0x40, 0xd9, 0x47, 0xc4, 0xc8, 0xd2, 0x60, 0x56, 0x34, 0x12, 0xe1, 0xfc,
|
||||
0x12, 0xec, 0xd7, 0x89, 0xd6, 0x2b, 0xc8, 0x33, 0xb0, 0xa3, 0x62, 0x81, 0xa2, 0xbd, 0x1d, 0x32,
|
||||
0xaf, 0xf1, 0x42, 0xce, 0x9d, 0x0b, 0x39, 0x9f, 0x41, 0xb7, 0x20, 0x17, 0x3e, 0xb3, 0x2e, 0xf3,
|
||||
0x59, 0x63, 0xc1, 0x67, 0xce, 0x3f, 0x2d, 0x58, 0xaf, 0x9a, 0x9c, 0x87, 0xe5, 0x3d, 0x0c, 0xca,
|
||||
0x23, 0xbc, 0x99, 0x9f, 0xe6, 0xb6, 0x3c, 0x33, 0x6d, 0xa9, 0x6f, 0x2b, 0x0d, 0x14, 0x6f, 0xfc,
|
||||
0x54, 0xe7, 0x72, 0x3f, 0x32, 0x48, 0xe3, 0x77, 0xb0, 0x56, 0x13, 0x59, 0x32, 0xd9, 0xfd, 0xc4,
|
||||
0x9c, 0xec, 0x2a, 0xd3, 0x69, 0xb9, 0xdb, 0x1c, 0xf7, 0x3e, 0x85, 0x0f, 0x34, 0x1c, 0xec, 0x96,
|
||||
0x31, 0x2c, 0x7c, 0x5f, 0x0d, 0xb5, 0xb5, 0x18, 0x6a, 0x67, 0x0c, 0xa3, 0xfa, 0xd6, 0xbc, 0xfc,
|
||||
0x26, 0xb0, 0x76, 0x28, 0x7d, 0xc9, 0x84, 0x64, 0x41, 0xf9, 0xc4, 0x58, 0xc8, 0x0d, 0xeb, 0xba,
|
||||
0x8e, 0x58, 0xaf, 0xc3, 0x55, 0x68, 0x4a, 0x59, 0xe4, 0xaf, 0xfa, 0x54, 0x51, 0x20, 0xe6, 0x49,
|
||||
0x79, 0x0c, 0x7e, 0x80, 0xa3, 0x54, 0x3e, 0xc8, 0x44, 0xfa, 0x91, 0x9e, 0x38, 0x56, 0x70, 0xe2,
|
||||
0xb0, 0x91, 0x82, 0x23, 0x87, 0x6e, 0xca, 0xa1, 0xe6, 0xb6, 0xf4, 0x3c, 0xa2, 0x08, 0xc8, 0xdc,
|
||||
0x04, 0xc0, 0x52, 0xd5, 0x55, 0xd6, 0xd6, 0x7b, 0x15, 0x65, 0x57, 0x11, 0x9c, 0xfb, 0xb0, 0xf1,
|
||||
0x39, 0x95, 0x6a, 0x76, 0xe2, 0xbb, 0x49, 0x7c, 0xcc, 0x26, 0x19, 0xf7, 0x8d, 0x50, 0x38, 0xff,
|
||||
0xb5, 0x60, 0xf3, 0x12, 0x81, 0xfc, 0xc2, 0x23, 0xe8, 0xcc, 0x7c, 0x21, 0x29, 0x2f, 0xaa, 0xa4,
|
||||
0x58, 0x2e, 0xba, 0xa2, 0x71, 0x9d, 0x2b, 0x9a, 0x35, 0x57, 0xdc, 0x86, 0xf6, 0xcc, 0x3f, 0xf7,
|
||||
0x66, 0x47, 0xf9, 0x70, 0xd4, 0x9a, 0xf9, 0xe7, 0x6f, 0x8e, 0x10, 0xd9, 0x18, 0xf7, 0x8e, 0xb2,
|
||||
0xe0, 0x84, 0x4a, 0x51, 0x22, 0x1b, 0xe3, 0x2f, 0x34, 0x45, 0x5d, 0x5a, 0x09, 0x7c, 0x9d, 0xd1,
|
||||
0x8c, 0x8a, 0x1c, 0x2b, 0x54, 0x73, 0xfc, 0x0d, 0x12, 0x70, 0x98, 0xc2, 0xc9, 0x12, 0x51, 0xa2,
|
||||
0xeb, 0xe6, 0x2b, 0x27, 0x83, 0x3b, 0xea, 0x7d, 0x47, 0xe3, 0xfd, 0x84, 0xe3, 0x1b, 0xa2, 0x4c,
|
||||
0xa0, 0x07, 0xd0, 0x0b, 0x22, 0xa6, 0xa0, 0xd2, 0x78, 0xb8, 0x81, 0x26, 0x61, 0x4b, 0x41, 0x2c,
|
||||
0x95, 0x53, 0xaf, 0xf2, 0x56, 0x05, 0x45, 0x7a, 0xab, 0xdf, 0xab, 0x77, 0xa1, 0x2b, 0x58, 0x1c,
|
||||
0x50, 0x2f, 0xd6, 0x0f, 0x84, 0xa6, 0xdb, 0xc1, 0xf5, 0x81, 0x70, 0xfe, 0x64, 0xc1, 0x6d, 0x7c,
|
||||
0xf9, 0xd4, 0x9e, 0x2d, 0x57, 0xf7, 0xf8, 0x5f, 0x03, 0xa1, 0xa7, 0x68, 0x93, 0xb1, 0x27, 0xaf,
|
||||
0xbe, 0x7b, 0xc6, 0x8c, 0xb1, 0xa8, 0xd6, 0x5d, 0xa3, 0x8b, 0x24, 0xc7, 0x57, 0x80, 0x34, 0xd1,
|
||||
0xa5, 0x3d, 0x84, 0x96, 0x14, 0x1e, 0x42, 0x99, 0xb2, 0x73, 0x45, 0x8a, 0x03, 0x41, 0x9e, 0x02,
|
||||
0x49, 0x7d, 0x2e, 0x99, 0x92, 0x56, 0xf3, 0xb3, 0x37, 0xf5, 0xc5, 0x14, 0x0f, 0x6b, 0xb9, 0xab,
|
||||
0x25, 0xe7, 0x0b, 0x7a, 0xf1, 0x2b, 0x5f, 0x4c, 0x15, 0x80, 0xe3, 0x80, 0xd1, 0xc4, 0x39, 0x1e,
|
||||
0xbf, 0x77, 0xfe, 0xda, 0x85, 0xfe, 0x21, 0xf5, 0xcf, 0x28, 0x0d, 0x31, 0x9d, 0xc8, 0xa4, 0x80,
|
||||
0xb1, 0xea, 0x6f, 0x05, 0xf2, 0x68, 0x11, 0xaf, 0x96, 0xfe, 0xc7, 0x18, 0x7f, 0x74, 0x9d, 0x58,
|
||||
0x8e, 0x08, 0x37, 0xc8, 0x01, 0xf4, 0x8c, 0x77, 0x3b, 0xd9, 0x30, 0x36, 0xd6, 0x7e, 0x47, 0x8c,
|
||||
0x37, 0x2f, 0xe1, 0x16, 0xda, 0x9e, 0x59, 0xe4, 0x35, 0xf4, 0x8c, 0x81, 0xd1, 0xd4, 0x57, 0x9f,
|
||||
0x5c, 0x4d, 0x7d, 0x4b, 0xa6, 0x4c, 0xe7, 0x86, 0xd2, 0x66, 0x8c, 0x7d, 0xa6, 0xb6, 0xfa, 0xa0,
|
||||
0x69, 0x6a, 0x5b, 0x36, 0x2b, 0xa2, 0x36, 0x63, 0xca, 0x32, 0xb5, 0xd5, 0x67, 0x48, 0x53, 0xdb,
|
||||
0x92, 0xd1, 0xcc, 0xb9, 0x41, 0xfe, 0x00, 0x6b, 0xb5, 0x49, 0x87, 0x38, 0xf3, 0x5d, 0x97, 0x8d,
|
||||
0x68, 0xe3, 0x87, 0x57, 0xca, 0x94, 0xfa, 0xbf, 0x84, 0xbe, 0x39, 0x60, 0x10, 0xc3, 0xa0, 0x25,
|
||||
0x33, 0xd4, 0xf8, 0xfe, 0x65, 0x6c, 0x53, 0xa1, 0xd9, 0xe3, 0x4c, 0x85, 0x4b, 0xba, 0xbc, 0xa9,
|
||||
0x70, 0x59, 0x6b, 0x74, 0x6e, 0x90, 0xdf, 0xc3, 0xea, 0x62, 0xaf, 0x21, 0x1f, 0x2e, 0xba, 0xad,
|
||||
0xd6, 0xc2, 0xc6, 0xce, 0x55, 0x22, 0xa5, 0xf2, 0x57, 0x00, 0xf3, 0x16, 0x42, 0x8c, 0x9a, 0xad,
|
||||
0xb5, 0xb0, 0xf1, 0xc6, 0x72, 0x66, 0xa9, 0xea, 0x8f, 0x70, 0x7b, 0x29, 0x4e, 0x13, 0xa3, 0x4c,
|
||||
0xae, 0x42, 0xfa, 0xf1, 0x8f, 0xaf, 0x95, 0x2b, 0xcf, 0xfa, 0x0a, 0x6e, 0x2d, 0xe0, 0x24, 0xd9,
|
||||
0xaa, 0x56, 0x4d, 0x1d, 0x42, 0xc7, 0x0f, 0xcc, 0x9f, 0x4f, 0x4b, 0xc0, 0x4e, 0x55, 0xd6, 0x8b,
|
||||
0xfb, 0xb0, 0x2a, 0x34, 0x44, 0x1c, 0x8b, 0x6d, 0x0d, 0xaf, 0x2f, 0x00, 0x6d, 0x79, 0xcb, 0x13,
|
||||
0x99, 0x1c, 0xb5, 0xf1, 0x5f, 0xe7, 0xcf, 0xfe, 0x1f, 0x00, 0x00, 0xff, 0xff, 0x4d, 0x32, 0x75,
|
||||
0x14, 0xfa, 0x14, 0x00, 0x00,
|
||||
// 1956 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x58, 0x5f, 0x6f, 0xdb, 0xc8,
|
||||
0x11, 0x37, 0x25, 0x4b, 0x16, 0x47, 0x52, 0xce, 0x5e, 0xdb, 0x89, 0xac, 0xd8, 0x8e, 0x8f, 0x69,
|
||||
0xae, 0x29, 0x12, 0xb8, 0x81, 0x7b, 0x05, 0xee, 0x7a, 0xed, 0x43, 0xe2, 0x38, 0xd7, 0xf4, 0x12,
|
||||
0x5f, 0x40, 0x27, 0x45, 0x8b, 0x02, 0x65, 0x29, 0x72, 0x2d, 0x6d, 0x4d, 0x91, 0xec, 0xee, 0xd2,
|
||||
0x7f, 0xee, 0xe9, 0x5e, 0xfa, 0x25, 0x0a, 0xf4, 0x5b, 0xf4, 0xb1, 0xe8, 0x4b, 0x51, 0xa0, 0x40,
|
||||
0xbf, 0x45, 0xbf, 0x47, 0x81, 0x62, 0x67, 0x49, 0x6a, 0xa9, 0x3f, 0xf6, 0x05, 0x87, 0xbc, 0xed,
|
||||
0xce, 0xcc, 0xce, 0xce, 0xce, 0x9f, 0xdf, 0x0c, 0x09, 0xed, 0x53, 0x16, 0x51, 0xbe, 0x9f, 0xf2,
|
||||
0x44, 0x26, 0xa4, 0x85, 0x1b, 0x2f, 0x1d, 0x38, 0x5f, 0xc3, 0xdd, 0x57, 0x49, 0x72, 0x96, 0xa5,
|
||||
0xcf, 0x19, 0xa7, 0x81, 0x4c, 0xf8, 0xd5, 0x51, 0x2c, 0xf9, 0x95, 0x4b, 0xff, 0x94, 0x51, 0x21,
|
||||
0xc9, 0x36, 0xd8, 0x61, 0xc1, 0xe8, 0x59, 0x7b, 0xd6, 0x43, 0xdb, 0x9d, 0x10, 0x08, 0x81, 0xe5,
|
||||
0xd8, 0x1f, 0xd3, 0x5e, 0x0d, 0x19, 0xb8, 0x76, 0x8e, 0x60, 0x7b, 0xbe, 0x42, 0x91, 0x26, 0xb1,
|
||||
0xa0, 0xe4, 0x01, 0x34, 0xa8, 0x22, 0xa0, 0xb6, 0xf6, 0xc1, 0x47, 0xfb, 0x85, 0x29, 0xfb, 0x5a,
|
||||
0x4e, 0x73, 0x9d, 0x7f, 0x58, 0x40, 0x5e, 0x31, 0x21, 0x15, 0x91, 0x51, 0xf1, 0xdd, 0xec, 0xb9,
|
||||
0x0d, 0xcd, 0x94, 0xd3, 0x53, 0x76, 0x99, 0x5b, 0x94, 0xef, 0xc8, 0x63, 0x58, 0x13, 0xd2, 0xe7,
|
||||
0xf2, 0x05, 0x4f, 0xc6, 0x2f, 0x58, 0x44, 0x8f, 0x95, 0xd1, 0x75, 0x14, 0x99, 0x65, 0x90, 0x7d,
|
||||
0x20, 0x2c, 0x0e, 0xa2, 0x4c, 0xb0, 0x73, 0x7a, 0x52, 0x70, 0x7b, 0xcb, 0x7b, 0xd6, 0xc3, 0x96,
|
||||
0x3b, 0x87, 0x43, 0x36, 0xa0, 0x11, 0xb1, 0x31, 0x93, 0xbd, 0xc6, 0x9e, 0xf5, 0xb0, 0xeb, 0xea,
|
||||
0x8d, 0xf3, 0x73, 0x58, 0xaf, 0xd8, 0xff, 0x7e, 0xcf, 0xff, 0x6b, 0x0d, 0x1a, 0x48, 0x28, 0x7d,
|
||||
0x6c, 0x4d, 0x7c, 0x4c, 0x3e, 0x86, 0x0e, 0x13, 0xde, 0xc4, 0x11, 0x35, 0xb4, 0xad, 0xcd, 0x44,
|
||||
0xe9, 0x73, 0xf2, 0x08, 0x9a, 0xc1, 0x28, 0x8b, 0xcf, 0x44, 0xaf, 0xbe, 0x57, 0x7f, 0xd8, 0x3e,
|
||||
0x58, 0x9f, 0x5c, 0xa4, 0x1e, 0x7a, 0xa8, 0x78, 0x6e, 0x2e, 0x42, 0x3e, 0x03, 0xf0, 0xa5, 0xe4,
|
||||
0x6c, 0x90, 0x49, 0x2a, 0xf0, 0xa5, 0xed, 0x83, 0x9e, 0x71, 0x20, 0x13, 0xf4, 0x69, 0xc9, 0x77,
|
||||
0x0d, 0x59, 0xf2, 0x39, 0xb4, 0xe8, 0xa5, 0xa4, 0x71, 0x48, 0xc3, 0x5e, 0x03, 0x2f, 0xda, 0x99,
|
||||
0x7a, 0xd1, 0xfe, 0x51, 0xce, 0xd7, 0xef, 0x2b, 0xc5, 0xfb, 0x5f, 0x40, 0xb7, 0xc2, 0x22, 0xab,
|
||||
0x50, 0x3f, 0xa3, 0x45, 0x54, 0xd5, 0x52, 0x79, 0xf6, 0xdc, 0x8f, 0x32, 0x9d, 0x60, 0x1d, 0x57,
|
||||
0x6f, 0x7e, 0x56, 0xfb, 0xcc, 0x72, 0x9e, 0x83, 0xfd, 0x22, 0x8b, 0xa2, 0xf2, 0x60, 0xc8, 0x78,
|
||||
0x71, 0x30, 0x64, 0x7c, 0xe2, 0xe5, 0xda, 0xb5, 0x5e, 0xfe, 0xbb, 0x05, 0x6b, 0x47, 0xe7, 0x34,
|
||||
0x96, 0xc7, 0x89, 0x64, 0xa7, 0x2c, 0xf0, 0x25, 0x4b, 0x62, 0xf2, 0x18, 0xec, 0x24, 0x0a, 0xbd,
|
||||
0x6b, 0xc3, 0xd4, 0x4a, 0xa2, 0xdc, 0xea, 0xc7, 0x60, 0xc7, 0xf4, 0xc2, 0xbb, 0xf6, 0xba, 0x56,
|
||||
0x4c, 0x2f, 0xb4, 0xf4, 0x7d, 0xe8, 0x86, 0x34, 0xa2, 0x92, 0x7a, 0x65, 0x74, 0x54, 0xe8, 0x3a,
|
||||
0x9a, 0x78, 0xa8, 0xc3, 0xf1, 0x09, 0x7c, 0xa4, 0x54, 0xa6, 0x3e, 0xa7, 0xb1, 0xf4, 0x52, 0x5f,
|
||||
0x8e, 0x30, 0x26, 0xb6, 0xdb, 0x8d, 0xe9, 0xc5, 0x1b, 0xa4, 0xbe, 0xf1, 0xe5, 0xc8, 0xf9, 0x5b,
|
||||
0x0d, 0xec, 0x32, 0x98, 0xe4, 0x0e, 0xac, 0xa8, 0x6b, 0x3d, 0x16, 0xe6, 0x9e, 0x68, 0xaa, 0xed,
|
||||
0xcb, 0x50, 0x55, 0x45, 0x72, 0x7a, 0x2a, 0xa8, 0x44, 0xf3, 0xea, 0x6e, 0xbe, 0x53, 0x99, 0x25,
|
||||
0xd8, 0x37, 0xba, 0x10, 0x96, 0x5d, 0x5c, 0x2b, 0x8f, 0x8f, 0x25, 0x1b, 0x53, 0xbc, 0xb0, 0xee,
|
||||
0xea, 0x0d, 0x59, 0x87, 0x06, 0xf5, 0xa4, 0x3f, 0xc4, 0x0c, 0xb7, 0xdd, 0x65, 0xfa, 0xd6, 0x1f,
|
||||
0x92, 0x1f, 0xc0, 0x2d, 0x91, 0x64, 0x3c, 0xa0, 0x5e, 0x71, 0x6d, 0x13, 0xb9, 0x1d, 0x4d, 0x7d,
|
||||
0xa1, 0x2f, 0x77, 0xa0, 0x7e, 0xca, 0xc2, 0xde, 0x0a, 0x3a, 0x66, 0xb5, 0x9a, 0x84, 0x2f, 0x43,
|
||||
0x57, 0x31, 0xc9, 0x8f, 0x01, 0x4a, 0x4d, 0x61, 0xaf, 0xb5, 0x40, 0xd4, 0x2e, 0xf4, 0x86, 0x64,
|
||||
0x07, 0x20, 0x60, 0xe9, 0x88, 0x72, 0x4f, 0x25, 0x8c, 0x8d, 0xc9, 0x61, 0x6b, 0xca, 0x57, 0xf4,
|
||||
0x4a, 0xb1, 0x99, 0xf0, 0x86, 0xdf, 0xb0, 0x34, 0xa5, 0x61, 0x0f, 0xd0, 0xc3, 0x36, 0x13, 0x5f,
|
||||
0x6a, 0x82, 0xf3, 0x1b, 0x68, 0xe6, 0xc6, 0xdd, 0x05, 0xfb, 0x3c, 0x89, 0xb2, 0x71, 0xe9, 0xb4,
|
||||
0xae, 0xdb, 0xd2, 0x84, 0x97, 0x21, 0xd9, 0x02, 0x44, 0x49, 0xbc, 0xa2, 0x86, 0x2e, 0x42, 0xff,
|
||||
0xaa, 0x0b, 0x6e, 0x43, 0x33, 0x48, 0x92, 0x33, 0xa6, 0x7d, 0xb7, 0xe2, 0xe6, 0x3b, 0xe7, 0xdb,
|
||||
0x3a, 0xdc, 0xaa, 0x16, 0x8b, 0xba, 0x02, 0xb5, 0xa0, 0xa7, 0x2d, 0x54, 0x83, 0x6a, 0x4f, 0x2a,
|
||||
0xde, 0xae, 0x99, 0xde, 0x2e, 0x8e, 0x8c, 0x93, 0x50, 0x5f, 0xd0, 0xd5, 0x47, 0x5e, 0x27, 0x21,
|
||||
0x55, 0xb9, 0x9e, 0xb1, 0x10, 0xc3, 0xd3, 0x75, 0xd5, 0x52, 0x51, 0x86, 0x2c, 0xcc, 0xc1, 0x47,
|
||||
0x2d, 0xd1, 0x3c, 0x8e, 0x7a, 0x9b, 0x3a, 0xe0, 0x7a, 0xa7, 0x02, 0x3e, 0x56, 0xd4, 0x15, 0x1d,
|
||||
0x45, 0xb5, 0x26, 0x7b, 0xd0, 0xe6, 0x34, 0x8d, 0xf2, 0xdc, 0x47, 0xe7, 0xdb, 0xae, 0x49, 0x22,
|
||||
0xbb, 0x00, 0x41, 0x12, 0x45, 0x34, 0x40, 0x01, 0x1b, 0x05, 0x0c, 0x8a, 0xca, 0x3b, 0x29, 0x23,
|
||||
0x4f, 0xd0, 0x00, 0x5d, 0xdd, 0x70, 0x9b, 0x52, 0x46, 0x27, 0x34, 0x50, 0xef, 0xc8, 0x04, 0xe5,
|
||||
0x1e, 0xc2, 0x57, 0x1b, 0xcf, 0xb5, 0x14, 0x01, 0x41, 0x76, 0x07, 0x60, 0xc8, 0x93, 0x2c, 0xd5,
|
||||
0xdc, 0xce, 0x5e, 0x5d, 0x21, 0x39, 0x52, 0x90, 0xfd, 0x00, 0x6e, 0x89, 0xab, 0x71, 0xc4, 0xe2,
|
||||
0x33, 0x4f, 0xfa, 0x7c, 0x48, 0x65, 0xaf, 0xab, 0x2b, 0x20, 0xa7, 0xbe, 0x45, 0xa2, 0x7a, 0xfb,
|
||||
0x38, 0xfc, 0x69, 0xef, 0x16, 0x66, 0x80, 0x5a, 0x3a, 0x29, 0x90, 0x43, 0x4e, 0x7d, 0x49, 0xdf,
|
||||
0xa3, 0x8d, 0x7d, 0x37, 0xb4, 0x20, 0x9b, 0xd0, 0x4c, 0x3c, 0x7a, 0x19, 0x44, 0x79, 0xd1, 0x36,
|
||||
0x92, 0xa3, 0xcb, 0x20, 0x72, 0x1e, 0xc1, 0x7a, 0xe5, 0xc6, 0x1c, 0xe8, 0x37, 0xa0, 0x41, 0x39,
|
||||
0x4f, 0x0a, 0x58, 0xd2, 0x1b, 0xe7, 0xb7, 0x40, 0xde, 0xa5, 0xe1, 0x87, 0x30, 0xcf, 0xd9, 0x84,
|
||||
0xf5, 0x8a, 0x6a, 0x6d, 0x87, 0xf3, 0xad, 0x05, 0x1b, 0x4f, 0xd3, 0x94, 0xc6, 0xe1, 0xdb, 0xe4,
|
||||
0x3d, 0x2e, 0xdd, 0x01, 0x40, 0xb5, 0x9e, 0xd1, 0xe0, 0x6d, 0xa4, 0x60, 0x7c, 0xde, 0xa7, 0xbd,
|
||||
0x38, 0x77, 0x60, 0x73, 0xca, 0x82, 0xdc, 0xb6, 0x7f, 0x59, 0x40, 0x9e, 0x23, 0xf2, 0x7d, 0xbf,
|
||||
0xa1, 0x43, 0x61, 0x91, 0x6a, 0x88, 0x1a, 0x59, 0x43, 0x5f, 0xfa, 0x79, 0xbb, 0xee, 0x30, 0xa1,
|
||||
0xf5, 0x3f, 0xf7, 0xa5, 0x9f, 0xb7, 0x4d, 0x4e, 0x83, 0x8c, 0xab, 0x0e, 0x8e, 0x25, 0x83, 0x6d,
|
||||
0xd3, 0x2d, 0x48, 0xe4, 0x53, 0xb8, 0xcd, 0x86, 0x71, 0xc2, 0xe9, 0x44, 0xcc, 0xd3, 0x61, 0x6c,
|
||||
0xa2, 0xf0, 0x86, 0xe6, 0x96, 0x07, 0x8e, 0x30, 0xaa, 0x8f, 0x60, 0xbd, 0xf2, 0x8c, 0x6b, 0x53,
|
||||
0xe0, 0x2f, 0x16, 0xf4, 0x9e, 0xca, 0x64, 0xcc, 0x02, 0x97, 0x2a, 0xe3, 0x2b, 0x4f, 0xbf, 0x0f,
|
||||
0x5d, 0xd5, 0x7b, 0xa6, 0x9f, 0xdf, 0x49, 0xa2, 0x70, 0xd2, 0xdb, 0xb7, 0x40, 0xb5, 0x1f, 0x33,
|
||||
0x32, 0x2b, 0x49, 0x14, 0x62, 0x5c, 0xee, 0x83, 0xea, 0x11, 0xc6, 0x79, 0x3d, 0xe5, 0x74, 0x62,
|
||||
0x7a, 0x51, 0x39, 0xaf, 0x84, 0xf0, 0xbc, 0x6e, 0x2c, 0x2b, 0x31, 0xbd, 0x50, 0xe7, 0x9d, 0xbb,
|
||||
0xb0, 0x35, 0xc7, 0xb6, 0x3c, 0x5c, 0xff, 0xb6, 0x60, 0xfd, 0xa9, 0x10, 0x6c, 0x18, 0xff, 0x1a,
|
||||
0x41, 0xb2, 0x30, 0x7a, 0x03, 0x1a, 0x41, 0x92, 0xc5, 0x12, 0x8d, 0x6d, 0xb8, 0x7a, 0x33, 0x85,
|
||||
0x1b, 0xb5, 0x19, 0xdc, 0x98, 0x42, 0x9e, 0xfa, 0x2c, 0xf2, 0x18, 0xc8, 0xb2, 0x5c, 0x41, 0x96,
|
||||
0x7b, 0xd0, 0x56, 0x41, 0xf6, 0x02, 0x1a, 0x4b, 0xca, 0xf3, 0xae, 0x04, 0x8a, 0x74, 0x88, 0x14,
|
||||
0x25, 0x60, 0x76, 0x4f, 0xdd, 0x98, 0x20, 0x9d, 0xb4, 0xce, 0xff, 0xaa, 0xaa, 0xa8, 0x3c, 0x25,
|
||||
0x8f, 0xd9, 0xc2, 0x2e, 0xaa, 0x80, 0x97, 0x47, 0xf9, 0x3b, 0xd4, 0x52, 0x95, 0x48, 0x9a, 0x0d,
|
||||
0x22, 0x16, 0x78, 0x8a, 0xa1, 0xed, 0xb7, 0x35, 0xe5, 0x1d, 0x8f, 0x26, 0x5e, 0x59, 0x36, 0xbd,
|
||||
0x42, 0x60, 0xd9, 0xcf, 0xe4, 0xa8, 0xe8, 0xa4, 0x6a, 0x3d, 0xe5, 0xa9, 0xe6, 0x4d, 0x9e, 0x5a,
|
||||
0x99, 0xf5, 0x54, 0x99, 0x69, 0x2d, 0x33, 0xd3, 0x3e, 0x85, 0x75, 0x3d, 0x8a, 0x57, 0xc3, 0xb5,
|
||||
0x03, 0x50, 0x76, 0x3d, 0xd1, 0xb3, 0x34, 0xf4, 0x16, 0x6d, 0x4f, 0x38, 0xbf, 0x00, 0xfb, 0x55,
|
||||
0xa2, 0xf5, 0x0a, 0xf2, 0x04, 0xec, 0xa8, 0xd8, 0xa0, 0x68, 0xfb, 0x80, 0x4c, 0x4a, 0xbd, 0x90,
|
||||
0x73, 0x27, 0x42, 0xce, 0x17, 0xd0, 0x2a, 0xc8, 0x85, 0xcf, 0xac, 0x45, 0x3e, 0xab, 0x4d, 0xf9,
|
||||
0xcc, 0xf9, 0xa7, 0x05, 0x1b, 0x55, 0x93, 0xf3, 0xb0, 0xbc, 0x83, 0x6e, 0x79, 0x85, 0x37, 0xf6,
|
||||
0xd3, 0xdc, 0x96, 0x27, 0xa6, 0x2d, 0xb3, 0xc7, 0x4a, 0x03, 0xc5, 0x6b, 0x3f, 0xd5, 0xb9, 0xdc,
|
||||
0x89, 0x0c, 0x52, 0xff, 0x2d, 0xac, 0xcd, 0x88, 0xcc, 0x99, 0x43, 0x7f, 0x64, 0xce, 0xa1, 0x15,
|
||||
0xb0, 0x2b, 0x4f, 0x9b, 0xc3, 0xe9, 0xe7, 0x70, 0x47, 0xc3, 0xc1, 0x61, 0x19, 0xc3, 0xc2, 0xf7,
|
||||
0xd5, 0x50, 0x5b, 0xd3, 0xa1, 0x76, 0xfa, 0xd0, 0x9b, 0x3d, 0x9a, 0x97, 0xdf, 0x10, 0xd6, 0x4e,
|
||||
0xa4, 0x2f, 0x99, 0x90, 0x2c, 0x28, 0x3f, 0x88, 0xa6, 0x72, 0xc3, 0xba, 0xa9, 0x7f, 0xcf, 0xd6,
|
||||
0xe1, 0x2a, 0xd4, 0xa5, 0x2c, 0xf2, 0x57, 0x2d, 0x55, 0x14, 0x88, 0x79, 0x53, 0x1e, 0x83, 0x0f,
|
||||
0x70, 0x95, 0xca, 0x07, 0x99, 0x48, 0x3f, 0xd2, 0xf3, 0xd1, 0x32, 0xce, 0x47, 0x36, 0x52, 0x70,
|
||||
0x40, 0xd2, 0x23, 0x44, 0xa8, 0xb9, 0x0d, 0x3d, 0x3d, 0x29, 0x02, 0x32, 0x77, 0x00, 0xb0, 0x54,
|
||||
0x75, 0x95, 0x35, 0xf5, 0x59, 0x45, 0x39, 0x54, 0x04, 0x67, 0x17, 0xb6, 0xbf, 0xa4, 0x52, 0x75,
|
||||
0x23, 0x7e, 0x98, 0xc4, 0xa7, 0x6c, 0x98, 0x71, 0xdf, 0x08, 0x85, 0xf3, 0x1f, 0x0b, 0x76, 0x16,
|
||||
0x08, 0xe4, 0x0f, 0xee, 0xc1, 0xca, 0xd8, 0x17, 0x92, 0xf2, 0xa2, 0x4a, 0x8a, 0xed, 0xb4, 0x2b,
|
||||
0x6a, 0x37, 0xb9, 0xa2, 0x3e, 0xe3, 0x8a, 0x4d, 0x68, 0x8e, 0xfd, 0x4b, 0x6f, 0x3c, 0xc8, 0x47,
|
||||
0xb9, 0xc6, 0xd8, 0xbf, 0x7c, 0x3d, 0x40, 0x64, 0x63, 0xdc, 0x1b, 0x64, 0xc1, 0x19, 0x95, 0xa2,
|
||||
0x44, 0x36, 0xc6, 0x9f, 0x69, 0x0a, 0xce, 0x76, 0x38, 0xe8, 0x22, 0x0c, 0xb4, 0xdc, 0x7c, 0xe7,
|
||||
0x5c, 0x40, 0xef, 0x24, 0x1b, 0x88, 0x80, 0xb3, 0x01, 0x7d, 0x4d, 0xa5, 0xaf, 0xc0, 0xb0, 0xc8,
|
||||
0x91, 0x7b, 0xd0, 0x0e, 0x22, 0xa6, 0xd0, 0xd0, 0xf8, 0x92, 0x04, 0x4d, 0xc2, 0xae, 0x81, 0x70,
|
||||
0x29, 0x47, 0x5e, 0xe5, 0xe3, 0x19, 0x14, 0xe9, 0x8d, 0xfe, 0x80, 0xde, 0x82, 0x96, 0x60, 0x71,
|
||||
0x40, 0xbd, 0x58, 0x7f, 0xb1, 0xd4, 0xdd, 0x15, 0xdc, 0x1f, 0x0b, 0xe7, 0xcf, 0x16, 0x6c, 0xcd,
|
||||
0xb9, 0x39, 0x77, 0xe1, 0xf5, 0xad, 0xfc, 0x57, 0x40, 0xe8, 0x39, 0xda, 0x65, 0x7c, 0x7f, 0xe5,
|
||||
0x45, 0x76, 0xd7, 0x18, 0x73, 0xa6, 0x3f, 0xd1, 0xdc, 0x35, 0x3a, 0x4d, 0x72, 0x7c, 0x85, 0x3b,
|
||||
0x43, 0x5d, 0xc1, 0xeb, 0xd0, 0x90, 0xc2, 0x43, 0xc4, 0x52, 0xb6, 0x2e, 0x4b, 0x71, 0x2c, 0xc8,
|
||||
0x63, 0x20, 0xa9, 0xcf, 0x25, 0x53, 0xd2, 0x6a, 0xa8, 0xf7, 0x46, 0xbe, 0x18, 0xe1, 0x65, 0x0d,
|
||||
0x77, 0xb5, 0xe4, 0x7c, 0x45, 0xaf, 0x7e, 0xe9, 0x8b, 0x91, 0xc2, 0x69, 0x9c, 0x23, 0xea, 0x38,
|
||||
0x5a, 0xe2, 0xfa, 0xe0, 0x7f, 0x2d, 0xe8, 0x9c, 0x50, 0xff, 0x82, 0xd2, 0x10, 0xb3, 0x86, 0x0c,
|
||||
0x0b, 0xb4, 0xaa, 0xfe, 0xeb, 0x20, 0x0f, 0xa6, 0x61, 0x69, 0xee, 0xcf, 0x95, 0xfe, 0x27, 0x37,
|
||||
0x89, 0xe5, 0x85, 0xbf, 0x44, 0x8e, 0xa1, 0x6d, 0xfc, 0x4c, 0x20, 0xdb, 0xc6, 0xc1, 0x99, 0x7f,
|
||||
0x24, 0xfd, 0x9d, 0x05, 0xdc, 0x42, 0xdb, 0x13, 0x8b, 0xbc, 0x82, 0xb6, 0x31, 0xb3, 0x9a, 0xfa,
|
||||
0x66, 0x87, 0x67, 0x53, 0xdf, 0x9c, 0x41, 0xd7, 0x59, 0x52, 0xda, 0x8c, 0xc9, 0xd3, 0xd4, 0x36,
|
||||
0x3b, 0xeb, 0x9a, 0xda, 0xe6, 0x8d, 0xab, 0x4b, 0xc4, 0x85, 0x6e, 0x65, 0x5a, 0x24, 0xbb, 0x93,
|
||||
0x13, 0xf3, 0x06, 0xd9, 0xfe, 0xbd, 0x85, 0x7c, 0xd3, 0x42, 0x63, 0x40, 0x33, 0x2d, 0x9c, 0x1d,
|
||||
0x3f, 0x4d, 0x0b, 0xe7, 0x4c, 0x75, 0xce, 0x12, 0xf9, 0x3d, 0xac, 0xcd, 0x0c, 0x49, 0xc4, 0x31,
|
||||
0xac, 0x58, 0x30, 0xdd, 0xf5, 0xef, 0x5f, 0x2b, 0x53, 0xea, 0xff, 0x1a, 0x3a, 0xe6, 0x6c, 0x42,
|
||||
0x0c, 0x83, 0xe6, 0x8c, 0x5f, 0xfd, 0xdd, 0x45, 0x6c, 0x53, 0xa1, 0xd9, 0x1e, 0x4d, 0x85, 0x73,
|
||||
0x06, 0x04, 0x53, 0xe1, 0xbc, 0xae, 0xea, 0x2c, 0x91, 0xdf, 0xc1, 0xea, 0x74, 0x9b, 0x22, 0x1f,
|
||||
0x4f, 0xbb, 0x6d, 0xa6, 0xfb, 0xf5, 0x9d, 0xeb, 0x44, 0x4a, 0xe5, 0x2f, 0x01, 0x26, 0xdd, 0x87,
|
||||
0x18, 0x38, 0x30, 0xd3, 0xfd, 0xfa, 0xdb, 0xf3, 0x99, 0xa5, 0xaa, 0x3f, 0xc2, 0xe6, 0x5c, 0x88,
|
||||
0x27, 0x46, 0xe9, 0x5d, 0xd7, 0x24, 0xfa, 0x3f, 0xbc, 0x51, 0xae, 0xbc, 0xeb, 0x0f, 0xb0, 0x36,
|
||||
0x83, 0x83, 0x66, 0x56, 0x2c, 0x82, 0x67, 0x33, 0x2b, 0x16, 0x02, 0xa9, 0xaa, 0xda, 0x67, 0xbb,
|
||||
0xb0, 0x2a, 0x34, 0xfc, 0x9c, 0x8a, 0x7d, 0x0d, 0xdf, 0xcf, 0x00, 0x6d, 0x7a, 0xc3, 0x13, 0x99,
|
||||
0x0c, 0x9a, 0xf8, 0x73, 0xf7, 0x27, 0xff, 0x0f, 0x00, 0x00, 0xff, 0xff, 0xc4, 0x22, 0xe9, 0x54,
|
||||
0xeb, 0x15, 0x00, 0x00,
|
||||
}
110	weed/pb/messaging.proto	Normal file
@ -0,0 +1,110 @@
syntax = "proto3";

package messaging_pb;

option java_package = "seaweedfs.client";
option java_outer_classname = "MessagingProto";

//////////////////////////////////////////////////

service SeaweedMessaging {

    rpc Subscribe (stream SubscriberMessage) returns (stream BrokerMessage) {
    }

    rpc Publish (stream PublishRequest) returns (stream PublishResponse) {
    }

    rpc ConfigureTopic (ConfigureTopicRequest) returns (ConfigureTopicResponse) {
    }

    rpc GetTopicConfiguration (GetTopicConfigurationRequest) returns (GetTopicConfigurationResponse) {
    }

}

//////////////////////////////////////////////////

message SubscriberMessage {
    message InitMessage {
        string namespace = 1;
        string topic = 2;
        int32 partition = 3;
        enum StartPosition {
            LATEST = 0; // Start at the newest message
            EARLIEST = 1; // Start at the oldest message
            TIMESTAMP = 2; // Start after a specified timestamp, exclusive
        }
        StartPosition startPosition = 4; // Where to begin consuming from
        int64 timestampNs = 5; // timestamp in nano seconds
        string subscriber_id = 6; // uniquely identify a subscriber to track consumption
    }
    InitMessage init = 1;
    message AckMessage {
        int64 message_id = 1;
    }
    AckMessage ack = 2;
}

message Message {
    int64 timestamp = 1 [jstype = JS_STRING]; // When the message was received by the broker
    bytes key = 2; // Message key
    bytes value = 3; // Message payload
    map<string, bytes> headers = 4; // Message headers
}

message BrokerMessage {
    Message data = 1;
    message RedirectMessage {
        string new_broker = 1;
    }
    RedirectMessage redirect = 2;
}

message PublishRequest {
    message InitMessage {
        string namespace = 1; // only needed on the initial request
        string topic = 2; // only needed on the initial request
        int32 partition = 3;
    }
    InitMessage init = 1;
    message DataMessage {
        bytes key = 1; // Message key
        bytes value = 2; // Message payload
        map<string, bytes> headers = 3; // Message headers
    }
    DataMessage data = 2;
}

message PublishResponse {
    message ConfigMessage {
        int32 partition_count = 1;
    }
    ConfigMessage config = 1;
    message RedirectMessage {
        string new_broker = 1;
    }
    RedirectMessage redirect = 2;
}

message ConfigureTopicRequest {
    string namespace = 1;
    string topic = 2;
    TopicConfiguration configuration = 3;
}
message ConfigureTopicResponse {
}

message GetTopicConfigurationRequest {
    string namespace = 1;
    string topic = 2;
}
message GetTopicConfigurationResponse {
    TopicConfiguration configuration = 1;
}

message TopicConfiguration {
    int32 partition_count = 1;
    string collection = 2;
    string replication = 3;
}
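To make the new service concrete, the sketch below shows how a publisher might drive the bidirectional Publish stream with the generated Go bindings from weed/pb/messaging_pb (the next file in this commit). The broker address, namespace, topic, and payload are made-up values.

```
// Illustrative publisher against the new SeaweedMessaging service (sketch only).
package main

import (
	"context"
	"log"

	"github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
	"google.golang.org/grpc"
)

func main() {
	// Broker address is an assumption for illustration.
	conn, err := grpc.Dial("localhost:17777", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := messaging_pb.NewSeaweedMessagingClient(conn)
	stream, err := client.Publish(context.Background())
	if err != nil {
		log.Fatal(err)
	}

	// The first frame carries the InitMessage naming the topic and partition.
	if err := stream.Send(&messaging_pb.PublishRequest{
		Init: &messaging_pb.PublishRequest_InitMessage{
			Namespace: "chat",
			Topic:     "room1",
			Partition: 0,
		},
	}); err != nil {
		log.Fatal(err)
	}

	// Subsequent frames carry DataMessages with the actual payload.
	if err := stream.Send(&messaging_pb.PublishRequest{
		Data: &messaging_pb.PublishRequest_DataMessage{
			Key:   []byte("user-42"),
			Value: []byte("hello"),
		},
	}); err != nil {
		log.Fatal(err)
	}

	// The broker replies with either a partition config or a redirect to another broker.
	resp, err := stream.Recv()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("broker response: %v", resp)
}
```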
824	weed/pb/messaging_pb/messaging.pb.go	Normal file
@ -0,0 +1,824 @@
|
|||
// Code generated by protoc-gen-go.
|
||||
// source: messaging.proto
|
||||
// DO NOT EDIT!
|
||||
|
||||
/*
|
||||
Package messaging_pb is a generated protocol buffer package.
|
||||
|
||||
It is generated from these files:
|
||||
messaging.proto
|
||||
|
||||
It has these top-level messages:
|
||||
SubscriberMessage
|
||||
Message
|
||||
BrokerMessage
|
||||
PublishRequest
|
||||
PublishResponse
|
||||
ConfigureTopicRequest
|
||||
ConfigureTopicResponse
|
||||
GetTopicConfigurationRequest
|
||||
GetTopicConfigurationResponse
|
||||
TopicConfiguration
|
||||
*/
|
||||
package messaging_pb
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
|
||||
import (
|
||||
context "golang.org/x/net/context"
|
||||
grpc "google.golang.org/grpc"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
type SubscriberMessage_InitMessage_StartPosition int32
|
||||
|
||||
const (
|
||||
SubscriberMessage_InitMessage_LATEST SubscriberMessage_InitMessage_StartPosition = 0
|
||||
SubscriberMessage_InitMessage_EARLIEST SubscriberMessage_InitMessage_StartPosition = 1
|
||||
SubscriberMessage_InitMessage_TIMESTAMP SubscriberMessage_InitMessage_StartPosition = 2
|
||||
)
|
||||
|
||||
var SubscriberMessage_InitMessage_StartPosition_name = map[int32]string{
|
||||
0: "LATEST",
|
||||
1: "EARLIEST",
|
||||
2: "TIMESTAMP",
|
||||
}
|
||||
var SubscriberMessage_InitMessage_StartPosition_value = map[string]int32{
|
||||
"LATEST": 0,
|
||||
"EARLIEST": 1,
|
||||
"TIMESTAMP": 2,
|
||||
}
|
||||
|
||||
func (x SubscriberMessage_InitMessage_StartPosition) String() string {
|
||||
return proto.EnumName(SubscriberMessage_InitMessage_StartPosition_name, int32(x))
|
||||
}
|
||||
func (SubscriberMessage_InitMessage_StartPosition) EnumDescriptor() ([]byte, []int) {
|
||||
return fileDescriptor0, []int{0, 0, 0}
|
||||
}
|
||||
|
||||
type SubscriberMessage struct {
|
||||
Init *SubscriberMessage_InitMessage `protobuf:"bytes,1,opt,name=init" json:"init,omitempty"`
|
||||
Ack *SubscriberMessage_AckMessage `protobuf:"bytes,2,opt,name=ack" json:"ack,omitempty"`
|
||||
}
|
||||
|
||||
func (m *SubscriberMessage) Reset() { *m = SubscriberMessage{} }
|
||||
func (m *SubscriberMessage) String() string { return proto.CompactTextString(m) }
|
||||
func (*SubscriberMessage) ProtoMessage() {}
|
||||
func (*SubscriberMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
|
||||
|
||||
func (m *SubscriberMessage) GetInit() *SubscriberMessage_InitMessage {
|
||||
if m != nil {
|
||||
return m.Init
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *SubscriberMessage) GetAck() *SubscriberMessage_AckMessage {
|
||||
if m != nil {
|
||||
return m.Ack
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type SubscriberMessage_InitMessage struct {
|
||||
Namespace string `protobuf:"bytes,1,opt,name=namespace" json:"namespace,omitempty"`
|
||||
Topic string `protobuf:"bytes,2,opt,name=topic" json:"topic,omitempty"`
|
||||
Partition int32 `protobuf:"varint,3,opt,name=partition" json:"partition,omitempty"`
|
||||
StartPosition SubscriberMessage_InitMessage_StartPosition `protobuf:"varint,4,opt,name=startPosition,enum=messaging_pb.SubscriberMessage_InitMessage_StartPosition" json:"startPosition,omitempty"`
|
||||
TimestampNs int64 `protobuf:"varint,5,opt,name=timestampNs" json:"timestampNs,omitempty"`
|
||||
SubscriberId string `protobuf:"bytes,6,opt,name=subscriber_id,json=subscriberId" json:"subscriber_id,omitempty"`
|
||||
}
|
||||
|
||||
func (m *SubscriberMessage_InitMessage) Reset() { *m = SubscriberMessage_InitMessage{} }
|
||||
func (m *SubscriberMessage_InitMessage) String() string { return proto.CompactTextString(m) }
|
||||
func (*SubscriberMessage_InitMessage) ProtoMessage() {}
|
||||
func (*SubscriberMessage_InitMessage) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor0, []int{0, 0}
|
||||
}
|
||||
|
||||
func (m *SubscriberMessage_InitMessage) GetNamespace() string {
|
||||
if m != nil {
|
||||
return m.Namespace
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *SubscriberMessage_InitMessage) GetTopic() string {
|
||||
if m != nil {
|
||||
return m.Topic
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *SubscriberMessage_InitMessage) GetPartition() int32 {
|
||||
if m != nil {
|
||||
return m.Partition
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *SubscriberMessage_InitMessage) GetStartPosition() SubscriberMessage_InitMessage_StartPosition {
|
||||
if m != nil {
|
||||
return m.StartPosition
|
||||
}
|
||||
return SubscriberMessage_InitMessage_LATEST
|
||||
}
|
||||
|
||||
func (m *SubscriberMessage_InitMessage) GetTimestampNs() int64 {
|
||||
if m != nil {
|
||||
return m.TimestampNs
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *SubscriberMessage_InitMessage) GetSubscriberId() string {
|
||||
if m != nil {
|
||||
return m.SubscriberId
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type SubscriberMessage_AckMessage struct {
|
||||
MessageId int64 `protobuf:"varint,1,opt,name=message_id,json=messageId" json:"message_id,omitempty"`
|
||||
}
|
||||
|
||||
func (m *SubscriberMessage_AckMessage) Reset() { *m = SubscriberMessage_AckMessage{} }
|
||||
func (m *SubscriberMessage_AckMessage) String() string { return proto.CompactTextString(m) }
|
||||
func (*SubscriberMessage_AckMessage) ProtoMessage() {}
|
||||
func (*SubscriberMessage_AckMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 1} }
|
||||
|
||||
func (m *SubscriberMessage_AckMessage) GetMessageId() int64 {
|
||||
if m != nil {
|
||||
return m.MessageId
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type Message struct {
|
||||
Timestamp int64 `protobuf:"varint,1,opt,name=timestamp" json:"timestamp,omitempty"`
|
||||
Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
|
||||
Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"`
|
||||
Headers map[string][]byte `protobuf:"bytes,4,rep,name=headers" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||
}
|
||||
|
||||
func (m *Message) Reset() { *m = Message{} }
|
||||
func (m *Message) String() string { return proto.CompactTextString(m) }
|
||||
func (*Message) ProtoMessage() {}
|
||||
func (*Message) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
|
||||
|
||||
func (m *Message) GetTimestamp() int64 {
|
||||
if m != nil {
|
||||
return m.Timestamp
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *Message) GetKey() []byte {
|
||||
if m != nil {
|
||||
return m.Key
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Message) GetValue() []byte {
|
||||
if m != nil {
|
||||
return m.Value
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Message) GetHeaders() map[string][]byte {
|
||||
if m != nil {
|
||||
return m.Headers
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type BrokerMessage struct {
|
||||
Data *Message `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"`
|
||||
Redirect *BrokerMessage_RedirectMessage `protobuf:"bytes,2,opt,name=redirect" json:"redirect,omitempty"`
|
||||
}
|
||||
|
||||
func (m *BrokerMessage) Reset() { *m = BrokerMessage{} }
|
||||
func (m *BrokerMessage) String() string { return proto.CompactTextString(m) }
|
||||
func (*BrokerMessage) ProtoMessage() {}
|
||||
func (*BrokerMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
|
||||
|
||||
func (m *BrokerMessage) GetData() *Message {
|
||||
if m != nil {
|
||||
return m.Data
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *BrokerMessage) GetRedirect() *BrokerMessage_RedirectMessage {
|
||||
if m != nil {
|
||||
return m.Redirect
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type BrokerMessage_RedirectMessage struct {
|
||||
NewBroker string `protobuf:"bytes,1,opt,name=new_broker,json=newBroker" json:"new_broker,omitempty"`
|
||||
}
|
||||
|
||||
func (m *BrokerMessage_RedirectMessage) Reset() { *m = BrokerMessage_RedirectMessage{} }
|
||||
func (m *BrokerMessage_RedirectMessage) String() string { return proto.CompactTextString(m) }
|
||||
func (*BrokerMessage_RedirectMessage) ProtoMessage() {}
|
||||
func (*BrokerMessage_RedirectMessage) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor0, []int{2, 0}
|
||||
}
|
||||
|
||||
func (m *BrokerMessage_RedirectMessage) GetNewBroker() string {
|
||||
if m != nil {
|
||||
return m.NewBroker
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type PublishRequest struct {
|
||||
Init *PublishRequest_InitMessage `protobuf:"bytes,1,opt,name=init" json:"init,omitempty"`
|
||||
Data *PublishRequest_DataMessage `protobuf:"bytes,2,opt,name=data" json:"data,omitempty"`
|
||||
}
|
||||
|
||||
func (m *PublishRequest) Reset() { *m = PublishRequest{} }
|
||||
func (m *PublishRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*PublishRequest) ProtoMessage() {}
|
||||
func (*PublishRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
|
||||
|
||||
func (m *PublishRequest) GetInit() *PublishRequest_InitMessage {
|
||||
if m != nil {
|
||||
return m.Init
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *PublishRequest) GetData() *PublishRequest_DataMessage {
|
||||
if m != nil {
|
||||
return m.Data
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type PublishRequest_InitMessage struct {
|
||||
Namespace string `protobuf:"bytes,1,opt,name=namespace" json:"namespace,omitempty"`
|
||||
Topic string `protobuf:"bytes,2,opt,name=topic" json:"topic,omitempty"`
|
||||
Partition int32 `protobuf:"varint,3,opt,name=partition" json:"partition,omitempty"`
|
||||
}
|
||||
|
||||
func (m *PublishRequest_InitMessage) Reset() { *m = PublishRequest_InitMessage{} }
|
||||
func (m *PublishRequest_InitMessage) String() string { return proto.CompactTextString(m) }
|
||||
func (*PublishRequest_InitMessage) ProtoMessage() {}
|
||||
func (*PublishRequest_InitMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3, 0} }
|
||||
|
||||
func (m *PublishRequest_InitMessage) GetNamespace() string {
|
||||
if m != nil {
|
||||
return m.Namespace
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *PublishRequest_InitMessage) GetTopic() string {
|
||||
if m != nil {
|
||||
return m.Topic
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *PublishRequest_InitMessage) GetPartition() int32 {
|
||||
if m != nil {
|
||||
return m.Partition
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type PublishRequest_DataMessage struct {
|
||||
Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
|
||||
Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
|
||||
Headers map[string][]byte `protobuf:"bytes,3,rep,name=headers" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||
}
|
||||
|
||||
func (m *PublishRequest_DataMessage) Reset() { *m = PublishRequest_DataMessage{} }
|
||||
func (m *PublishRequest_DataMessage) String() string { return proto.CompactTextString(m) }
|
||||
func (*PublishRequest_DataMessage) ProtoMessage() {}
|
||||
func (*PublishRequest_DataMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3, 1} }
|
||||
|
||||
func (m *PublishRequest_DataMessage) GetKey() []byte {
|
||||
if m != nil {
|
||||
return m.Key
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *PublishRequest_DataMessage) GetValue() []byte {
|
||||
if m != nil {
|
||||
return m.Value
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *PublishRequest_DataMessage) GetHeaders() map[string][]byte {
|
||||
if m != nil {
|
||||
return m.Headers
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type PublishResponse struct {
|
||||
Config *PublishResponse_ConfigMessage `protobuf:"bytes,1,opt,name=config" json:"config,omitempty"`
|
||||
Redirect *PublishResponse_RedirectMessage `protobuf:"bytes,2,opt,name=redirect" json:"redirect,omitempty"`
|
||||
}
|
||||
|
||||
func (m *PublishResponse) Reset() { *m = PublishResponse{} }
|
||||
func (m *PublishResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*PublishResponse) ProtoMessage() {}
|
||||
func (*PublishResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
|
||||
|
||||
func (m *PublishResponse) GetConfig() *PublishResponse_ConfigMessage {
|
||||
if m != nil {
|
||||
return m.Config
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *PublishResponse) GetRedirect() *PublishResponse_RedirectMessage {
|
||||
if m != nil {
|
||||
return m.Redirect
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type PublishResponse_ConfigMessage struct {
|
||||
PartitionCount int32 `protobuf:"varint,1,opt,name=partition_count,json=partitionCount" json:"partition_count,omitempty"`
|
||||
}
|
||||
|
||||
func (m *PublishResponse_ConfigMessage) Reset() { *m = PublishResponse_ConfigMessage{} }
|
||||
func (m *PublishResponse_ConfigMessage) String() string { return proto.CompactTextString(m) }
|
||||
func (*PublishResponse_ConfigMessage) ProtoMessage() {}
|
||||
func (*PublishResponse_ConfigMessage) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor0, []int{4, 0}
|
||||
}
|
||||
|
||||
func (m *PublishResponse_ConfigMessage) GetPartitionCount() int32 {
|
||||
if m != nil {
|
||||
return m.PartitionCount
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type PublishResponse_RedirectMessage struct {
|
||||
NewBroker string `protobuf:"bytes,1,opt,name=new_broker,json=newBroker" json:"new_broker,omitempty"`
|
||||
}
|
||||
|
||||
func (m *PublishResponse_RedirectMessage) Reset() { *m = PublishResponse_RedirectMessage{} }
|
||||
func (m *PublishResponse_RedirectMessage) String() string { return proto.CompactTextString(m) }
|
||||
func (*PublishResponse_RedirectMessage) ProtoMessage() {}
|
||||
func (*PublishResponse_RedirectMessage) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor0, []int{4, 1}
|
||||
}
|
||||
|
||||
func (m *PublishResponse_RedirectMessage) GetNewBroker() string {
|
||||
if m != nil {
|
||||
return m.NewBroker
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type ConfigureTopicRequest struct {
|
||||
Namespace string `protobuf:"bytes,1,opt,name=namespace" json:"namespace,omitempty"`
|
||||
Topic string `protobuf:"bytes,2,opt,name=topic" json:"topic,omitempty"`
|
||||
Configuration *TopicConfiguration `protobuf:"bytes,3,opt,name=configuration" json:"configuration,omitempty"`
|
||||
}
|
||||
|
||||
func (m *ConfigureTopicRequest) Reset() { *m = ConfigureTopicRequest{} }
|
||||
func (m *ConfigureTopicRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*ConfigureTopicRequest) ProtoMessage() {}
|
||||
func (*ConfigureTopicRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
|
||||
|
||||
func (m *ConfigureTopicRequest) GetNamespace() string {
|
||||
if m != nil {
|
||||
return m.Namespace
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *ConfigureTopicRequest) GetTopic() string {
|
||||
if m != nil {
|
||||
return m.Topic
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *ConfigureTopicRequest) GetConfiguration() *TopicConfiguration {
|
||||
if m != nil {
|
||||
return m.Configuration
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type ConfigureTopicResponse struct {
|
||||
}
|
||||
|
||||
func (m *ConfigureTopicResponse) Reset() { *m = ConfigureTopicResponse{} }
|
||||
func (m *ConfigureTopicResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*ConfigureTopicResponse) ProtoMessage() {}
|
||||
func (*ConfigureTopicResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
|
||||
|
||||
type GetTopicConfigurationRequest struct {
|
||||
Namespace string `protobuf:"bytes,1,opt,name=namespace" json:"namespace,omitempty"`
|
||||
Topic string `protobuf:"bytes,2,opt,name=topic" json:"topic,omitempty"`
|
||||
}
|
||||
|
||||
func (m *GetTopicConfigurationRequest) Reset() { *m = GetTopicConfigurationRequest{} }
|
||||
func (m *GetTopicConfigurationRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*GetTopicConfigurationRequest) ProtoMessage() {}
|
||||
func (*GetTopicConfigurationRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
|
||||
|
||||
func (m *GetTopicConfigurationRequest) GetNamespace() string {
|
||||
if m != nil {
|
||||
return m.Namespace
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *GetTopicConfigurationRequest) GetTopic() string {
|
||||
if m != nil {
|
||||
return m.Topic
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type GetTopicConfigurationResponse struct {
|
||||
Configuration *TopicConfiguration `protobuf:"bytes,1,opt,name=configuration" json:"configuration,omitempty"`
|
||||
}
|
||||
|
||||
func (m *GetTopicConfigurationResponse) Reset() { *m = GetTopicConfigurationResponse{} }
|
||||
func (m *GetTopicConfigurationResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*GetTopicConfigurationResponse) ProtoMessage() {}
|
||||
func (*GetTopicConfigurationResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
|
||||
|
||||
func (m *GetTopicConfigurationResponse) GetConfiguration() *TopicConfiguration {
|
||||
if m != nil {
|
||||
return m.Configuration
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type TopicConfiguration struct {
|
||||
PartitionCount int32 `protobuf:"varint,1,opt,name=partition_count,json=partitionCount" json:"partition_count,omitempty"`
|
||||
Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"`
|
||||
Replication string `protobuf:"bytes,3,opt,name=replication" json:"replication,omitempty"`
|
||||
}
|
||||
|
||||
func (m *TopicConfiguration) Reset() { *m = TopicConfiguration{} }
|
||||
func (m *TopicConfiguration) String() string { return proto.CompactTextString(m) }
|
||||
func (*TopicConfiguration) ProtoMessage() {}
|
||||
func (*TopicConfiguration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
|
||||
|
||||
func (m *TopicConfiguration) GetPartitionCount() int32 {
|
||||
if m != nil {
|
||||
return m.PartitionCount
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *TopicConfiguration) GetCollection() string {
|
||||
if m != nil {
|
||||
return m.Collection
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *TopicConfiguration) GetReplication() string {
|
||||
if m != nil {
|
||||
return m.Replication
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*SubscriberMessage)(nil), "messaging_pb.SubscriberMessage")
|
||||
proto.RegisterType((*SubscriberMessage_InitMessage)(nil), "messaging_pb.SubscriberMessage.InitMessage")
|
||||
proto.RegisterType((*SubscriberMessage_AckMessage)(nil), "messaging_pb.SubscriberMessage.AckMessage")
|
||||
proto.RegisterType((*Message)(nil), "messaging_pb.Message")
|
||||
proto.RegisterType((*BrokerMessage)(nil), "messaging_pb.BrokerMessage")
|
||||
proto.RegisterType((*BrokerMessage_RedirectMessage)(nil), "messaging_pb.BrokerMessage.RedirectMessage")
|
||||
proto.RegisterType((*PublishRequest)(nil), "messaging_pb.PublishRequest")
|
||||
proto.RegisterType((*PublishRequest_InitMessage)(nil), "messaging_pb.PublishRequest.InitMessage")
|
||||
proto.RegisterType((*PublishRequest_DataMessage)(nil), "messaging_pb.PublishRequest.DataMessage")
|
||||
proto.RegisterType((*PublishResponse)(nil), "messaging_pb.PublishResponse")
|
||||
proto.RegisterType((*PublishResponse_ConfigMessage)(nil), "messaging_pb.PublishResponse.ConfigMessage")
|
||||
proto.RegisterType((*PublishResponse_RedirectMessage)(nil), "messaging_pb.PublishResponse.RedirectMessage")
|
||||
proto.RegisterType((*ConfigureTopicRequest)(nil), "messaging_pb.ConfigureTopicRequest")
|
||||
proto.RegisterType((*ConfigureTopicResponse)(nil), "messaging_pb.ConfigureTopicResponse")
|
||||
proto.RegisterType((*GetTopicConfigurationRequest)(nil), "messaging_pb.GetTopicConfigurationRequest")
|
||||
proto.RegisterType((*GetTopicConfigurationResponse)(nil), "messaging_pb.GetTopicConfigurationResponse")
|
||||
proto.RegisterType((*TopicConfiguration)(nil), "messaging_pb.TopicConfiguration")
|
||||
proto.RegisterEnum("messaging_pb.SubscriberMessage_InitMessage_StartPosition", SubscriberMessage_InitMessage_StartPosition_name, SubscriberMessage_InitMessage_StartPosition_value)
|
||||
}
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ context.Context
|
||||
var _ grpc.ClientConn
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the grpc package it is being compiled against.
|
||||
const _ = grpc.SupportPackageIsVersion4
|
||||
|
||||
// Client API for SeaweedMessaging service
|
||||
|
||||
type SeaweedMessagingClient interface {
	Subscribe(ctx context.Context, opts ...grpc.CallOption) (SeaweedMessaging_SubscribeClient, error)
	Publish(ctx context.Context, opts ...grpc.CallOption) (SeaweedMessaging_PublishClient, error)
	ConfigureTopic(ctx context.Context, in *ConfigureTopicRequest, opts ...grpc.CallOption) (*ConfigureTopicResponse, error)
	GetTopicConfiguration(ctx context.Context, in *GetTopicConfigurationRequest, opts ...grpc.CallOption) (*GetTopicConfigurationResponse, error)
}

type seaweedMessagingClient struct {
	cc *grpc.ClientConn
}

func NewSeaweedMessagingClient(cc *grpc.ClientConn) SeaweedMessagingClient {
	return &seaweedMessagingClient{cc}
}
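As a usage sketch (not part of the generated file), a caller holding a *grpc.ClientConn could construct this client and issue the unary ConfigureTopic RPC as below; the namespace, topic, and TopicConfiguration values are invented for illustration.

```
// Sketch: configuring a topic through the generated client (illustrative values).
package example

import (
	"context"

	"github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
	"google.golang.org/grpc"
)

func configureChatTopic(conn *grpc.ClientConn) error {
	client := messaging_pb.NewSeaweedMessagingClient(conn)
	_, err := client.ConfigureTopic(context.Background(), &messaging_pb.ConfigureTopicRequest{
		Namespace: "chat",
		Topic:     "room1",
		Configuration: &messaging_pb.TopicConfiguration{
			PartitionCount: 4,
			Collection:     "chat_topics",
			Replication:    "001", // SeaweedFS-style replication string; value assumed
		},
	})
	// ConfigureTopicResponse currently carries no fields, so only the error matters.
	return err
}
```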
|
||||
|
||||
func (c *seaweedMessagingClient) Subscribe(ctx context.Context, opts ...grpc.CallOption) (SeaweedMessaging_SubscribeClient, error) {
|
||||
stream, err := grpc.NewClientStream(ctx, &_SeaweedMessaging_serviceDesc.Streams[0], c.cc, "/messaging_pb.SeaweedMessaging/Subscribe", opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
x := &seaweedMessagingSubscribeClient{stream}
|
||||
return x, nil
|
||||
}
|
||||
|
||||
type SeaweedMessaging_SubscribeClient interface {
|
||||
Send(*SubscriberMessage) error
|
||||
Recv() (*BrokerMessage, error)
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
type seaweedMessagingSubscribeClient struct {
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
func (x *seaweedMessagingSubscribeClient) Send(m *SubscriberMessage) error {
|
||||
return x.ClientStream.SendMsg(m)
|
||||
}
|
||||
|
||||
func (x *seaweedMessagingSubscribeClient) Recv() (*BrokerMessage, error) {
|
||||
m := new(BrokerMessage)
|
||||
if err := x.ClientStream.RecvMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (c *seaweedMessagingClient) Publish(ctx context.Context, opts ...grpc.CallOption) (SeaweedMessaging_PublishClient, error) {
|
||||
stream, err := grpc.NewClientStream(ctx, &_SeaweedMessaging_serviceDesc.Streams[1], c.cc, "/messaging_pb.SeaweedMessaging/Publish", opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
x := &seaweedMessagingPublishClient{stream}
|
||||
return x, nil
|
||||
}
|
||||
|
||||
type SeaweedMessaging_PublishClient interface {
|
||||
Send(*PublishRequest) error
|
||||
Recv() (*PublishResponse, error)
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
type seaweedMessagingPublishClient struct {
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
func (x *seaweedMessagingPublishClient) Send(m *PublishRequest) error {
|
||||
return x.ClientStream.SendMsg(m)
|
||||
}
|
||||
|
||||
func (x *seaweedMessagingPublishClient) Recv() (*PublishResponse, error) {
|
||||
m := new(PublishResponse)
|
||||
if err := x.ClientStream.RecvMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (c *seaweedMessagingClient) ConfigureTopic(ctx context.Context, in *ConfigureTopicRequest, opts ...grpc.CallOption) (*ConfigureTopicResponse, error) {
|
||||
out := new(ConfigureTopicResponse)
|
||||
err := grpc.Invoke(ctx, "/messaging_pb.SeaweedMessaging/ConfigureTopic", in, out, c.cc, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *seaweedMessagingClient) GetTopicConfiguration(ctx context.Context, in *GetTopicConfigurationRequest, opts ...grpc.CallOption) (*GetTopicConfigurationResponse, error) {
|
||||
out := new(GetTopicConfigurationResponse)
|
||||
err := grpc.Invoke(ctx, "/messaging_pb.SeaweedMessaging/GetTopicConfiguration", in, out, c.cc, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// Server API for SeaweedMessaging service
|
||||
|
||||
type SeaweedMessagingServer interface {
	Subscribe(SeaweedMessaging_SubscribeServer) error
	Publish(SeaweedMessaging_PublishServer) error
	ConfigureTopic(context.Context, *ConfigureTopicRequest) (*ConfigureTopicResponse, error)
	GetTopicConfiguration(context.Context, *GetTopicConfigurationRequest) (*GetTopicConfigurationResponse, error)
}

func RegisterSeaweedMessagingServer(s *grpc.Server, srv SeaweedMessagingServer) {
	s.RegisterService(&_SeaweedMessaging_serviceDesc, srv)
}
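For context, a broker process would implement SeaweedMessagingServer and hand it to the registration function above. A bare-bones sketch follows; the listen address and the no-op method bodies are assumptions, not how the eventual broker is implemented.

```
// Minimal sketch of wiring a stub broker into a gRPC server (assumed details only).
package example

import (
	"context"
	"net"

	"github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
	"google.golang.org/grpc"
)

type stubBroker struct{}

func (b *stubBroker) Subscribe(stream messaging_pb.SeaweedMessaging_SubscribeServer) error {
	return nil // a real broker would stream BrokerMessages to the subscriber here
}

func (b *stubBroker) Publish(stream messaging_pb.SeaweedMessaging_PublishServer) error {
	return nil // a real broker would persist incoming DataMessages here
}

func (b *stubBroker) ConfigureTopic(ctx context.Context, req *messaging_pb.ConfigureTopicRequest) (*messaging_pb.ConfigureTopicResponse, error) {
	return &messaging_pb.ConfigureTopicResponse{}, nil
}

func (b *stubBroker) GetTopicConfiguration(ctx context.Context, req *messaging_pb.GetTopicConfigurationRequest) (*messaging_pb.GetTopicConfigurationResponse, error) {
	return &messaging_pb.GetTopicConfigurationResponse{}, nil
}

func serveBroker() error {
	lis, err := net.Listen("tcp", ":17777") // port is an assumption
	if err != nil {
		return err
	}
	s := grpc.NewServer()
	messaging_pb.RegisterSeaweedMessagingServer(s, &stubBroker{})
	return s.Serve(lis)
}
```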

func _SeaweedMessaging_Subscribe_Handler(srv interface{}, stream grpc.ServerStream) error {
	return srv.(SeaweedMessagingServer).Subscribe(&seaweedMessagingSubscribeServer{stream})
}

type SeaweedMessaging_SubscribeServer interface {
	Send(*BrokerMessage) error
	Recv() (*SubscriberMessage, error)
	grpc.ServerStream
}

type seaweedMessagingSubscribeServer struct {
	grpc.ServerStream
}

func (x *seaweedMessagingSubscribeServer) Send(m *BrokerMessage) error {
	return x.ServerStream.SendMsg(m)
}

func (x *seaweedMessagingSubscribeServer) Recv() (*SubscriberMessage, error) {
	m := new(SubscriberMessage)
	if err := x.ServerStream.RecvMsg(m); err != nil {
		return nil, err
	}
	return m, nil
}
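The stream wrapper above is all a handler sees; a hypothetical broker-side loop for the Subscribe stream might read the subscriber's init frame and then push BrokerMessages, roughly as sketched below. The message channel and logging are invented for illustration; only Send, Recv, and the generated message types come from this file.

```
// Hypothetical server-side Subscribe loop (sketch only).
package example

import (
	"fmt"
	"log"

	"github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
)

func handleSubscribe(stream messaging_pb.SeaweedMessaging_SubscribeServer, messages <-chan *messaging_pb.Message) error {
	// The first frame from the subscriber is expected to carry the InitMessage.
	first, err := stream.Recv()
	if err != nil {
		return err
	}
	init := first.GetInit()
	if init == nil {
		return fmt.Errorf("expected init message, got %v", first)
	}
	log.Printf("subscriber %s on %s/%s partition %d from %v",
		init.GetSubscriberId(), init.GetNamespace(), init.GetTopic(),
		init.GetPartition(), init.GetStartPosition())

	// Stream data back until the source channel closes.
	for m := range messages {
		if err := stream.Send(&messaging_pb.BrokerMessage{Data: m}); err != nil {
			return err
		}
	}
	return nil
}
```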
|
||||
|
||||
func _SeaweedMessaging_Publish_Handler(srv interface{}, stream grpc.ServerStream) error {
|
||||
return srv.(SeaweedMessagingServer).Publish(&seaweedMessagingPublishServer{stream})
|
||||
}
|
||||
|
||||
type SeaweedMessaging_PublishServer interface {
|
||||
Send(*PublishResponse) error
|
||||
Recv() (*PublishRequest, error)
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
type seaweedMessagingPublishServer struct {
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
func (x *seaweedMessagingPublishServer) Send(m *PublishResponse) error {
|
||||
return x.ServerStream.SendMsg(m)
|
||||
}
|
||||
|
||||
func (x *seaweedMessagingPublishServer) Recv() (*PublishRequest, error) {
|
||||
m := new(PublishRequest)
|
||||
if err := x.ServerStream.RecvMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func _SeaweedMessaging_ConfigureTopic_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(ConfigureTopicRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(SeaweedMessagingServer).ConfigureTopic(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/messaging_pb.SeaweedMessaging/ConfigureTopic",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(SeaweedMessagingServer).ConfigureTopic(ctx, req.(*ConfigureTopicRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _SeaweedMessaging_GetTopicConfiguration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(GetTopicConfigurationRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(SeaweedMessagingServer).GetTopicConfiguration(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/messaging_pb.SeaweedMessaging/GetTopicConfiguration",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(SeaweedMessagingServer).GetTopicConfiguration(ctx, req.(*GetTopicConfigurationRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
var _SeaweedMessaging_serviceDesc = grpc.ServiceDesc{
|
||||
ServiceName: "messaging_pb.SeaweedMessaging",
|
||||
HandlerType: (*SeaweedMessagingServer)(nil),
|
||||
Methods: []grpc.MethodDesc{
|
||||
{
|
||||
MethodName: "ConfigureTopic",
|
||||
Handler: _SeaweedMessaging_ConfigureTopic_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "GetTopicConfiguration",
|
||||
Handler: _SeaweedMessaging_GetTopicConfiguration_Handler,
|
||||
},
|
||||
},
|
||||
Streams: []grpc.StreamDesc{
|
||||
{
|
||||
StreamName: "Subscribe",
|
||||
Handler: _SeaweedMessaging_Subscribe_Handler,
|
||||
ServerStreams: true,
|
||||
ClientStreams: true,
|
||||
},
|
||||
{
|
||||
StreamName: "Publish",
|
||||
Handler: _SeaweedMessaging_Publish_Handler,
|
||||
ServerStreams: true,
|
||||
ClientStreams: true,
|
||||
},
|
||||
},
|
||||
Metadata: "messaging.proto",
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("messaging.proto", fileDescriptor0) }
|
||||
|
||||
var fileDescriptor0 = []byte{
|
||||
// 829 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x56, 0x4d, 0x8f, 0xe3, 0x44,
|
||||
0x10, 0xdd, 0xb6, 0x93, 0xcc, 0xba, 0xf2, 0x49, 0x8b, 0x41, 0x91, 0x99, 0x01, 0xcb, 0x8b, 0x44,
|
||||
0x60, 0x84, 0x35, 0x0a, 0x42, 0x1a, 0x56, 0x2b, 0xa1, 0x24, 0x84, 0x25, 0xd2, 0x84, 0x8d, 0x3a,
|
||||
0xb9, 0xa2, 0xc8, 0x71, 0x7a, 0xb3, 0x56, 0x12, 0xdb, 0xb8, 0x3b, 0x8c, 0xe6, 0xc4, 0x01, 0xae,
|
||||
0x9c, 0xf8, 0x27, 0x48, 0xfc, 0x00, 0xb8, 0xf3, 0x9f, 0x90, 0xdb, 0xdf, 0x49, 0x26, 0x13, 0x22,
|
||||
0xcd, 0xcd, 0x2e, 0xbf, 0x7a, 0xaf, 0xba, 0xea, 0x95, 0x6d, 0xa8, 0xaf, 0x29, 0x63, 0xe6, 0xc2,
|
||||
0x76, 0x16, 0x86, 0xe7, 0xbb, 0xdc, 0xc5, 0x95, 0x24, 0x30, 0xf5, 0x66, 0xfa, 0xaf, 0x05, 0x78,
|
||||
0x6f, 0xbc, 0x99, 0x31, 0xcb, 0xb7, 0x67, 0xd4, 0x1f, 0x8a, 0x47, 0x14, 0x7f, 0x03, 0x05, 0xdb,
|
||||
0xb1, 0x79, 0x13, 0x69, 0xa8, 0x55, 0x6e, 0x5f, 0x19, 0xd9, 0x14, 0x63, 0x07, 0x6e, 0x0c, 0x1c,
|
||||
0x9b, 0x47, 0xd7, 0x44, 0x24, 0xe2, 0x57, 0x20, 0x9b, 0xd6, 0xb2, 0x29, 0x89, 0xfc, 0xcf, 0x1f,
|
||||
0xcb, 0xef, 0x58, 0xcb, 0x38, 0x3d, 0x48, 0x53, 0xff, 0x96, 0xa0, 0x9c, 0xe1, 0xc4, 0x17, 0xa0,
|
||||
0x38, 0xe6, 0x9a, 0x32, 0xcf, 0xb4, 0xa8, 0xa8, 0x49, 0x21, 0x69, 0x00, 0xbf, 0x0f, 0x45, 0xee,
|
||||
0x7a, 0xb6, 0x25, 0xd4, 0x14, 0x12, 0xde, 0x04, 0x39, 0x9e, 0xe9, 0x73, 0x9b, 0xdb, 0xae, 0xd3,
|
||||
0x94, 0x35, 0xd4, 0x2a, 0x92, 0x34, 0x80, 0xa7, 0x50, 0x65, 0xdc, 0xf4, 0xf9, 0xc8, 0x65, 0x21,
|
||||
0xa2, 0xa0, 0xa1, 0x56, 0xad, 0xfd, 0xf5, 0xff, 0x38, 0xa9, 0x31, 0xce, 0x12, 0x90, 0x3c, 0x1f,
|
||||
0xd6, 0xa0, 0xcc, 0xed, 0x35, 0x65, 0xdc, 0x5c, 0x7b, 0x3f, 0xb0, 0x66, 0x51, 0x43, 0x2d, 0x99,
|
||||
0x64, 0x43, 0xf8, 0x05, 0x54, 0x59, 0xc2, 0x3f, 0xb5, 0xe7, 0xcd, 0x92, 0x28, 0xbf, 0x92, 0x06,
|
||||
0x07, 0x73, 0xfd, 0x06, 0xaa, 0x39, 0x19, 0x0c, 0x50, 0xba, 0xed, 0x4c, 0xfa, 0xe3, 0x49, 0xe3,
|
||||
0x19, 0xae, 0xc0, 0xf3, 0x7e, 0x87, 0xdc, 0x0e, 0x82, 0x3b, 0x84, 0xab, 0xa0, 0x4c, 0x06, 0xc3,
|
||||
0xfe, 0x78, 0xd2, 0x19, 0x8e, 0x1a, 0x92, 0x7a, 0x05, 0x90, 0xb6, 0x15, 0x5f, 0x02, 0x84, 0x27,
|
||||
0xa3, 0x81, 0x12, 0x12, 0xd5, 0x28, 0x51, 0x64, 0x30, 0xd7, 0xff, 0x45, 0x70, 0x16, 0x43, 0x35,
|
||||
0x50, 0x92, 0x32, 0x43, 0x64, 0x57, 0xba, 0x46, 0x24, 0x0d, 0xe2, 0x06, 0xc8, 0x4b, 0x7a, 0x2f,
|
||||
0xda, 0x5d, 0x21, 0xc1, 0x65, 0x30, 0x82, 0x9f, 0xcd, 0xd5, 0x86, 0x8a, 0x46, 0x57, 0x48, 0x78,
|
||||
0x83, 0x5f, 0xc1, 0xd9, 0x3b, 0x6a, 0xce, 0xa9, 0xcf, 0x9a, 0x05, 0x4d, 0x6e, 0x95, 0xdb, 0x7a,
|
||||
0xbe, 0xbd, 0x71, 0x23, 0xbf, 0x0f, 0x41, 0x7d, 0x87, 0xfb, 0xf7, 0x24, 0x4e, 0x51, 0x5f, 0x42,
|
||||
0x25, 0xfb, 0x20, 0x56, 0x0d, 0xc7, 0x9f, 0x57, 0x95, 0x32, 0xaa, 0x2f, 0xa5, 0x1b, 0xa4, 0xff,
|
||||
0x85, 0xa0, 0xda, 0xf5, 0xdd, 0x65, 0xea, 0xe8, 0xcf, 0xa0, 0x30, 0x37, 0xb9, 0x19, 0x39, 0xfa,
|
||||
0x7c, 0x6f, 0x21, 0x44, 0x40, 0xf0, 0x6b, 0x78, 0xee, 0xd3, 0xb9, 0xed, 0x53, 0x8b, 0x47, 0x06,
|
||||
0xde, 0x5a, 0x80, 0x1c, 0xb3, 0x41, 0x22, 0x6c, 0x4c, 0x92, 0x24, 0xab, 0xd7, 0x50, 0xdf, 0x7a,
|
||||
0x18, 0xcc, 0xc1, 0xa1, 0x77, 0xd3, 0x99, 0x60, 0x48, 0xac, 0x4c, 0xef, 0x42, 0x4a, 0xfd, 0x4f,
|
||||
0x19, 0x6a, 0xa3, 0xcd, 0x6c, 0x65, 0xb3, 0x77, 0x84, 0xfe, 0xb4, 0xa1, 0x2c, 0xd8, 0xa4, 0xec,
|
||||
0x2a, 0xb6, 0xf2, 0x95, 0xe4, 0xb1, 0x7b, 0xf7, 0x30, 0x3c, 0xb6, 0x74, 0x44, 0xf6, 0xb7, 0x26,
|
||||
0x37, 0x73, 0x9d, 0x50, 0xa7, 0x4f, 0xbc, 0x86, 0xea, 0x3f, 0x08, 0xca, 0x19, 0xd9, 0xec, 0x8c,
|
||||
0x2b, 0x07, 0x66, 0x8c, 0xdf, 0xa4, 0xce, 0x92, 0x85, 0xb3, 0xbe, 0x3a, 0xf6, 0x64, 0x4f, 0x60,
|
||||
0xb6, 0xdf, 0x25, 0xa8, 0x27, 0x82, 0xcc, 0x73, 0x1d, 0x46, 0x71, 0x0f, 0x4a, 0x96, 0xeb, 0xbc,
|
||||
0xb5, 0x17, 0xfb, 0x5f, 0xa1, 0x5b, 0x70, 0xa3, 0x27, 0xb0, 0x71, 0xf3, 0xa3, 0x54, 0x3c, 0xd8,
|
||||
0x31, 0xe2, 0x17, 0x87, 0x69, 0x1e, 0xb6, 0xe2, 0x0d, 0x54, 0x73, 0x1a, 0xf8, 0x53, 0xa8, 0x27,
|
||||
0x63, 0x98, 0x5a, 0xee, 0xc6, 0x09, 0x1d, 0x56, 0x24, 0xb5, 0x24, 0xdc, 0x0b, 0xa2, 0x27, 0x98,
|
||||
0xf8, 0x0f, 0x04, 0xe7, 0xa1, 0xd8, 0xc6, 0xa7, 0x93, 0xc0, 0x05, 0xb1, 0x97, 0x4f, 0x31, 0xd0,
|
||||
0x77, 0x50, 0xb5, 0x22, 0x32, 0x33, 0x31, 0x51, 0xb9, 0xad, 0xe5, 0x3b, 0x21, 0x64, 0x7a, 0x59,
|
||||
0x1c, 0xc9, 0xa7, 0xe9, 0x4d, 0xf8, 0x60, 0xbb, 0xa8, 0xb0, 0x6b, 0x3a, 0x81, 0x8b, 0xd7, 0x94,
|
||||
0xef, 0x61, 0x38, 0xbd, 0x6a, 0x7d, 0x01, 0x97, 0x0f, 0x70, 0x46, 0x06, 0xd9, 0x39, 0x16, 0x3a,
|
||||
0xed, 0x58, 0xbf, 0x00, 0xde, 0x05, 0x1d, 0x3d, 0x5d, 0xfc, 0x11, 0x80, 0xe5, 0xae, 0x56, 0xd4,
|
||||
0x12, 0x35, 0x84, 0x47, 0xc8, 0x44, 0x82, 0xcf, 0x98, 0x4f, 0xbd, 0x95, 0x6d, 0xa5, 0xbd, 0x57,
|
||||
0x48, 0x36, 0xd4, 0xfe, 0x4d, 0x86, 0xc6, 0x98, 0x9a, 0x77, 0x94, 0xce, 0x87, 0x71, 0xe9, 0xf8,
|
||||
0x0d, 0x28, 0xc9, 0xb7, 0x13, 0x7f, 0xfc, 0xc8, 0x47, 0x55, 0xfd, 0xf0, 0xc0, 0xeb, 0x55, 0x7f,
|
||||
0xd6, 0x42, 0xd7, 0x08, 0xdf, 0xc2, 0x59, 0x64, 0x76, 0x7c, 0x71, 0x68, 0xd5, 0xd5, 0xcb, 0x83,
|
||||
0x1b, 0x12, 0xb1, 0xfd, 0x08, 0xb5, 0xbc, 0x17, 0xf0, 0x8b, 0x7c, 0xda, 0x5e, 0xfb, 0xaa, 0x9f,
|
||||
0x1c, 0x06, 0xc5, 0x12, 0xd8, 0x87, 0xf3, 0xbd, 0xc3, 0xc7, 0x5b, 0x3f, 0x42, 0x87, 0x5c, 0xa7,
|
||||
0x5e, 0x1d, 0x85, 0x8d, 0x35, 0xbb, 0x3a, 0x34, 0x58, 0x38, 0x85, 0xb7, 0xcc, 0xb0, 0x56, 0x36,
|
||||
0x75, 0x78, 0xb7, 0x96, 0x0c, 0x64, 0x14, 0xfc, 0xf9, 0xcd, 0x4a, 0xe2, 0x07, 0xf0, 0xcb, 0xff,
|
||||
0x02, 0x00, 0x00, 0xff, 0xff, 0xed, 0x8d, 0x77, 0xac, 0x13, 0x0a, 0x00, 0x00,
|
||||
}
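For orientation only, a minimal sketch (not part of this commit) of how the generated SeaweedMessaging stubs above could be wired into a gRPC server. The broker type, listen address, and the `weed/pb/messaging_pb` import path are assumptions made for illustration.
```
package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"

	"github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
)

// demoBroker is a hypothetical, do-nothing implementation of SeaweedMessagingServer.
type demoBroker struct{}

func (b *demoBroker) Subscribe(stream messaging_pb.SeaweedMessaging_SubscribeServer) error { return nil }
func (b *demoBroker) Publish(stream messaging_pb.SeaweedMessaging_PublishServer) error     { return nil }
func (b *demoBroker) ConfigureTopic(ctx context.Context, in *messaging_pb.ConfigureTopicRequest) (*messaging_pb.ConfigureTopicResponse, error) {
	return &messaging_pb.ConfigureTopicResponse{}, nil
}
func (b *demoBroker) GetTopicConfiguration(ctx context.Context, in *messaging_pb.GetTopicConfigurationRequest) (*messaging_pb.GetTopicConfigurationResponse, error) {
	return &messaging_pb.GetTopicConfigurationResponse{}, nil
}

func main() {
	lis, err := net.Listen("tcp", ":17777") // port chosen arbitrarily for the sketch
	if err != nil {
		log.Fatal(err)
	}
	s := grpc.NewServer()
	messaging_pb.RegisterSeaweedMessagingServer(s, &demoBroker{})
	log.Fatal(s.Serve(lis))
}
```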
@@ -1,66 +0,0 @@
syntax = "proto3";

package queue_pb;

option java_package = "seaweedfs.client";
option java_outer_classname = "QueueProto";

//////////////////////////////////////////////////

service SeaweedQueue {

	rpc StreamWrite (stream WriteMessageRequest) returns (stream WriteMessageResponse) {
	}

	rpc StreamRead (ReadMessageRequest) returns (stream ReadMessageResponse) {
	}

	rpc ConfigureTopic (ConfigureTopicRequest) returns (ConfigureTopicResponse) {
	}

	rpc DeleteTopic (DeleteTopicRequest) returns (DeleteTopicResponse) {
	}

}

//////////////////////////////////////////////////

message WriteMessageRequest {
	string topic = 1;
	int64 event_ns = 2;
	bytes partition_key = 3;
	bytes data = 4;
}

message WriteMessageResponse {
	string error = 1;
	int64 ack_ns = 2;
}

message ReadMessageRequest {
	string topic = 1;
	int64 start_ns = 2;
}

message ReadMessageResponse {
	string error = 1;
	int64 event_ns = 2;
	bytes data = 3;
}

message ConfigureTopicRequest {
	string topic = 1;
	int64 ttl_seconds = 2;
	int32 partition_count = 3;
}
message ConfigureTopicResponse {
	string error = 1;
}

message DeleteTopicRequest {
	string topic = 1;
}
message DeleteTopicResponse {
	string error = 1;
}

@ -1,516 +0,0 @@
|
|||
// Code generated by protoc-gen-go.
|
||||
// source: queue.proto
|
||||
// DO NOT EDIT!
|
||||
|
||||
/*
|
||||
Package queue_pb is a generated protocol buffer package.
|
||||
|
||||
It is generated from these files:
|
||||
queue.proto
|
||||
|
||||
It has these top-level messages:
|
||||
WriteMessageRequest
|
||||
WriteMessageResponse
|
||||
ReadMessageRequest
|
||||
ReadMessageResponse
|
||||
ConfigureTopicRequest
|
||||
ConfigureTopicResponse
|
||||
DeleteTopicRequest
|
||||
DeleteTopicResponse
|
||||
*/
|
||||
package queue_pb
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
|
||||
import (
|
||||
context "golang.org/x/net/context"
|
||||
grpc "google.golang.org/grpc"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
type WriteMessageRequest struct {
|
||||
Topic string `protobuf:"bytes,1,opt,name=topic" json:"topic,omitempty"`
|
||||
EventNs int64 `protobuf:"varint,2,opt,name=event_ns,json=eventNs" json:"event_ns,omitempty"`
|
||||
PartitionKey []byte `protobuf:"bytes,3,opt,name=partition_key,json=partitionKey,proto3" json:"partition_key,omitempty"`
|
||||
Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"`
|
||||
}
|
||||
|
||||
func (m *WriteMessageRequest) Reset() { *m = WriteMessageRequest{} }
|
||||
func (m *WriteMessageRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*WriteMessageRequest) ProtoMessage() {}
|
||||
func (*WriteMessageRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
|
||||
|
||||
func (m *WriteMessageRequest) GetTopic() string {
|
||||
if m != nil {
|
||||
return m.Topic
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *WriteMessageRequest) GetEventNs() int64 {
|
||||
if m != nil {
|
||||
return m.EventNs
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *WriteMessageRequest) GetPartitionKey() []byte {
|
||||
if m != nil {
|
||||
return m.PartitionKey
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *WriteMessageRequest) GetData() []byte {
|
||||
if m != nil {
|
||||
return m.Data
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type WriteMessageResponse struct {
|
||||
Error string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"`
|
||||
AckNs int64 `protobuf:"varint,2,opt,name=ack_ns,json=ackNs" json:"ack_ns,omitempty"`
|
||||
}
|
||||
|
||||
func (m *WriteMessageResponse) Reset() { *m = WriteMessageResponse{} }
|
||||
func (m *WriteMessageResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*WriteMessageResponse) ProtoMessage() {}
|
||||
func (*WriteMessageResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
|
||||
|
||||
func (m *WriteMessageResponse) GetError() string {
|
||||
if m != nil {
|
||||
return m.Error
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *WriteMessageResponse) GetAckNs() int64 {
|
||||
if m != nil {
|
||||
return m.AckNs
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type ReadMessageRequest struct {
|
||||
Topic string `protobuf:"bytes,1,opt,name=topic" json:"topic,omitempty"`
|
||||
StartNs int64 `protobuf:"varint,2,opt,name=start_ns,json=startNs" json:"start_ns,omitempty"`
|
||||
}
|
||||
|
||||
func (m *ReadMessageRequest) Reset() { *m = ReadMessageRequest{} }
|
||||
func (m *ReadMessageRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*ReadMessageRequest) ProtoMessage() {}
|
||||
func (*ReadMessageRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
|
||||
|
||||
func (m *ReadMessageRequest) GetTopic() string {
|
||||
if m != nil {
|
||||
return m.Topic
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *ReadMessageRequest) GetStartNs() int64 {
|
||||
if m != nil {
|
||||
return m.StartNs
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type ReadMessageResponse struct {
|
||||
Error string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"`
|
||||
EventNs int64 `protobuf:"varint,2,opt,name=event_ns,json=eventNs" json:"event_ns,omitempty"`
|
||||
Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"`
|
||||
}
|
||||
|
||||
func (m *ReadMessageResponse) Reset() { *m = ReadMessageResponse{} }
|
||||
func (m *ReadMessageResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*ReadMessageResponse) ProtoMessage() {}
|
||||
func (*ReadMessageResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
|
||||
|
||||
func (m *ReadMessageResponse) GetError() string {
|
||||
if m != nil {
|
||||
return m.Error
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *ReadMessageResponse) GetEventNs() int64 {
|
||||
if m != nil {
|
||||
return m.EventNs
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *ReadMessageResponse) GetData() []byte {
|
||||
if m != nil {
|
||||
return m.Data
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type ConfigureTopicRequest struct {
|
||||
Topic string `protobuf:"bytes,1,opt,name=topic" json:"topic,omitempty"`
|
||||
TtlSeconds int64 `protobuf:"varint,2,opt,name=ttl_seconds,json=ttlSeconds" json:"ttl_seconds,omitempty"`
|
||||
PartitionCount int32 `protobuf:"varint,3,opt,name=partition_count,json=partitionCount" json:"partition_count,omitempty"`
|
||||
}
|
||||
|
||||
func (m *ConfigureTopicRequest) Reset() { *m = ConfigureTopicRequest{} }
|
||||
func (m *ConfigureTopicRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*ConfigureTopicRequest) ProtoMessage() {}
|
||||
func (*ConfigureTopicRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
|
||||
|
||||
func (m *ConfigureTopicRequest) GetTopic() string {
|
||||
if m != nil {
|
||||
return m.Topic
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *ConfigureTopicRequest) GetTtlSeconds() int64 {
|
||||
if m != nil {
|
||||
return m.TtlSeconds
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *ConfigureTopicRequest) GetPartitionCount() int32 {
|
||||
if m != nil {
|
||||
return m.PartitionCount
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type ConfigureTopicResponse struct {
|
||||
Error string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"`
|
||||
}
|
||||
|
||||
func (m *ConfigureTopicResponse) Reset() { *m = ConfigureTopicResponse{} }
|
||||
func (m *ConfigureTopicResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*ConfigureTopicResponse) ProtoMessage() {}
|
||||
func (*ConfigureTopicResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
|
||||
|
||||
func (m *ConfigureTopicResponse) GetError() string {
|
||||
if m != nil {
|
||||
return m.Error
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type DeleteTopicRequest struct {
|
||||
Topic string `protobuf:"bytes,1,opt,name=topic" json:"topic,omitempty"`
|
||||
}
|
||||
|
||||
func (m *DeleteTopicRequest) Reset() { *m = DeleteTopicRequest{} }
|
||||
func (m *DeleteTopicRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*DeleteTopicRequest) ProtoMessage() {}
|
||||
func (*DeleteTopicRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
|
||||
|
||||
func (m *DeleteTopicRequest) GetTopic() string {
|
||||
if m != nil {
|
||||
return m.Topic
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type DeleteTopicResponse struct {
|
||||
Error string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"`
|
||||
}
|
||||
|
||||
func (m *DeleteTopicResponse) Reset() { *m = DeleteTopicResponse{} }
|
||||
func (m *DeleteTopicResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*DeleteTopicResponse) ProtoMessage() {}
|
||||
func (*DeleteTopicResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
|
||||
|
||||
func (m *DeleteTopicResponse) GetError() string {
|
||||
if m != nil {
|
||||
return m.Error
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*WriteMessageRequest)(nil), "queue_pb.WriteMessageRequest")
|
||||
proto.RegisterType((*WriteMessageResponse)(nil), "queue_pb.WriteMessageResponse")
|
||||
proto.RegisterType((*ReadMessageRequest)(nil), "queue_pb.ReadMessageRequest")
|
||||
proto.RegisterType((*ReadMessageResponse)(nil), "queue_pb.ReadMessageResponse")
|
||||
proto.RegisterType((*ConfigureTopicRequest)(nil), "queue_pb.ConfigureTopicRequest")
|
||||
proto.RegisterType((*ConfigureTopicResponse)(nil), "queue_pb.ConfigureTopicResponse")
|
||||
proto.RegisterType((*DeleteTopicRequest)(nil), "queue_pb.DeleteTopicRequest")
|
||||
proto.RegisterType((*DeleteTopicResponse)(nil), "queue_pb.DeleteTopicResponse")
|
||||
}
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ context.Context
|
||||
var _ grpc.ClientConn
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the grpc package it is being compiled against.
|
||||
const _ = grpc.SupportPackageIsVersion4
|
||||
|
||||
// Client API for SeaweedQueue service
|
||||
|
||||
type SeaweedQueueClient interface {
|
||||
StreamWrite(ctx context.Context, opts ...grpc.CallOption) (SeaweedQueue_StreamWriteClient, error)
|
||||
StreamRead(ctx context.Context, in *ReadMessageRequest, opts ...grpc.CallOption) (SeaweedQueue_StreamReadClient, error)
|
||||
ConfigureTopic(ctx context.Context, in *ConfigureTopicRequest, opts ...grpc.CallOption) (*ConfigureTopicResponse, error)
|
||||
DeleteTopic(ctx context.Context, in *DeleteTopicRequest, opts ...grpc.CallOption) (*DeleteTopicResponse, error)
|
||||
}
|
||||
|
||||
type seaweedQueueClient struct {
|
||||
cc *grpc.ClientConn
|
||||
}
|
||||
|
||||
func NewSeaweedQueueClient(cc *grpc.ClientConn) SeaweedQueueClient {
|
||||
return &seaweedQueueClient{cc}
|
||||
}
|
||||
|
||||
func (c *seaweedQueueClient) StreamWrite(ctx context.Context, opts ...grpc.CallOption) (SeaweedQueue_StreamWriteClient, error) {
|
||||
stream, err := grpc.NewClientStream(ctx, &_SeaweedQueue_serviceDesc.Streams[0], c.cc, "/queue_pb.SeaweedQueue/StreamWrite", opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
x := &seaweedQueueStreamWriteClient{stream}
|
||||
return x, nil
|
||||
}
|
||||
|
||||
type SeaweedQueue_StreamWriteClient interface {
|
||||
Send(*WriteMessageRequest) error
|
||||
Recv() (*WriteMessageResponse, error)
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
type seaweedQueueStreamWriteClient struct {
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
func (x *seaweedQueueStreamWriteClient) Send(m *WriteMessageRequest) error {
|
||||
return x.ClientStream.SendMsg(m)
|
||||
}
|
||||
|
||||
func (x *seaweedQueueStreamWriteClient) Recv() (*WriteMessageResponse, error) {
|
||||
m := new(WriteMessageResponse)
|
||||
if err := x.ClientStream.RecvMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (c *seaweedQueueClient) StreamRead(ctx context.Context, in *ReadMessageRequest, opts ...grpc.CallOption) (SeaweedQueue_StreamReadClient, error) {
|
||||
stream, err := grpc.NewClientStream(ctx, &_SeaweedQueue_serviceDesc.Streams[1], c.cc, "/queue_pb.SeaweedQueue/StreamRead", opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
x := &seaweedQueueStreamReadClient{stream}
|
||||
if err := x.ClientStream.SendMsg(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := x.ClientStream.CloseSend(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return x, nil
|
||||
}
|
||||
|
||||
type SeaweedQueue_StreamReadClient interface {
|
||||
Recv() (*ReadMessageResponse, error)
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
type seaweedQueueStreamReadClient struct {
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
func (x *seaweedQueueStreamReadClient) Recv() (*ReadMessageResponse, error) {
|
||||
m := new(ReadMessageResponse)
|
||||
if err := x.ClientStream.RecvMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (c *seaweedQueueClient) ConfigureTopic(ctx context.Context, in *ConfigureTopicRequest, opts ...grpc.CallOption) (*ConfigureTopicResponse, error) {
|
||||
out := new(ConfigureTopicResponse)
|
||||
err := grpc.Invoke(ctx, "/queue_pb.SeaweedQueue/ConfigureTopic", in, out, c.cc, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *seaweedQueueClient) DeleteTopic(ctx context.Context, in *DeleteTopicRequest, opts ...grpc.CallOption) (*DeleteTopicResponse, error) {
|
||||
out := new(DeleteTopicResponse)
|
||||
err := grpc.Invoke(ctx, "/queue_pb.SeaweedQueue/DeleteTopic", in, out, c.cc, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// Server API for SeaweedQueue service
|
||||
|
||||
type SeaweedQueueServer interface {
|
||||
StreamWrite(SeaweedQueue_StreamWriteServer) error
|
||||
StreamRead(*ReadMessageRequest, SeaweedQueue_StreamReadServer) error
|
||||
ConfigureTopic(context.Context, *ConfigureTopicRequest) (*ConfigureTopicResponse, error)
|
||||
DeleteTopic(context.Context, *DeleteTopicRequest) (*DeleteTopicResponse, error)
|
||||
}
|
||||
|
||||
func RegisterSeaweedQueueServer(s *grpc.Server, srv SeaweedQueueServer) {
|
||||
s.RegisterService(&_SeaweedQueue_serviceDesc, srv)
|
||||
}
|
||||
|
||||
func _SeaweedQueue_StreamWrite_Handler(srv interface{}, stream grpc.ServerStream) error {
|
||||
return srv.(SeaweedQueueServer).StreamWrite(&seaweedQueueStreamWriteServer{stream})
|
||||
}
|
||||
|
||||
type SeaweedQueue_StreamWriteServer interface {
|
||||
Send(*WriteMessageResponse) error
|
||||
Recv() (*WriteMessageRequest, error)
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
type seaweedQueueStreamWriteServer struct {
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
func (x *seaweedQueueStreamWriteServer) Send(m *WriteMessageResponse) error {
|
||||
return x.ServerStream.SendMsg(m)
|
||||
}
|
||||
|
||||
func (x *seaweedQueueStreamWriteServer) Recv() (*WriteMessageRequest, error) {
|
||||
m := new(WriteMessageRequest)
|
||||
if err := x.ServerStream.RecvMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func _SeaweedQueue_StreamRead_Handler(srv interface{}, stream grpc.ServerStream) error {
|
||||
m := new(ReadMessageRequest)
|
||||
if err := stream.RecvMsg(m); err != nil {
|
||||
return err
|
||||
}
|
||||
return srv.(SeaweedQueueServer).StreamRead(m, &seaweedQueueStreamReadServer{stream})
|
||||
}
|
||||
|
||||
type SeaweedQueue_StreamReadServer interface {
|
||||
Send(*ReadMessageResponse) error
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
type seaweedQueueStreamReadServer struct {
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
func (x *seaweedQueueStreamReadServer) Send(m *ReadMessageResponse) error {
|
||||
return x.ServerStream.SendMsg(m)
|
||||
}
|
||||
|
||||
func _SeaweedQueue_ConfigureTopic_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(ConfigureTopicRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(SeaweedQueueServer).ConfigureTopic(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/queue_pb.SeaweedQueue/ConfigureTopic",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(SeaweedQueueServer).ConfigureTopic(ctx, req.(*ConfigureTopicRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _SeaweedQueue_DeleteTopic_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(DeleteTopicRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(SeaweedQueueServer).DeleteTopic(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/queue_pb.SeaweedQueue/DeleteTopic",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(SeaweedQueueServer).DeleteTopic(ctx, req.(*DeleteTopicRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
var _SeaweedQueue_serviceDesc = grpc.ServiceDesc{
|
||||
ServiceName: "queue_pb.SeaweedQueue",
|
||||
HandlerType: (*SeaweedQueueServer)(nil),
|
||||
Methods: []grpc.MethodDesc{
|
||||
{
|
||||
MethodName: "ConfigureTopic",
|
||||
Handler: _SeaweedQueue_ConfigureTopic_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "DeleteTopic",
|
||||
Handler: _SeaweedQueue_DeleteTopic_Handler,
|
||||
},
|
||||
},
|
||||
Streams: []grpc.StreamDesc{
|
||||
{
|
||||
StreamName: "StreamWrite",
|
||||
Handler: _SeaweedQueue_StreamWrite_Handler,
|
||||
ServerStreams: true,
|
||||
ClientStreams: true,
|
||||
},
|
||||
{
|
||||
StreamName: "StreamRead",
|
||||
Handler: _SeaweedQueue_StreamRead_Handler,
|
||||
ServerStreams: true,
|
||||
},
|
||||
},
|
||||
Metadata: "queue.proto",
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("queue.proto", fileDescriptor0) }
|
||||
|
||||
var fileDescriptor0 = []byte{
|
||||
// 429 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x8c, 0x53, 0xcd, 0x6e, 0xd3, 0x40,
|
||||
0x10, 0xae, 0x9b, 0xa6, 0x94, 0x49, 0x28, 0x68, 0xd2, 0xa2, 0x10, 0xd1, 0x36, 0x5a, 0x0e, 0x44,
|
||||
0x20, 0x59, 0x15, 0xbc, 0x41, 0x03, 0x27, 0x68, 0x04, 0x0e, 0x08, 0x89, 0x8b, 0xb5, 0xb5, 0xa7,
|
||||
0x95, 0x15, 0xb3, 0xeb, 0xee, 0x8e, 0xa9, 0x7a, 0xe2, 0x2d, 0x79, 0x1e, 0xe4, 0xb5, 0x5c, 0xdb,
|
||||
0x34, 0xb1, 0x7a, 0xf3, 0xcc, 0x78, 0xe7, 0xfb, 0xd9, 0x6f, 0x61, 0x70, 0x9d, 0x53, 0x4e, 0x7e,
|
||||
0x66, 0x34, 0x6b, 0xdc, 0x73, 0x45, 0x98, 0x5d, 0x88, 0x3f, 0x30, 0xfa, 0x61, 0x12, 0xa6, 0x73,
|
||||
0xb2, 0x56, 0x5e, 0x51, 0x40, 0xd7, 0x39, 0x59, 0xc6, 0x03, 0xe8, 0xb3, 0xce, 0x92, 0x68, 0xec,
|
||||
0x4d, 0xbd, 0xd9, 0xe3, 0xa0, 0x2c, 0xf0, 0x05, 0xec, 0xd1, 0x6f, 0x52, 0x1c, 0x2a, 0x3b, 0xde,
|
||||
0x9e, 0x7a, 0xb3, 0x5e, 0xf0, 0xc8, 0xd5, 0x0b, 0x8b, 0xaf, 0xe0, 0x49, 0x26, 0x0d, 0x27, 0x9c,
|
||||
0x68, 0x15, 0xae, 0xe8, 0x76, 0xdc, 0x9b, 0x7a, 0xb3, 0x61, 0x30, 0xbc, 0x6b, 0x7e, 0xa2, 0x5b,
|
||||
0x44, 0xd8, 0x89, 0x25, 0xcb, 0xf1, 0x8e, 0x9b, 0xb9, 0x6f, 0x31, 0x87, 0x83, 0x36, 0x01, 0x9b,
|
||||
0x69, 0x65, 0xa9, 0x60, 0x40, 0xc6, 0x68, 0x53, 0x31, 0x70, 0x05, 0x1e, 0xc2, 0xae, 0x8c, 0x56,
|
||||
0x35, 0x7e, 0x5f, 0x46, 0xab, 0x85, 0x15, 0x1f, 0x01, 0x03, 0x92, 0xf1, 0x43, 0x45, 0x58, 0x96,
|
||||
0xa6, 0x29, 0xc2, 0xd5, 0x0b, 0x2b, 0x7e, 0xc2, 0xa8, 0xb5, 0xa6, 0x93, 0x4a, 0x87, 0x19, 0x95,
|
||||
0xce, 0x5e, 0x43, 0xe7, 0x0d, 0x1c, 0xce, 0xb5, 0xba, 0x4c, 0xae, 0x72, 0x43, 0xdf, 0x0a, 0x22,
|
||||
0xdd, 0x2c, 0x4f, 0x60, 0xc0, 0x9c, 0x86, 0x96, 0x22, 0xad, 0xe2, 0x0a, 0x00, 0x98, 0xd3, 0x65,
|
||||
0xd9, 0xc1, 0xd7, 0xf0, 0xb4, 0x36, 0x3c, 0xd2, 0xb9, 0x62, 0x07, 0xd7, 0x0f, 0xf6, 0xef, 0xda,
|
||||
0xf3, 0xa2, 0x2b, 0x7c, 0x78, 0xfe, 0x3f, 0x70, 0x97, 0x2e, 0xf1, 0x06, 0xf0, 0x03, 0xa5, 0xc4,
|
||||
0x0f, 0x60, 0x29, 0xde, 0xc2, 0xa8, 0xf5, 0x6f, 0xd7, 0xe2, 0x77, 0x7f, 0xb7, 0x61, 0xb8, 0x24,
|
||||
0x79, 0x43, 0x14, 0x7f, 0x2d, 0xe2, 0x87, 0x01, 0x0c, 0x96, 0x6c, 0x48, 0xfe, 0x72, 0x01, 0xc0,
|
||||
0x23, 0xbf, 0x4a, 0xa5, 0xbf, 0x26, 0x92, 0x93, 0xe3, 0x4d, 0xe3, 0x12, 0x54, 0x6c, 0xcd, 0xbc,
|
||||
0x53, 0x0f, 0xcf, 0x01, 0xca, 0x9d, 0xc5, 0x45, 0xe2, 0xcb, 0xfa, 0xcc, 0xfd, 0x7c, 0x4c, 0x8e,
|
||||
0x36, 0x4c, 0xab, 0x85, 0xa7, 0x1e, 0x7e, 0x87, 0xfd, 0xb6, 0x79, 0x78, 0x52, 0x1f, 0x5a, 0x7b,
|
||||
0x9f, 0x93, 0xe9, 0xe6, 0x1f, 0xaa, 0xc5, 0xf8, 0x19, 0x06, 0x0d, 0xdf, 0x9a, 0x34, 0xef, 0x5b,
|
||||
0xdf, 0xa4, 0xb9, 0xc6, 0x6c, 0xb1, 0x75, 0x76, 0x0c, 0xcf, 0x6c, 0xe9, 0xeb, 0xa5, 0xf5, 0xa3,
|
||||
0x34, 0x21, 0xc5, 0x67, 0xe0, 0x2c, 0xfe, 0x52, 0xbc, 0xf6, 0x8b, 0x5d, 0xf7, 0xe8, 0xdf, 0xff,
|
||||
0x0b, 0x00, 0x00, 0xff, 0xff, 0x7d, 0x3e, 0x14, 0xd8, 0x03, 0x04, 0x00, 0x00,
|
||||
}
|
weed/pb/shared_values.go (new file, 5 lines)
@@ -0,0 +1,5 @@
package pb

const (
	AdminShellClient = "shell"
)
@@ -115,7 +115,7 @@ func (g *AzureSink) CreateEntry(key string, entry *filer_pb.Entry) error {
	}

	var writeErr error
	readErr := util.ReadUrlAsStream(fileUrl, nil, false, chunk.IsFullChunk, chunk.Offset, int(chunk.Size), func(data []byte) {
	readErr := util.ReadUrlAsStream(fileUrl, nil, false, chunk.IsFullChunk(), chunk.Offset, int(chunk.Size), func(data []byte) {
		_, writeErr = appendBlobURL.AppendBlock(context.Background(), bytes.NewReader(data), azblob.AppendBlobAccessConditions{}, nil)
	})

@@ -103,7 +103,7 @@ func (g *B2Sink) CreateEntry(key string, entry *filer_pb.Entry) error {
	}

	var writeErr error
	readErr := util.ReadUrlAsStream(fileUrl, nil, false, chunk.IsFullChunk, chunk.Offset, int(chunk.Size), func(data []byte) {
	readErr := util.ReadUrlAsStream(fileUrl, nil, false, chunk.IsFullChunk(), chunk.Offset, int(chunk.Size), func(data []byte) {
		_, err := writer.Write(data)
		if err != nil {
			writeErr = err

@@ -90,7 +90,7 @@ func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry) error {
	}
	glog.V(1).Infof("lookup: %v", lookupRequest)
	if resp, err := filer_pb.LookupEntry(client, lookupRequest); err == nil {
		if filer2.ETag(resp.Entry.Chunks) == filer2.ETag(entry.Chunks) {
		if filer2.ETag(resp.Entry) == filer2.ETag(entry) {
			glog.V(0).Infof("already replicated %s", key)
			return nil
		}

@@ -160,7 +160,7 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParent
		// skip if already changed
		// this usually happens when the messages are not ordered
		glog.V(0).Infof("late updates %s", key)
	} else if filer2.ETag(newEntry.Chunks) == filer2.ETag(existingEntry.Chunks) {
	} else if filer2.ETag(newEntry) == filer2.ETag(existingEntry) {
		// skip if no change
		// this usually happens when retrying the replication
		glog.V(0).Infof("already replicated %s", key)

@@ -101,7 +101,7 @@ func (g *GcsSink) CreateEntry(key string, entry *filer_pb.Entry) error {
		return err
	}

	err = util.ReadUrlAsStream(fileUrl, nil, false, chunk.IsFullChunk, chunk.Offset, int(chunk.Size), func(data []byte) {
	err = util.ReadUrlAsStream(fileUrl, nil, false, chunk.IsFullChunk(), chunk.Offset, int(chunk.Size), func(data []byte) {
		wc.Write(data)
	})

@@ -25,6 +25,7 @@ type S3Sink struct {
	region      string
	bucket      string
	dir         string
	endpoint    string
	filerSource *source.FilerSource
}

@@ -44,12 +45,14 @@ func (s3sink *S3Sink) Initialize(configuration util.Configuration, prefix string
	glog.V(0).Infof("sink.s3.region: %v", configuration.GetString(prefix+"region"))
	glog.V(0).Infof("sink.s3.bucket: %v", configuration.GetString(prefix+"bucket"))
	glog.V(0).Infof("sink.s3.directory: %v", configuration.GetString(prefix+"directory"))
	glog.V(0).Infof("sink.s3.endpoint: %v", configuration.GetString(prefix+"endpoint"))
	return s3sink.initialize(
		configuration.GetString(prefix+"aws_access_key_id"),
		configuration.GetString(prefix+"aws_secret_access_key"),
		configuration.GetString(prefix+"region"),
		configuration.GetString(prefix+"bucket"),
		configuration.GetString(prefix+"directory"),
		configuration.GetString(prefix+"endpoint"),
	)
}

@@ -57,13 +60,15 @@ func (s3sink *S3Sink) SetSourceFiler(s *source.FilerSource) {
	s3sink.filerSource = s
}

func (s3sink *S3Sink) initialize(awsAccessKeyId, awsSecretAccessKey, region, bucket, dir string) error {
func (s3sink *S3Sink) initialize(awsAccessKeyId, awsSecretAccessKey, region, bucket, dir, endpoint string) error {
	s3sink.region = region
	s3sink.bucket = bucket
	s3sink.dir = dir
	s3sink.endpoint = endpoint

	config := &aws.Config{
		Region: aws.String(s3sink.region),
		Region:   aws.String(s3sink.region),
		Endpoint: aws.String(s3sink.endpoint),
	}
	if awsAccessKeyId != "" && awsSecretAccessKey != "" {
		config.Credentials = credentials.NewStaticCredentials(awsAccessKeyId, awsSecretAccessKey, "")

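Adding an endpoint to the sink configuration makes it possible to point replication at any S3-compatible store. A minimal sketch of building such a client with aws-sdk-go; the endpoint, region, bucket handling, and credentials here are placeholders, not values from this commit.
```
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	config := &aws.Config{
		Region:           aws.String("us-east-1"),
		Endpoint:         aws.String("http://localhost:9000"), // any S3-compatible endpoint, e.g. a local MinIO
		S3ForcePathStyle: aws.Bool(true),
	}
	// Static credentials are optional; without them the SDK falls back to its default chain.
	config.Credentials = credentials.NewStaticCredentials("ACCESS_KEY", "SECRET_KEY", "")

	sess, err := session.NewSession(config)
	if err != nil {
		log.Fatal(err)
	}

	svc := s3.New(sess)
	out, err := svc.ListBuckets(&s3.ListBucketsInput{})
	if err != nil {
		log.Fatal(err)
	}
	for _, b := range out.Buckets {
		fmt.Println(*b.Name)
	}
}
```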
@@ -92,7 +92,9 @@ func (k *AwsSqsInput) ReceiveMessage() (key string, message *filer_pb.EventNotif
	}

	// process the message
	key = *result.Messages[0].Attributes["key"]
	// fmt.Printf("messages: %+v\n", result.Messages[0])
	keyValue := result.Messages[0].MessageAttributes["key"]
	key = *keyValue.StringValue
	text := *result.Messages[0].Body
	message = &filer_pb.EventNotification{}
	err = proto.UnmarshalText(text, message)

@@ -107,7 +107,7 @@ func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploa
		CompleteMultipartUploadOutput: s3.CompleteMultipartUploadOutput{
			Location: aws.String(fmt.Sprintf("http://%s%s/%s", s3a.option.Filer, dirName, entryName)),
			Bucket:   input.Bucket,
			ETag:     aws.String("\"" + filer2.ETag(finalParts) + "\""),
			ETag:     aws.String("\"" + filer2.ETagChunks(finalParts) + "\""),
			Key:      objectKey(input.Key),
		},
	}

@@ -208,7 +208,7 @@ func (s3a *S3ApiServer) listObjectParts(input *s3.ListPartsInput) (output *ListP
				PartNumber:   aws.Int64(int64(partNumber)),
				LastModified: aws.Time(time.Unix(entry.Attributes.Mtime, 0)),
				Size:         aws.Int64(int64(filer2.TotalSize(entry.Chunks))),
				ETag:         aws.String("\"" + filer2.ETag(entry.Chunks) + "\""),
				ETag:         aws.String("\"" + filer2.ETag(entry) + "\""),
			})
		}
	}

@@ -139,7 +139,7 @@ func (s3a *S3ApiServer) listFilerEntries(bucket, originalPrefix string, maxKeys
			contents = append(contents, ListEntry{
				Key:          fmt.Sprintf("%s%s", dir, entry.Name),
				LastModified: time.Unix(entry.Attributes.Mtime, 0),
				ETag:         "\"" + filer2.ETag(entry.Chunks) + "\"",
				ETag:         "\"" + filer2.ETag(entry) + "\"",
				Size:         int64(filer2.TotalSize(entry.Chunks)),
				Owner: CanonicalUser{
					ID: fmt.Sprintf("%x", entry.Attributes.Uid),

@@ -217,6 +217,38 @@ func (fs *FilerServer) UpdateEntry(ctx context.Context, req *filer_pb.UpdateEntr
	return &filer_pb.UpdateEntryResponse{}, err
}

func (fs *FilerServer) AppendToEntry(ctx context.Context, req *filer_pb.AppendToEntryRequest) (*filer_pb.AppendToEntryResponse, error) {

	fullpath := util.NewFullPath(req.Directory, req.EntryName)
	var offset int64 = 0
	entry, err := fs.filer.FindEntry(ctx, util.FullPath(fullpath))
	if err == filer_pb.ErrNotFound {
		entry = &filer2.Entry{
			FullPath: fullpath,
			Attr: filer2.Attr{
				Crtime: time.Now(),
				Mtime:  time.Now(),
				Mode:   os.FileMode(0644),
				Uid:    OS_UID,
				Gid:    OS_GID,
			},
		}
	} else {
		offset = int64(filer2.TotalSize(entry.Chunks))
	}

	for _, chunk := range req.Chunks {
		chunk.Offset = offset
		offset += int64(chunk.Size)
	}

	entry.Chunks = append(entry.Chunks, req.Chunks...)

	err = fs.filer.CreateEntry(context.Background(), entry, false)

	return &filer_pb.AppendToEntryResponse{}, err
}

func (fs *FilerServer) DeleteEntry(ctx context.Context, req *filer_pb.DeleteEntryRequest) (resp *filer_pb.DeleteEntryResponse, err error) {
	err = fs.filer.DeleteEntryMetaAndData(ctx, util.JoinPath(req.Directory, req.Name), req.IsRecursive, req.IgnoreRecursiveError, req.IsDeleteData)
	resp = &filer_pb.DeleteEntryResponse{}

@@ -232,7 +264,7 @@ func (fs *FilerServer) AssignVolume(ctx context.Context, req *filer_pb.AssignVol
	if req.TtlSec > 0 {
		ttlStr = strconv.Itoa(int(req.TtlSec))
	}
	collection, replication := fs.detectCollection(req.ParentPath, req.Collection, req.Replication)
	collection, replication, _ := fs.detectCollection(req.ParentPath, req.Collection, req.Replication)

	var altRequest *operation.VolumeAssignRequest

@@ -327,7 +359,6 @@ func (fs *FilerServer) GetFilerConfiguration(ctx context.Context, req *filer_pb.
		Replication: fs.option.DefaultReplication,
		MaxMb:       uint32(fs.option.MaxMB),
		DirBuckets:  fs.filer.DirBucketsPath,
		DirQueues:   fs.filer.DirQueuesPath,
		Cipher:      fs.filer.Cipher,
	}, nil
}

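The chunk-offset bookkeeping in AppendToEntry above is easy to illustrate in isolation: each appended chunk starts where the previous data ended. A small self-contained sketch with simplified types (these are not the real filer_pb structs):
```
package main

import "fmt"

// chunk mirrors just the fields the append logic touches.
type chunk struct {
	FileId string
	Offset int64
	Size   uint64
}

// appendChunks places new chunks after the existing data, like the offset loop
// in AppendToEntry: each new chunk's Offset is the running end of the file.
func appendChunks(existing []*chunk, existingSize int64, newChunks []*chunk) []*chunk {
	offset := existingSize
	for _, c := range newChunks {
		c.Offset = offset
		offset += int64(c.Size)
	}
	return append(existing, newChunks...)
}

func main() {
	existing := []*chunk{{FileId: "3,01", Offset: 0, Size: 1024}}
	all := appendChunks(existing, 1024, []*chunk{{FileId: "3,02", Size: 512}, {FileId: "3,03", Size: 256}})
	for _, c := range all {
		fmt.Println(c.FileId, c.Offset, c.Size)
	}
}
```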
@@ -4,12 +4,13 @@ import (
	"strings"
	"time"

	"github.com/chrislusf/seaweedfs/weed/filer2"
	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/util"
)

func (fs *FilerServer) ListenForEvents(req *filer_pb.ListenForEventsRequest, stream filer_pb.SeaweedFiler_ListenForEventsServer) error {
func (fs *FilerServer) SubscribeMetadata(req *filer_pb.SubscribeMetadataRequest, stream filer_pb.SeaweedFiler_SubscribeMetadataServer) error {

	peerAddress := findClientAddress(stream.Context(), 0)

@@ -37,7 +38,7 @@ func (fs *FilerServer) ListenForEvents(req *filer_pb.ListenForEventsRequest, str
		fullpath := util.Join(dirPath, entryName)

		// skip on filer internal meta logs
		if strings.HasPrefix(fullpath, "/.meta") {
		if strings.HasPrefix(fullpath, filer2.SystemLogDir) {
			return nil
		}

@@ -45,7 +46,7 @@ func (fs *FilerServer) ListenForEvents(req *filer_pb.ListenForEventsRequest, str
			return nil
		}

		message := &filer_pb.FullEventNotification{
		message := &filer_pb.SubscribeMetadataResponse{
			Directory:         dirPath,
			EventNotification: eventNotification,
		}

@@ -64,7 +65,6 @@ func (fs *FilerServer) ListenForEvents(req *filer_pb.ListenForEventsRequest, str
		fs.listenersLock.Unlock()
	}

	return nil
}

func (fs *FilerServer) addClient(clientType string, clientAddress string) (clientName string) {

@@ -44,12 +44,19 @@ func (fs *FilerServer) AtomicRenameEntry(ctx context.Context, req *filer_pb.Atom
}

func (fs *FilerServer) moveEntry(ctx context.Context, oldParent util.FullPath, entry *filer2.Entry, newParent util.FullPath, newName string, events *MoveEvents) error {
	if entry.IsDirectory() {
		if err := fs.moveFolderSubEntries(ctx, oldParent, entry, newParent, newName, events); err != nil {
			return err

	if err := fs.moveSelfEntry(ctx, oldParent, entry, newParent, newName, events, func() error {
		if entry.IsDirectory() {
			if err := fs.moveFolderSubEntries(ctx, oldParent, entry, newParent, newName, events); err != nil {
				return err
			}
		}
		return nil
	}); err != nil {
		return fmt.Errorf("fail to move %s => %s: %v", oldParent.Child(entry.Name()), newParent.Child(newName), err)
	}
	return fs.moveSelfEntry(ctx, oldParent, entry, newParent, newName, events)

	return nil
}

func (fs *FilerServer) moveFolderSubEntries(ctx context.Context, oldParent util.FullPath, entry *filer2.Entry, newParent util.FullPath, newName string, events *MoveEvents) error {

@@ -85,7 +92,8 @@ func (fs *FilerServer) moveFolderSubEntries(ctx context.Context, oldParent util.
	return nil
}

func (fs *FilerServer) moveSelfEntry(ctx context.Context, oldParent util.FullPath, entry *filer2.Entry, newParent util.FullPath, newName string, events *MoveEvents) error {
func (fs *FilerServer) moveSelfEntry(ctx context.Context, oldParent util.FullPath, entry *filer2.Entry, newParent util.FullPath, newName string, events *MoveEvents,
	moveFolderSubEntries func() error) error {

	oldPath, newPath := oldParent.Child(entry.Name()), newParent.Child(newName)

@@ -107,6 +115,14 @@ func (fs *FilerServer) moveSelfEntry(ctx context.Context, oldParent util.FullPat
		return createErr
	}

	events.newEntries = append(events.newEntries, newEntry)

	if moveFolderSubEntries != nil {
		if moveChildrenErr := moveFolderSubEntries(); moveChildrenErr != nil {
			return moveChildrenErr
		}
	}

	// delete old entry
	deleteErr := fs.filer.DeleteEntryMetaAndData(ctx, oldPath, false, false, false)
	if deleteErr != nil {

@@ -114,7 +130,7 @@ func (fs *FilerServer) moveSelfEntry(ctx context.Context, oldParent util.FullPat
	}

	events.oldEntries = append(events.oldEntries, entry)
	events.newEntries = append(events.newEntries, newEntry)

	return nil

}

@@ -24,6 +24,7 @@ import (
	_ "github.com/chrislusf/seaweedfs/weed/filer2/mysql"
	_ "github.com/chrislusf/seaweedfs/weed/filer2/postgres"
	_ "github.com/chrislusf/seaweedfs/weed/filer2/redis"
	_ "github.com/chrislusf/seaweedfs/weed/filer2/redis2"
	_ "github.com/chrislusf/seaweedfs/weed/filer2/mongodb"
	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/notification"

@@ -73,7 +74,7 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption)
		glog.Fatal("master list is required!")
	}

	fs.filer = filer2.NewFiler(option.Masters, fs.grpcDialOption, option.Port+10000, fs.notifyMetaListeners)
	fs.filer = filer2.NewFiler(option.Masters, fs.grpcDialOption, option.Port+10000, option.Collection, option.DefaultReplication, fs.notifyMetaListeners)
	fs.filer.Cipher = option.Cipher

	maybeStartMetrics(fs, option)

@@ -92,10 +93,9 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption)
	util.LoadConfiguration("notification", false)

	fs.option.recursiveDelete = v.GetBool("filer.options.recursive_delete")
	v.Set("filer.option.buckets_folder", "/buckets")
	v.Set("filer.option.queues_folder", "/queues")
	fs.filer.DirBucketsPath = v.GetString("filer.option.buckets_folder")
	fs.filer.DirQueuesPath = v.GetString("filer.option.queues_folder")
	v.SetDefault("filer.options.buckets_folder", "/buckets")
	fs.filer.DirBucketsPath = v.GetString("filer.options.buckets_folder")
	fs.filer.FsyncBuckets = v.GetStringSlice("filer.options.buckets_fsync")
	fs.filer.LoadConfiguration(v)

	notification.LoadConfiguration(v, "notification.")

@@ -108,7 +108,7 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption)
		readonlyMux.HandleFunc("/", fs.readonlyFilerHandler)
	}

	fs.filer.LoadBuckets(fs.filer.DirBucketsPath)
	fs.filer.LoadBuckets()

	util.OnInterrupt(func() {
		fs.filer.Shutdown()

@@ -8,6 +8,7 @@ import (
	"path/filepath"
	"strconv"
	"strings"
	"time"

	"github.com/chrislusf/seaweedfs/weed/filer2"
	"github.com/chrislusf/seaweedfs/weed/glog"

@@ -78,8 +79,26 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request,
		w.Header().Set("Content-Type", mimeType)
	}

	// if modified since
	if !entry.Attr.Mtime.IsZero() {
		w.Header().Set("Last-Modified", entry.Attr.Mtime.UTC().Format(http.TimeFormat))
		if r.Header.Get("If-Modified-Since") != "" {
			if t, parseError := time.Parse(http.TimeFormat, r.Header.Get("If-Modified-Since")); parseError == nil {
				if t.After(entry.Attr.Mtime) {
					w.WriteHeader(http.StatusNotModified)
					return
				}
			}
		}
	}

	// set etag
	setEtag(w, filer2.ETag(entry.Chunks))
	etag := filer2.ETagEntry(entry)
	if inm := r.Header.Get("If-None-Match"); inm == "\""+etag+"\"" {
		w.WriteHeader(http.StatusNotModified)
		return
	}
	setEtag(w, etag)

	if r.Method == "HEAD" {
		w.Header().Set("Content-Length", strconv.FormatInt(int64(filer2.TotalSize(entry.Chunks)), 10))

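The conditional-request handling added above follows the usual net/http pattern for If-Modified-Since and If-None-Match. A standalone sketch of the same idea; the handler name, ETag value, and modification time are illustrative only and not taken from this commit.
```
package main

import (
	"fmt"
	"log"
	"net/http"
	"time"
)

var lastModified = time.Now().UTC() // stand-in for the entry's Mtime

func serveDoc(w http.ResponseWriter, r *http.Request) {
	const etag = `"demo-etag"` // stand-in for a filer-computed ETag

	w.Header().Set("Last-Modified", lastModified.Format(http.TimeFormat))

	// If-Modified-Since: answer 304 when the client's copy is still current.
	if ims := r.Header.Get("If-Modified-Since"); ims != "" {
		if t, err := time.Parse(http.TimeFormat, ims); err == nil && !lastModified.After(t) {
			w.WriteHeader(http.StatusNotModified)
			return
		}
	}

	// If-None-Match: answer 304 when the ETag still matches.
	if inm := r.Header.Get("If-None-Match"); inm == etag {
		w.WriteHeader(http.StatusNotModified)
		return
	}

	w.Header().Set("ETag", etag)
	fmt.Fprintln(w, "fresh content")
}

func main() {
	http.HandleFunc("/doc", serveDoc)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```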
@@ -40,7 +40,7 @@ type FilerPostResult struct {
	Url string `json:"url,omitempty"`
}

func (fs *FilerServer) assignNewFileInfo(w http.ResponseWriter, r *http.Request, replication, collection, dataCenter, ttlString string) (fileId, urlLocation string, auth security.EncodedJwt, err error) {
func (fs *FilerServer) assignNewFileInfo(w http.ResponseWriter, r *http.Request, replication, collection, dataCenter, ttlString string, fsync bool) (fileId, urlLocation string, auth security.EncodedJwt, err error) {

	stats.FilerRequestCounter.WithLabelValues("assign").Inc()
	start := time.Now()

@@ -73,6 +73,9 @@ func (fs *FilerServer) assignNewFileInfo(w http.ResponseWriter, r *http.Request,
	}
	fileId = assignResult.Fid
	urlLocation = "http://" + assignResult.Url + "/" + assignResult.Fid
	if fsync {
		urlLocation += "?fsync=true"
	}
	auth = assignResult.Auth
	return
}

@@ -82,7 +85,7 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) {
	ctx := context.Background()

	query := r.URL.Query()
	collection, replication := fs.detectCollection(r.RequestURI, query.Get("collection"), query.Get("replication"))
	collection, replication, fsync := fs.detectCollection(r.RequestURI, query.Get("collection"), query.Get("replication"))
	dataCenter := query.Get("dataCenter")
	if dataCenter == "" {
		dataCenter = fs.option.DataCenter

@@ -96,12 +99,12 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) {
		ttlSeconds = int32(ttl.Minutes()) * 60
	}

	if autoChunked := fs.autoChunk(ctx, w, r, replication, collection, dataCenter, ttlSeconds, ttlString); autoChunked {
	if autoChunked := fs.autoChunk(ctx, w, r, replication, collection, dataCenter, ttlSeconds, ttlString, fsync); autoChunked {
		return
	}

	if fs.option.Cipher {
		reply, err := fs.encrypt(ctx, w, r, replication, collection, dataCenter, ttlSeconds, ttlString)
		reply, err := fs.encrypt(ctx, w, r, replication, collection, dataCenter, ttlSeconds, ttlString, fsync)
		if err != nil {
			writeJsonError(w, r, http.StatusInternalServerError, err)
		} else if reply != nil {

@@ -111,7 +114,7 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) {
		return
	}

	fileId, urlLocation, auth, err := fs.assignNewFileInfo(w, r, replication, collection, dataCenter, ttlString)
	fileId, urlLocation, auth, err := fs.assignNewFileInfo(w, r, replication, collection, dataCenter, ttlString, fsync)

	if err != nil || fileId == "" || urlLocation == "" {
		glog.V(0).Infof("fail to allocate volume for %s, collection:%s, datacenter:%s", r.URL.Path, collection, dataCenter)

@@ -122,12 +125,12 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) {
	glog.V(4).Infof("write %s to %v", r.URL.Path, urlLocation)

	u, _ := url.Parse(urlLocation)
	ret, err := fs.uploadToVolumeServer(r, u, auth, w, fileId)
	ret, md5value, err := fs.uploadToVolumeServer(r, u, auth, w, fileId)
	if err != nil {
		return
	}

	if err = fs.updateFilerStore(ctx, r, w, replication, collection, ret, fileId, ttlSeconds); err != nil {
	if err = fs.updateFilerStore(ctx, r, w, replication, collection, ret, md5value, fileId, ttlSeconds); err != nil {
		return
	}

@@ -144,8 +147,8 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) {
	}

// update metadata in filer store
func (fs *FilerServer) updateFilerStore(ctx context.Context, r *http.Request, w http.ResponseWriter,
	replication string, collection string, ret *operation.UploadResult, fileId string, ttlSeconds int32) (err error) {
func (fs *FilerServer) updateFilerStore(ctx context.Context, r *http.Request, w http.ResponseWriter, replication string,
	collection string, ret *operation.UploadResult, md5value []byte, fileId string, ttlSeconds int32) (err error) {

	stats.FilerRequestCounter.WithLabelValues("postStoreWrite").Inc()
	start := time.Now()

@@ -186,6 +189,7 @@ func (fs *FilerServer) updateFilerStore(ctx context.Context, r *http.Request, w
			Collection: collection,
			TtlSec:     ttlSeconds,
			Mime:       ret.Mime,
			Md5:        md5value,
		},
		Chunks: []*filer_pb.FileChunk{{
			FileId: fileId,

@@ -212,15 +216,20 @@ func (fs *FilerServer) updateFilerStore(ctx context.Context, r *http.Request, w
}

// send request to volume server
func (fs *FilerServer) uploadToVolumeServer(r *http.Request, u *url.URL, auth security.EncodedJwt, w http.ResponseWriter, fileId string) (ret *operation.UploadResult, err error) {
func (fs *FilerServer) uploadToVolumeServer(r *http.Request, u *url.URL, auth security.EncodedJwt, w http.ResponseWriter, fileId string) (ret *operation.UploadResult, md5value []byte, err error) {

	stats.FilerRequestCounter.WithLabelValues("postUpload").Inc()
	start := time.Now()
	defer func() { stats.FilerRequestHistogram.WithLabelValues("postUpload").Observe(time.Since(start).Seconds()) }()

	ret = &operation.UploadResult{}
	hash := md5.New()
	var body = ioutil.NopCloser(io.TeeReader(r.Body, hash))

	md5Hash := md5.New()
	body := r.Body
	if r.Method == "PUT" {
		// only PUT or large chunked files has Md5 in attributes
		body = ioutil.NopCloser(io.TeeReader(r.Body, md5Hash))
	}

	request := &http.Request{
		Method: r.Method,

@@ -285,7 +294,10 @@ func (fs *FilerServer) uploadToVolumeServer(r *http.Request, u *url.URL, auth se
		}
	}
	// use filer calculated md5 ETag, instead of the volume server crc ETag
	ret.ETag = fmt.Sprintf("%x", hash.Sum(nil))
	if r.Method == "PUT" {
		md5value = md5Hash.Sum(nil)
	}
	ret.ETag = getEtag(resp)
	return
}

@@ -318,7 +330,7 @@ func (fs *FilerServer) DeleteHandler(w http.ResponseWriter, r *http.Request) {
	w.WriteHeader(http.StatusNoContent)
}

func (fs *FilerServer) detectCollection(requestURI, qCollection, qReplication string) (collection, replication string) {
func (fs *FilerServer) detectCollection(requestURI, qCollection, qReplication string) (collection, replication string, fsync bool) {
	// default
	collection = fs.option.Collection
	replication = fs.option.DefaultReplication

@@ -341,7 +353,7 @@ func (fs *FilerServer) detectCollection(requestURI, qCollection, qReplication st
		if t > 0 {
			collection = bucketAndObjectKey[:t]
		}
		replication = fs.filer.ReadBucketOption(collection)
		replication, fsync = fs.filer.ReadBucketOption(collection)
	}

	return

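The MD5 handling above hashes the request body as it streams through, instead of buffering the whole upload. The io.TeeReader trick in isolation, with a throwaway buffer standing in for the proxied volume-server upload:
```
package main

import (
	"bytes"
	"crypto/md5"
	"fmt"
	"io"
	"strings"
)

func main() {
	body := strings.NewReader("file payload streamed from the client")

	md5Hash := md5.New()
	// Everything read from tee is also written into md5Hash,
	// so the digest is ready as soon as the copy finishes.
	tee := io.TeeReader(body, md5Hash)

	var upstream bytes.Buffer // stands in for the upstream upload target
	if _, err := io.Copy(&upstream, tee); err != nil {
		panic(err)
	}

	fmt.Printf("uploaded %d bytes, md5=%x\n", upstream.Len(), md5Hash.Sum(nil))
}
```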
@@ -2,7 +2,9 @@ package weed_server

import (
	"context"
	"crypto/md5"
	"io"
	"io/ioutil"
	"net/http"
	"path"
	"strconv"

@@ -19,7 +21,7 @@ import (
)

func (fs *FilerServer) autoChunk(ctx context.Context, w http.ResponseWriter, r *http.Request,
	replication string, collection string, dataCenter string, ttlSec int32, ttlString string) bool {
	replication string, collection string, dataCenter string, ttlSec int32, ttlString string, fsync bool) bool {
	if r.Method != "POST" {
		glog.V(4).Infoln("AutoChunking not supported for method", r.Method)
		return false

@@ -55,7 +57,7 @@ func (fs *FilerServer) autoChunk(ctx context.Context, w http.ResponseWriter, r *
		return false
	}

	reply, err := fs.doAutoChunk(ctx, w, r, contentLength, chunkSize, replication, collection, dataCenter, ttlSec, ttlString)
	reply, err := fs.doAutoChunk(ctx, w, r, contentLength, chunkSize, replication, collection, dataCenter, ttlSec, ttlString, fsync)
	if err != nil {
		writeJsonError(w, r, http.StatusInternalServerError, err)
	} else if reply != nil {

@@ -65,7 +67,7 @@ func (fs *FilerServer) autoChunk(ctx context.Context, w http.ResponseWriter, r *
}

func (fs *FilerServer) doAutoChunk(ctx context.Context, w http.ResponseWriter, r *http.Request,
	contentLength int64, chunkSize int32, replication string, collection string, dataCenter string, ttlSec int32, ttlString string) (filerResult *FilerPostResult, replyerr error) {
	contentLength int64, chunkSize int32, replication string, collection string, dataCenter string, ttlSec int32, ttlString string, fsync bool) (filerResult *FilerPostResult, replyerr error) {

	stats.FilerRequestCounter.WithLabelValues("postAutoChunk").Inc()
	start := time.Now()

@@ -91,13 +93,16 @@ func (fs *FilerServer) doAutoChunk(ctx context.Context, w http.ResponseWriter, r

	var fileChunks []*filer_pb.FileChunk

	md5Hash := md5.New()
	var partReader = ioutil.NopCloser(io.TeeReader(part1, md5Hash))

	chunkOffset := int64(0)

	for chunkOffset < contentLength {
		limitedReader := io.LimitReader(part1, int64(chunkSize))
		limitedReader := io.LimitReader(partReader, int64(chunkSize))

		// assign one file id for one chunk
		fileId, urlLocation, auth, assignErr := fs.assignNewFileInfo(w, r, replication, collection, dataCenter, ttlString)
		fileId, urlLocation, auth, assignErr := fs.assignNewFileInfo(w, r, replication, collection, dataCenter, ttlString, fsync)
		if assignErr != nil {
			return nil, assignErr
		}

@@ -157,6 +162,7 @@ func (fs *FilerServer) doAutoChunk(ctx context.Context, w http.ResponseWriter, r
			Collection: collection,
			TtlSec:     ttlSec,
			Mime:       contentType,
			Md5:        md5Hash.Sum(nil),
		},
		Chunks: fileChunks,
	}

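The auto-chunking loop above carves one fixed-size chunk at a time out of the upload stream with io.LimitReader. A reduced sketch of that loop; the chunk size and input data here are arbitrary illustrations:
```
package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

func main() {
	const chunkSize = 8
	src := strings.NewReader("abcdefghijklmnopqrstuvwxyz")

	for i := 0; ; i++ {
		// LimitReader hands out at most chunkSize bytes before reporting EOF,
		// leaving the rest of src untouched for the next iteration.
		limited := io.LimitReader(src, chunkSize)

		var chunk bytes.Buffer
		n, err := io.Copy(&chunk, limited)
		if err != nil {
			panic(err)
		}
		if n == 0 {
			break // source exhausted
		}
		fmt.Printf("chunk %d (%d bytes): %s\n", i, n, chunk.String())
	}
}
```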
@@ -17,9 +17,9 @@ import (

// handling single chunk POST or PUT upload
func (fs *FilerServer) encrypt(ctx context.Context, w http.ResponseWriter, r *http.Request,
	replication string, collection string, dataCenter string, ttlSeconds int32, ttlString string) (filerResult *FilerPostResult, err error) {
	replication string, collection string, dataCenter string, ttlSeconds int32, ttlString string, fsync bool) (filerResult *FilerPostResult, err error) {

	fileId, urlLocation, auth, err := fs.assignNewFileInfo(w, r, replication, collection, dataCenter, ttlString)
	fileId, urlLocation, auth, err := fs.assignNewFileInfo(w, r, replication, collection, dataCenter, ttlString, fsync)

	if err != nil || fileId == "" || urlLocation == "" {
		return nil, fmt.Errorf("fail to allocate volume for %s, collection:%s, datacenter:%s", r.URL.Path, collection, dataCenter)

@@ -11,6 +11,7 @@ import (
 	"google.golang.org/grpc/peer"

 	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/pb"
 	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
 	"github.com/chrislusf/seaweedfs/weed/storage/backend"
 	"github.com/chrislusf/seaweedfs/weed/storage/needle"

@@ -190,6 +191,18 @@ func (ms *MasterServer) KeepConnected(stream master_pb.Seaweed_KeepConnectedServ

 	peerAddress := findClientAddress(stream.Context(), req.GrpcPort)

+	// only one shell can be connected at any time
+	if req.Name == pb.AdminShellClient {
+		if ms.currentAdminShellClient == "" {
+			ms.currentAdminShellClient = peerAddress
+			defer func() {
+				ms.currentAdminShellClient = ""
+			}()
+		} else {
+			return fmt.Errorf("only one concurrent shell allowed, but another shell is already connected from %s", peerAddress)
+		}
+	}
+
 	stopChan := make(chan bool)

 	clientName, messageChan := ms.addClient(req.Name, peerAddress)

@@ -230,7 +243,6 @@ func (ms *MasterServer) KeepConnected(stream master_pb.Seaweed_KeepConnectedServ
 		}
 	}

 	return nil
 }

 func (ms *MasterServer) informNewLeader(stream master_pb.Seaweed_KeepConnectedServer) error {
@@ -64,6 +64,8 @@ type MasterServer struct {
 	grpcDialOption grpc.DialOption

 	MasterClient *wdclient.MasterClient
+
+	currentAdminShellClient string
 }

 func NewMasterServer(r *mux.Router, option *MasterOption, peers []string) *MasterServer {

@@ -197,8 +199,8 @@ func (ms *MasterServer) startAdminScripts() {
 	v.SetDefault("master.maintenance.sleep_minutes", 17)
 	sleepMinutes := v.GetInt("master.maintenance.sleep_minutes")

-	v.SetDefault("master.filer.default_filer_url", "http://localhost:8888/")
-	filerURL := v.GetString("master.filer.default_filer_url")
+	v.SetDefault("master.filer.default", "localhost:8888")
+	filerHostPort := v.GetString("master.filer.default")

 	scriptLines := strings.Split(adminScripts, "\n")

@@ -208,9 +210,10 @@ func (ms *MasterServer) startAdminScripts() {
 	shellOptions.GrpcDialOption = security.LoadClientTLS(v, "grpc.master")
 	shellOptions.Masters = &masterAddress

-	shellOptions.FilerHost, shellOptions.FilerPort, shellOptions.Directory, err = util.ParseFilerUrl(filerURL)
+	shellOptions.FilerHost, shellOptions.FilerPort, err = util.ParseHostPort(filerHostPort)
+	shellOptions.Directory = "/"
 	if err != nil {
-		glog.V(0).Infof("failed to parse master.filer.default_filer_urll=%s : %v\n", filerURL, err)
+		glog.V(0).Infof("failed to parse master.filer.default = %s : %v\n", filerHostPort, err)
 		return
 	}
@@ -1,23 +0,0 @@
-package weed_server
-
-import (
-	"context"
-
-	"github.com/chrislusf/seaweedfs/weed/pb/queue_pb"
-)
-
-func (broker *MessageBroker) ConfigureTopic(context.Context, *queue_pb.ConfigureTopicRequest) (*queue_pb.ConfigureTopicResponse, error) {
-	panic("implement me")
-}
-
-func (broker *MessageBroker) DeleteTopic(context.Context, *queue_pb.DeleteTopicRequest) (*queue_pb.DeleteTopicResponse, error) {
-	panic("implement me")
-}
-
-func (broker *MessageBroker) StreamWrite(queue_pb.SeaweedQueue_StreamWriteServer) error {
-	panic("implement me")
-}
-
-func (broker *MessageBroker) StreamRead(*queue_pb.ReadMessageRequest, queue_pb.SeaweedQueue_StreamReadServer) error {
-	panic("implement me")
-}
@@ -90,7 +90,7 @@ func (vs *VolumeServer) VolumeTailReceiver(ctx context.Context, req *volume_serv
 	defer glog.V(1).Infof("receive tailing volume %d finished", v.Id)

 	return resp, operation.TailVolumeFromSource(req.SourceVolumeServer, vs.grpcDialOption, v.Id, req.SinceNs, int(req.IdleTimeoutSeconds), func(n *needle.Needle) error {
-		_, err := vs.store.WriteVolumeNeedle(v.Id, n)
+		_, err := vs.store.WriteVolumeNeedle(v.Id, n, false)
 		return err
 	})
@@ -166,3 +166,11 @@ func setEtag(w http.ResponseWriter, etag string) {
 		}
 	}
 }
+
+func getEtag(resp *http.Response) (etag string) {
+	etag = resp.Header.Get("ETag")
+	if strings.HasPrefix(etag, "\"") && strings.HasSuffix(etag, "\"") {
+		return etag[1 : len(etag)-1]
+	}
+	return
+}
@@ -151,9 +151,9 @@ var StatusTpl = template.Must(template.New("status").Funcs(funcMap).Parse(`<!DOC
 		<tr>
 			<td><code>{{ .Id }}</code></td>
 			<td>{{ .Collection }}</td>
-			<td>{{ .Size }} Bytes</td>
+			<td>{{ bytesToHumanReadble .Size }}</td>
 			<td>{{ .FileCount }}</td>
-			<td>{{ .DeleteCount }} / {{.DeletedByteCount}} Bytes</td>
+			<td>{{ .DeleteCount }} / {{bytesToHumanReadble .DeletedByteCount}}</td>
 			<td>{{ .Ttl }}</td>
 			<td>{{ .ReadOnly }}</td>
 		</tr>

@@ -181,9 +181,9 @@ var StatusTpl = template.Must(template.New("status").Funcs(funcMap).Parse(`<!DOC
 		<tr>
 			<td><code>{{ .Id }}</code></td>
 			<td>{{ .Collection }}</td>
-			<td>{{ .Size }} Bytes</td>
+			<td>{{ bytesToHumanReadble .Size }}</td>
 			<td>{{ .FileCount }}</td>
-			<td>{{ .DeleteCount }} / {{.DeletedByteCount}} Bytes</td>
+			<td>{{ .DeleteCount }} / {{bytesToHumanReadble .DeletedByteCount}}</td>
 			<td>{{ .RemoteStorageName }}</td>
 			<td>{{ .RemoteStorageKey }}</td>
 		</tr>

@@ -209,7 +209,7 @@ var StatusTpl = template.Must(template.New("status").Funcs(funcMap).Parse(`<!DOC
 		<tr>
 			<td><code>{{ .VolumeId }}</code></td>
 			<td>{{ .Collection }}</td>
-			<td>{{ .ShardSize }} Bytes</td>
+			<td>{{ bytesToHumanReadble .ShardSize }}</td>
 			<td>{{ .ShardIdList }}</td>
 			<td>{{ .CreatedAt.Format "02 Jan 06 15:04 -0700" }}</td>
 		</tr>
@@ -16,8 +16,8 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/operation"
 	"github.com/chrislusf/seaweedfs/weed/pb"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
-	"github.com/chrislusf/seaweedfs/weed/pb/pb_cache"
 	"github.com/chrislusf/seaweedfs/weed/util"
+	"github.com/chrislusf/seaweedfs/weed/util/chunk_cache"

 	"github.com/chrislusf/seaweedfs/weed/filer2"
 	"github.com/chrislusf/seaweedfs/weed/glog"

@@ -34,6 +34,8 @@ type WebDavOption struct {
 	Uid         uint32
 	Gid         uint32
 	Cipher      bool
+	CacheDir    string
+	CacheSizeMB int64
 }

 type WebDavServer struct {

@@ -67,7 +69,7 @@ type WebDavFileSystem struct {
 	secret         security.SigningKey
 	filer          *filer2.Filer
 	grpcDialOption grpc.DialOption
-	chunkCache     *pb_cache.ChunkCache
+	chunkCache     *chunk_cache.ChunkCache
 }

 type FileInfo struct {

@@ -96,9 +98,14 @@ type WebDavFile struct {
 }

 func NewWebDavFileSystem(option *WebDavOption) (webdav.FileSystem, error) {
+
+	chunkCache := chunk_cache.NewChunkCache(256, option.CacheDir, option.CacheSizeMB)
+	util.OnInterrupt(func() {
+		chunkCache.Shutdown()
+	})
 	return &WebDavFileSystem{
 		option:     option,
-		chunkCache: pb_cache.NewChunkCache(1000),
+		chunkCache: chunkCache,
 	}, nil
 }
@@ -43,7 +43,7 @@ var (
 func NewCommandEnv(options ShellOptions) *CommandEnv {
 	return &CommandEnv{
 		env:          make(map[string]string),
-		MasterClient: wdclient.NewMasterClient(options.GrpcDialOption, "shell", 0, strings.Split(*options.Masters, ",")),
+		MasterClient: wdclient.NewMasterClient(options.GrpcDialOption, pb.AdminShellClient, 0, strings.Split(*options.Masters, ",")),
 		option:       options,
 	}
 }
@@ -3,12 +3,10 @@ package memory_map
 import (
 	"os"
 	"time"
-
-	"github.com/chrislusf/seaweedfs/weed/storage/backend"
 )

 var (
-	_ backend.BackendStorageFile = &MemoryMappedFile{}
+	// _ backend.BackendStorageFile = &MemoryMappedFile{} // remove this to break import cycle
 )

 type MemoryMappedFile struct {
@@ -1,15 +1,14 @@
 // +build !linux,!windows

-package storage
+package backend

 import (
 	"os"

 	"github.com/chrislusf/seaweedfs/weed/glog"
-	"github.com/chrislusf/seaweedfs/weed/storage/backend"
 )

-func createVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32) (backend.BackendStorageFile, error) {
+func CreateVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32) (BackendStorageFile, error) {
 	file, e := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
 	if e != nil {
 		return nil, e

@@ -17,5 +16,5 @@ func createVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32
 	if preallocate > 0 {
 		glog.V(0).Infof("Preallocated disk space for %s is not supported", fileName)
 	}
-	return backend.NewDiskFile(file), nil
+	return NewDiskFile(file), nil
 }
@@ -1,16 +1,15 @@
 // +build linux

-package storage
+package backend

 import (
 	"os"
 	"syscall"

 	"github.com/chrislusf/seaweedfs/weed/glog"
-	"github.com/chrislusf/seaweedfs/weed/storage/backend"
 )

-func createVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32) (backend.BackendStorageFile, error) {
+func CreateVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32) (BackendStorageFile, error) {
 	file, e := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
 	if e != nil {
 		return nil, e

@@ -19,5 +18,5 @@ func createVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32
 		syscall.Fallocate(int(file.Fd()), 1, 0, preallocate)
 		glog.V(0).Infof("Preallocated %d bytes disk space for %s", preallocate, fileName)
 	}
-	return backend.NewDiskFile(file), nil
+	return NewDiskFile(file), nil
 }
|
@ -1,17 +1,16 @@
|
|||
// +build windows
|
||||
|
||||
package storage
|
||||
package backend
|
||||
|
||||
import (
|
||||
"github.com/chrislusf/seaweedfs/weed/storage/backend/memory_map"
|
||||
"golang.org/x/sys/windows"
|
||||
|
||||
"github.com/chrislusf/seaweedfs/weed/glog"
|
||||
"github.com/chrislusf/seaweedfs/weed/storage/backend"
|
||||
"github.com/chrislusf/seaweedfs/weed/storage/backend/memory_map/os_overloads"
|
||||
)
|
||||
|
||||
func createVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32) (backend.BackendStorageFile, error) {
|
||||
func CreateVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32) (BackendStorageFile, error) {
|
||||
if preallocate > 0 {
|
||||
glog.V(0).Infof("Preallocated disk space for %s is not supported", fileName)
|
||||
}
|
||||
|
@ -27,7 +26,7 @@ func createVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32
|
|||
if e != nil {
|
||||
return nil, e
|
||||
}
|
||||
return backend.NewDiskFile(file), nil
|
||||
return NewDiskFile(file), nil
|
||||
}
|
||||
|
||||
}
|
|
@@ -252,7 +252,7 @@ func (s *Store) Close() {
 	}
 }

-func (s *Store) WriteVolumeNeedle(i needle.VolumeId, n *needle.Needle) (isUnchanged bool, err error) {
+func (s *Store) WriteVolumeNeedle(i needle.VolumeId, n *needle.Needle, fsync bool) (isUnchanged bool, err error) {
 	if v := s.findVolume(i); v != nil {
 		if v.IsReadOnly() {
 			err = fmt.Errorf("volume %d is read only", i)

@@ -260,7 +260,7 @@ func (s *Store) WriteVolumeNeedle(i needle.VolumeId, n *needle.Needle) (isUnchan
 		}
 		// using len(n.Data) here instead of n.Size before n.Size is populated in v.writeNeedle(n)
 		if MaxPossibleVolumeSize >= v.ContentSize()+uint64(needle.GetActualSize(uint32(len(n.Data)), v.Version())) {
-			_, _, isUnchanged, err = v.writeNeedle(n)
+			_, _, isUnchanged, err = v.writeNeedle(n, fsync)
 		} else {
 			err = fmt.Errorf("volume size limit %d exceeded! current size is %d", s.GetVolumeSizeLimit(), v.ContentSize())
 		}

@@ -421,7 +421,7 @@ func (s *Store) MaybeAdjustVolumeMax() (hasChanges bool) {
 			maxVolumeCount += int(uint64(unclaimedSpaces)/volumeSizeLimit) - 1
 		}
 		diskLocation.MaxVolumeCount = maxVolumeCount
-		glog.V(0).Infof("disk %s max %d unclaimedSpace:%dMB, unused:%dMB volumeSizeLimit:%d/MB",
+		glog.V(0).Infof("disk %s max %d unclaimedSpace:%dMB, unused:%dMB volumeSizeLimit:%dMB",
 			diskLocation.Directory, maxVolumeCount, unclaimedSpaces/1024/1024, unusedSpace/1024/1024, volumeSizeLimit/1024/1024)
 		hasChanges = true
 	}
@@ -54,7 +54,7 @@ func (v *Volume) load(alsoLoadIndex bool, createDatIfMissing bool, needleMapKind
 			v.DataBackend = backend.NewDiskFile(dataFile)
 		} else {
 			if createDatIfMissing {
-				v.DataBackend, err = createVolumeFile(fileName+".dat", preallocate, v.MemoryMapMaxSizeMb)
+				v.DataBackend, err = backend.CreateVolumeFile(fileName+".dat", preallocate, v.MemoryMapMaxSizeMb)
 			} else {
 				return fmt.Errorf("Volume Data file %s.dat does not exist.", fileName)
 			}
@@ -63,7 +63,7 @@ func (v *Volume) Destroy() (err error) {
 	return
 }

-func (v *Volume) writeNeedle(n *needle.Needle) (offset uint64, size uint32, isUnchanged bool, err error) {
+func (v *Volume) writeNeedle(n *needle.Needle, fsync bool) (offset uint64, size uint32, isUnchanged bool, err error) {
 	// glog.V(4).Infof("writing needle %s", needle.NewFileIdFromNeedle(v.Id, n).String())
 	v.dataFileAccessLock.Lock()
 	defer v.dataFileAccessLock.Unlock()

@@ -98,6 +98,11 @@ func (v *Volume) writeNeedle(n *needle.Needle) (offset uint64, size uint32, isUn
 	if offset, size, _, err = n.Append(v.DataBackend, v.Version()); err != nil {
 		return
 	}
+	if fsync {
+		if err = v.DataBackend.Sync(); err != nil {
+			return
+		}
+	}
 	v.lastAppendAtNs = n.AppendAtNs

 	// add to needle map
@@ -354,7 +354,7 @@ func (v *Volume) copyDataAndGenerateIndexFile(dstName, idxName string, prealloca
 	var (
 		dst backend.BackendStorageFile
 	)
-	if dst, err = createVolumeFile(dstName, preallocate, 0); err != nil {
+	if dst, err = backend.CreateVolumeFile(dstName, preallocate, 0); err != nil {
 		return
 	}
 	defer dst.Close()

@@ -383,7 +383,7 @@ func copyDataBasedOnIndexFile(srcDatName, srcIdxName, dstDatName, datIdxName str
 		srcDatBackend, dstDatBackend backend.BackendStorageFile
 		dataFile                     *os.File
 	)
-	if dstDatBackend, err = createVolumeFile(dstDatName, preallocate, 0); err != nil {
+	if dstDatBackend, err = backend.CreateVolumeFile(dstDatName, preallocate, 0); err != nil {
 		return
 	}
 	defer dstDatBackend.Close()
@@ -129,7 +129,7 @@ func TestCompaction(t *testing.T) {
 }
 func doSomeWritesDeletes(i int, v *Volume, t *testing.T, infos []*needleInfo) {
 	n := newRandomNeedle(uint64(i))
-	_, size, _, err := v.writeNeedle(n)
+	_, size, _, err := v.writeNeedle(n, false)
 	if err != nil {
 		t.Fatalf("write file %d: %v", i, err)
 	}
@@ -22,8 +22,10 @@ func ReplicatedWrite(masterNode string, s *storage.Store, volumeId needle.Volume
 	//check JWT
 	jwt := security.GetJwt(r)

+	// check whether this is a replicated write request
 	var remoteLocations []operation.Location
 	if r.FormValue("type") != "replicate" {
+		// this is the initial request
 		remoteLocations, err = getWritableRemoteReplications(s, volumeId, masterNode)
 		if err != nil {
 			glog.V(0).Infoln(err)

@@ -31,8 +33,14 @@ func ReplicatedWrite(masterNode string, s *storage.Store, volumeId needle.Volume
 		}
 	}

+	// read fsync value
+	fsync := false
+	if r.FormValue("fsync") == "true" {
+		fsync = true
+	}
+
 	if s.GetVolume(volumeId) != nil {
-		isUnchanged, err = s.WriteVolumeNeedle(volumeId, n)
+		isUnchanged, err = s.WriteVolumeNeedle(volumeId, n, fsync)
 		if err != nil {
 			err = fmt.Errorf("failed to write to local disk: %v", err)
 			glog.V(0).Infoln(err)
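The hunks above thread an `fsync` flag from the HTTP request down to the needle write. A minimal client-side sketch, assuming a volume server listening on localhost:8080 and a placeholder file id; passing `fsync=true` in the request is what the `r.FormValue("fsync")` check above picks up:

```
package main

import (
	"bytes"
	"fmt"
	"mime/multipart"
	"net/http"
)

func main() {
	var body bytes.Buffer
	w := multipart.NewWriter(&body)
	part, _ := w.CreateFormFile("file", "hello.txt")
	part.Write([]byte("hello, fsync"))
	w.Close()

	// "3,01637037d6" is a placeholder file id; fsync=true asks the volume server
	// to call Sync() on the volume data file right after appending the needle.
	req, _ := http.NewRequest("POST", "http://localhost:8080/3,01637037d6?fsync=true", &body)
	req.Header.Set("Content-Type", w.FormDataContentType())

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		fmt.Println("upload failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```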
@@ -2,6 +2,7 @@ package util

 import (
+	"crypto/md5"
 	"fmt"
 	"io"
 )

@@ -91,3 +92,9 @@ func HashToInt32(data []byte) (v int32) {

 	return
 }
+
+func Md5(data []byte) string {
+	hash := md5.New()
+	hash.Write(data)
+	return fmt.Sprintf("%x", hash.Sum(nil))
+}
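A tiny usage sketch for the new `util.Md5` helper added above; it returns the hex-encoded MD5 digest of its input (import path taken from this repository's layout):

```
package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/util"
)

func main() {
	// prints 5d41402abc4b2a76b9719d911017c592
	fmt.Println(util.Md5([]byte("hello")))
}
```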
weed/util/chunk_cache/chunk_cache.go (new file, 113 lines)

@@ -0,0 +1,113 @@
+package chunk_cache
+
+import (
+	"sync"
+
+	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/storage/needle"
+)
+
+const (
+	memCacheSizeLimit     = 1024 * 1024
+	onDiskCacheSizeLimit0 = memCacheSizeLimit
+	onDiskCacheSizeLimit1 = 4 * memCacheSizeLimit
+)
+
+// a global cache for recently accessed file chunks
+type ChunkCache struct {
+	memCache   *ChunkCacheInMemory
+	diskCaches []*OnDiskCacheLayer
+	sync.RWMutex
+}
+
+func NewChunkCache(maxEntries int64, dir string, diskSizeMB int64) *ChunkCache {
+
+	c := &ChunkCache{
+		memCache: NewChunkCacheInMemory(maxEntries),
+	}
+	c.diskCaches = make([]*OnDiskCacheLayer, 3)
+	c.diskCaches[0] = NewOnDiskCacheLayer(dir, "c0_1", diskSizeMB/4, 4)
+	c.diskCaches[1] = NewOnDiskCacheLayer(dir, "c1_4", diskSizeMB/4, 4)
+	c.diskCaches[2] = NewOnDiskCacheLayer(dir, "cache", diskSizeMB/2, 4)
+
+	return c
+}
+
+func (c *ChunkCache) GetChunk(fileId string, chunkSize uint64) (data []byte) {
+	if c == nil {
+		return
+	}
+
+	c.RLock()
+	defer c.RUnlock()
+
+	return c.doGetChunk(fileId, chunkSize)
+}
+
+func (c *ChunkCache) doGetChunk(fileId string, chunkSize uint64) (data []byte) {
+
+	if chunkSize < memCacheSizeLimit {
+		if data = c.memCache.GetChunk(fileId); data != nil {
+			return data
+		}
+	}
+
+	fid, err := needle.ParseFileIdFromString(fileId)
+	if err != nil {
+		glog.Errorf("failed to parse file id %s", fileId)
+		return nil
+	}
+
+	for _, diskCache := range c.diskCaches {
+		data := diskCache.getChunk(fid.Key)
+		if len(data) != 0 {
+			return data
+		}
+	}
+
+	return nil
+
+}
+
+func (c *ChunkCache) SetChunk(fileId string, data []byte) {
+	if c == nil {
+		return
+	}
+	c.Lock()
+	defer c.Unlock()
+
+	c.doSetChunk(fileId, data)
+}
+
+func (c *ChunkCache) doSetChunk(fileId string, data []byte) {
+
+	if len(data) < memCacheSizeLimit {
+		c.memCache.SetChunk(fileId, data)
+	}
+
+	fid, err := needle.ParseFileIdFromString(fileId)
+	if err != nil {
+		glog.Errorf("failed to parse file id %s", fileId)
+		return
+	}
+
+	if len(data) < onDiskCacheSizeLimit0 {
+		c.diskCaches[0].setChunk(fid.Key, data)
+	} else if len(data) < onDiskCacheSizeLimit1 {
+		c.diskCaches[1].setChunk(fid.Key, data)
+	} else {
+		c.diskCaches[2].setChunk(fid.Key, data)
+	}
+
+}
+
+func (c *ChunkCache) Shutdown() {
+	if c == nil {
+		return
+	}
+	c.Lock()
+	defer c.Unlock()
+	for _, diskCache := range c.diskCaches {
+		diskCache.shutdown()
+	}
+}
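A minimal usage sketch of the new tiered cache, using only what the file above defines (`NewChunkCache`, `SetChunk`, `GetChunk`, `Shutdown`); the directory is a temporary placeholder and the file id format mirrors the accompanying test:

```
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"os"

	"github.com/chrislusf/seaweedfs/weed/util/chunk_cache"
)

func main() {
	dir, _ := ioutil.TempDir("", "chunk_cache_demo")
	defer os.RemoveAll(dir)

	// 256 in-memory entries, 32 MB of on-disk cache split across the three tiers.
	cache := chunk_cache.NewChunkCache(256, dir, 32)
	defer cache.Shutdown()

	fileId := "1,01aabbccdd" // placeholder volume file id
	data := []byte("some chunk bytes")

	cache.SetChunk(fileId, data)

	if got := cache.GetChunk(fileId, uint64(len(data))); bytes.Equal(got, data) {
		fmt.Println("cache hit")
	} else {
		fmt.Println("cache miss")
	}
}
```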
@@ -1,4 +1,4 @@
-package pb_cache
+package chunk_cache

 import (
 	"time"

@@ -7,21 +7,21 @@ import (
 )

 // a global cache for recently accessed file chunks
-type ChunkCache struct {
+type ChunkCacheInMemory struct {
 	cache *ccache.Cache
 }

-func NewChunkCache(maxEntries int64) *ChunkCache {
+func NewChunkCacheInMemory(maxEntries int64) *ChunkCacheInMemory {
 	pruneCount := maxEntries >> 3
 	if pruneCount <= 0 {
 		pruneCount = 500
 	}
-	return &ChunkCache{
+	return &ChunkCacheInMemory{
 		cache: ccache.New(ccache.Configure().MaxSize(maxEntries).ItemsToPrune(uint32(pruneCount))),
 	}
 }

-func (c *ChunkCache) GetChunk(fileId string) []byte {
+func (c *ChunkCacheInMemory) GetChunk(fileId string) []byte {
 	item := c.cache.Get(fileId)
 	if item == nil {
 		return nil

@@ -31,6 +31,6 @@ func (c *ChunkCache) GetChunk(fileId string) []byte {
 	return data
 }

-func (c *ChunkCache) SetChunk(fileId string, data []byte) {
+func (c *ChunkCacheInMemory) SetChunk(fileId string, data []byte) {
 	c.cache.Set(fileId, data, time.Hour)
 }
weed/util/chunk_cache/chunk_cache_on_disk.go (new file, 145 lines)

@@ -0,0 +1,145 @@
+package chunk_cache
+
+import (
+	"fmt"
+	"os"
+	"time"
+
+	"github.com/syndtr/goleveldb/leveldb/opt"
+
+	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/storage"
+	"github.com/chrislusf/seaweedfs/weed/storage/backend"
+	"github.com/chrislusf/seaweedfs/weed/storage/types"
+	"github.com/chrislusf/seaweedfs/weed/util"
+)
+
+// This implements an on disk cache
+// The entries are an FIFO with a size limit
+
+type ChunkCacheVolume struct {
+	DataBackend backend.BackendStorageFile
+	nm          storage.NeedleMapper
+	fileName    string
+	smallBuffer []byte
+	sizeLimit   int64
+	lastModTime time.Time
+	fileSize    int64
+}
+
+func LoadOrCreateChunkCacheVolume(fileName string, preallocate int64) (*ChunkCacheVolume, error) {
+
+	v := &ChunkCacheVolume{
+		smallBuffer: make([]byte, types.NeedlePaddingSize),
+		fileName:    fileName,
+		sizeLimit:   preallocate,
+	}
+
+	var err error
+
+	if exists, canRead, canWrite, modTime, fileSize := util.CheckFile(v.fileName + ".dat"); exists {
+		if !canRead {
+			return nil, fmt.Errorf("cannot read cache file %s.dat", v.fileName)
+		}
+		if !canWrite {
+			return nil, fmt.Errorf("cannot write cache file %s.dat", v.fileName)
+		}
+		if dataFile, err := os.OpenFile(v.fileName+".dat", os.O_RDWR|os.O_CREATE, 0644); err != nil {
+			return nil, fmt.Errorf("cannot create cache file %s.dat: %v", v.fileName, err)
+		} else {
+			v.DataBackend = backend.NewDiskFile(dataFile)
+			v.lastModTime = modTime
+			v.fileSize = fileSize
+		}
+	} else {
+		if v.DataBackend, err = backend.CreateVolumeFile(v.fileName+".dat", preallocate, 0); err != nil {
+			return nil, fmt.Errorf("cannot create cache file %s.dat: %v", v.fileName, err)
+		}
+		v.lastModTime = time.Now()
+	}
+
+	var indexFile *os.File
+	if indexFile, err = os.OpenFile(v.fileName+".idx", os.O_RDWR|os.O_CREATE, 0644); err != nil {
+		return nil, fmt.Errorf("cannot write cache index %s.idx: %v", v.fileName, err)
+	}
+
+	glog.V(0).Infoln("loading leveldb", v.fileName+".ldb")
+	opts := &opt.Options{
+		BlockCacheCapacity:            2 * 1024 * 1024, // default value is 8MiB
+		WriteBuffer:                   1 * 1024 * 1024, // default value is 4MiB
+		CompactionTableSizeMultiplier: 10,              // default value is 1
+	}
+	if v.nm, err = storage.NewLevelDbNeedleMap(v.fileName+".ldb", indexFile, opts); err != nil {
+		return nil, fmt.Errorf("loading leveldb %s error: %v", v.fileName+".ldb", err)
+	}
+
+	return v, nil
+
+}
+
+func (v *ChunkCacheVolume) Shutdown() {
+	if v.DataBackend != nil {
+		v.DataBackend.Close()
+		v.DataBackend = nil
+	}
+	if v.nm != nil {
+		v.nm.Close()
+		v.nm = nil
+	}
+}
+
+func (v *ChunkCacheVolume) destroy() {
+	v.Shutdown()
+	os.Remove(v.fileName + ".dat")
+	os.Remove(v.fileName + ".idx")
+	os.RemoveAll(v.fileName + ".ldb")
+}
+
+func (v *ChunkCacheVolume) Reset() (*ChunkCacheVolume, error) {
+	v.destroy()
+	return LoadOrCreateChunkCacheVolume(v.fileName, v.sizeLimit)
+}
+
+func (v *ChunkCacheVolume) GetNeedle(key types.NeedleId) ([]byte, error) {
+
+	nv, ok := v.nm.Get(key)
+	if !ok {
+		return nil, storage.ErrorNotFound
+	}
+	data := make([]byte, nv.Size)
+	if readSize, readErr := v.DataBackend.ReadAt(data, nv.Offset.ToAcutalOffset()); readErr != nil {
+		return nil, fmt.Errorf("read %s.dat [%d,%d): %v",
+			v.fileName, nv.Offset.ToAcutalOffset(), nv.Offset.ToAcutalOffset()+int64(nv.Size), readErr)
+	} else {
+		if readSize != int(nv.Size) {
+			return nil, fmt.Errorf("read %d, expected %d", readSize, nv.Size)
+		}
+	}
+
+	return data, nil
+}
+
+func (v *ChunkCacheVolume) WriteNeedle(key types.NeedleId, data []byte) error {
+
+	offset := v.fileSize
+
+	written, err := v.DataBackend.WriteAt(data, offset)
+	if err != nil {
+		return err
+	} else if written != len(data) {
+		return fmt.Errorf("partial written %d, expected %d", written, len(data))
+	}
+
+	v.fileSize += int64(written)
+	extraSize := written % types.NeedlePaddingSize
+	if extraSize != 0 {
+		v.DataBackend.WriteAt(v.smallBuffer[:types.NeedlePaddingSize-extraSize], offset+int64(written))
+		v.fileSize += int64(types.NeedlePaddingSize - extraSize)
+	}
+
+	if err := v.nm.Put(key, types.ToOffset(offset), uint32(len(data))); err != nil {
+		glog.V(4).Infof("failed to save in needle map %d: %v", key, err)
+	}
+
+	return nil
+}
weed/util/chunk_cache/chunk_cache_on_disk_test.go (new file, 59 lines)

@@ -0,0 +1,59 @@
+package chunk_cache
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"math/rand"
+	"os"
+	"testing"
+)
+
+func TestOnDisk(t *testing.T) {
+
+	tmpDir, _ := ioutil.TempDir("", "c")
+	defer os.RemoveAll(tmpDir)
+
+	totalDiskSizeMb := int64(32)
+
+	cache := NewChunkCache(0, tmpDir, totalDiskSizeMb)
+
+	writeCount := 5
+	type test_data struct {
+		data   []byte
+		fileId string
+		size   uint64
+	}
+	testData := make([]*test_data, writeCount)
+	for i := 0; i < writeCount; i++ {
+		buff := make([]byte, 1024*1024)
+		rand.Read(buff)
+		testData[i] = &test_data{
+			data:   buff,
+			fileId: fmt.Sprintf("1,%daabbccdd", i+1),
+			size:   uint64(len(buff)),
+		}
+		cache.SetChunk(testData[i].fileId, testData[i].data)
+	}
+
+	for i := 0; i < writeCount; i++ {
+		data := cache.GetChunk(testData[i].fileId, testData[i].size)
+		if bytes.Compare(data, testData[i].data) != 0 {
+			t.Errorf("failed to write to and read from cache: %d", i)
+		}
+	}
+
+	cache.Shutdown()
+
+	cache = NewChunkCache(0, tmpDir, totalDiskSizeMb)
+
+	for i := 0; i < writeCount; i++ {
+		data := cache.GetChunk(testData[i].fileId, testData[i].size)
+		if bytes.Compare(data, testData[i].data) != 0 {
+			t.Errorf("failed to write to and read from cache: %d", i)
+		}
+	}
+
+	cache.Shutdown()
+
+}
weed/util/chunk_cache/on_disk_cache_layer.go (new file, 89 lines)

@@ -0,0 +1,89 @@
+package chunk_cache
+
+import (
+	"fmt"
+	"path"
+	"sort"
+
+	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/storage"
+	"github.com/chrislusf/seaweedfs/weed/storage/types"
+)
+
+type OnDiskCacheLayer struct {
+	diskCaches []*ChunkCacheVolume
+}
+
+func NewOnDiskCacheLayer(dir, namePrefix string, diskSizeMB int64, segmentCount int) *OnDiskCacheLayer {
+
+	volumeCount, volumeSize := int(diskSizeMB/30000), int64(30000)
+	if volumeCount < segmentCount {
+		volumeCount, volumeSize = segmentCount, diskSizeMB/int64(segmentCount)
+	}
+
+	c := &OnDiskCacheLayer{}
+	for i := 0; i < volumeCount; i++ {
+		fileName := path.Join(dir, fmt.Sprintf("%s_%d", namePrefix, i))
+		diskCache, err := LoadOrCreateChunkCacheVolume(fileName, volumeSize*1024*1024)
+		if err != nil {
+			glog.Errorf("failed to add cache %s : %v", fileName, err)
+		} else {
+			c.diskCaches = append(c.diskCaches, diskCache)
+		}
+	}
+
+	// keep newest cache to the front
+	sort.Slice(c.diskCaches, func(i, j int) bool {
+		return c.diskCaches[i].lastModTime.After(c.diskCaches[j].lastModTime)
+	})
+
+	return c
+}
+
+func (c *OnDiskCacheLayer) setChunk(needleId types.NeedleId, data []byte) {
+
+	if c.diskCaches[0].fileSize+int64(len(data)) > c.diskCaches[0].sizeLimit {
+		t, resetErr := c.diskCaches[len(c.diskCaches)-1].Reset()
+		if resetErr != nil {
+			glog.Errorf("failed to reset cache file %s", c.diskCaches[len(c.diskCaches)-1].fileName)
+			return
+		}
+		for i := len(c.diskCaches) - 1; i > 0; i-- {
+			c.diskCaches[i] = c.diskCaches[i-1]
+		}
+		c.diskCaches[0] = t
+	}
+
+	c.diskCaches[0].WriteNeedle(needleId, data)
+
+}
+
+func (c *OnDiskCacheLayer) getChunk(needleId types.NeedleId) (data []byte) {
+
+	var err error
+
+	for _, diskCache := range c.diskCaches {
+		data, err = diskCache.GetNeedle(needleId)
+		if err == storage.ErrorNotFound {
+			continue
+		}
+		if err != nil {
+			glog.Errorf("failed to read cache file %s id %d", diskCache.fileName, needleId)
+			continue
+		}
+		if len(data) != 0 {
+			return
+		}
+	}
+
+	return nil
+
+}
+
+func (c *OnDiskCacheLayer) shutdown() {
+
+	for _, diskCache := range c.diskCaches {
+		diskCache.Shutdown()
+	}
+
+}
@@ -42,7 +42,8 @@ func LoadConfiguration(configFileName string, required bool) (loaded bool) {
 }

 func GetViper() *viper.Viper {
-	v := viper.GetViper()
+	v := &viper.Viper{}
+	*v = *viper.GetViper()
 	v.AutomaticEnv()
 	v.SetEnvPrefix("weed")
 	v.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
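The change above makes `GetViper` operate on a copy of the global viper instance, so enabling the `weed`-prefixed environment lookup no longer mutates shared state. A small usage sketch, assuming the `master.maintenance.sleep_minutes` key shown earlier in this diff:

```
package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/util"
)

func main() {
	v := util.GetViper()
	// with the env prefix and replacer set above, WEED_MASTER_MAINTENANCE_SLEEP_MINUTES
	// would override the configured value for this key.
	fmt.Println("sleep minutes:", v.GetInt("master.maintenance.sleep_minutes"))
}
```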
@@ -5,5 +5,5 @@ import (
 )

 var (
-	VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 71)
+	VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 74)
 )
@@ -117,7 +117,7 @@ func Delete(url string, jwt string) error {
 		return nil
 	}
 	m := make(map[string]interface{})
-	if e := json.Unmarshal(body, m); e == nil {
+	if e := json.Unmarshal(body, &m); e == nil {
 		if s, ok := m["error"].(string); ok {
 			return errors.New(s)
 		}
@@ -1,4 +1,4 @@
-package queue
+package log_buffer

 import (
 	"sync"

@@ -11,6 +11,9 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/util"
 )

+const BufferSize = 4 * 1024 * 1024
+const PreviousBufferCount = 3
+
 type dataToFlush struct {
 	startTime time.Time
 	stopTime  time.Time

@@ -18,6 +21,7 @@ type dataToFlush struct {
 }

 type LogBuffer struct {
+	prevBuffers *SealedBuffers
 	buf         []byte
 	idx         []int
 	pos         int

@@ -34,7 +38,8 @@ type LogBuffer struct {

 func NewLogBuffer(flushInterval time.Duration, flushFn func(startTime, stopTime time.Time, buf []byte), notifyFn func()) *LogBuffer {
 	lb := &LogBuffer{
-		buf:           make([]byte, 4*1024*1024),
+		prevBuffers:   newSealedBuffers(PreviousBufferCount),
+		buf:           make([]byte, BufferSize),
 		sizeBuf:       make([]byte, 4),
 		flushInterval: flushInterval,
 		flushFn:       flushFn,

@@ -46,17 +51,7 @@ func NewLogBuffer(flushInterval time.Duration, flushFn func(startTime, stopTime
 	return lb
 }

-func (m *LogBuffer) AddToBuffer(ts time.Time, key, data []byte) {
-
-	logEntry := &filer_pb.LogEntry{
-		TsNs:             ts.UnixNano(),
-		PartitionKeyHash: util.HashToInt32(key),
-		Data:             data,
-	}
-
-	logEntryData, _ := proto.Marshal(logEntry)
-
-	size := len(logEntryData)
+func (m *LogBuffer) AddToBuffer(partitionKey, data []byte) {

 	m.Lock()
 	defer func() {

@@ -66,6 +61,18 @@ func (m *LogBuffer) AddToBuffer(ts time.Time, key, data []byte) {
 		}
 	}()

+	// need to put the timestamp inside the lock
+	ts := time.Now()
+	logEntry := &filer_pb.LogEntry{
+		TsNs:             ts.UnixNano(),
+		PartitionKeyHash: util.HashToInt32(partitionKey),
+		Data:             data,
+	}
+
+	logEntryData, _ := proto.Marshal(logEntry)
+
+	size := len(logEntryData)
+
 	if m.pos == 0 {
 		m.startTime = ts
 	}

@@ -125,6 +132,7 @@ func (m *LogBuffer) copyToFlush() *dataToFlush {
 		stopTime: m.stopTime,
 		data:     copiedBytes(m.buf[:m.pos]),
 	}
+	m.buf = m.prevBuffers.SealBuffer(m.startTime, m.stopTime, m.buf)
 	m.pos = 0
 	m.idx = m.idx[:0]
 	return d

@@ -153,18 +161,18 @@ func (m *LogBuffer) ReadFromBuffer(lastReadTime time.Time) (ts time.Time, buffer
 	l, h := 0, len(m.idx)-1

 	/*
-		for i, pos := range m.idx {
-			logEntry, ts := readTs(m.buf, pos)
-			event := &filer_pb.FullEventNotification{}
-			proto.Unmarshal(logEntry.Data, event)
-			entry := event.EventNotification.OldEntry
-			if entry == nil {
-				entry = event.EventNotification.NewEntry
-			}
-			fmt.Printf("entry %d ts: %v offset:%d dir:%s name:%s\n", i, time.Unix(0, ts), pos, event.Directory, entry.Name)
-		}
-		fmt.Printf("l=%d, h=%d\n", l, h)
-	*/
+		for i, pos := range m.idx {
+			logEntry, ts := readTs(m.buf, pos)
+			event := &filer_pb.SubscribeMetadataResponse{}
+			proto.Unmarshal(logEntry.Data, event)
+			entry := event.EventNotification.OldEntry
+			if entry == nil {
+				entry = event.EventNotification.NewEntry
+			}
+			fmt.Printf("entry %d ts: %v offset:%d dir:%s name:%s\n", i, time.Unix(0, ts), pos, event.Directory, entry.Name)
+		}
+		fmt.Printf("l=%d, h=%d\n", l, h)
+	*/

 	for l <= h {
 		mid := (l + h) / 2
weed/util/log_buffer/sealed_buffer.go (new file, 40 lines)

@@ -0,0 +1,40 @@
+package log_buffer
+
+import "time"
+
+type MemBuffer struct {
+	buf       []byte
+	startTime time.Time
+	stopTime  time.Time
+}
+
+type SealedBuffers struct {
+	buffers []*MemBuffer
+}
+
+func newSealedBuffers(size int) *SealedBuffers {
+	sbs := &SealedBuffers{}
+
+	sbs.buffers = make([]*MemBuffer, size)
+	for i := 0; i < size; i++ {
+		sbs.buffers[i] = &MemBuffer{
+			buf: make([]byte, BufferSize),
+		}
+	}
+
+	return sbs
+}
+
+func (sbs *SealedBuffers) SealBuffer(startTime, stopTime time.Time, buf []byte) (newBuf []byte) {
+	oldMemBuffer := sbs.buffers[0]
+	size := len(sbs.buffers)
+	for i := 0; i < size-1; i++ {
+		sbs.buffers[i].buf = sbs.buffers[i+1].buf
+		sbs.buffers[i].startTime = sbs.buffers[i+1].startTime
+		sbs.buffers[i].stopTime = sbs.buffers[i+1].stopTime
+	}
+	sbs.buffers[size-1].buf = buf
+	sbs.buffers[size-1].startTime = startTime
+	sbs.buffers[size-1].stopTime = stopTime
+	return oldMemBuffer.buf
+}
weed/util/network.go (new file, 25 lines)

@@ -0,0 +1,25 @@
+package util
+
+import (
+	"net"
+
+	"github.com/chrislusf/seaweedfs/weed/glog"
+)
+
+func DetectedHostAddress() string {
+	addrs, err := net.InterfaceAddrs()
+	if err != nil {
+		glog.V(0).Infof("failed to detect ip address: %v", err)
+		return ""
+	}
+
+	for _, a := range addrs {
+		if ipnet, ok := a.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {
+			if ipnet.IP.To4() != nil {
+				return ipnet.IP.String()
+			}
+		}
+	}
+
+	return "localhost"
+}
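A small usage sketch for the new helper; as the code above shows, it returns the first non-loopback IPv4 address and falls back to "localhost":

```
package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/util"
)

func main() {
	fmt.Println("advertised host address:", util.DetectedHostAddress())
}
```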
@@ -1,6 +1,7 @@
 package util

 import (
+	"fmt"
 	"net/url"
 	"strconv"
 	"strings"

@@ -45,3 +46,18 @@ func ParseFilerUrl(entryPath string) (filerServer string, filerPort int64, path
 	path = u.Path
 	return
 }
+
+func ParseHostPort(hostPort string) (filerServer string, filerPort int64, err error) {
+	parts := strings.Split(hostPort, ":")
+	if len(parts) != 2 {
+		err = fmt.Errorf("failed to parse %s\n", hostPort)
+		return
+	}
+
+	filerPort, err = strconv.ParseInt(parts[1], 10, 64)
+	if err == nil {
+		filerServer = parts[0]
+	}
+
+	return
+}
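A short usage sketch for the new `util.ParseHostPort` helper, matching the `master.filer.default` setting introduced earlier in this diff; "localhost:8888" stands in for a real filer address:

```
package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/util"
)

func main() {
	host, port, err := util.ParseHostPort("localhost:8888")
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Printf("filer host=%s port=%d\n", host, port)
}
```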
Some files were not shown because too many files have changed in this diff.