Merge branch 'master' into message_send
commit 956ce6416f

go.mod (45 lines changed)
@@ -9,7 +9,7 @@ require (
 	github.com/Azure/azure-pipeline-go v0.2.3
 	github.com/Azure/azure-storage-blob-go v0.15.0
 	github.com/Shopify/sarama v1.36.0
-	github.com/aws/aws-sdk-go v1.44.91
+	github.com/aws/aws-sdk-go v1.44.95
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/bwmarrin/snowflake v0.3.0
 	github.com/cespare/xxhash/v2 v2.1.2 // indirect
@@ -31,7 +31,7 @@ require (
 	github.com/fsnotify/fsnotify v1.5.4 // indirect
 	github.com/go-errors/errors v1.1.1 // indirect
 	github.com/go-redis/redis/v8 v8.11.5
-	github.com/go-redsync/redsync/v4 v4.5.1
+	github.com/go-redsync/redsync/v4 v4.6.0
 	github.com/go-sql-driver/mysql v1.6.0
 	github.com/go-zookeeper/zk v1.0.2 // indirect
 	github.com/gocql/gocql v0.0.0-20210707082121-9a3953d1826d
@@ -60,12 +60,12 @@ require (
 	github.com/klauspost/compress v1.15.9 // indirect
 	github.com/klauspost/reedsolomon v1.10.0
 	github.com/kurin/blazer v0.5.3
-	github.com/lib/pq v1.10.6
+	github.com/lib/pq v1.10.7
 	github.com/linxGnu/grocksdb v1.7.7
 	github.com/magiconair/properties v1.8.6 // indirect
 	github.com/mailru/easyjson v0.7.7 // indirect
 	github.com/mattn/go-ieproxy v0.0.3 // indirect
-	github.com/mattn/go-isatty v0.0.14 // indirect
+	github.com/mattn/go-isatty v0.0.16 // indirect
 	github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
 	github.com/mitchellh/mapstructure v1.5.0 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
@@ -83,13 +83,13 @@ require (
 	github.com/prometheus/procfs v0.8.0
 	github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect
 	github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect
-	github.com/seaweedfs/goexif v2.0.0+incompatible
+	github.com/seaweedfs/goexif v1.0.3
 	github.com/seaweedfs/raft v1.1.0
 	github.com/sirupsen/logrus v1.8.1 // indirect
 	github.com/spf13/afero v1.9.2 // indirect
 	github.com/spf13/cast v1.5.0 // indirect
 	github.com/spf13/jwalterweatherman v1.1.0 // indirect
-	github.com/spf13/viper v1.12.0
+	github.com/spf13/viper v1.13.0
 	github.com/stretchr/testify v1.8.0
 	github.com/stvp/tempredis v0.0.0-20181119212430-b82af8480203
 	github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965
@@ -107,7 +107,7 @@ require (
 	github.com/xdg-go/stringprep v1.0.3 // indirect
 	github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d // indirect
 	go.etcd.io/etcd/client/v3 v3.5.4
-	go.mongodb.org/mongo-driver v1.10.1
+	go.mongodb.org/mongo-driver v1.10.2
 	go.opencensus.io v0.23.0 // indirect
 	gocloud.dev v0.26.0
 	gocloud.dev/pubsub/natspubsub v0.26.0
@@ -119,24 +119,24 @@ require (
 	golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094 // indirect
 	golang.org/x/sys v0.0.0-20220818161305-2296e01440c6
 	golang.org/x/text v0.3.7 // indirect
-	golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023
+	golang.org/x/tools v0.1.10
 	golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f // indirect
-	google.golang.org/api v0.94.0
+	google.golang.org/api v0.95.0
 	google.golang.org/appengine v1.6.7 // indirect
 	google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc // indirect
 	google.golang.org/grpc v1.49.0
 	google.golang.org/protobuf v1.28.1
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	modernc.org/b v1.0.0 // indirect
-	modernc.org/cc/v3 v3.36.0 // indirect
-	modernc.org/ccgo/v3 v3.16.8 // indirect
-	modernc.org/libc v1.16.19 // indirect
+	modernc.org/cc/v3 v3.37.0 // indirect
+	modernc.org/ccgo/v3 v3.16.9 // indirect
+	modernc.org/libc v1.18.0 // indirect
 	modernc.org/mathutil v1.5.0 // indirect
-	modernc.org/memory v1.1.1 // indirect
+	modernc.org/memory v1.3.0 // indirect
 	modernc.org/opt v0.1.1 // indirect
-	modernc.org/sqlite v1.18.1
+	modernc.org/sqlite v1.18.2
 	modernc.org/strutil v1.1.3
-	modernc.org/token v1.0.0 // indirect
+	modernc.org/token v1.0.1 // indirect
 )
 
 require (
@@ -147,10 +147,11 @@ require (
 	github.com/hanwen/go-fuse/v2 v2.1.1-0.20220627082937-d01fda7edf17
 	github.com/hashicorp/raft v1.3.10
 	github.com/hashicorp/raft-boltdb/v2 v2.2.2
-	github.com/rabbitmq/amqp091-go v1.4.0
+	github.com/rabbitmq/amqp091-go v1.5.0
 	github.com/tikv/client-go/v2 v2.0.1
 	github.com/ydb-platform/ydb-go-sdk-auth-environ v0.1.2
-	github.com/ydb-platform/ydb-go-sdk/v3 v3.37.4
+	github.com/ydb-platform/ydb-go-sdk/v3 v3.37.7
+	golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4
 	google.golang.org/grpc/security/advancedtls v0.0.0-20220622233350-5cdb09fa29c1
 )
@@ -201,16 +202,15 @@ require (
 	github.com/nats-io/nkeys v0.3.0 // indirect
 	github.com/nats-io/nuid v1.0.1 // indirect
 	github.com/opentracing/opentracing-go v1.2.0 // indirect
-	github.com/pelletier/go-toml/v2 v2.0.1 // indirect
+	github.com/pelletier/go-toml/v2 v2.0.5 // indirect
 	github.com/philhofer/fwd v1.1.1 // indirect
 	github.com/pierrec/lz4/v4 v4.1.15 // indirect
 	github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c // indirect
 	github.com/pingcap/failpoint v0.0.0-20210918120811-547c13e3eb00 // indirect
 	github.com/pingcap/kvproto v0.0.0-20220106070556-3fa8fa04f898 // indirect
 	github.com/pingcap/log v0.0.0-20211215031037-e024ba4eb0ee // indirect
 	github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
-	github.com/subosito/gotenv v1.3.0 // indirect
+	github.com/subosito/gotenv v1.4.1 // indirect
 	github.com/tikv/pd/client v0.0.0-20220216070739-26c668271201 // indirect
 	github.com/tinylib/msgp v1.1.6 // indirect
 	github.com/twmb/murmur3 v1.1.3 // indirect
@@ -224,9 +224,8 @@ require (
 	go.uber.org/atomic v1.9.0 // indirect
 	go.uber.org/multierr v1.8.0 // indirect
 	go.uber.org/zap v1.21.0 // indirect
-	golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57 // indirect
-	golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 // indirect
-	gopkg.in/ini.v1 v1.66.4 // indirect
+	golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect
+	gopkg.in/ini.v1 v1.67.0 // indirect
 	gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
go.sum (106 lines changed)
@@ -154,8 +154,8 @@ github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb
 github.com/aws/aws-sdk-go v1.15.27/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
 github.com/aws/aws-sdk-go v1.37.0/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
 github.com/aws/aws-sdk-go v1.43.31/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
-github.com/aws/aws-sdk-go v1.44.91 h1:SRWmuX7PTyhBdLuvSfM7KWrWISJsrRsUPcFDSFduRxY=
-github.com/aws/aws-sdk-go v1.44.91/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
+github.com/aws/aws-sdk-go v1.44.95 h1:QwmA+PeR6v4pF0f/dPHVPWGAshAhb9TnGZBTM5uKuI8=
+github.com/aws/aws-sdk-go v1.44.95/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
 github.com/aws/aws-sdk-go-v2 v1.16.2 h1:fqlCk6Iy3bnCumtrLz9r3mJ/2gUT0pJ0wLFVIdWh+JA=
 github.com/aws/aws-sdk-go-v2 v1.16.2/go.mod h1:ytwTPBG6fXTZLxxeeCCWj2/EMYp/xDUgX+OET6TLNNU=
 github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.1/go.mod h1:n8Bs1ElDD2wJ9kCRTczA83gYbBmjSwZp3umc6zF4EeM=
@@ -335,8 +335,10 @@ github.com/go-redis/redis/v7 v7.4.0/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRf
 github.com/go-redis/redis/v8 v8.11.4/go.mod h1:2Z2wHZXdQpCDXEGzqMockDpNyYvi2l4Pxt6RJr792+w=
 github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI=
 github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo=
-github.com/go-redsync/redsync/v4 v4.5.1 h1:T97UCaY8MfQg/6kB7MTuimF4tnLOCdJbsvIoN5KmjZE=
-github.com/go-redsync/redsync/v4 v4.5.1/go.mod h1:AfhgO1E6W3rlUTs6Zmz/B6qBZJFasV30lwo7nlizdDs=
+github.com/go-redis/redis/v9 v9.0.0-beta.2 h1:ZSr84TsnQyKMAg8gnV+oawuQezeJR11/09THcWCQzr4=
+github.com/go-redis/redis/v9 v9.0.0-beta.2/go.mod h1:Bldcd/M/bm9HbnNPi/LUtYBSD8ttcZYBMupwMXhdU0o=
+github.com/go-redsync/redsync/v4 v4.6.0 h1:CXpvsHB3XzktCleBu2Vo9Df0/qInrTG3jgzhvLzyk+U=
+github.com/go-redsync/redsync/v4 v4.6.0/go.mod h1:IxV3sygNwjOERTXrj3XvNMSb1tgNgic8GvM8alwnWcM=
 github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
 github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
@@ -650,8 +652,8 @@ github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
 github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
 github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
 github.com/lib/pq v1.10.4/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
-github.com/lib/pq v1.10.6 h1:jbk+ZieJ0D7EVGJYpL9QTz7/YW6UHbmdnZWYyK5cdBs=
-github.com/lib/pq v1.10.6/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw=
+github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
 github.com/linxGnu/grocksdb v1.7.7 h1:b6o8gagb4FL+P55qUzPchBR/C0u1lWjJOWQSWbhvTWg=
 github.com/linxGnu/grocksdb v1.7.7/go.mod h1:0hTf+iA+GOr0jDX4CgIYyJZxqOH9XlBh6KVj8+zmF34=
 github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo=
@@ -673,8 +675,9 @@ github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd
 github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
 github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
 github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
-github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y=
 github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
+github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ=
+github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
 github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
 github.com/mattn/go-runewidth v0.0.7 h1:Ei8KR0497xHyKJPAv59M1dkC+rOZCMBJ+t3fZ+twI54=
 github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
@@ -725,14 +728,18 @@ github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vv
 github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
 github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
 github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
+github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
+github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU=
 github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
 github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
 github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
 github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
 github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
 github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
-github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE=
 github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs=
+github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
+github.com/onsi/gomega v1.20.0 h1:8W0cWlwFkflGPLltQvLRB7ZVD5HuP6ng320w2IS245Q=
+github.com/onsi/gomega v1.20.0/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo=
 github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
 github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
 github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
@@ -740,8 +747,8 @@ github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0Mw
 github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
 github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
 github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
-github.com/pelletier/go-toml/v2 v2.0.1 h1:8e3L2cCQzLFi2CR4g7vGFuFxX7Jl1kKX8gW+iV0GUKU=
-github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo=
+github.com/pelletier/go-toml/v2 v2.0.5 h1:ipoSadvV8oGUjnUbMub59IDPPwfxF694nG/jwbMiyQg=
+github.com/pelletier/go-toml/v2 v2.0.5/go.mod h1:OMHamSCAODeSsVrwwvcJOaoN0LIUIaFVNZzmWyNfXas=
 github.com/peterh/liner v1.2.2 h1:aJ4AOodmL+JxOZZEL2u9iJf8omNRpqHc/EbrK+3mAXw=
 github.com/peterh/liner v1.2.2/go.mod h1:xFwJyiKIXJZUKItq5dGHZSTBRAuG/CpeNpWLyiNRNwI=
 github.com/philhofer/fwd v1.1.1 h1:GdGcTjf5RNAxwS4QLsiMzJYj5KEvPJD3Abr261yRQXQ=
@@ -809,8 +816,8 @@ github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1
 github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
 github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
 github.com/rabbitmq/amqp091-go v1.3.4/go.mod h1:ogQDLSOACsLPsIq0NpbtiifNZi2YOz0VTJ0kHRghqbM=
-github.com/rabbitmq/amqp091-go v1.4.0 h1:T2G+J9W9OY4p64Di23J6yH7tOkMocgnESvYeBjuG9cY=
-github.com/rabbitmq/amqp091-go v1.4.0/go.mod h1:JsV0ofX5f1nwOGafb8L5rBItt9GyhfQfcJj+oyz0dGg=
+github.com/rabbitmq/amqp091-go v1.5.0 h1:VouyHPBu1CrKyJVfteGknGOGCzmOz0zcv/tONLkb7rg=
+github.com/rabbitmq/amqp091-go v1.5.0/go.mod h1:JsV0ofX5f1nwOGafb8L5rBItt9GyhfQfcJj+oyz0dGg=
 github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM=
 github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
 github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk=
@@ -827,6 +834,8 @@ github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQD
 github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd h1:CmH9+J6ZSsIjUK3dcGsnCnO41eRBOnY12zwkn5qVwgc=
 github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk=
 github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
+github.com/seaweedfs/goexif v1.0.3 h1:ve/OjI7dxPW8X9YQsv3JuVMaxEyF9Rvfd04ouL+Bz30=
+github.com/seaweedfs/goexif v1.0.3/go.mod h1:Oni780Z236sXpIQzk1XoJlTwqrJ02smEin9zQeff7Fk=
 github.com/seaweedfs/goexif v2.0.0+incompatible h1:x8pckiT12QQhifwhDQpeISgDfsqmQ6VR4LFPQ64JRps=
 github.com/seaweedfs/goexif v2.0.0+incompatible/go.mod h1:Oni780Z236sXpIQzk1XoJlTwqrJ02smEin9zQeff7Fk=
 github.com/seaweedfs/raft v1.1.0 h1:Oy1mf3MzktDzNyXamD5lAZirLjEqPS7FzZoxLY0i8SU=
@@ -852,8 +861,8 @@ github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmq
 github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
 github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
 github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/viper v1.12.0 h1:CZ7eSOd3kZoaYDLbXnmzgQI5RlciuXBMA+18HwHRfZQ=
-github.com/spf13/viper v1.12.0/go.mod h1:b6COn30jlNxbm/V2IqWiNWkJ+vZNiMNksliPCiuKtSI=
+github.com/spf13/viper v1.13.0 h1:BWSJ/M+f+3nmdz9bxB+bWX28kkALN2ok11D0rSo8EJU=
+github.com/spf13/viper v1.13.0/go.mod h1:Icm2xNL3/8uyh/wFuB1jI7TiTNKp8632Nwegu+zgdYw=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
@@ -870,8 +879,8 @@ github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PK
 github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
 github.com/stvp/tempredis v0.0.0-20181119212430-b82af8480203 h1:QVqDTf3h2WHt08YuiTGPZLls0Wq99X9bWd0Q5ZSBesM=
 github.com/stvp/tempredis v0.0.0-20181119212430-b82af8480203/go.mod h1:oqN97ltKNihBbwlX8dLpwxCl3+HnXKV/R0e+sRLd9C8=
-github.com/subosito/gotenv v1.3.0 h1:mjC+YW8QpAdXibNi+vNWgzmgBH4+5l5dCXv8cNysBLI=
-github.com/subosito/gotenv v1.3.0/go.mod h1:YzJjq/33h7nrwdY+iHMhEOEEbW0ovIz0tB6t6PwAXzs=
+github.com/subosito/gotenv v1.4.1 h1:jyEFiXpy21Wm81FBN71l9VoMMV8H8jG+qIK3GCpY6Qs=
+github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=
 github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965 h1:1oFLiOyVl+W7bnBzGhf7BbIv9loSFQcieWWYIjLqcAw=
 github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965/go.mod h1:9OrXJhf154huy1nPWmuSrkgjPUtUNhA+Zmy+6AESzuA=
 github.com/tidwall/gjson v1.14.3 h1:9jvXn7olKEHU1S9vwoMGliaT8jq1vJ7IH/n9zD9Dnlw=
@@ -921,8 +930,8 @@ github.com/ydb-platform/ydb-go-genproto v0.0.0-20220801095836-cf975531fd1f/go.mo
 github.com/ydb-platform/ydb-go-sdk-auth-environ v0.1.2 h1:EYSI1kulnHb0H0zt3yOw4cRj4ABMSMGwNe43D+fX7e4=
 github.com/ydb-platform/ydb-go-sdk-auth-environ v0.1.2/go.mod h1:Xfjce+VMU9yJVr1lj60yK2fFPWjB4jr/4cp3K7cjzi4=
 github.com/ydb-platform/ydb-go-sdk/v3 v3.25.3/go.mod h1:PFizF/vJsdAgEwjK3DVSBD52kdmRkWfSIS2q2pA+e88=
-github.com/ydb-platform/ydb-go-sdk/v3 v3.37.4 h1:wQtx05MHEuYnIt56wos9vaz3N7/Ue04PiSYWk7o7Akw=
-github.com/ydb-platform/ydb-go-sdk/v3 v3.37.4/go.mod h1:eD5OyVA8MuMq3+BYBMKGUfa2faTZhbx+LE+y1RgitFE=
+github.com/ydb-platform/ydb-go-sdk/v3 v3.37.7 h1:UvV78GGF7ZFBnvI6HtOK03bu5vb2UvyZwXhdEHlt0SI=
+github.com/ydb-platform/ydb-go-sdk/v3 v3.37.7/go.mod h1:eD5OyVA8MuMq3+BYBMKGUfa2faTZhbx+LE+y1RgitFE=
 github.com/ydb-platform/ydb-go-yc v0.8.3 h1:92UUUMsfvtMl6mho8eQ9lbkiPrF3a9CT+RrVRAKNRwo=
 github.com/ydb-platform/ydb-go-yc v0.8.3/go.mod h1:zUolAFGzJ5XG8uwiseTLr9Lapm7L7hdVdZgLSuv9FXE=
 github.com/ydb-platform/ydb-go-yc-metadata v0.5.2 h1:nMtixUijP0Z7iHJNT9fOL+dbmEzZxqU6Xk87ll7hqXg=
@@ -934,6 +943,7 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
 github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
 github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
 go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
 go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU=
@@ -947,8 +957,8 @@ go.etcd.io/etcd/client/pkg/v3 v3.5.4/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3
 go.etcd.io/etcd/client/v3 v3.5.2/go.mod h1:kOOaWFFgHygyT0WlSmL8TJiXmMysO/nNUlEsSsN6W4o=
 go.etcd.io/etcd/client/v3 v3.5.4 h1:p83BUL3tAYS0OT/r0qglgc3M1JjhM0diV8DSWAhVXv4=
 go.etcd.io/etcd/client/v3 v3.5.4/go.mod h1:ZaRkVgBZC+L+dLCjTcF1hRXpgZXQPOvnA/Ak/gq3kiY=
-go.mongodb.org/mongo-driver v1.10.1 h1:NujsPveKwHaWuKUer/ceo9DzEe7HIj1SlJ6uvXZG0S4=
-go.mongodb.org/mongo-driver v1.10.1/go.mod h1:z4XpeoU6w+9Vht+jAFyLgVrD+jGSQQe0+CBWFHNiHt8=
+go.mongodb.org/mongo-driver v1.10.2 h1:4Wk3cnqOrQCn0P92L3/mmurMxzdvWWs5J9jinAVKD+k=
+go.mongodb.org/mongo-driver v1.10.2/go.mod h1:z4XpeoU6w+9Vht+jAFyLgVrD+jGSQQe0+CBWFHNiHt8=
 go.opencensus.io v0.15.0/go.mod h1:UffZAU+4sDEINUGP/B7UfBBkq4fqLu9zXAX7ke6CHW0=
 go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
 go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
@@ -1058,8 +1068,8 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
-golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57 h1:LQmS1nU0twXLA96Kt7U9qtHJEbBk3z6Q0V4UXjZkpr4=
-golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
+golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 h1:kQgndtyPBW/JIYERgdxfwMYh3AVStj88WQTlNDi2a+o=
+golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -1109,6 +1119,7 @@ golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qx
 golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20210907225631-ff17edfbf26d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20211020060615-d418f374d309/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20220107192237-5cfca573fb4d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
@@ -1240,6 +1251,7 @@ golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20211117180635-dee7805ff2e1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -1252,9 +1264,11 @@ golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20220204135822-1c1b9b1eba6a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220330033206-e17cdc41300f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -1262,6 +1276,7 @@ golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220818161305-2296e01440c6 h1:Sx/u41w+OwrInGdEckYmEuU5gHoGSL4QbDz3S9s6j4U=
 golang.org/x/sys v0.0.0-20220818161305-2296e01440c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
@@ -1353,8 +1368,8 @@ golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
 golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
 golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
 golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023 h1:0c3L82FDQ5rt1bjTBlchS8t6RQ6299/+5bWMnRLh+uI=
-golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
+golang.org/x/tools v0.1.10 h1:QjFRCZxdOhBJ/UNgnBZLbNV13DlbnK0quyivTnXJM20=
+golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
 golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -1411,8 +1426,8 @@ google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69
 google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw=
 google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg=
 google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o=
-google.golang.org/api v0.94.0 h1:KtKM9ru3nzQioV1HLlUf1cR7vMYJIpgls5VhAYQXIwA=
-google.golang.org/api v0.94.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI=
+google.golang.org/api v0.95.0 h1:d1c24AAS01DYqXreBeuVV7ewY/U8Mnhh47pwtsgVtYg=
+google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI=
 google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
 google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
 google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -1588,8 +1603,8 @@ gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMy
 gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s=
 gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
 gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
-gopkg.in/ini.v1 v1.66.4 h1:SsAcf+mM7mRZo2nJNGt8mZCjG8ZRaNGMURJw7BsIST4=
-gopkg.in/ini.v1 v1.66.4/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
+gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
 gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
 gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
@@ -1618,41 +1633,38 @@ lukechampine.com/uint128 v1.1.1 h1:pnxCASz787iMf+02ssImqk6OLt+Z5QHMoZyUXR4z6JU=
 lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk=
 modernc.org/b v1.0.0 h1:vpvqeyp17ddcQWF29Czawql4lDdABCDRbXRAS4+aF2o=
 modernc.org/b v1.0.0/go.mod h1:uZWcZfRj1BpYzfN9JTerzlNUnnPsV9O2ZA8JsRcubNg=
-modernc.org/cc/v3 v3.36.0 h1:0kmRkTmqNidmu3c7BNDSdVHCxXCkWLmWmCIVX4LUboo=
-modernc.org/cc/v3 v3.36.0/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI=
-modernc.org/ccgo/v3 v3.0.0-20220428102840-41399a37e894/go.mod h1:eI31LL8EwEBKPpNpA4bU1/i+sKOwOrQy8D87zWUcRZc=
-modernc.org/ccgo/v3 v3.0.0-20220430103911-bc99d88307be/go.mod h1:bwdAnOoaIt8Ax9YdWGjxWsdkPcZyRPHqrOvJxaKAKGw=
-modernc.org/ccgo/v3 v3.16.6/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ=
-modernc.org/ccgo/v3 v3.16.8 h1:G0QNlTqI5uVgczBWfGKs7B++EPwCfXPWGD2MdeKloDs=
-modernc.org/ccgo/v3 v3.16.8/go.mod h1:zNjwkizS+fIFDrDjIAgBSCLkWbJuHF+ar3QRn+Z9aws=
+modernc.org/cc/v3 v3.36.2/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI=
+modernc.org/cc/v3 v3.37.0 h1:Y9XYwAPXYZUL1h5vvYPJDlvx7XEVBZdDcdodqax8t7c=
+modernc.org/cc/v3 v3.37.0/go.mod h1:vtL+3mdHx/wcj3iEGz84rQa8vEqR6XM84v5Lcvfph20=
+modernc.org/ccgo/v3 v3.16.9 h1:AXquSwg7GuMk11pIdw7fmO1Y/ybgazVkMhsZWCV0mHM=
+modernc.org/ccgo/v3 v3.16.9/go.mod h1:zNMzC9A9xeNUepy6KuZBbugn3c0Mc9TeiJO4lgvkJDo=
 modernc.org/ccorpus v1.11.6 h1:J16RXiiqiCgua6+ZvQot4yUuUy8zxgqbqEEUuGPlISk=
 modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ=
 modernc.org/httpfs v1.0.6 h1:AAgIpFZRXuYnkjftxTAZwMIiwEqAfk8aVB2/oA6nAeM=
 modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM=
-modernc.org/libc v0.0.0-20220428101251-2d5f3daf273b/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA=
-modernc.org/libc v1.16.0/go.mod h1:N4LD6DBE9cf+Dzf9buBlzVJndKr/iJHG97vGLHYnb5A=
-modernc.org/libc v1.16.1/go.mod h1:JjJE0eu4yeK7tab2n4S1w8tlWd9MxXLRzheaRnAKymU=
-modernc.org/libc v1.16.17/go.mod h1:hYIV5VZczAmGZAnG15Vdngn5HSF5cSkbvfz2B7GRuVU=
-modernc.org/libc v1.16.19 h1:S8flPn5ZeXx6iw/8yNa986hwTQDrY8RXU7tObZuAozo=
-modernc.org/libc v1.16.19/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA=
+modernc.org/libc v1.17.0/go.mod h1:XsgLldpP4aWlPlsjqKRdHPqCxCjISdHfM/yeWC5GyW0=
+modernc.org/libc v1.18.0 h1:EKpC8eyhOcxpstYjohs7vxni7BoQBUVWXsf5rAZzlgk=
+modernc.org/libc v1.18.0/go.mod h1:vj6zehR5bfc98ipowQOM2nIDUZnVew/wNC/2tOGS+q0=
 modernc.org/mathutil v1.1.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
 modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
 modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
 modernc.org/mathutil v1.5.0 h1:rV0Ko/6SfM+8G+yKiyI830l3Wuz1zRutdslNoQ0kfiQ=
 modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
-modernc.org/memory v1.1.1 h1:bDOL0DIDLQv7bWhP3gMvIrnoFw+Eo6F7a2QK9HPDiFU=
-modernc.org/memory v1.1.1/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw=
+modernc.org/memory v1.2.0/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw=
+modernc.org/memory v1.3.0 h1:6ZIOLb5ronARPxEPxtZz1WbSRllgA09FCvNNyql5kZg=
+modernc.org/memory v1.3.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU=
 modernc.org/opt v0.1.1 h1:/0RX92k9vwVeDXj+Xn23DKp2VJubL7k8qNffND6qn3A=
 modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
-modernc.org/sqlite v1.18.1 h1:ko32eKt3jf7eqIkCgPAeHMBXw3riNSLhl2f3loEF7o8=
-modernc.org/sqlite v1.18.1/go.mod h1:6ho+Gow7oX5V+OiOQ6Tr4xeqbx13UZ6t+Fw9IRUG4d4=
+modernc.org/sqlite v1.18.2 h1:S2uFiaNPd/vTAP/4EmyY8Qe2Quzu26A2L1e25xRNTio=
+modernc.org/sqlite v1.18.2/go.mod h1:kvrTLEWgxUcHa2GfHBQtanR1H9ht3hTJNtKpzH9k1u0=
 modernc.org/strutil v1.1.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs=
 modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw=
 modernc.org/strutil v1.1.3 h1:fNMm+oJklMGYfU9Ylcywl0CO5O6nTfaowNsh2wpPjzY=
 modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw=
-modernc.org/tcl v1.13.1 h1:npxzTwFTZYM8ghWicVIX1cRWzj7Nd8i6AqqX2p+IYao=
-modernc.org/token v1.0.0 h1:a0jaWiNMDhDUtqOj09wvjWWAqd3q7WpBulmL9H2egsk=
+modernc.org/tcl v1.13.2 h1:5PQgL/29XkQ9wsEmmNPjzKs+7iPCaYqUJAhzPvQbjDA=
 modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
+modernc.org/token v1.0.1 h1:A3qvTqOwexpfZZeyI0FeGPDlSWX5pjZu9hF4lU+EKWg=
+modernc.org/token v1.0.1/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
 modernc.org/z v1.5.1 h1:RTNHdsrOpeoSeOF4FbzTo8gBYByaJ5xT7NgZ9ZqRiJM=
 nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0=
 rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
@@ -1,5 +1,5 @@
 apiVersion: v1
 description: SeaweedFS
 name: seaweedfs
-appVersion: "3.25"
-version: "3.25"
+appVersion: "3.27"
+version: "3.27"
@@ -9,7 +9,6 @@ import (
 	"github.com/seaweedfs/seaweedfs/weed/pb/remote_pb"
 	"github.com/seaweedfs/seaweedfs/weed/remote_storage"
 	"github.com/seaweedfs/seaweedfs/weed/replication/source"
-	"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
 	"github.com/seaweedfs/seaweedfs/weed/util"
 	"google.golang.org/protobuf/proto"
 	"math"
@@ -183,7 +182,7 @@ func (option *RemoteGatewayOptions) makeBucketedEventProcessor(filerSource *sour
 		if message.NewParentPath == option.bucketsDir {
 			return handleCreateBucket(message.NewEntry)
 		}
-		if strings.HasPrefix(message.NewParentPath, option.bucketsDir) && strings.Contains(message.NewParentPath, "/"+s3_constants.MultipartUploadsFolder+"/") {
+		if isMultipartUploadFile(message.NewParentPath, message.NewEntry.Name) {
 			return nil
 		}
 		if !filer.HasData(message.NewEntry) {
@@ -14,6 +14,7 @@ import (
 
 type RemoteSyncOptions struct {
 	filerAddress       *string
+	storageClass       *string
 	grpcDialOption     grpc.DialOption
 	readChunkFromFiler *bool
 	timeAgo            *time.Duration
@@ -45,6 +46,7 @@ func init() {
 	cmdFilerRemoteSynchronize.Run = runFilerRemoteSynchronize // break init cycle
 	remoteSyncOptions.filerAddress = cmdFilerRemoteSynchronize.Flag.String("filer", "localhost:8888", "filer of the SeaweedFS cluster")
 	remoteSyncOptions.dir = cmdFilerRemoteSynchronize.Flag.String("dir", "", "a mounted directory on filer")
+	remoteSyncOptions.storageClass = cmdFilerRemoteSynchronize.Flag.String("storageClass", "", "override amz storage class, empty to delete")
 	remoteSyncOptions.readChunkFromFiler = cmdFilerRemoteSynchronize.Flag.Bool("filerProxy", false, "read file chunks from filer instead of volume servers")
 	remoteSyncOptions.timeAgo = cmdFilerRemoteSynchronize.Flag.Duration("timeAgo", 0, "start time before now, skipping previous metadata changes. \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\", \"m\", \"h\"")
 	remoteSyncOptions.clientId = util.RandomInt32()
@@ -3,6 +3,7 @@ package command
 import (
 	"context"
 	"fmt"
+	"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
 	"os"
 	"strings"
 	"time"
@@ -36,6 +37,11 @@ func followUpdatesAndUploadToRemote(option *RemoteSyncOptions, filerSource *sour
 
 	var lastLogTsNs = time.Now().UnixNano()
 	processEventFnWithOffset := pb.AddOffsetFunc(func(resp *filer_pb.SubscribeMetadataResponse) error {
+		if *option.storageClass == "" {
+			delete(resp.EventNotification.NewEntry.Extended, s3_constants.AmzStorageClass)
+		} else {
+			resp.EventNotification.NewEntry.Extended[s3_constants.AmzStorageClass] = []byte(*option.storageClass)
+		}
 		processor.AddSyncJob(resp)
 		return nil
 	}, 3*time.Second, func(counter int64, lastTsNs int64) error {
@@ -107,6 +113,9 @@ func makeEventProcessor(remoteStorage *remote_pb.RemoteConf, mountedDir string,
 			return nil
 		}
 		if filer_pb.IsCreate(resp) {
+			if isMultipartUploadFile(message.NewParentPath, message.NewEntry.Name) {
+				return nil
+			}
 			if !filer.HasData(message.NewEntry) {
 				return nil
 			}
@@ -156,7 +165,9 @@ func makeEventProcessor(remoteStorage *remote_pb.RemoteConf, mountedDir string,
 			glog.V(2).Infof("update: %+v", resp)
 			glog.V(0).Infof("delete %s", remote_storage.FormatLocation(oldDest))
 			if err := client.DeleteFile(oldDest); err != nil {
 				return err
 			}
+			if isMultipartUploadFile(resp.Directory, message.OldEntry.Name) {
+				return nil
+			}
 		}
 		remoteEntry, writeErr := retriedWriteFile(client, filerSource, message.NewEntry, dest)
 		if writeErr != nil {
@@ -247,3 +258,9 @@ func updateLocalEntry(filerClient filer_pb.FilerClient, dir string, entry *filer
 		return err
 	})
 }
+
+func isMultipartUploadFile(dir string, name string) bool {
+	return strings.HasPrefix(dir, "/buckets/") &&
+		strings.Contains(dir, "/"+s3_constants.MultipartUploadsFolder+"/") &&
+		strings.HasSuffix(name, ".part")
+}
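The new isMultipartUploadFile helper is what both the remote gateway and the remote sync path now consult before mirroring an entry. Below is a minimal standalone sketch of the same predicate with hypothetical sample paths; the concrete value of s3_constants.MultipartUploadsFolder is assumed here to be ".uploads" and should be verified against the constant in weed/s3api/s3_constants.

package main

import (
	"fmt"
	"strings"
)

// Assumed value of s3_constants.MultipartUploadsFolder; treat as an assumption.
const multipartUploadsFolder = ".uploads"

// Same shape as the helper added in this commit: a file is an in-progress
// multipart part iff it lives under a bucket's uploads folder and ends in ".part".
func isMultipartUploadFile(dir string, name string) bool {
	return strings.HasPrefix(dir, "/buckets/") &&
		strings.Contains(dir, "/"+multipartUploadsFolder+"/") &&
		strings.HasSuffix(name, ".part")
}

func main() {
	// Hypothetical paths for illustration only.
	fmt.Println(isMultipartUploadFile("/buckets/b1/"+multipartUploadsFolder+"/u1", "0001.part")) // true: skipped by the sync
	fmt.Println(isMultipartUploadFile("/buckets/b1/dir", "movie.mp4"))                           // false: synced normally
}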
@@ -54,7 +54,7 @@ func (t *MetadataProcessor) AddSyncJob(resp *filer_pb.SubscribeMetadataResponse)
 
 	// if is the oldest job, write down the watermark
 	isOldest := true
-	for t, _ := range t.activeJobs {
+	for t := range t.activeJobs {
 		if resp.TsNs > t {
 			isOldest = false
 			break
@@ -184,7 +184,10 @@ func (s3opt *S3Options) startS3Server() bool {
 	go stats_collect.LoopPushingMetric("s3", stats_collect.SourceName(uint32(*s3opt.port)), metricsAddress, metricsIntervalSec)
 
 	router := mux.NewRouter().SkipClean(true)
 
+	var localFilerSocket string
+	if s3opt.localFilerSocket != nil {
+		localFilerSocket = *s3opt.localFilerSocket
+	}
 	s3ApiServer, s3ApiServer_err := s3api.NewS3ApiServer(router, &s3api.S3ApiServerOption{
 		Filer: filerAddress,
 		Port:  *s3opt.port,
@@ -194,7 +197,7 @@ func (s3opt *S3Options) startS3Server() bool {
 		GrpcDialOption:            grpcDialOption,
 		AllowEmptyFolder:          *s3opt.allowEmptyFolder,
 		AllowDeleteBucketNotEmpty: *s3opt.allowDeleteBucketNotEmpty,
-		LocalFilerSocket:          *s3opt.localFilerSocket,
+		LocalFilerSocket:          localFilerSocket,
 		DataCenter:                *s3opt.dataCenter,
 	})
 	if s3ApiServer_err != nil {
@@ -130,7 +130,6 @@ func init() {
 	serverOptions.v.preStopSeconds = cmdServer.Flag.Int("volume.preStopSeconds", 10, "number of seconds between stop send heartbeats and stop volume server")
 	serverOptions.v.pprof = cmdServer.Flag.Bool("volume.pprof", false, "enable pprof http handlers. precludes --memprofile and --cpuprofile")
 	serverOptions.v.idxFolder = cmdServer.Flag.String("volume.dir.idx", "", "directory to store .idx files")
-	serverOptions.v.enableTcp = cmdServer.Flag.Bool("volume.tcp", false, "<exprimental> enable tcp port")
 	serverOptions.v.inflightUploadDataTimeout = cmdServer.Flag.Duration("volume.inflightUploadDataTimeout", 60*time.Second, "inflight upload data wait timeout of volume servers")
 
 	s3Options.port = cmdServer.Flag.Int("s3.port", 8333, "s3 server http listen port")
@@ -65,7 +65,6 @@ type VolumeServerOptions struct {
 	preStopSeconds            *int
 	metricsHttpPort           *int
 	// pulseSeconds    *int
-	enableTcp                 *bool
 	inflightUploadDataTimeout *time.Duration
 }
 
@@ -96,7 +95,6 @@ func init() {
 	v.pprof = cmdVolume.Flag.Bool("pprof", false, "enable pprof http handlers. precludes --memprofile and --cpuprofile")
 	v.metricsHttpPort = cmdVolume.Flag.Int("metricsPort", 0, "Prometheus metrics listen port")
 	v.idxFolder = cmdVolume.Flag.String("dir.idx", "", "directory to store .idx files")
-	v.enableTcp = cmdVolume.Flag.Bool("tcp", false, "<experimental> enable tcp port")
 	v.inflightUploadDataTimeout = cmdVolume.Flag.Duration("inflightUploadDataTimeout", 60*time.Second, "inflight upload data wait timeout of volume servers")
 }
 
@@ -258,11 +256,6 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
 		}
 	}
 
-	// starting tcp server
-	if *v.enableTcp {
-		go v.startTcpService(volumeServer)
-	}
-
 	// starting the cluster http server
 	clusterHttpServer := v.startClusterHttpService(volumeMux)
 
@@ -388,22 +381,3 @@ func (v VolumeServerOptions) startClusterHttpService(handler http.Handler) httpd
 	}()
 	return clusterHttpServer
 }
-
-func (v VolumeServerOptions) startTcpService(volumeServer *weed_server.VolumeServer) {
-	listeningAddress := util.JoinHostPort(*v.bindIp, *v.port+20000)
-	glog.V(0).Infoln("Start Seaweed volume server", util.Version(), "tcp at", listeningAddress)
-	listener, e := util.NewListener(listeningAddress, 0)
-	if e != nil {
-		glog.Fatalf("Volume server listener error on %s:%v", listeningAddress, e)
-	}
-	defer listener.Close()
-
-	for {
-		c, err := listener.Accept()
-		if err != nil {
-			fmt.Println(err)
-			return
-		}
-		go volumeServer.HandleTcpConnection(c)
-	}
-}
@@ -66,8 +66,8 @@ func (store *ArangodbStore) Initialize(configuration util.Configuration, prefix
 }
 
 func (store *ArangodbStore) connection(uris []string, user string, pass string, insecure bool) (err error) {
-	ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
-
+	ctx, cn := context.WithTimeout(context.Background(), 10*time.Second)
+	defer cn()
 	store.connect, err = http.NewConnection(http.ConnectionConfig{
 		Endpoints: uris,
 		TLSConfig: &tls.Config{
@@ -274,10 +274,10 @@ func (store *ArangodbStore) DeleteFolderChildren(ctx context.Context, fullpath u
 	for d in %s
 	filter starts_with(d.directory, "%s/") || d.directory == "%s"
 	remove d._key in %s`,
-		targetCollection.Name(),
+		"`"+targetCollection.Name()+"`",
 		strings.Join(strings.Split(string(fullpath), "/"), ","),
 		string(fullpath),
-		targetCollection.Name(),
+		"`"+targetCollection.Name()+"`",
 	)
 	cur, err := store.database.Query(ctx, query, nil)
 	if err != nil {
@@ -296,7 +296,7 @@ func (store *ArangodbStore) ListDirectoryPrefixedEntries(ctx context.Context, di
 	if err != nil {
 		return lastFileName, err
 	}
-	query := "for d in " + targetCollection.Name()
+	query := "for d in " + "`" + targetCollection.Name() + "`"
 	if includeStartFile {
 		query = query + " filter d.name >= \"" + startFileName + "\" "
 	} else {
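The connection() change above fixes a vet warning and a small resource leak: context.WithTimeout returns a CancelFunc, and discarding it with the blank identifier keeps the deadline timer and its goroutine-visible state alive until the timeout fires. A minimal sketch of the corrected pattern, independent of the ArangoDB driver:

package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	// Wrong: ctx, _ := context.WithTimeout(...) leaks the timer until the deadline.
	// Right: keep the CancelFunc and defer it, as the store's connection() now does.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel() // releases the timer as soon as this function returns

	select {
	case <-time.After(1 * time.Second): // stand-in for a dial that succeeds quickly
		fmt.Println("dial finished")
	case <-ctx.Done():
		fmt.Println("dial timed out:", ctx.Err())
	}
}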
@@ -12,7 +12,7 @@ import (
 	"github.com/seaweedfs/seaweedfs/weed/util"
 )
 
-//convert a string into arango-key safe hex bytes hash
+// convert a string into arango-key safe hex bytes hash
 func hashString(dir string) string {
 	h := md5.New()
 	io.WriteString(h, dir)
@@ -98,8 +98,26 @@ func (store *ArangodbStore) ensureBucket(ctx context.Context, bucket string) (bc
 	return store.buckets[bucket], nil
 }
 
+// transform to an arango compliant name
+func bucketToCollectionName(s string) string {
+	if len(s) == 0 {
+		return ""
+	}
+	// replace all "." with _
+	s = strings.ReplaceAll(s, ".", "_")
+
+	// if starts with number or '.' then add a special prefix
+	if (s[0] >= '0' && s[0] <= '9') || (s[0] == '.' || s[0] == '_' || s[0] == '-') {
+		s = "xN--" + s
+	}
+	return s
+}
+
 // creates collection if not exist, ensures indices if not exist
-func (store *ArangodbStore) ensureCollection(ctx context.Context, name string) (c driver.Collection, err error) {
+func (store *ArangodbStore) ensureCollection(ctx context.Context, bucket_name string) (c driver.Collection, err error) {
+	// convert the bucket to collection name
+	name := bucketToCollectionName(bucket_name)
+
 	ok, err := store.database.CollectionExists(ctx, name)
 	if err != nil {
 		return
@@ -22,6 +22,39 @@ i test using this dev database:
 `docker run -p 8529:8529 -e ARANGO_ROOT_PASSWORD=test arangodb/arangodb:3.9.0`
 
 
+
+## database structure
+
+arangodb has a few restrictions which require the use of a few tricks in order to losslessly store the data.
+
+### filer store
+
+arangodb does not support []byte, and will store such as a uint64 array. this would be a waste of space. to counteract this, we store the data as a length prefixed uint64 byteset.
+
+### filer kv
+
+same as above
+
+### filer buckets
+
+s3 buckets are implemented through arangodb collection. this allows us to do very fast bucket deletion by simply deleting the collection
+
+arangodb collection name rules is character set `azAZ09_-` with a 256 character max. however the first character must be a letter.
+
+s3 bucket name rule is the set `azAZ09.-` with a 63 characters max.
+
+the rules for collection names is then the following:
+
+1. if the bucket name is a valid arangodb collection name, then nothing is done.
+2. if the bucket name contains a ".", the "." is replaced with "_"
+3. if the bucket name now begins with a number or "_", the prefix "xN--" is prepended to the collection name
+
+this allows for these collection names to be used.
+
+
 ## features i don't personally need but are missing
 [ ] provide tls cert to arango
 [ ] authentication that is not basic auth
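A runnable sketch of the renaming rules the README lists, mirroring the bucketToCollectionName helper added to the store above; the sample bucket names are hypothetical:

package main

import (
	"fmt"
	"strings"
)

// Same rules as the bucketToCollectionName helper in the ArangoDB store.
func bucketToCollectionName(s string) string {
	if len(s) == 0 {
		return ""
	}
	s = strings.ReplaceAll(s, ".", "_") // rule 2: "." is not allowed in collection names
	if (s[0] >= '0' && s[0] <= '9') || (s[0] == '.' || s[0] == '_' || s[0] == '-') {
		s = "xN--" + s // rule 3: collection names must start with a letter
	}
	return s
}

func main() {
	for _, bucket := range []string{"photos", "my.bucket", "2022-logs"} {
		fmt.Printf("%s -> %s\n", bucket, bucketToCollectionName(bucket))
	}
	// photos -> photos
	// my.bucket -> my_bucket
	// 2022-logs -> xN--2022-logs
}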
@@ -58,7 +58,7 @@ func (f *Filer) loopProcessingDeletion() {
 					glog.V(0).Infof("deleting fileIds len=%d error: %v", deletionCount, err)
 				}
 			} else {
-				glog.V(2).Infof("deleting fileIds len=%d", deletionCount)
+				glog.V(2).Infof("deleting fileIds %+v", toDeleteFileIds)
 			}
 		}
 	})
@@ -1,6 +1,8 @@
 package mount
 
 import (
+	"golang.org/x/sync/semaphore"
+	"math"
 	"sync"
 
 	"golang.org/x/exp/slices"
@@ -28,17 +30,18 @@ type FileHandle struct {
 	reader      *filer.ChunkReadAt
 	contentType string
 	handle      uint64
-	sync.Mutex
+	orderedMutex *semaphore.Weighted
 
 	isDeleted bool
 }
 
 func newFileHandle(wfs *WFS, handleId FileHandleId, inode uint64, entry *filer_pb.Entry) *FileHandle {
 	fh := &FileHandle{
-		fh:      handleId,
-		counter: 1,
-		inode:   inode,
-		wfs:     wfs,
+		fh:           handleId,
+		counter:      1,
+		inode:        inode,
+		wfs:          wfs,
+		orderedMutex: semaphore.NewWeighted(int64(math.MaxInt64)),
 	}
 	// dirtyPages: newContinuousDirtyPages(file, writeOnly),
 	fh.dirtyPages = newPageWriter(fh, wfs.option.ChunkSizeLimit)
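The handle's embedded sync.Mutex is replaced here (and in the Read/Write/Flush/Fsync/Lseek/CopyFileRange hunks below) by a golang.org/x/sync/semaphore.Weighted. Unlike sync.Mutex, a weighted semaphore queues blocked Acquire calls and serves them in FIFO order, which is presumably why the field is named orderedMutex; the MaxInt64 capacity also leaves room for weighted or shared holds. A minimal sketch of the FIFO-ordered, mutex-like usage, with capacity 1 for clarity (the capacity and the ordering motivation are assumptions, not statements from the commit):

package main

import (
	"context"
	"fmt"
	"sync"

	"golang.org/x/sync/semaphore"
)

func main() {
	// A weighted semaphore with capacity 1 behaves like a mutex, but blocked
	// Acquire calls are granted in FIFO order, which sync.Mutex does not promise.
	lock := semaphore.NewWeighted(1)

	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			if err := lock.Acquire(context.Background(), 1); err != nil {
				return // Acquire only fails if the context is cancelled
			}
			defer lock.Release(1)
			fmt.Println("worker", id, "holds the lock")
		}(i)
	}
	wg.Wait()
}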
@@ -80,7 +80,7 @@ func (i *InodeToPath) Lookup(path util.FullPath, unixTime int64, isDirectory boo
 	}
 	if !isHardlink {
 		for _, found := i.inode2path[inode]; found; inode++ {
-			_, found = i.inode2path[inode]
+			_, found = i.inode2path[inode+1]
 		}
 	}
 }
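The one-character fix in Lookup corrects an off-by-one in the linear probe for a free inode number: the old loop body re-tested the slot the loop condition had already cleared, so the probe always stopped one step past the first free inode. Checking inode+1 tests the slot the post-increment is about to move to. A standalone trace of both variants, using a plain map as a stand-in for inode2path:

package main

import "fmt"

// Stand-in for InodeToPath.inode2path: inodes 5 and 6 are taken, 7 is free.
var inode2path = map[uint64]string{5: "/a", 6: "/b"}

// Old probe: re-tests the slot the condition already cleared, so it overshoots.
func probeOld(inode uint64) uint64 {
	for _, found := inode2path[inode]; found; inode++ {
		_, found = inode2path[inode]
	}
	return inode
}

// Fixed probe: tests the next slot, the one the post-increment advances to.
func probeNew(inode uint64) uint64 {
	for _, found := inode2path[inode]; found; inode++ {
		_, found = inode2path[inode+1]
	}
	return inode
}

func main() {
	fmt.Println(probeOld(5)) // 8: stepped one past the first free inode
	fmt.Println(probeNew(5)) // 7: the first free inode
}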
@@ -1,6 +1,7 @@
 package mount
 
 import (
+	"context"
 	"net/http"
 
 	"github.com/hanwen/go-fuse/v2/fuse"
@@ -43,8 +44,8 @@ func (wfs *WFS) CopyFileRange(cancel <-chan struct{}, in *fuse.CopyFileRangeIn)
 	}
 
 	// lock source and target file handles
-	fhOut.Lock()
-	defer fhOut.Unlock()
+	fhOut.orderedMutex.Acquire(context.Background(), 1)
+	defer fhOut.orderedMutex.Release(1)
 	fhOut.entryLock.Lock()
 	defer fhOut.entryLock.Unlock()
 
@@ -53,8 +54,8 @@ func (wfs *WFS) CopyFileRange(cancel <-chan struct{}, in *fuse.CopyFileRangeIn)
 	}
 
 	if fhIn.fh != fhOut.fh {
-		fhIn.Lock()
-		defer fhIn.Unlock()
+		fhIn.orderedMutex.Acquire(context.Background(), 1)
+		defer fhIn.orderedMutex.Release(1)
 		fhIn.entryLock.Lock()
 		defer fhIn.entryLock.Unlock()
 	}
@@ -1,6 +1,7 @@
 package mount
 
 import (
+	"context"
 	"syscall"
 
 	"github.com/hanwen/go-fuse/v2/fuse"
@@ -35,8 +36,8 @@ func (wfs *WFS) Lseek(cancel <-chan struct{}, in *fuse.LseekIn, out *fuse.LseekO
 	}
 
 	// lock the file until the proper offset was calculated
-	fh.Lock()
-	defer fh.Unlock()
+	fh.orderedMutex.Acquire(context.Background(), 1)
+	defer fh.orderedMutex.Release(1)
 	fh.entryLock.Lock()
 	defer fh.entryLock.Unlock()
 
@@ -1,6 +1,7 @@
 package mount
 
 import (
+	"context"
 	"io"
 
 	"github.com/hanwen/go-fuse/v2/fuse"
@@ -39,8 +40,8 @@ func (wfs *WFS) Read(cancel <-chan struct{}, in *fuse.ReadIn, buff []byte) (fuse
 		return nil, fuse.ENOENT
 	}
 
-	fh.Lock()
-	defer fh.Unlock()
+	fh.orderedMutex.Acquire(context.Background(), 1)
+	defer fh.orderedMutex.Release(1)
 
 	offset := int64(in.Offset)
 	totalRead, err := readDataByFileHandle(buff, fh, offset)
@@ -55,8 +55,8 @@ func (wfs *WFS) Flush(cancel <-chan struct{}, in *fuse.FlushIn) fuse.Status {
 		return fuse.ENOENT
 	}
 
-	fh.Lock()
-	defer fh.Unlock()
+	fh.orderedMutex.Acquire(context.Background(), 1)
+	defer fh.orderedMutex.Release(1)
 
 	return wfs.doFlush(fh, in.Uid, in.Gid)
 }
@@ -87,8 +87,8 @@ func (wfs *WFS) Fsync(cancel <-chan struct{}, in *fuse.FsyncIn) (code fuse.Statu
 		return fuse.ENOENT
 	}
 
-	fh.Lock()
-	defer fh.Unlock()
+	fh.orderedMutex.Acquire(context.Background(), 1)
+	defer fh.orderedMutex.Release(1)
 
 	return wfs.doFlush(fh, in.Uid, in.Gid)
 
@@ -1,6 +1,7 @@
 package mount
 
 import (
+	"context"
 	"github.com/hanwen/go-fuse/v2/fuse"
 	"net/http"
 	"syscall"
@@ -45,8 +46,8 @@ func (wfs *WFS) Write(cancel <-chan struct{}, in *fuse.WriteIn, data []byte) (wr
 
 	fh.dirtyPages.writerPattern.MonitorWriteAt(int64(in.Offset), int(in.Size))
 
-	fh.Lock()
-	defer fh.Unlock()
+	fh.orderedMutex.Acquire(context.Background(), 1)
+	defer fh.orderedMutex.Release(1)
 
 	entry := fh.entry
 	if entry == nil {
@@ -156,14 +156,15 @@ func doUpload(reader io.Reader, option *UploadOption) (uploadResult *UploadResul
 
 func retriedUploadData(data []byte, option *UploadOption) (uploadResult *UploadResult, err error) {
 	for i := 0; i < 3; i++ {
+		if i > 0 {
+			time.Sleep(time.Millisecond * time.Duration(237*(i+1)))
+		}
 		uploadResult, err = doUploadData(data, option)
 		if err == nil {
 			uploadResult.RetryCount = i
 			return
-		} else {
-			glog.Warningf("uploading %d to %s: %v", i, option.UploadUrl, err)
 		}
-		time.Sleep(time.Millisecond * time.Duration(237*(i+1)))
+		glog.Warningf("uploading %d to %s: %v", i, option.UploadUrl, err)
 	}
 	return
 }
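The rewritten retriedUploadData sleeps only between attempts (guarded by i > 0), so the first try starts immediately, every failure is logged, and no delay is wasted after the final failure; with the 237ms factor the waits are 474ms before the second attempt and 711ms before the third. A sketch of the same shape around a hypothetical fallible call:

package main

import (
	"errors"
	"fmt"
	"time"
)

// flaky stands in for doUploadData; it is hypothetical and fails twice.
func flaky(attempt int) error {
	if attempt < 2 {
		return errors.New("transient failure")
	}
	return nil
}

func retried() (err error) {
	for i := 0; i < 3; i++ {
		if i > 0 {
			// delay only between attempts: 474ms, then 711ms
			time.Sleep(time.Millisecond * time.Duration(237*(i+1)))
		}
		if err = flaky(i); err == nil {
			fmt.Println("succeeded on attempt", i)
			return nil
		}
		fmt.Println("attempt", i, "failed:", err)
	}
	return err
}

func main() { _ = retried() }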
@@ -1,6 +1,6 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
-// 	protoc-gen-go v1.28.0
+// 	protoc-gen-go v1.28.1
 // 	protoc        v3.21.4
 // source: filer.proto
 
@@ -1,4 +1,8 @@
 // Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+// versions:
+// - protoc-gen-go-grpc v1.2.0
+// - protoc             v3.21.4
+// source: filer.proto
 
 package filer_pb
 
@@ -11,6 +15,7 @@ import (
 
 // This is a compile-time assertion to ensure that this generated file
 // is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.32.0 or later.
 const _ = grpc.SupportPackageIsVersion7
 
 // SeaweedFilerClient is the client API for SeaweedFiler service.
@@ -1,6 +1,6 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
-// 	protoc-gen-go v1.28.0
+// 	protoc-gen-go v1.28.1
 // 	protoc        v3.21.4
 // source: iam.proto
 
@@ -1,4 +1,8 @@
 // Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+// versions:
+// - protoc-gen-go-grpc v1.2.0
+// - protoc             v3.21.4
+// source: iam.proto
 
 package iam_pb
 
@@ -8,6 +12,7 @@ import (
 
 // This is a compile-time assertion to ensure that this generated file
 // is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.32.0 or later.
 const _ = grpc.SupportPackageIsVersion7
 
 // SeaweedIdentityAccessManagementClient is the client API for SeaweedIdentityAccessManagement service.
@@ -27,6 +27,10 @@ service Seaweed {
     }
     rpc VacuumVolume (VacuumVolumeRequest) returns (VacuumVolumeResponse) {
     }
+    rpc VolumeMarkReadonly (VolumeMarkReadonlyRequest) returns (VolumeMarkReadonlyResponse) {
+    }
+    rpc VolumeMarkWritable (VolumeMarkWritableRequest) returns (VolumeMarkWritableResponse) {
+    }
     rpc GetMasterConfiguration (GetMasterConfigurationRequest) returns (GetMasterConfigurationResponse) {
     }
     rpc ListClusterNodes (ListClusterNodesRequest) returns (ListClusterNodesResponse) {
@@ -303,6 +307,18 @@ message VacuumVolumeRequest {
 message VacuumVolumeResponse {
 }
 
+message VolumeMarkReadonlyRequest {
+    uint32 volume_id = 1;
+}
+message VolumeMarkReadonlyResponse {
+}
+
+message VolumeMarkWritableRequest {
+    uint32 volume_id = 1;
+}
+message VolumeMarkWritableResponse {
+}
+
 message GetMasterConfigurationRequest {
 }
 message GetMasterConfigurationResponse {
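With VolumeMarkReadonly and VolumeMarkWritable added to the Seaweed service, a client can flip a volume's writability through the generated stub (shown further below). A hedged sketch of calling the new RPCs; the master gRPC address and the volume id are placeholders, and the master_pb import path is assumed from the repository layout:

package main

import (
	"context"
	"log"

	"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Placeholder master gRPC address.
	conn, err := grpc.Dial("localhost:19333", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := master_pb.NewSeaweedClient(conn)
	// Mark a (placeholder) volume read-only, then writable again.
	if _, err := client.VolumeMarkReadonly(context.Background(), &master_pb.VolumeMarkReadonlyRequest{VolumeId: 7}); err != nil {
		log.Fatal(err)
	}
	if _, err := client.VolumeMarkWritable(context.Background(), &master_pb.VolumeMarkWritableRequest{VolumeId: 7}); err != nil {
		log.Fatal(err)
	}
}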
(file diff suppressed because it is too large)
@@ -11,6 +11,7 @@ import (

// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7

// SeaweedClient is the client API for Seaweed service.
@@ -27,6 +28,8 @@ type SeaweedClient interface {
    VolumeList(ctx context.Context, in *VolumeListRequest, opts ...grpc.CallOption) (*VolumeListResponse, error)
    LookupEcVolume(ctx context.Context, in *LookupEcVolumeRequest, opts ...grpc.CallOption) (*LookupEcVolumeResponse, error)
    VacuumVolume(ctx context.Context, in *VacuumVolumeRequest, opts ...grpc.CallOption) (*VacuumVolumeResponse, error)
+   VolumeMarkReadonly(ctx context.Context, in *VolumeMarkReadonlyRequest, opts ...grpc.CallOption) (*VolumeMarkReadonlyResponse, error)
+   VolumeMarkWritable(ctx context.Context, in *VolumeMarkWritableRequest, opts ...grpc.CallOption) (*VolumeMarkWritableResponse, error)
    GetMasterConfiguration(ctx context.Context, in *GetMasterConfigurationRequest, opts ...grpc.CallOption) (*GetMasterConfigurationResponse, error)
    ListClusterNodes(ctx context.Context, in *ListClusterNodesRequest, opts ...grpc.CallOption) (*ListClusterNodesResponse, error)
    LeaseAdminToken(ctx context.Context, in *LeaseAdminTokenRequest, opts ...grpc.CallOption) (*LeaseAdminTokenResponse, error)
@@ -179,6 +182,24 @@ func (c *seaweedClient) VacuumVolume(ctx context.Context, in *VacuumVolumeReques
    return out, nil
}

+func (c *seaweedClient) VolumeMarkReadonly(ctx context.Context, in *VolumeMarkReadonlyRequest, opts ...grpc.CallOption) (*VolumeMarkReadonlyResponse, error) {
+   out := new(VolumeMarkReadonlyResponse)
+   err := c.cc.Invoke(ctx, "/master_pb.Seaweed/VolumeMarkReadonly", in, out, opts...)
+   if err != nil {
+       return nil, err
+   }
+   return out, nil
+}
+
+func (c *seaweedClient) VolumeMarkWritable(ctx context.Context, in *VolumeMarkWritableRequest, opts ...grpc.CallOption) (*VolumeMarkWritableResponse, error) {
+   out := new(VolumeMarkWritableResponse)
+   err := c.cc.Invoke(ctx, "/master_pb.Seaweed/VolumeMarkWritable", in, out, opts...)
+   if err != nil {
+       return nil, err
+   }
+   return out, nil
+}
+
func (c *seaweedClient) GetMasterConfiguration(ctx context.Context, in *GetMasterConfigurationRequest, opts ...grpc.CallOption) (*GetMasterConfigurationResponse, error) {
    out := new(GetMasterConfigurationResponse)
    err := c.cc.Invoke(ctx, "/master_pb.Seaweed/GetMasterConfiguration", in, out, opts...)
@@ -265,6 +286,8 @@ type SeaweedServer interface {
    VolumeList(context.Context, *VolumeListRequest) (*VolumeListResponse, error)
    LookupEcVolume(context.Context, *LookupEcVolumeRequest) (*LookupEcVolumeResponse, error)
    VacuumVolume(context.Context, *VacuumVolumeRequest) (*VacuumVolumeResponse, error)
+   VolumeMarkReadonly(context.Context, *VolumeMarkReadonlyRequest) (*VolumeMarkReadonlyResponse, error)
+   VolumeMarkWritable(context.Context, *VolumeMarkWritableRequest) (*VolumeMarkWritableResponse, error)
    GetMasterConfiguration(context.Context, *GetMasterConfigurationRequest) (*GetMasterConfigurationResponse, error)
    ListClusterNodes(context.Context, *ListClusterNodesRequest) (*ListClusterNodesResponse, error)
    LeaseAdminToken(context.Context, *LeaseAdminTokenRequest) (*LeaseAdminTokenResponse, error)
@@ -310,6 +333,12 @@ func (UnimplementedSeaweedServer) LookupEcVolume(context.Context, *LookupEcVolum
func (UnimplementedSeaweedServer) VacuumVolume(context.Context, *VacuumVolumeRequest) (*VacuumVolumeResponse, error) {
    return nil, status.Errorf(codes.Unimplemented, "method VacuumVolume not implemented")
}
+func (UnimplementedSeaweedServer) VolumeMarkReadonly(context.Context, *VolumeMarkReadonlyRequest) (*VolumeMarkReadonlyResponse, error) {
+   return nil, status.Errorf(codes.Unimplemented, "method VolumeMarkReadonly not implemented")
+}
+func (UnimplementedSeaweedServer) VolumeMarkWritable(context.Context, *VolumeMarkWritableRequest) (*VolumeMarkWritableResponse, error) {
+   return nil, status.Errorf(codes.Unimplemented, "method VolumeMarkWritable not implemented")
+}
func (UnimplementedSeaweedServer) GetMasterConfiguration(context.Context, *GetMasterConfigurationRequest) (*GetMasterConfigurationResponse, error) {
    return nil, status.Errorf(codes.Unimplemented, "method GetMasterConfiguration not implemented")
}
@@ -543,6 +572,42 @@ func _Seaweed_VacuumVolume_Handler(srv interface{}, ctx context.Context, dec fun
    return interceptor(ctx, in, info, handler)
}

+func _Seaweed_VolumeMarkReadonly_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+   in := new(VolumeMarkReadonlyRequest)
+   if err := dec(in); err != nil {
+       return nil, err
+   }
+   if interceptor == nil {
+       return srv.(SeaweedServer).VolumeMarkReadonly(ctx, in)
+   }
+   info := &grpc.UnaryServerInfo{
+       Server:     srv,
+       FullMethod: "/master_pb.Seaweed/VolumeMarkReadonly",
+   }
+   handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+       return srv.(SeaweedServer).VolumeMarkReadonly(ctx, req.(*VolumeMarkReadonlyRequest))
+   }
+   return interceptor(ctx, in, info, handler)
+}
+
+func _Seaweed_VolumeMarkWritable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+   in := new(VolumeMarkWritableRequest)
+   if err := dec(in); err != nil {
+       return nil, err
+   }
+   if interceptor == nil {
+       return srv.(SeaweedServer).VolumeMarkWritable(ctx, in)
+   }
+   info := &grpc.UnaryServerInfo{
+       Server:     srv,
+       FullMethod: "/master_pb.Seaweed/VolumeMarkWritable",
+   }
+   handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+       return srv.(SeaweedServer).VolumeMarkWritable(ctx, req.(*VolumeMarkWritableRequest))
+   }
+   return interceptor(ctx, in, info, handler)
+}
+
func _Seaweed_GetMasterConfiguration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
    in := new(GetMasterConfigurationRequest)
    if err := dec(in); err != nil {
@@ -726,6 +791,14 @@ var Seaweed_ServiceDesc = grpc.ServiceDesc{
        MethodName: "VacuumVolume",
        Handler:    _Seaweed_VacuumVolume_Handler,
    },
+   {
+       MethodName: "VolumeMarkReadonly",
+       Handler:    _Seaweed_VolumeMarkReadonly_Handler,
+   },
+   {
+       MethodName: "VolumeMarkWritable",
+       Handler:    _Seaweed_VolumeMarkWritable_Handler,
+   },
    {
        MethodName: "GetMasterConfiguration",
        Handler:    _Seaweed_GetMasterConfiguration_Handler,
5 weed/pb/master_pb/master_helper.go (Normal file)
@@ -0,0 +1,5 @@
+package master_pb
+
+func (v *VolumeLocation) IsEmptyUrl() bool {
+   return v.Url == "" || v.Url == ":0"
+}
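A hypothetical caller-side check (not part of this commit): the helper lets broadcast paths skip a VolumeLocation whose address was never populated, since an unset host:port renders as ":0".

package main

import (
    "fmt"

    "github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
)

func main() {
    loc := &master_pb.VolumeLocation{Url: ":0"} // zero Ip/Port render as ":0"
    fmt.Println(loc.IsEmptyUrl())               // true
}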
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.28.0
+// protoc-gen-go v1.28.1
// protoc v3.21.4
// source: mount.proto
@@ -1,4 +1,8 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+// versions:
+// - protoc-gen-go-grpc v1.2.0
+// - protoc v3.21.4
+// source: mount.proto

package mount_pb

@@ -11,6 +15,7 @@ import (

// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7

// SeaweedMountClient is the client API for SeaweedMount service.
@@ -1,4 +1,8 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+// versions:
+// - protoc-gen-go-grpc v1.2.0
+// - protoc v3.21.4
+// source: mq.proto

package mq_pb
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.28.0
+// protoc-gen-go v1.28.1
// protoc v3.21.4
// source: remote.proto
@@ -20,9 +20,9 @@ const (
    _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)

-/////////////////////////
+// ///////////////////////
// Remote Storage related
-/////////////////////////
+// ///////////////////////
type RemoteConf struct {
    state         protoimpl.MessageState
    sizeCache     protoimpl.SizeCache
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.28.0
+// protoc-gen-go v1.28.1
// protoc v3.21.4
// source: s3.proto
@@ -1,4 +1,8 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+// versions:
+// - protoc-gen-go-grpc v1.2.0
+// - protoc v3.21.4
+// source: s3.proto

package s3_pb

@@ -11,6 +15,7 @@ import (

// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7

// SeaweedS3Client is the client API for SeaweedS3 service.
@@ -56,6 +56,8 @@ service VolumeServer {

    rpc ReadNeedleBlob (ReadNeedleBlobRequest) returns (ReadNeedleBlobResponse) {
    }
+   rpc ReadNeedleMeta (ReadNeedleMetaRequest) returns (ReadNeedleMetaResponse) {
+   }
    rpc WriteNeedleBlob (WriteNeedleBlobRequest) returns (WriteNeedleBlobResponse) {
    }
    rpc ReadAllNeedles (ReadAllNeedlesRequest) returns (stream ReadAllNeedlesResponse) {
@@ -285,6 +287,19 @@ message ReadNeedleBlobResponse {
    bytes needle_blob = 1;
}

+message ReadNeedleMetaRequest {
+   uint32 volume_id = 1;
+   uint64 needle_id = 2;
+   int64 offset = 3; // actual offset
+   int32 size = 4;
+}
+message ReadNeedleMetaResponse {
+   uint32 cookie = 1;
+   uint64 last_modified = 2;
+   uint32 crc = 3;
+   string ttl = 4;
+}
+
message WriteNeedleBlobRequest {
    uint32 volume_id = 1;
    uint64 needle_id = 2;
File diff suppressed because it is too large
@@ -1,4 +1,8 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+// versions:
+// - protoc-gen-go-grpc v1.2.0
+// - protoc v3.21.4
+// source: volume_server.proto

package volume_server_pb

@@ -11,13 +15,14 @@ import (

// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7

// VolumeServerClient is the client API for VolumeServer service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type VolumeServerClient interface {
-   //Experts only: takes multiple fid parameters. This function does not propagate deletes to replicas.
+   // Experts only: takes multiple fid parameters. This function does not propagate deletes to replicas.
    BatchDelete(ctx context.Context, in *BatchDeleteRequest, opts ...grpc.CallOption) (*BatchDeleteResponse, error)
    VacuumVolumeCheck(ctx context.Context, in *VacuumVolumeCheckRequest, opts ...grpc.CallOption) (*VacuumVolumeCheckResponse, error)
    VacuumVolumeCompact(ctx context.Context, in *VacuumVolumeCompactRequest, opts ...grpc.CallOption) (VolumeServer_VacuumVolumeCompactClient, error)
@@ -39,6 +44,7 @@ type VolumeServerClient interface {
    ReadVolumeFileStatus(ctx context.Context, in *ReadVolumeFileStatusRequest, opts ...grpc.CallOption) (*ReadVolumeFileStatusResponse, error)
    CopyFile(ctx context.Context, in *CopyFileRequest, opts ...grpc.CallOption) (VolumeServer_CopyFileClient, error)
    ReadNeedleBlob(ctx context.Context, in *ReadNeedleBlobRequest, opts ...grpc.CallOption) (*ReadNeedleBlobResponse, error)
+   ReadNeedleMeta(ctx context.Context, in *ReadNeedleMetaRequest, opts ...grpc.CallOption) (*ReadNeedleMetaResponse, error)
    WriteNeedleBlob(ctx context.Context, in *WriteNeedleBlobRequest, opts ...grpc.CallOption) (*WriteNeedleBlobResponse, error)
    ReadAllNeedles(ctx context.Context, in *ReadAllNeedlesRequest, opts ...grpc.CallOption) (VolumeServer_ReadAllNeedlesClient, error)
    VolumeTailSender(ctx context.Context, in *VolumeTailSenderRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTailSenderClient, error)
@@ -346,6 +352,15 @@ func (c *volumeServerClient) ReadNeedleBlob(ctx context.Context, in *ReadNeedleB
    return out, nil
}

+func (c *volumeServerClient) ReadNeedleMeta(ctx context.Context, in *ReadNeedleMetaRequest, opts ...grpc.CallOption) (*ReadNeedleMetaResponse, error) {
+   out := new(ReadNeedleMetaResponse)
+   err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/ReadNeedleMeta", in, out, opts...)
+   if err != nil {
+       return nil, err
+   }
+   return out, nil
+}
+
func (c *volumeServerClient) WriteNeedleBlob(ctx context.Context, in *WriteNeedleBlobRequest, opts ...grpc.CallOption) (*WriteNeedleBlobResponse, error) {
    out := new(WriteNeedleBlobResponse)
    err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/WriteNeedleBlob", in, out, opts...)
@@ -677,7 +692,7 @@ func (c *volumeServerClient) Ping(ctx context.Context, in *PingRequest, opts ...
// All implementations must embed UnimplementedVolumeServerServer
// for forward compatibility
type VolumeServerServer interface {
-   //Experts only: takes multiple fid parameters. This function does not propagate deletes to replicas.
+   // Experts only: takes multiple fid parameters. This function does not propagate deletes to replicas.
    BatchDelete(context.Context, *BatchDeleteRequest) (*BatchDeleteResponse, error)
    VacuumVolumeCheck(context.Context, *VacuumVolumeCheckRequest) (*VacuumVolumeCheckResponse, error)
    VacuumVolumeCompact(*VacuumVolumeCompactRequest, VolumeServer_VacuumVolumeCompactServer) error
@@ -699,6 +714,7 @@ type VolumeServerServer interface {
    ReadVolumeFileStatus(context.Context, *ReadVolumeFileStatusRequest) (*ReadVolumeFileStatusResponse, error)
    CopyFile(*CopyFileRequest, VolumeServer_CopyFileServer) error
    ReadNeedleBlob(context.Context, *ReadNeedleBlobRequest) (*ReadNeedleBlobResponse, error)
+   ReadNeedleMeta(context.Context, *ReadNeedleMetaRequest) (*ReadNeedleMetaResponse, error)
    WriteNeedleBlob(context.Context, *WriteNeedleBlobRequest) (*WriteNeedleBlobResponse, error)
    ReadAllNeedles(*ReadAllNeedlesRequest, VolumeServer_ReadAllNeedlesServer) error
    VolumeTailSender(*VolumeTailSenderRequest, VolumeServer_VolumeTailSenderServer) error
@@ -791,6 +807,9 @@ func (UnimplementedVolumeServerServer) CopyFile(*CopyFileRequest, VolumeServer_C
func (UnimplementedVolumeServerServer) ReadNeedleBlob(context.Context, *ReadNeedleBlobRequest) (*ReadNeedleBlobResponse, error) {
    return nil, status.Errorf(codes.Unimplemented, "method ReadNeedleBlob not implemented")
}
+func (UnimplementedVolumeServerServer) ReadNeedleMeta(context.Context, *ReadNeedleMetaRequest) (*ReadNeedleMetaResponse, error) {
+   return nil, status.Errorf(codes.Unimplemented, "method ReadNeedleMeta not implemented")
+}
func (UnimplementedVolumeServerServer) WriteNeedleBlob(context.Context, *WriteNeedleBlobRequest) (*WriteNeedleBlobResponse, error) {
    return nil, status.Errorf(codes.Unimplemented, "method WriteNeedleBlob not implemented")
}
@@ -1239,6 +1258,24 @@ func _VolumeServer_ReadNeedleBlob_Handler(srv interface{}, ctx context.Context,
    return interceptor(ctx, in, info, handler)
}

+func _VolumeServer_ReadNeedleMeta_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+   in := new(ReadNeedleMetaRequest)
+   if err := dec(in); err != nil {
+       return nil, err
+   }
+   if interceptor == nil {
+       return srv.(VolumeServerServer).ReadNeedleMeta(ctx, in)
+   }
+   info := &grpc.UnaryServerInfo{
+       Server:     srv,
+       FullMethod: "/volume_server_pb.VolumeServer/ReadNeedleMeta",
+   }
+   handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+       return srv.(VolumeServerServer).ReadNeedleMeta(ctx, req.(*ReadNeedleMetaRequest))
+   }
+   return interceptor(ctx, in, info, handler)
+}
+
func _VolumeServer_WriteNeedleBlob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
    in := new(WriteNeedleBlobRequest)
    if err := dec(in); err != nil {
@@ -1706,6 +1743,10 @@ var VolumeServer_ServiceDesc = grpc.ServiceDesc{
        MethodName: "ReadNeedleBlob",
        Handler:    _VolumeServer_ReadNeedleBlob_Handler,
    },
+   {
+       MethodName: "ReadNeedleMeta",
+       Handler:    _VolumeServer_ReadNeedleMeta_Handler,
+   },
    {
        MethodName: "WriteNeedleBlob",
        Handler:    _VolumeServer_WriteNeedleBlob_Handler,
@@ -277,7 +277,7 @@ func adjustHeaderContentDisposition(w http.ResponseWriter, r *http.Request, file
    }
}

-func processRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64, mimeType string, writeFn func(writer io.Writer, offset int64, size int64) error) {
+func processRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64, mimeType string, writeFn func(writer io.Writer, offset int64, size int64) error) error {
    rangeReq := r.Header.Get("Range")
    bufferedWriter := bufio.NewWriterSize(w, 128*1024)
    defer bufferedWriter.Flush()
@@ -285,11 +285,11 @@ func processRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64
    if rangeReq == "" {
        w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10))
        if err := writeFn(bufferedWriter, 0, totalSize); err != nil {
-           glog.Errorf("processRangeRequest headers: %+v err: %v", w.Header(), err)
+           glog.Errorf("processRangeRequest: %v", err)
            http.Error(w, err.Error(), http.StatusInternalServerError)
-           return
+           return fmt.Errorf("processRangeRequest: %v", err)
        }
-       return
+       return nil
    }

    //the rest is dealing with partial content request
@@ -298,17 +298,17 @@ func processRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64
    if err != nil {
        glog.Errorf("processRangeRequest headers: %+v err: %v", w.Header(), err)
        http.Error(w, err.Error(), http.StatusRequestedRangeNotSatisfiable)
-       return
+       return fmt.Errorf("processRangeRequest header: %v", err)
    }
    if sumRangesSize(ranges) > totalSize {
        // The total number of bytes in all the ranges
        // is larger than the size of the file by
        // itself, so this is probably an attack, or a
        // dumb client. Ignore the range request.
-       return
+       return nil
    }
    if len(ranges) == 0 {
-       return
+       return nil
    }
    if len(ranges) == 1 {
        // RFC 2616, Section 14.16:
@@ -329,18 +329,18 @@ func processRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64
        w.WriteHeader(http.StatusPartialContent)
        err = writeFn(bufferedWriter, ra.start, ra.length)
        if err != nil {
-           glog.Errorf("processRangeRequest headers: %+v err: %v", w.Header(), err)
+           glog.Errorf("processRangeRequest range[0]: %+v err: %v", w.Header(), err)
            http.Error(w, err.Error(), http.StatusInternalServerError)
-           return
+           return fmt.Errorf("processRangeRequest range[0]: %v", err)
        }
-       return
+       return nil
    }

    // process multiple ranges
    for _, ra := range ranges {
        if ra.start > totalSize {
            http.Error(w, "Out of Range", http.StatusRequestedRangeNotSatisfiable)
-           return
+           return fmt.Errorf("out of range: %v", err)
        }
    }
    sendSize := rangesMIMESize(ranges, mimeType, totalSize)
@@ -371,6 +371,7 @@ func processRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64
    if _, err := io.CopyN(bufferedWriter, sendContent, sendSize); err != nil {
        glog.Errorf("processRangeRequest err: %v", err)
        http.Error(w, "Internal Error", http.StatusInternalServerError)
-       return
+       return fmt.Errorf("processRangeRequest err: %v", err)
    }
+   return nil
}
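With the error now surfaced, a caller can both rely on the HTTP error response written inside and still propagate the failure. A standalone sketch of the new non-Range branch, with assumed helper names:

package main

import (
    "fmt"
    "io"
    "net/http"
    "net/http/httptest"
    "strconv"
)

// serveWhole mirrors the rangeReq == "" branch: set Content-Length, stream
// through writeFn, and return the error instead of swallowing it.
func serveWhole(w http.ResponseWriter, totalSize int64, writeFn func(io.Writer, int64, int64) error) error {
    w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10))
    if err := writeFn(w, 0, totalSize); err != nil {
        http.Error(w, err.Error(), http.StatusInternalServerError)
        return fmt.Errorf("serveWhole: %v", err)
    }
    return nil
}

func main() {
    rec := httptest.NewRecorder()
    data := []byte("hello")
    err := serveWhole(rec, int64(len(data)), func(w io.Writer, offset, size int64) error {
        _, e := w.Write(data[offset : offset+size])
        return e
    })
    fmt.Println(rec.Code, err) // 200 <nil>
}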
@@ -107,7 +107,7 @@ func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Reque
    wg.Done()
}()

-chunk, toChunkErr := fs.dataToChunk(fileName, contentType, bytesBuffer.Bytes(), offset, so)
+chunks, toChunkErr := fs.dataToChunk(fileName, contentType, bytesBuffer.Bytes(), offset, so)
if toChunkErr != nil {
    uploadErrLock.Lock()
    if uploadErr == nil {
@@ -115,12 +115,14 @@ func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Reque
    }
    uploadErrLock.Unlock()
}
-if chunk != nil {
+if chunks != nil {
    fileChunksLock.Lock()
-   fileChunks = append(fileChunks, chunk)
-   fileChunksSize := len(fileChunks)
+   fileChunksSize := len(fileChunks) + len(chunks)
+   for _, chunk := range chunks {
+       fileChunks = append(fileChunks, chunk)
+       glog.V(4).Infof("uploaded %s chunk %d to %s [%d,%d)", fileName, fileChunksSize, chunk.FileId, offset, offset+int64(chunk.Size))
+   }
    fileChunksLock.Unlock()
-   glog.V(4).Infof("uploaded %s chunk %d to %s [%d,%d)", fileName, fileChunksSize, chunk.FileId, offset, offset+int64(chunk.Size))
}
}(chunkOffset)
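The bookkeeping now appends a whole batch of chunks under one lock acquisition. A reduced sketch of that locking pattern (names are placeholders):

package main

import (
    "fmt"
    "sync"
)

type store struct {
    mu     sync.Mutex
    chunks []string
}

// appendBatch takes the lock once per batch rather than once per chunk.
func (s *store) appendBatch(batch []string) {
    s.mu.Lock()
    defer s.mu.Unlock()
    s.chunks = append(s.chunks, batch...)
}

func main() {
    s := &store{}
    var wg sync.WaitGroup
    for i := 0; i < 4; i++ {
        wg.Add(1)
        go func(i int) {
            defer wg.Done()
            s.appendBatch([]string{fmt.Sprintf("chunk-%d-a", i), fmt.Sprintf("chunk-%d-b", i)})
        }(i)
    }
    wg.Wait()
    fmt.Println(len(s.chunks)) // 8
}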
@@ -169,7 +171,7 @@ func (fs *FilerServer) doUpload(urlLocation string, limitedReader io.Reader, fil
    return uploadResult, err, data
}

-func (fs *FilerServer) dataToChunk(fileName, contentType string, data []byte, chunkOffset int64, so *operation.StorageOption) (*filer_pb.FileChunk, error) {
+func (fs *FilerServer) dataToChunk(fileName, contentType string, data []byte, chunkOffset int64, so *operation.StorageOption) ([]*filer_pb.FileChunk, error) {
    dataReader := util.NewBytesReader(data)

    // retry to assign a different file id
@@ -177,6 +179,7 @@ func (fs *FilerServer) dataToChunk(fileName, contentType string, data []byte, ch
    var auth security.EncodedJwt
    var uploadErr error
    var uploadResult *operation.UploadResult
+   var failedFileChunks []*filer_pb.FileChunk

    err := util.Retry("filerDataToChunk", func() error {
        // assign one file id for one chunk
@@ -191,19 +194,25 @@ func (fs *FilerServer) dataToChunk(fileName, contentType string, data []byte, ch
    if uploadErr != nil {
        glog.V(4).Infof("retry later due to upload error: %v", uploadErr)
        stats.FilerRequestCounter.WithLabelValues(stats.ChunkDoUploadRetry).Inc()
+       fid, _ := filer_pb.ToFileIdObject(fileId)
+       fileChunk := filer_pb.FileChunk{
+           FileId: fileId,
+           Offset: chunkOffset,
+           Fid:    fid,
+       }
+       failedFileChunks = append(failedFileChunks, &fileChunk)
        return uploadErr
    }
    return nil
})
if err != nil {
    glog.Errorf("upload error: %v", err)
-   return nil, err
+   return failedFileChunks, err
}

// if last chunk exhausted the reader exactly at the border
if uploadResult.Size == 0 {
    return nil, nil
}

-return uploadResult.ToPbFileChunk(fileId, chunkOffset), nil
+return []*filer_pb.FileChunk{uploadResult.ToPbFileChunk(fileId, chunkOffset)}, nil
}
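Failed chunks are now handed back to the caller, so already-assigned file ids can be garbage-collected instead of leaking. A minimal sketch of that contract (names assumed):

package main

import (
    "errors"
    "fmt"
)

// tryChunk records the assigned id even on failure, so the caller can
// clean up orphaned file ids after a failed upload.
func tryChunk(assign func() string, upload func(string) error) (ok, failed []string, err error) {
    id := assign()
    if err = upload(id); err != nil {
        return nil, []string{id}, err
    }
    return []string{id}, nil, nil
}

func main() {
    _, failed, err := tryChunk(
        func() string { return "3,01637037d6" }, // illustrative file id
        func(string) error { return errors.New("volume server unreachable") },
    )
    fmt.Println(failed, err) // [3,01637037d6] volume server unreachable
}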
@@ -70,8 +70,9 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ
}

message := &master_pb.VolumeLocation{
-   Url:       dn.Url(),
-   PublicUrl: dn.PublicUrl,
+   DataCenter: dn.GetDataCenterId(),
+   Url:        dn.Url(),
+   PublicUrl:  dn.PublicUrl,
}
for _, v := range dn.GetVolumes() {
    message.DeletedVids = append(message.DeletedVids, uint32(v.Id))
@@ -104,9 +105,32 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ
    return err
}

-ms.Topo.Sequence.SetMax(heartbeat.MaxFileKey)
+if !ms.Topo.IsLeader() {
+   // tell the volume servers about the leader
+   newLeader, err := ms.Topo.Leader()
+   if err != nil {
+       glog.Warningf("SendHeartbeat find leader: %v", err)
+       return err
+   }
+   if err := stream.Send(&master_pb.HeartbeatResponse{
+       Leader: string(newLeader),
+   }); err != nil {
+       if dn != nil {
+           glog.Warningf("SendHeartbeat.Send response to %s:%d %v", dn.Ip, dn.Port, err)
+       } else {
+           glog.Warningf("SendHeartbeat.Send response %v", err)
+       }
+       return err
+   }
+   continue
+}
+
+ms.Topo.Sequence.SetMax(heartbeat.MaxFileKey)
if dn == nil {
+   // Skip delta heartbeat for volume server versions newer than 3.28 https://github.com/seaweedfs/seaweedfs/pull/3630
+   if heartbeat.Ip == "" {
+       continue
+   } // ToDo must be removed after update major version
    dcName, rackName := ms.Topo.Configuration.Locate(heartbeat.Ip, heartbeat.DataCenter, heartbeat.Rack)
    dc := ms.Topo.GetOrCreateDataCenter(dcName)
    rack := dc.GetOrCreateRack(rackName)
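A follower master now answers each heartbeat only with the leader's address and skips all processing, so the volume server re-dials the leader. A standalone sketch of that control flow (types are stand-ins):

package main

import "fmt"

type master struct {
    isLeader bool
    leader   string
}

// handleHeartbeat mirrors the new early exit: non-leaders redirect, only
// the leader actually registers the heartbeat.
func (m *master) handleHeartbeat() (redirectTo string, processed bool) {
    if !m.isLeader {
        return m.leader, false
    }
    return "", true
}

func main() {
    follower := &master{isLeader: false, leader: "192.168.1.10:9333"}
    leader := &master{isLeader: true}
    fmt.Println(follower.handleHeartbeat()) // 192.168.1.10:9333 false
    fmt.Println(leader.handleHeartbeat())   //  true
}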
@@ -162,8 +186,10 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ
}

if len(heartbeat.Volumes) > 0 || heartbeat.HasNoVolumes {
-   dcName, rackName := ms.Topo.Configuration.Locate(heartbeat.Ip, heartbeat.DataCenter, heartbeat.Rack)
-   ms.Topo.DataNodeRegistration(dcName, rackName, dn)
+   if heartbeat.Ip != "" {
+       dcName, rackName := ms.Topo.Configuration.Locate(heartbeat.Ip, heartbeat.DataCenter, heartbeat.Rack)
+       ms.Topo.DataNodeRegistration(dcName, rackName, dn)
+   }

    // process heartbeat.Volumes
    stats.MasterReceivedHeartbeatCounter.WithLabelValues("Volumes").Inc()
@@ -216,19 +242,6 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ
    if len(message.NewVids) > 0 || len(message.DeletedVids) > 0 || len(message.NewEcVids) > 0 || len(message.DeletedEcVids) > 0 {
        ms.broadcastToClients(&master_pb.KeepConnectedResponse{VolumeLocation: message})
    }
-
-   // tell the volume servers about the leader
-   newLeader, err := ms.Topo.Leader()
-   if err != nil {
-       glog.Warningf("SendHeartbeat find leader: %v", err)
-       return err
-   }
-   if err := stream.Send(&master_pb.HeartbeatResponse{
-       Leader: string(newLeader),
-   }); err != nil {
-       glog.Warningf("SendHeartbeat.Send response to to %s:%d %v", dn.Ip, dn.Port, err)
-       return err
-   }
}
}
@@ -148,6 +148,19 @@ func (vs *VolumeServer) VolumeMarkReadonly(ctx context.Context, req *volume_serv

resp := &volume_server_pb.VolumeMarkReadonlyResponse{}

+if grpcErr := pb.WithMasterClient(false, vs.GetMaster(), vs.grpcDialOption, false, func(client master_pb.SeaweedClient) error {
+   _, err := client.VolumeMarkReadonly(context.Background(), &master_pb.VolumeMarkReadonlyRequest{
+       VolumeId: req.VolumeId,
+   })
+   if err != nil {
+       return fmt.Errorf("set volume %d to read only on master: %v", req.VolumeId, err)
+   }
+   return nil
+}); grpcErr != nil {
+   glog.V(0).Infof("connect to %s: %v", vs.GetMaster(), grpcErr)
+   return resp, fmt.Errorf("grpc VolumeMarkReadonly with master %s: %v", vs.GetMaster(), grpcErr)
+}
+
err := vs.store.MarkVolumeReadonly(needle.VolumeId(req.VolumeId))

if err != nil {
@@ -163,6 +176,19 @@ func (vs *VolumeServer) VolumeMarkWritable(ctx context.Context, req *volume_serv

resp := &volume_server_pb.VolumeMarkWritableResponse{}

+if grpcErr := pb.WithMasterClient(false, vs.GetMaster(), vs.grpcDialOption, false, func(client master_pb.SeaweedClient) error {
+   _, err := client.VolumeMarkWritable(context.Background(), &master_pb.VolumeMarkWritableRequest{
+       VolumeId: req.VolumeId,
+   })
+   if err != nil {
+       return fmt.Errorf("set volume %d to writable on master: %v", req.VolumeId, err)
+   }
+   return nil
+}); grpcErr != nil {
+   glog.V(0).Infof("connect to %s: %v", vs.GetMaster(), grpcErr)
+   return resp, fmt.Errorf("grpc VolumeMarkWritable with master %s: %v", vs.GetMaster(), grpcErr)
+}
+
err := vs.store.MarkVolumeWritable(needle.VolumeId(req.VolumeId))

if err != nil {
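Both handlers follow the same order: update the master first, and only flip the local store once that succeeds, so the master never keeps routing writes to a volume that is already read-only locally. The shape, reduced to a sketch (function names assumed):

package main

import (
    "errors"
    "fmt"
)

// markReadonly changes the master's view first; the local flag only changes
// if the master acknowledged, keeping the two views consistent on failure.
func markReadonly(tellMaster func() error, markLocal func() error) error {
    if err := tellMaster(); err != nil {
        return fmt.Errorf("mark readonly on master: %v", err)
    }
    return markLocal()
}

func main() {
    err := markReadonly(
        func() error { return errors.New("master unreachable") },
        func() error { fmt.Println("local state changed"); return nil },
    )
    fmt.Println(err) // local state untouched: mark readonly on master: master unreachable
}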
@@ -160,11 +160,18 @@ func (vs *VolumeServer) doHeartbeat(masterAddress pb.ServerAddress, grpcDialOpti

volumeTickChan := time.Tick(sleepInterval)
ecShardTickChan := time.Tick(17 * sleepInterval)

+dataCenter := vs.store.GetDataCenter()
+rack := vs.store.GetRack()
+ip := vs.store.Ip
+port := uint32(vs.store.Port)
for {
    select {
    case volumeMessage := <-vs.store.NewVolumesChan:
        deltaBeat := &master_pb.Heartbeat{
+           Ip:         ip,
+           Port:       port,
+           DataCenter: dataCenter,
+           Rack:       rack,
            NewVolumes: []*master_pb.VolumeShortInformationMessage{
                &volumeMessage,
            },
@@ -176,6 +183,10 @@ func (vs *VolumeServer) doHeartbeat(masterAddress pb.ServerAddress, grpcDialOpti
    }
case ecShardMessage := <-vs.store.NewEcShardsChan:
    deltaBeat := &master_pb.Heartbeat{
+       Ip:         ip,
+       Port:       port,
+       DataCenter: dataCenter,
+       Rack:       rack,
        NewEcShards: []*master_pb.VolumeEcShardInformationMessage{
            &ecShardMessage,
        },
@@ -188,6 +199,10 @@ func (vs *VolumeServer) doHeartbeat(masterAddress pb.ServerAddress, grpcDialOpti
    }
case volumeMessage := <-vs.store.DeletedVolumesChan:
    deltaBeat := &master_pb.Heartbeat{
+       Ip:         ip,
+       Port:       port,
+       DataCenter: dataCenter,
+       Rack:       rack,
        DeletedVolumes: []*master_pb.VolumeShortInformationMessage{
            &volumeMessage,
        },
@@ -199,6 +214,10 @@ func (vs *VolumeServer) doHeartbeat(masterAddress pb.ServerAddress, grpcDialOpti
    }
case ecShardMessage := <-vs.store.DeletedEcShardsChan:
    deltaBeat := &master_pb.Heartbeat{
+       Ip:         ip,
+       Port:       port,
+       DataCenter: dataCenter,
+       Rack:       rack,
        DeletedEcShards: []*master_pb.VolumeEcShardInformationMessage{
            &ecShardMessage,
        },
@@ -227,12 +246,12 @@ func (vs *VolumeServer) doHeartbeat(masterAddress pb.ServerAddress, grpcDialOpti
case <-vs.stopChan:
    var volumeMessages []*master_pb.VolumeInformationMessage
    emptyBeat := &master_pb.Heartbeat{
-       Ip:           vs.store.Ip,
-       Port:         uint32(vs.store.Port),
+       Ip:           ip,
+       Port:         port,
        PublicUrl:    vs.store.PublicUrl,
        MaxFileKey:   uint64(0),
-       DataCenter:   vs.store.GetDataCenter(),
-       Rack:         vs.store.GetRack(),
+       DataCenter:   dataCenter,
+       Rack:         rack,
        Volumes:      volumeMessages,
        HasNoVolumes: len(volumeMessages) == 0,
    }
@@ -23,6 +23,35 @@ func (vs *VolumeServer) ReadNeedleBlob(ctx context.Context, req *volume_server_p
    return resp, nil
}

+func (vs *VolumeServer) ReadNeedleMeta(ctx context.Context, req *volume_server_pb.ReadNeedleMetaRequest) (resp *volume_server_pb.ReadNeedleMetaResponse, err error) {
+   resp = &volume_server_pb.ReadNeedleMetaResponse{}
+   volumeId := needle.VolumeId(req.VolumeId)
+
+   n := &needle.Needle{
+       Id:    types.NeedleId(req.NeedleId),
+       Flags: 0x08,
+   }
+   size := req.Size
+   offset := req.Offset
+
+   hasVolume := vs.store.HasVolume(volumeId)
+   if !hasVolume {
+       return nil, fmt.Errorf("not found volume id %d and read needle metadata at ec shards is not supported", req.VolumeId)
+   }
+   err = vs.store.ReadVolumeNeedleMetaAt(volumeId, n, offset, size)
+   if err != nil {
+       return nil, err
+   }
+
+   resp.Cookie = uint32(n.Cookie)
+   resp.LastModified = n.LastModified
+   resp.Crc = n.Checksum.Value()
+   if n.HasTtl() {
+       resp.Ttl = n.Ttl.String()
+   }
+   return resp, nil
+}
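Client side, the new RPC can be exercised like any generated VolumeServer method; the address and needle coordinates below are placeholders:

package main

import (
    "context"
    "fmt"
    "log"

    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials/insecure"

    "github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
)

func main() {
    conn, err := grpc.Dial("localhost:18080", grpc.WithTransportCredentials(insecure.NewCredentials()))
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()

    client := volume_server_pb.NewVolumeServerClient(conn)
    // Offset/Size would normally come from the needle's .idx entry.
    resp, err := client.ReadNeedleMeta(context.Background(), &volume_server_pb.ReadNeedleMetaRequest{
        VolumeId: 1, NeedleId: 1, Offset: 8, Size: 100,
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(resp.Cookie, resp.LastModified, resp.Crc, resp.Ttl)
}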
func (vs *VolumeServer) WriteNeedleBlob(ctx context.Context, req *volume_server_pb.WriteNeedleBlobRequest) (resp *volume_server_pb.WriteNeedleBlobResponse, err error) {
    resp = &volume_server_pb.WriteNeedleBlobResponse{}
    v := vs.store.GetVolume(needle.VolumeId(req.VolumeId))
@@ -50,6 +50,8 @@ func (vs *VolumeServer) privateStoreHandler(w http.ResponseWriter, r *http.Reque
select {
case <-r.Context().Done():
    glog.V(4).Infof("request cancelled from %s: %v", r.RemoteAddr, r.Context().Err())
    w.WriteHeader(http.StatusInternalServerError)
    vs.inFlightDownloadDataLimitCond.L.Unlock()
    return
default:
    glog.V(4).Infof("wait because inflight download data %d > %d", inFlightDownloadSize, vs.concurrentDownloadLimit)
@@ -120,11 +120,11 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
}

var count int
-var needleSize types.Size
+var memoryCost types.Size
readOption.AttemptMetaOnly, readOption.MustMetaOnly = shouldAttemptStreamWrite(hasVolume, ext, r)
onReadSizeFn := func(size types.Size) {
-   needleSize = size
-   atomic.AddInt64(&vs.inFlightDownloadDataSize, int64(needleSize))
+   memoryCost = size
+   atomic.AddInt64(&vs.inFlightDownloadDataSize, int64(memoryCost))
}
if hasVolume {
    count, err = vs.store.ReadVolumeNeedle(volumeId, n, readOption, onReadSizeFn)
@@ -132,7 +132,7 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
    count, err = vs.store.ReadEcShardNeedle(volumeId, n, onReadSizeFn)
}
defer func() {
-   atomic.AddInt64(&vs.inFlightDownloadDataSize, -int64(needleSize))
+   atomic.AddInt64(&vs.inFlightDownloadDataSize, -int64(memoryCost))
    vs.inFlightDownloadDataLimitCond.Signal()
}()
@@ -328,14 +328,13 @@ func writeResponseContent(filename, mimeType string, rs io.ReadSeeker, w http.Re
    return nil
}

-processRangeRequest(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64) error {
+return processRangeRequest(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64) error {
    if _, e = rs.Seek(offset, 0); e != nil {
        return e
    }
    _, e = io.CopyN(writer, rs, size)
    return e
})
-return nil
}

func (vs *VolumeServer) streamWriteResponseContent(filename string, mimeType string, volumeId needle.VolumeId, n *needle.Needle, w http.ResponseWriter, r *http.Request, readOption *storage.ReadOption) {
@@ -46,6 +46,9 @@ func (vs *VolumeServer) PostHandler(w http.ResponseWriter, r *http.Request) {

ret := operation.UploadResult{}
isUnchanged, writeError := topology.ReplicatedWrite(vs.GetMaster, vs.grpcDialOption, vs.store, volumeId, reqNeedle, r)
+if writeError != nil {
+   writeJsonError(w, r, http.StatusInternalServerError, writeError)
+}

// http 204 status code does not allow body
if writeError == nil && isUnchanged {
@@ -55,10 +58,6 @@ func (vs *VolumeServer) PostHandler(w http.ResponseWriter, r *http.Request) {
}

httpStatus := http.StatusCreated
-if writeError != nil {
-   httpStatus = http.StatusInternalServerError
-   ret.Error = writeError.Error()
-}
if reqNeedle.HasName() {
    ret.Name = string(reqNeedle.Name)
}
@@ -1,138 +0,0 @@
package weed_server

import (
    "bufio"
    "fmt"
    "io"
    "net"
    "strings"

    "github.com/seaweedfs/seaweedfs/weed/glog"
    "github.com/seaweedfs/seaweedfs/weed/storage/needle"
    "github.com/seaweedfs/seaweedfs/weed/util"
)

func (vs *VolumeServer) HandleTcpConnection(c net.Conn) {
    defer c.Close()

    glog.V(0).Infof("Serving writes from %s", c.RemoteAddr().String())

    bufReader := bufio.NewReaderSize(c, 1024*1024)
    bufWriter := bufio.NewWriterSize(c, 1024*1024)

    for {
        cmd, err := bufReader.ReadString('\n')
        if err != nil {
            if err != io.EOF {
                glog.Errorf("read command from %s: %v", c.RemoteAddr().String(), err)
            }
            return
        }
        cmd = cmd[:len(cmd)-1]
        switch cmd[0] {
        case '+':
            fileId := cmd[1:]
            err = vs.handleTcpPut(fileId, bufReader)
            if err == nil {
                bufWriter.Write([]byte("+OK\n"))
            } else {
                bufWriter.Write([]byte("-ERR " + string(err.Error()) + "\n"))
            }
        case '-':
            fileId := cmd[1:]
            err = vs.handleTcpDelete(fileId)
            if err == nil {
                bufWriter.Write([]byte("+OK\n"))
            } else {
                bufWriter.Write([]byte("-ERR " + string(err.Error()) + "\n"))
            }
        case '?':
            fileId := cmd[1:]
            err = vs.handleTcpGet(fileId, bufWriter)
        case '!':
            bufWriter.Flush()
        }
    }
}

func (vs *VolumeServer) handleTcpGet(fileId string, writer *bufio.Writer) (err error) {

    volumeId, n, err2 := vs.parseFileId(fileId)
    if err2 != nil {
        return err2
    }

    volume := vs.store.GetVolume(volumeId)
    if volume == nil {
        return fmt.Errorf("volume %d not found", volumeId)
    }

    err = volume.StreamRead(n, writer)
    if err != nil {
        return err
    }

    return nil
}

func (vs *VolumeServer) handleTcpPut(fileId string, bufReader *bufio.Reader) (err error) {

    volumeId, n, err2 := vs.parseFileId(fileId)
    if err2 != nil {
        return err2
    }

    volume := vs.store.GetVolume(volumeId)
    if volume == nil {
        return fmt.Errorf("volume %d not found", volumeId)
    }

    sizeBuf := make([]byte, 4)
    if _, err = bufReader.Read(sizeBuf); err != nil {
        return err
    }
    dataSize := util.BytesToUint32(sizeBuf)

    err = volume.StreamWrite(n, bufReader, dataSize)
    if err != nil {
        return err
    }

    return nil
}

func (vs *VolumeServer) handleTcpDelete(fileId string) (err error) {

    volumeId, n, err2 := vs.parseFileId(fileId)
    if err2 != nil {
        return err2
    }

    _, err = vs.store.DeleteVolumeNeedle(volumeId, n)
    if err != nil {
        return err
    }

    return nil
}

func (vs *VolumeServer) parseFileId(fileId string) (needle.VolumeId, *needle.Needle, error) {

    commaIndex := strings.LastIndex(fileId, ",")
    if commaIndex <= 0 {
        return 0, nil, fmt.Errorf("unknown fileId %s", fileId)
    }

    vid, fid := fileId[0:commaIndex], fileId[commaIndex+1:]

    volumeId, ve := needle.NewVolumeId(vid)
    if ve != nil {
        return 0, nil, fmt.Errorf("unknown volume id in fileId %s", fileId)
    }

    n := new(needle.Needle)
    n.ParsePath(fid)
    return volumeId, n, nil
}
@@ -64,7 +64,7 @@ func (c *commandVolumeBalance) Help() string {
func (c *commandVolumeBalance) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {

    balanceCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
-   collection := balanceCommand.String("collection", "EACH_COLLECTION", "collection name, or use \"ALL_COLLECTIONS\" across collections, \"EACH_COLLECTION\" for each collection")
+   collection := balanceCommand.String("collection", "ALL_COLLECTIONS", "collection name, or use \"ALL_COLLECTIONS\" across collections, \"EACH_COLLECTION\" for each collection")
    dc := balanceCommand.String("dataCenter", "", "only apply the balancing for this dataCenter")
    applyBalancing := balanceCommand.Bool("force", false, "apply the balancing plan.")
    if err = balanceCommand.Parse(args); err != nil {
@@ -213,6 +213,18 @@ func (n *Node) localVolumeNextRatio(capacityFunc CapacityFunc) float64 {
    return divide(len(n.selectedVolumes)+1, capacityFunc(n.info))
}

+func (n *Node) isOneVolumeOnly() bool {
+   if len(n.selectedVolumes) != 1 {
+       return false
+   }
+   for _, disk := range n.info.DiskInfos {
+       if disk.VolumeCount == 1 && disk.MaxVolumeCount == 1 {
+           return true
+       }
+   }
+   return false
+}
+
func (n *Node) selectVolumes(fn func(v *master_pb.VolumeInformationMessage) bool) {
    n.selectedVolumes = make(map[uint32]*master_pb.VolumeInformationMessage)
    for _, diskInfo := range n.info.DiskInfos {
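Donor selection for balancing now walks back from the fullest node and skips any node that can only ever hold the one volume it already has. A standalone sketch of the selection rule (types reduced):

package main

import "fmt"

type node struct {
    name        string
    volumeCount int
    maxVolumes  int
}

// isOneVolumeOnly: a single-slot node holding its single volume has
// nothing it can give up.
func isOneVolumeOnly(n node) bool { return n.volumeCount == 1 && n.maxVolumes == 1 }

// pickDonor scans from fullest to emptiest and returns the first node
// that is allowed to donate a volume.
func pickDonor(sortedByFullness []node) (node, bool) {
    for i := len(sortedByFullness) - 1; i >= 0; i-- {
        if !isOneVolumeOnly(sortedByFullness[i]) {
            return sortedByFullness[i], true
        }
    }
    return node{}, false
}

func main() {
    nodes := []node{{"a", 2, 10}, {"b", 5, 10}, {"c", 1, 1}} // emptiest to fullest
    donor, ok := pickDonor(nodes)
    fmt.Println(donor.name, ok) // b true
}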
@@ -230,12 +242,6 @@ func sortWritableVolumes(volumes []*master_pb.VolumeInformationMessage) {
    })
}

-func sortReadOnlyVolumes(volumes []*master_pb.VolumeInformationMessage) {
-   slices.SortFunc(volumes, func(a, b *master_pb.VolumeInformationMessage) bool {
-       return a.Id < b.Id
-   })
-}
-
func balanceSelectedVolume(commandEnv *CommandEnv, diskType types.DiskType, volumeReplicas map[uint32][]*VolumeReplica, nodes []*Node, capacityFunc CapacityFunc, sortCandidatesFn func(volumes []*master_pb.VolumeInformationMessage), applyBalancing bool) (err error) {
    selectedVolumeCount, volumeMaxCount := 0, 0
    var nodesWithCapacity []*Node
@@ -263,7 +269,14 @@ func balanceSelectedVolume(commandEnv *CommandEnv, diskType types.DiskType, volu
    fmt.Printf("no volume server found with capacity for %s", diskType.ReadableString())
    return nil
}
-fullNode := nodesWithCapacity[len(nodesWithCapacity)-1]
+
+var fullNode *Node
+for fullNodeIndex := len(nodesWithCapacity) - 1; fullNodeIndex >= 0; fullNodeIndex-- {
+   fullNode = nodesWithCapacity[fullNodeIndex]
+   if !fullNode.isOneVolumeOnly() {
+       break
+   }
+}
var candidateVolumes []*master_pb.VolumeInformationMessage
for _, v := range fullNode.selectedVolumes {
    candidateVolumes = append(candidateVolumes, v)
@@ -389,6 +402,18 @@ func adjustAfterMove(v *master_pb.VolumeInformationMessage, volumeReplicas map[u
    replica.location.dc == fullNode.dc {
    loc := newLocation(emptyNode.dc, emptyNode.rack, emptyNode.info)
    replica.location = &loc
+   for diskType, diskInfo := range fullNode.info.DiskInfos {
+       if diskType == v.DiskType {
+           diskInfo.VolumeCount--
+           diskInfo.FreeVolumeCount++
+       }
+   }
+   for diskType, diskInfo := range emptyNode.info.DiskInfos {
+       if diskType == v.DiskType {
+           diskInfo.VolumeCount++
+           diskInfo.FreeVolumeCount--
+       }
+   }
    return
}
}
@@ -57,6 +57,7 @@ func (c *commandVolumeFixReplication) Do(args []string, commandEnv *CommandEnv,
volFixReplicationCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
c.collectionPattern = volFixReplicationCommand.String("collectionPattern", "", "match with wildcard characters '*' and '?'")
skipChange := volFixReplicationCommand.Bool("n", false, "skip the changes")
+noDelete := volFixReplicationCommand.Bool("noDelete", false, "Do not delete over-replicated volumes, only fix under-replication")
retryCount := volFixReplicationCommand.Int("retry", 0, "how many times to retry")
volumesPerStep := volFixReplicationCommand.Int("volumesPerStep", 0, "how many volumes to fix in one cycle")
@@ -69,6 +70,7 @@ func (c *commandVolumeFixReplication) Do(args []string, commandEnv *CommandEnv,
}

takeAction := !*skipChange
+doDeletes := !*noDelete

underReplicatedVolumeIdsCount := 1
for underReplicatedVolumeIdsCount > 0 {
@@ -108,13 +110,13 @@ func (c *commandVolumeFixReplication) Do(args []string, commandEnv *CommandEnv,
    return fmt.Errorf("lock is lost")
}

-if len(overReplicatedVolumeIds) > 0 {
+if len(overReplicatedVolumeIds) > 0 && doDeletes {
    if err := c.deleteOneVolume(commandEnv, writer, takeAction, overReplicatedVolumeIds, volumeReplicas, allLocations, pickOneReplicaToDelete); err != nil {
        return err
    }
}

-if len(misplacedVolumeIds) > 0 {
+if len(misplacedVolumeIds) > 0 && doDeletes {
    if err := c.deleteOneVolume(commandEnv, writer, takeAction, misplacedVolumeIds, volumeReplicas, allLocations, pickOneMisplacedVolume); err != nil {
        return err
    }
@@ -2,7 +2,9 @@ package shell

import (
    "bufio"
+   "bytes"
    "context"
+   "errors"
    "flag"
    "fmt"
    "github.com/seaweedfs/seaweedfs/weed/filer"
|
|||
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
|
||||
"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
|
||||
"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
|
||||
"github.com/seaweedfs/seaweedfs/weed/storage/idx"
|
||||
"github.com/seaweedfs/seaweedfs/weed/storage/needle"
|
||||
"github.com/seaweedfs/seaweedfs/weed/storage/needle_map"
|
||||
"github.com/seaweedfs/seaweedfs/weed/storage/types"
|
||||
|
@@ -72,6 +75,7 @@ func (c *commandVolumeFsck) Do(args []string, commandEnv *CommandEnv, writer io.
c.forcePurging = fsckCommand.Bool("forcePurging", false, "delete missing data from volumes in one replica used together with applyPurging")
purgeAbsent := fsckCommand.Bool("reallyDeleteFilerEntries", false, "<expert only!> delete missing file entries from filer if the corresponding volume is missing for any reason, please ensure all still existing/expected volumes are connected! used together with findMissingChunksInFiler")
tempPath := fsckCommand.String("tempPath", path.Join(os.TempDir()), "path for temporary idx files")
+cutoffTimeAgo := fsckCommand.Duration("cutoffTimeAgo", 5*time.Minute, "only include entries on volume servers before this cutoff time to check orphan chunks")

if err = fsckCommand.Parse(args); err != nil {
    return nil
@@ -126,7 +130,8 @@ func (c *commandVolumeFsck) Do(args []string, commandEnv *CommandEnv, writer io.
    delete(volumeIdToVInfo, volumeId)
    continue
}
-err = c.collectOneVolumeFileIds(tempFolder, dataNodeId, volumeId, vinfo, *verbose, writer)
+cutoffFrom := time.Now().Add(-*cutoffTimeAgo).UnixNano()
+err = c.collectOneVolumeFileIds(tempFolder, dataNodeId, volumeId, vinfo, *verbose, writer, uint64(cutoffFrom))
if err != nil {
    return fmt.Errorf("failed to collect file ids from volume %d on %s: %v", volumeId, vinfo.server, err)
}
@@ -351,7 +356,7 @@ func (c *commandVolumeFsck) findExtraChunksInVolumeServers(dataNodeVolumeIdToVIn
    return nil
}

-func (c *commandVolumeFsck) collectOneVolumeFileIds(tempFolder string, dataNodeId string, volumeId uint32, vinfo VInfo, verbose bool, writer io.Writer) error {
+func (c *commandVolumeFsck) collectOneVolumeFileIds(tempFolder string, dataNodeId string, volumeId uint32, vinfo VInfo, verbose bool, writer io.Writer, cutoffFrom uint64) error {

    if verbose {
        fmt.Fprintf(writer, "collecting volume %d file ids from %s ...\n", volumeId, vinfo.server)
@@ -377,13 +382,42 @@ func (c *commandVolumeFsck) collectOneVolumeFileIds(tempFolder string, dataNodeI
    return fmt.Errorf("failed to start copying volume %d%s: %v", volumeId, ext, err)
}

-err = writeToFile(copyFileClient, getVolumeFileIdFile(tempFolder, dataNodeId, volumeId))
+var buf bytes.Buffer
+for {
+   resp, err := copyFileClient.Recv()
+   if errors.Is(err, io.EOF) {
+       break
+   }
+   if err != nil {
+       return err
+   }
+   buf.Write(resp.FileContent)
+}
+if vinfo.isReadOnly == false {
+   index, err := idx.FirstInvalidIndex(buf.Bytes(), func(key types.NeedleId, offset types.Offset, size types.Size) (bool, error) {
+       resp, err := volumeServerClient.ReadNeedleMeta(context.Background(), &volume_server_pb.ReadNeedleMetaRequest{
+           VolumeId: volumeId,
+           NeedleId: uint64(key),
+           Offset:   offset.ToActualOffset(),
+           Size:     int32(size),
+       })
+       if err != nil {
+           return false, fmt.Errorf("to read needle meta with id %d from volume %d with error %v", key, volumeId, err)
+       }
+       return resp.LastModified <= cutoffFrom, nil
+   })
+   if err != nil {
+       fmt.Fprintf(writer, "Failed to search for last valid index on volume %d with error %v", volumeId, err)
+   }
+   buf.Truncate(index * types.NeedleMapEntrySize)
+}
+idxFilename := getVolumeFileIdFile(tempFolder, dataNodeId, volumeId)
+err = writeToFile(buf.Bytes(), idxFilename)
if err != nil {
    return fmt.Errorf("failed to copy %d%s from %s: %v", volumeId, ext, vinfo.server, err)
}

return nil

})
}
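Because .idx entries are strictly append-ordered, every entry written after the cutoff sits in a contiguous tail, so one binary search on LastModified plus one Truncate removes them all. A self-contained sketch of that trim (the 16-byte entry size matches the test further below):

package main

import (
    "bytes"
    "fmt"
)

const entrySize = 16 // NeedleMapEntrySize with 4-byte offsets

// trimAfterCutoff drops the contiguous tail of too-new entries; tooNew
// must be monotone over the append-ordered records.
func trimAfterCutoff(buf *bytes.Buffer, tooNew func(entry []byte) bool) {
    b := buf.Bytes()
    n := len(b) / entrySize
    left, right, first := 0, n-1, n
    for left <= right {
        mid := left + (right-left)/2
        if tooNew(b[mid*entrySize : (mid+1)*entrySize]) {
            first = mid
            right = mid - 1
        } else {
            left = mid + 1
        }
    }
    buf.Truncate(first * entrySize)
}

func main() {
    var buf bytes.Buffer
    for i := 0; i < 5; i++ {
        rec := make([]byte, entrySize)
        rec[0] = byte(i) // stand-in for the append timestamp
        buf.Write(rec)
    }
    trimAfterCutoff(&buf, func(e []byte) bool { return e[0] >= 3 }) // cutoff between 2 and 3
    fmt.Println(buf.Len() / entrySize)                              // 3
}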
@@ -673,7 +707,7 @@ func getFilerFileIdFile(tempFolder string, vid uint32) string {
    return filepath.Join(tempFolder, fmt.Sprintf("%d.fid", vid))
}

-func writeToFile(client volume_server_pb.VolumeServer_CopyFileClient, fileName string) error {
+func writeToFile(bytes []byte, fileName string) error {
    flags := os.O_WRONLY | os.O_CREATE | os.O_TRUNC
    dst, err := os.OpenFile(fileName, flags, 0644)
    if err != nil {

@@ -681,15 +715,6 @@ func writeToFile(client volume_server_pb.VolumeServer_CopyFileClient, fileName s
}
defer dst.Close()

-for {
-   resp, receiveErr := client.Recv()
-   if receiveErr == io.EOF {
-       break
-   }
-   if receiveErr != nil {
-       return fmt.Errorf("receiving %s: %v", fileName, receiveErr)
-   }
-   dst.Write(resp.FileContent)
-}
+dst.Write(bytes)
return nil
}
@@ -90,5 +90,5 @@ func parseOutput(output string) *master_pb.TopologyInfo {
    return topo
}

-//go:embed sample.topo.txt
+//go:embed volume.list.txt
var topoData string
@@ -210,10 +210,14 @@ func (ev *EcVolume) LocateEcShardNeedle(needleId types.NeedleId, version needle.
    return types.Offset{}, 0, nil, fmt.Errorf("FindNeedleFromEcx: %v", err)
}

-shard := ev.Shards[0]
+intervals = ev.LocateEcShardNeedleInterval(version, offset.ToActualOffset(), types.Size(needle.GetActualSize(size, version)))
return
}

+func (ev *EcVolume) LocateEcShardNeedleInterval(version needle.Version, offset int64, size types.Size) (intervals []Interval) {
+   shard := ev.Shards[0]
    // calculate the locations in the ec shards
-   intervals = LocateData(ErasureCodingLargeBlockSize, ErasureCodingSmallBlockSize, DataShardsCount*shard.ecdFileSize, offset.ToActualOffset(), types.Size(needle.GetActualSize(size, version)))
+   intervals = LocateData(ErasureCodingLargeBlockSize, ErasureCodingSmallBlockSize, DataShardsCount*shard.ecdFileSize, offset, types.Size(needle.GetActualSize(size, version)))

    return
}
29 weed/storage/idx/binary_search.go (Normal file)
@@ -0,0 +1,29 @@
+package idx
+
+import (
+   "github.com/seaweedfs/seaweedfs/weed/storage/types"
+)
+
+// FirstInvalidIndex finds the first index that fails the lessThanOrEqualToFn requirement.
+func FirstInvalidIndex(bytes []byte, lessThanOrEqualToFn func(key types.NeedleId, offset types.Offset, size types.Size) (bool, error)) (int, error) {
+   left, right := 0, len(bytes)/types.NeedleMapEntrySize-1
+   index := right + 1
+   for left <= right {
+       mid := left + (right-left)>>1
+       loc := mid * types.NeedleMapEntrySize
+       key := types.BytesToNeedleId(bytes[loc : loc+types.NeedleIdSize])
+       offset := types.BytesToOffset(bytes[loc+types.NeedleIdSize : loc+types.NeedleIdSize+types.OffsetSize])
+       size := types.BytesToSize(bytes[loc+types.NeedleIdSize+types.OffsetSize : loc+types.NeedleIdSize+types.OffsetSize+types.SizeSize])
+       res, err := lessThanOrEqualToFn(key, offset, size)
+       if err != nil {
+           return -1, err
+       }
+       if res {
+           left = mid + 1
+       } else {
+           index = mid
+           right = mid - 1
+       }
+   }
+   return index, nil
+}
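FirstInvalidIndex is a lower-bound binary search over fixed-size .idx records: given a monotone predicate (true for a prefix of the records, false afterwards) it returns the length of the valid prefix. The same invariant, stripped to its core:

package main

import "fmt"

// firstFalse returns the smallest index i in [0,n) with pred(i) == false,
// or n if pred holds everywhere; pred must be monotone (true...false...).
func firstFalse(n int, pred func(i int) bool) int {
    left, right, index := 0, n-1, n
    for left <= right {
        mid := left + (right-left)/2
        if pred(mid) {
            left = mid + 1
        } else {
            index = mid
            right = mid - 1
        }
    }
    return index
}

func main() {
    keys := []int{1, 5, 9, 20, 25, 30}
    fmt.Println(firstFalse(len(keys), func(i int) bool { return keys[i] < 20 })) // 3
}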
57 weed/storage/idx_binary_search_test.go (Normal file)
@@ -0,0 +1,57 @@
+package storage
+
+import (
+   "github.com/seaweedfs/seaweedfs/weed/storage/idx"
+   "github.com/seaweedfs/seaweedfs/weed/storage/needle"
+   "github.com/seaweedfs/seaweedfs/weed/storage/super_block"
+   "github.com/seaweedfs/seaweedfs/weed/storage/types"
+   "github.com/stretchr/testify/assert"
+   "os"
+   "testing"
+)
+
+func TestFirstInvalidIndex(t *testing.T) {
+   dir := t.TempDir()
+
+   v, err := NewVolume(dir, dir, "", 1, NeedleMapInMemory, &super_block.ReplicaPlacement{}, &needle.TTL{}, 0, 0)
+   if err != nil {
+       t.Fatalf("volume creation: %v", err)
+   }
+   type WriteInfo struct {
+       offset int64
+       size   int32
+   }
+   // write 30 needles with keys 1..30
+   for i := 1; i <= 30; i++ {
+       n := newRandomNeedle(uint64(i))
+       n.Flags = 0x08
+       _, _, _, err := v.writeNeedle2(n, true, false)
+       if err != nil {
+           t.Fatalf("write needle %d: %v", i, err)
+       }
+   }
+   b, err := os.ReadFile(v.IndexFileName() + ".idx")
+   // base case: every record is valid -> nothing is filtered
+   index, err := idx.FirstInvalidIndex(b, func(key types.NeedleId, offset types.Offset, size types.Size) (bool, error) {
+       return true, nil
+   })
+   if err != nil {
+       t.Fatalf("failed to complete binary search %v", err)
+   }
+   assert.Equal(t, 30, index, "when every record is valid nothing should be filtered from binary search")
+   index, err = idx.FirstInvalidIndex(b, func(key types.NeedleId, offset types.Offset, size types.Size) (bool, error) {
+       return false, nil
+   })
+   assert.Equal(t, 0, index, "when every record is invalid everything should be filtered from binary search")
+   index, err = idx.FirstInvalidIndex(b, func(key types.NeedleId, offset types.Offset, size types.Size) (bool, error) {
+       return key < 20, nil
+   })
+   // needle keys range from 1 to 30, so key < 20 means 19 keys are valid and the bytes are cut off at 19 * 16 = 304
+   assert.Equal(t, 19, index, "only keys below 20 are valid, so the first invalid index should be 19")
+
+   index, err = idx.FirstInvalidIndex(b, func(key types.NeedleId, offset types.Offset, size types.Size) (bool, error) {
+       return key <= 1, nil
+   })
+   // needle keys range from 1 to 30, so key <= 1 means 1 key is valid and the bytes are cut off at 1 * 16 = 16
+   assert.Equal(t, 1, index, "only key 1 is valid, so the first invalid index should be 1")
+}
@@ -209,8 +209,8 @@ func NeedleBodyLength(needleSize Size, version Version) int64 {
    return int64(needleSize) + NeedleChecksumSize + int64(PaddingLength(needleSize, version))
}

-//n should be a needle already read the header
-//the input stream will read until next file entry
+// n should be a needle already read the header
+// the input stream will read until next file entry
func (n *Needle) ReadNeedleBody(r backend.BackendStorageFile, version Version, offset int64, bodyLength int64) (bytes []byte, err error) {

    if bodyLength <= 0 {
@@ -1,7 +1,6 @@
package needle

import (
	"fmt"
	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/storage/backend"
	. "github.com/seaweedfs/seaweedfs/weed/storage/types"

@@ -9,36 +8,6 @@ import (
	"io"
)

// ReadNeedleDataInto uses a needle without n.Data to read the content into an io.Writer
func (n *Needle) ReadNeedleDataInto(r backend.BackendStorageFile, volumeOffset int64, buf []byte, writer io.Writer, needleOffset int64, size int64) (err error) {
	crc := CRC(0)
	for x := needleOffset; x < needleOffset+size; x += int64(len(buf)) {
		count, err := n.ReadNeedleData(r, volumeOffset, buf, x)
		toWrite := min(int64(count), needleOffset+size-x)
		if toWrite > 0 {
			crc = crc.Update(buf[0:toWrite])
			if _, err = writer.Write(buf[0:toWrite]); err != nil {
				return fmt.Errorf("ReadNeedleData write: %v", err)
			}
		}
		if err != nil {
			if err == io.EOF {
				err = nil
				break
			}
			return fmt.Errorf("ReadNeedleData: %v", err)
		}
		if count <= 0 {
			break
		}
	}
	if needleOffset == 0 && size == int64(n.DataSize) && (n.Checksum != crc && uint32(n.Checksum) != crc.Value()) {
		// crc.Value() is deprecated; the double check is kept for backward compatibility
		return fmt.Errorf("ReadNeedleData checksum %v expected %v", crc, n.Checksum)
	}
	return nil
}

// ReadNeedleData uses a needle without n.Data to read the content
// volumeOffset: the offset within the volume
// needleOffset: the offset within the needle Data

@@ -74,7 +43,6 @@ func (n *Needle) ReadNeedleMeta(r backend.BackendStorageFile, offset int64, size
		return ErrorSizeMismatch
	}
}

n.DataSize = util.BytesToUint32(bytes[NeedleHeaderSize : NeedleHeaderSize+DataSizeSize])

startOffset := offset + NeedleHeaderSize + DataSizeSize + int64(n.DataSize)

@@ -90,7 +58,6 @@ func (n *Needle) ReadNeedleMeta(r backend.BackendStorageFile, offset int64, size
	if err != nil {
		return err
	}

	var index int
	index, err = n.readNeedleDataVersion2NonData(metaSlice)

@@ -1,82 +0,0 @@
package needle

import (
	"fmt"
	"github.com/seaweedfs/seaweedfs/weed/storage/backend"
	"io"
	"os"
	"testing"

	"github.com/seaweedfs/seaweedfs/weed/storage/types"
)

func TestPageRead(t *testing.T) {
	baseFileName := "43"
	offset := int64(8)
	size := types.Size(1153890) // actual file size 1153862

	datFile, err := os.OpenFile(baseFileName+".dat", os.O_RDONLY, 0644)
	if err != nil {
		t.Fatalf("Open Volume Data File [ERROR]: %v", err)
	}
	datBackend := backend.NewDiskFile(datFile)
	defer datBackend.Close()
	{
		n := new(Needle)

		bytes, err := ReadNeedleBlob(datBackend, offset, size, Version3)
		if err != nil {
			t.Fatalf("ReadNeedleBlob: %v", err)
		}
		if err = n.ReadBytes(bytes, offset, size, Version3); err != nil {
			t.Fatalf("ReadBytes: %v", err)
		}

		fmt.Printf("bytes len %d\n", len(bytes))
		fmt.Printf("name %s size %d\n", n.Name, n.Size)

		fmt.Printf("id %d\n", n.Id)
		fmt.Printf("DataSize %d\n", n.DataSize)
		fmt.Printf("Flags %v\n", n.Flags)
		fmt.Printf("NameSize %d\n", n.NameSize)
		fmt.Printf("MimeSize %d\n", n.MimeSize)
		fmt.Printf("PairsSize %d\n", n.PairsSize)
		fmt.Printf("LastModified %d\n", n.LastModified)
		fmt.Printf("AppendAtNs %d\n", n.AppendAtNs)
		fmt.Printf("Checksum %d\n", n.Checksum)
	}

	{
		n, bytes, bodyLength, err := ReadNeedleHeader(datBackend, Version3, offset)
		if err != nil {
			t.Fatalf("ReadNeedleHeader: %v", err)
		}
		fmt.Printf("bytes len %d\n", len(bytes))
		fmt.Printf("name %s size %d bodyLength:%d\n", n.Name, n.Size, bodyLength)
	}

	{
		n := new(Needle)
		err := n.ReadNeedleMeta(datBackend, offset, size, Version3)
		if err != nil {
			t.Fatalf("ReadNeedleMeta: %v", err)
		}
		fmt.Printf("name %s size %d\n", n.Name, n.Size)
		fmt.Printf("id %d\n", n.Id)
		fmt.Printf("DataSize %d\n", n.DataSize)
		fmt.Printf("Flags %v\n", n.Flags)
		fmt.Printf("NameSize %d\n", n.NameSize)
		fmt.Printf("MimeSize %d\n", n.MimeSize)
		fmt.Printf("PairsSize %d\n", n.PairsSize)
		fmt.Printf("LastModified %d\n", n.LastModified)
		fmt.Printf("AppendAtNs %d\n", n.AppendAtNs)
		fmt.Printf("Checksum %d\n", n.Checksum)

		buf := make([]byte, 1024)
		if err = n.ReadNeedleDataInto(datBackend, offset, buf, io.Discard, 0, int64(n.DataSize)); err != nil {
			t.Fatalf("ReadNeedleDataInto: %v", err)
		}

	}

}

@@ -150,15 +150,7 @@ func (m *LevelDbNeedleMap) Put(key NeedleId, offset Offset, size Size) error {
func getWatermark(db *leveldb.DB) uint64 {
	data, err := db.Get(watermarkKey, nil)
	if err != nil || len(data) != 8 {
		glog.Warningf("get watermark from db error: %v, %d", err, len(data))
		/*
			if !strings.Contains(strings.ToLower(err.Error()), "not found") {
				err = setWatermark(db, 0)
				if err != nil {
					glog.Errorf("failed to set watermark: %v", err)
				}
			}
		*/
		glog.V(1).Infof("read previous watermark from db: %v, %d", err, len(data))
		return 0
	}
	return util.BytesToUint64(data)

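For context, the commented-out recovery path above calls setWatermark, which this diff doesn't show. Presumably it persists the counter in the same 8-byte encoding that getWatermark reads back; a sketch under that assumption, not the actual implementation:

	func setWatermark(db *leveldb.DB, watermark uint64) error {
		wmBytes := make([]byte, 8)
		util.Uint64toBytes(wmBytes, watermark) // mirror of util.BytesToUint64 above
		return db.Put(watermarkKey, wmBytes, nil)
	}
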
@@ -384,6 +384,14 @@ func (s *Store) ReadVolumeNeedle(i needle.VolumeId, n *needle.Needle, readOption
	}
	return 0, fmt.Errorf("volume %d not found", i)
}

func (s *Store) ReadVolumeNeedleMetaAt(i needle.VolumeId, n *needle.Needle, offset int64, size int32) error {
	if v := s.findVolume(i); v != nil {
		return v.readNeedleMetaAt(n, offset, size)
	}
	return fmt.Errorf("volume %d not found", i)
}

func (s *Store) ReadVolumeNeedleDataInto(i needle.VolumeId, n *needle.Needle, readOption *ReadOption, writer io.Writer, offset int64, size int64) error {
	if v := s.findVolume(i); v != nil {
		return v.readNeedleDataInto(n, readOption, writer, offset, size)

@@ -80,12 +80,31 @@ func (v *Volume) readNeedle(n *needle.Needle, readOption *ReadOption, onReadSize
	return -1, ErrorNotFound
}

// read fills in Needle content by looking up n.Id from NeedleMapper
func (v *Volume) readNeedleDataInto(n *needle.Needle, readOption *ReadOption, writer io.Writer, offset int64, size int64) (err error) {
// read needle meta data at a specific offset
func (v *Volume) readNeedleMetaAt(n *needle.Needle, offset int64, size int32) (err error) {
	v.dataFileAccessLock.RLock()
	defer v.dataFileAccessLock.RUnlock()
	// a negative size marks a deleted needle; read its meta data anyway
	if size < 0 {
		size = -size
	}
	err = n.ReadNeedleMeta(v.DataBackend, offset, Size(size), v.Version())
	if err == needle.ErrorSizeMismatch && OffsetSize == 4 {
		err = n.ReadNeedleMeta(v.DataBackend, offset+int64(MaxPossibleVolumeSize), Size(size), v.Version())
	}
	if err != nil {
		return err
	}
	return nil
}

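A note on the OffsetSize == 4 retry: 4-byte offsets wrap once a volume grows past the maximum directly addressable size, so a size mismatch at the plain offset is retried one wrap higher. Roughly, assuming offsets count 8-byte padding units (an assumption; the real constant is types.MaxPossibleVolumeSize):

	const assumedPaddingSize = 8 // illustrative, not from this diff
	// 2^32 padding units * 8 bytes = 32 GiB directly addressable
	const assumedMaxPossibleVolumeSize = int64(1<<32) * assumedPaddingSize
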
// read fills in Needle content by looking up n.Id from NeedleMapper
func (v *Volume) readNeedleDataInto(n *needle.Needle, readOption *ReadOption, writer io.Writer, offset int64, size int64) (err error) {

	v.dataFileAccessLock.RLock()
	nv, ok := v.nm.Get(n.Id)
	v.dataFileAccessLock.RUnlock()

	if !ok || nv.Offset.IsZero() {
		return ErrorNotFound
	}

@@ -102,19 +121,57 @@ func (v *Volume) readNeedleDataInto(n *needle.Needle, readOption *ReadOption, wr
		return nil
	}

	if readOption.VolumeRevision != v.SuperBlock.CompactionRevision {
		// the volume is compacted
		readOption.IsOutOfRange = false
		err = n.ReadNeedleMeta(v.DataBackend, nv.Offset.ToActualOffset(), readSize, v.Version())
	}
	buf := mem.Allocate(min(1024*1024, int(size)))
	defer mem.Free(buf)
	actualOffset := nv.Offset.ToActualOffset()
	if readOption.IsOutOfRange {
		actualOffset += int64(MaxPossibleVolumeSize)
	}

	return n.ReadNeedleDataInto(v.DataBackend, actualOffset, buf, writer, offset, size)
	buf := mem.Allocate(min(1024*1024, int(size)))
	defer mem.Free(buf)

	// read needle data
	crc := needle.CRC(0)
	for x := offset; x < offset+size; x += int64(len(buf)) {

		v.dataFileAccessLock.RLock()
		// possibly re-read the needle offset if the volume was compacted
		if readOption.VolumeRevision != v.SuperBlock.CompactionRevision {
			// the volume is compacted
			nv, ok = v.nm.Get(n.Id)
			if !ok || nv.Offset.IsZero() {
				v.dataFileAccessLock.RUnlock()
				return ErrorNotFound
			}
			actualOffset = nv.Offset.ToActualOffset()
			readOption.VolumeRevision = v.SuperBlock.CompactionRevision
		}
		count, err := n.ReadNeedleData(v.DataBackend, actualOffset, buf, x)
		v.dataFileAccessLock.RUnlock()

		toWrite := min(count, int(offset+size-x))
		if toWrite > 0 {
			crc = crc.Update(buf[0:toWrite])
			if _, err = writer.Write(buf[0:toWrite]); err != nil {
				return fmt.Errorf("ReadNeedleData write: %v", err)
			}
		}
		if err != nil {
			if err == io.EOF {
				err = nil
				break
			}
			return fmt.Errorf("ReadNeedleData: %v", err)
		}
		if count <= 0 {
			break
		}
	}
	if offset == 0 && size == int64(n.DataSize) && (n.Checksum != crc && uint32(n.Checksum) != crc.Value()) {
		// crc.Value() is deprecated; the double check is kept for backward compatibility
		return fmt.Errorf("ReadNeedleData checksum %v expected %v", crc, n.Checksum)
	}
	return nil

}

func min(x, y int) int {

91  weed/storage/volume_read_test.go  Normal file
@@ -0,0 +1,91 @@
package storage

import (
	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
	"github.com/seaweedfs/seaweedfs/weed/storage/super_block"
	"github.com/seaweedfs/seaweedfs/weed/storage/types"
	"github.com/stretchr/testify/assert"
	"testing"
)

func TestReadNeedMetaWithWritesAndUpdates(t *testing.T) {
	dir := t.TempDir()

	v, err := NewVolume(dir, dir, "", 1, NeedleMapInMemory, &super_block.ReplicaPlacement{}, &needle.TTL{}, 0, 0)
	if err != nil {
		t.Fatalf("volume creation: %v", err)
	}
	type WriteInfo struct {
		offset int64
		size   int32
	}
	writeInfos := make([]WriteInfo, 30)
	mockLastUpdateTime := uint64(1000000000000)
	// write 30 needles; keys wrap at 20, so the last 10 writes update earlier needles
	for i := 1; i <= 30; i++ {
		n := newRandomNeedle(uint64(i % 20))
		n.Flags = 0x08
		n.LastModified = mockLastUpdateTime
		mockLastUpdateTime += 2000
		offset, _, _, err := v.writeNeedle2(n, true, false)
		if err != nil {
			t.Fatalf("write needle %d: %v", i, err)
		}
		writeInfos[i-1] = WriteInfo{offset: int64(offset), size: int32(n.Size)}
	}
	expectedLastUpdateTime := uint64(1000000000000)
	for i := 0; i < 30; i++ {
		testNeedle := new(needle.Needle)
		testNeedle.Id = types.Uint64ToNeedleId(uint64((i + 1) % 20))
		testNeedle.Flags = 0x08
		if err := v.readNeedleMetaAt(testNeedle, writeInfos[i].offset, writeInfos[i].size); err != nil {
			t.Fatalf("read needle meta %d: %v", i, err)
		}
		actualLastModifiedTime := testNeedle.LastModified
		assert.Equal(t, expectedLastUpdateTime, actualLastModifiedTime, "the LastModified timestamps should match")
		expectedLastUpdateTime += 2000
	}
}

func TestReadNeedMetaWithDeletesThenWrites(t *testing.T) {
	dir := t.TempDir()

	v, err := NewVolume(dir, dir, "", 1, NeedleMapInMemory, &super_block.ReplicaPlacement{}, &needle.TTL{}, 0, 0)
	if err != nil {
		t.Fatalf("volume creation: %v", err)
	}
	type WriteInfo struct {
		offset int64
		size   int32
	}
	writeInfos := make([]WriteInfo, 10)
	mockLastUpdateTime := uint64(1000000000000)
	for i := 1; i <= 10; i++ {
		n := newRandomNeedle(uint64(i % 5))
		n.Flags = 0x08
		n.LastModified = mockLastUpdateTime
		mockLastUpdateTime += 2000
		offset, _, _, err := v.writeNeedle2(n, true, false)
		if err != nil {
			t.Fatalf("write needle %d: %v", i, err)
		}
		if i < 5 {
			size, err := v.deleteNeedle2(n)
			if err != nil {
				t.Fatalf("delete needle %d: %v", i, err)
			}
			writeInfos[i-1] = WriteInfo{offset: int64(offset), size: int32(size)}
		} else {
			writeInfos[i-1] = WriteInfo{offset: int64(offset), size: int32(n.Size)}
		}
	}

	expectedLastUpdateTime := uint64(1000000000000)
	for i := 0; i < 10; i++ {
		testNeedle := new(needle.Needle)
		testNeedle.Id = types.Uint64ToNeedleId(uint64((i + 1) % 5))
		testNeedle.Flags = 0x08
		if err := v.readNeedleMetaAt(testNeedle, writeInfos[i].offset, writeInfos[i].size); err != nil {
			t.Fatalf("read needle meta %d: %v", i, err)
		}
		actualLastModifiedTime := testNeedle.LastModified
		assert.Equal(t, expectedLastUpdateTime, actualLastModifiedTime, "the LastModified timestamps should match")
		expectedLastUpdateTime += 2000
	}
}

@@ -1,105 +0,0 @@
package storage

import (
	"bufio"
	"fmt"
	"io"
	"time"

	"github.com/seaweedfs/seaweedfs/weed/util"

	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/storage/backend"
	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
	. "github.com/seaweedfs/seaweedfs/weed/storage/types"
)

func (v *Volume) StreamWrite(n *needle.Needle, data io.Reader, dataSize uint32) (err error) {

	v.dataFileAccessLock.Lock()
	defer v.dataFileAccessLock.Unlock()

	df, ok := v.DataBackend.(*backend.DiskFile)
	if !ok {
		return fmt.Errorf("unexpected volume backend")
	}
	offset, _, _ := v.DataBackend.GetStat()

	header := make([]byte, NeedleHeaderSize+TimestampSize) // adding timestamp to reuse it and avoid extra allocation
	CookieToBytes(header[0:CookieSize], n.Cookie)
	NeedleIdToBytes(header[CookieSize:CookieSize+NeedleIdSize], n.Id)
	n.Size = 4 + Size(dataSize) + 1
	SizeToBytes(header[CookieSize+NeedleIdSize:CookieSize+NeedleIdSize+SizeSize], n.Size)

	n.DataSize = dataSize

	// needle header
	df.Write(header[0:NeedleHeaderSize])

	// data size and data
	util.Uint32toBytes(header[0:4], n.DataSize)
	df.Write(header[0:4])
	// write and calculate CRC
	crcWriter := needle.NewCRCwriter(df)
	io.Copy(crcWriter, io.LimitReader(data, int64(dataSize)))

	// flags
	util.Uint8toBytes(header[0:1], n.Flags)
	df.Write(header[0:1])

	// data checksum
	util.Uint32toBytes(header[0:needle.NeedleChecksumSize], crcWriter.Sum())
	// write timestamp, padding
	n.AppendAtNs = uint64(time.Now().UnixNano())
	util.Uint64toBytes(header[needle.NeedleChecksumSize:needle.NeedleChecksumSize+TimestampSize], n.AppendAtNs)
	padding := needle.PaddingLength(n.Size, needle.Version3)
	df.Write(header[0 : needle.NeedleChecksumSize+TimestampSize+padding])

	// add to needle map
	if err = v.nm.Put(n.Id, ToOffset(int64(offset)), n.Size); err != nil {
		glog.V(4).Infof("failed to save in needle map %d: %v", n.Id, err)
	}
	return
}

func (v *Volume) StreamRead(n *needle.Needle, writer io.Writer) (err error) {

	v.dataFileAccessLock.Lock()
	defer v.dataFileAccessLock.Unlock()

	nv, ok := v.nm.Get(n.Id)
	if !ok || nv.Offset.IsZero() {
		return ErrorNotFound
	}

	sr := &StreamReader{
		readerAt: v.DataBackend,
		offset:   nv.Offset.ToActualOffset(),
	}
	bufReader := bufio.NewReader(sr)
	bufReader.Discard(NeedleHeaderSize)
	sizeBuf := make([]byte, 4)
	bufReader.Read(sizeBuf)
	if _, err = writer.Write(sizeBuf); err != nil {
		return err
	}
	dataSize := util.BytesToUint32(sizeBuf)

	_, err = io.Copy(writer, io.LimitReader(bufReader, int64(dataSize)))

	return
}

type StreamReader struct {
	offset   int64
	readerAt io.ReaderAt
}

func (sr *StreamReader) Read(p []byte) (n int, err error) {
	n, err = sr.readerAt.ReadAt(p, sr.offset)
	if err != nil {
		return
	}
	sr.offset += int64(n)
	return
}

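An aside on the deleted StreamReader: it adapts an io.ReaderAt into a sequential io.Reader. When the length to read is known up front, the standard library's io.SectionReader does the same job; a sketch (length is an illustrative variable):

	sr := io.NewSectionReader(v.DataBackend, nv.Offset.ToActualOffset(), length)
	bufReader := bufio.NewReader(sr) // same buffering as the deleted code
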
@@ -20,9 +20,8 @@ type topology struct {
	DataCenters []dataCenter `xml:"DataCenter"`
}
type Configuration struct {
	XMLName     xml.Name `xml:"Configuration"`
	Topo        topology `xml:"Topology"`
	ip2location map[string]loc // this is not used any more. leave it here for later.
	XMLName xml.Name `xml:"Configuration"`
	Topo    topology `xml:"Topology"`
}

func (c *Configuration) String() string {

@@ -33,12 +32,6 @@ func (c *Configuration) String() string {
}

func (c *Configuration) Locate(ip string, dcName string, rackName string) (dc string, rack string) {
	if c != nil && c.ip2location != nil {
		if loc, ok := c.ip2location[ip]; ok {
			return loc.dcName, loc.rackName
		}
	}

	if dcName == "" {
		dcName = "DefaultDataCenter"
	}

@@ -20,14 +20,16 @@ func NewDataCenter(id string) *DataCenter {
}

func (dc *DataCenter) GetOrCreateRack(rackName string) *Rack {
	for _, c := range dc.Children() {
	dc.Lock()
	defer dc.Unlock()
	for _, c := range dc.children {
		rack := c.(*Rack)
		if string(rack.Id()) == rackName {
			return rack
		}
	}
	rack := NewRack(rackName)
	dc.LinkChildNode(rack)
	dc.doLinkChildNode(rack)
	return rack
}

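The GetOrCreateRack change above (mirrored in Rack and Topology below) now takes dc's lock explicitly and swaps LinkChildNode for doLinkChildNode, which suggests the exported method locks internally while the do-prefixed variant assumes the caller holds the lock. A sketch of that split, as an assumption about the surrounding code rather than a quote from it:

	func (n *NodeImpl) LinkChildNode(node Node) {
		n.Lock()
		defer n.Unlock()
		n.doLinkChildNode(node) // doLinkChildNode expects n's lock to be held
	}
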
@@ -9,6 +9,7 @@ import (
	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
	"github.com/seaweedfs/seaweedfs/weed/storage/types"
	"github.com/seaweedfs/seaweedfs/weed/util"
	"sync/atomic"
)

type DataNode struct {

@@ -53,14 +54,14 @@ func (dn *DataNode) getOrCreateDisk(diskType string) *Disk {
	return disk
}

func (dn *DataNode) doAddOrUpdateVolume(v storage.VolumeInfo) (isNew, isChangedRO bool) {
func (dn *DataNode) doAddOrUpdateVolume(v storage.VolumeInfo) (isNew, isChanged bool) {
	disk := dn.getOrCreateDisk(v.DiskType)
	return disk.AddOrUpdateVolume(v)
}

// UpdateVolumes detects new/deleted/changed volumes on a volume server
// used in master to notify master clients of these changes.
func (dn *DataNode) UpdateVolumes(actualVolumes []storage.VolumeInfo) (newVolumes, deletedVolumes, changeRO []storage.VolumeInfo) {
func (dn *DataNode) UpdateVolumes(actualVolumes []storage.VolumeInfo) (newVolumes, deletedVolumes, changedVolumes []storage.VolumeInfo) {

	actualVolumeMap := make(map[needle.VolumeId]storage.VolumeInfo)
	for _, v := range actualVolumes {

@@ -93,12 +94,12 @@ func (dn *DataNode) UpdateVolumes(actualVolumes []storage.VolumeInfo) (newVolume
		}
	}
	for _, v := range actualVolumes {
		isNew, isChangedRO := dn.doAddOrUpdateVolume(v)
		isNew, isChanged := dn.doAddOrUpdateVolume(v)
		if isNew {
			newVolumes = append(newVolumes, v)
		}
		if isChangedRO {
			changeRO = append(changeRO, v)
		if isChanged {
			changedVolumes = append(changedVolumes, v)
		}
	}
	return

@@ -141,12 +142,13 @@ func (dn *DataNode) AdjustMaxVolumeCounts(maxVolumeCounts map[string]uint32) {
	}
	dt := types.ToDiskType(diskType)
	currentDiskUsage := dn.diskUsages.getOrCreateDisk(dt)
	if currentDiskUsage.maxVolumeCount == int64(maxVolumeCount) {
	currentDiskUsageMaxVolumeCount := atomic.LoadInt64(&currentDiskUsage.maxVolumeCount)
	if currentDiskUsageMaxVolumeCount == int64(maxVolumeCount) {
		continue
	}
	disk := dn.getOrCreateDisk(dt.String())
	deltaDiskUsage := deltaDiskUsages.getOrCreateDisk(dt)
	deltaDiskUsage.maxVolumeCount = int64(maxVolumeCount) - currentDiskUsage.maxVolumeCount
	deltaDiskUsage.maxVolumeCount = int64(maxVolumeCount) - currentDiskUsageMaxVolumeCount
	disk.UpAdjustDiskUsageDelta(deltaDiskUsages)
}
}

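The switch to atomic.LoadInt64 here (and in AvailableSpaceFor below) replaces plain reads of counters that other goroutines update concurrently; such unsynchronized reads would typically be flagged by Go's race detector, e.g. when running the topology tests with:

	go test -race ./weed/topology/
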
@@ -144,13 +144,13 @@ func (d *Disk) String() string {
	return fmt.Sprintf("Disk:%s, volumes:%v, ecShards:%v", d.NodeImpl.String(), d.volumes, d.ecShards)
}

func (d *Disk) AddOrUpdateVolume(v storage.VolumeInfo) (isNew, isChangedRO bool) {
func (d *Disk) AddOrUpdateVolume(v storage.VolumeInfo) (isNew, isChanged bool) {
	d.Lock()
	defer d.Unlock()
	return d.doAddOrUpdateVolume(v)
}

func (d *Disk) doAddOrUpdateVolume(v storage.VolumeInfo) (isNew, isChangedRO bool) {
func (d *Disk) doAddOrUpdateVolume(v storage.VolumeInfo) (isNew, isChanged bool) {
	deltaDiskUsages := newDiskUsages()
	deltaDiskUsage := deltaDiskUsages.getOrCreateDisk(types.ToDiskType(v.DiskType))
	if oldV, ok := d.volumes[v.Id]; !ok {

@@ -175,7 +175,7 @@ func (d *Disk) doAddOrUpdateVolume(v storage.VolumeInfo) (isNew, isChangedRO boo
		}
		d.UpAdjustDiskUsageDelta(deltaDiskUsages)
	}
	isChangedRO = d.volumes[v.Id].ReadOnly != v.ReadOnly
	isChanged = d.volumes[v.Id].ReadOnly != v.ReadOnly
	d.volumes[v.Id] = v
}
return

@@ -10,6 +10,7 @@ import (
	"math/rand"
	"strings"
	"sync"
	"sync/atomic"
)

type NodeId string

@@ -139,9 +140,10 @@ func (n *NodeImpl) getOrCreateDisk(diskType types.DiskType) *DiskUsageCounts {
}
func (n *NodeImpl) AvailableSpaceFor(option *VolumeGrowOption) int64 {
	t := n.getOrCreateDisk(option.DiskType)
	freeVolumeSlotCount := t.maxVolumeCount + t.remoteVolumeCount - t.volumeCount
	if t.ecShardCount > 0 {
		freeVolumeSlotCount = freeVolumeSlotCount - t.ecShardCount/erasure_coding.DataShardsCount - 1
	freeVolumeSlotCount := atomic.LoadInt64(&t.maxVolumeCount) + atomic.LoadInt64(&t.remoteVolumeCount) - atomic.LoadInt64(&t.volumeCount)
	ecShardCount := atomic.LoadInt64(&t.ecShardCount)
	if ecShardCount > 0 {
		freeVolumeSlotCount = freeVolumeSlotCount - ecShardCount/erasure_coding.DataShardsCount - 1
	}
	return freeVolumeSlotCount
}

@@ -32,7 +32,9 @@ func (r *Rack) FindDataNode(ip string, port int) *DataNode {
	return nil
}
func (r *Rack) GetOrCreateDataNode(ip string, port int, grpcPort int, publicUrl string, maxVolumeCounts map[string]uint32) *DataNode {
	for _, c := range r.Children() {
	r.Lock()
	defer r.Unlock()
	for _, c := range r.children {
		dn := c.(*DataNode)
		if dn.MatchLocation(ip, port) {
			dn.LastSeen = time.Now().Unix()

@@ -45,7 +47,7 @@ func (r *Rack) GetOrCreateDataNode(ip string, port int, grpcPort int, publicUrl
	dn.GrpcPort = grpcPort
	dn.PublicUrl = publicUrl
	dn.LastSeen = time.Now().Unix()
	r.LinkChildNode(dn)
	r.doLinkChildNode(dn)
	for diskType, maxVolumeCount := range maxVolumeCounts {
		disk := NewDisk(diskType)
		disk.diskUsages.getOrCreateDisk(types.ToDiskType(diskType)).maxVolumeCount = int64(maxVolumeCount)

@@ -197,7 +197,7 @@ func GetWritableRemoteReplications(s *storage.Store, grpcDialOption grpc.DialOpt
		}
	}
} else {
	err = fmt.Errorf("failed to lookup for %d: %v", volumeId, lookupErr)
	err = fmt.Errorf("replicating lookup failed for %d: %v", volumeId, lookupErr)
	return
}

@@ -258,14 +258,16 @@ func (t *Topology) UnRegisterVolumeLayout(v storage.VolumeInfo, dn *DataNode) {
}

func (t *Topology) GetOrCreateDataCenter(dcName string) *DataCenter {
	for _, c := range t.Children() {
	t.Lock()
	defer t.Unlock()
	for _, c := range t.children {
		dc := c.(*DataCenter)
		if string(dc.Id()) == dcName {
			return dc
		}
	}
	dc := NewDataCenter(dcName)
	t.LinkChildNode(dc)
	t.doLinkChildNode(dc)
	return dc
}

@@ -5,7 +5,7 @@ import (
)

var (
	VERSION_NUMBER = fmt.Sprintf("%.02f", 3.25)
	VERSION_NUMBER = fmt.Sprintf("%.02f", 3.27)
	VERSION        = sizeLimit + " " + VERSION_NUMBER
	COMMIT         = ""
)

@@ -27,7 +27,7 @@ type MasterClient struct {
	masters        map[string]pb.ServerAddress
	grpcDialOption grpc.DialOption

	vidMap
	*vidMap
	vidMapCacheSize  int
	OnPeerUpdate     func(update *master_pb.ClusterNodeUpdate, startFrom time.Time)
	OnPeerUpdateLock sync.RWMutex

@@ -262,8 +262,11 @@ func (mc *MasterClient) tryConnectToMaster(master pb.ServerAddress) (nextHintedL
}

func (mc *MasterClient) updateVidMap(resp *master_pb.KeepConnectedResponse) {
	if resp.VolumeLocation.IsEmptyUrl() {
		glog.V(0).Infof("updateVidMap ignore short heartbeat: %+v", resp)
		return
	}
	// process new volume location
	glog.V(1).Infof("updateVidMap() resp.VolumeLocation.DataCenter %v", resp.VolumeLocation.DataCenter)
	loc := Location{
		Url:       resp.VolumeLocation.Url,
		PublicUrl: resp.VolumeLocation.PublicUrl,

@@ -271,21 +274,25 @@ func (mc *MasterClient) updateVidMap(resp *master_pb.KeepConnectedResponse) {
		GrpcPort: int(resp.VolumeLocation.GrpcPort),
	}
	for _, newVid := range resp.VolumeLocation.NewVids {
		glog.V(1).Infof("%s.%s: %s masterClient adds volume %d", mc.FilerGroup, mc.clientType, loc.Url, newVid)
		glog.V(2).Infof("%s.%s: %s masterClient adds volume %d", mc.FilerGroup, mc.clientType, loc.Url, newVid)
		mc.addLocation(newVid, loc)
	}
	for _, deletedVid := range resp.VolumeLocation.DeletedVids {
		glog.V(1).Infof("%s.%s: %s masterClient removes volume %d", mc.FilerGroup, mc.clientType, loc.Url, deletedVid)
		glog.V(2).Infof("%s.%s: %s masterClient removes volume %d", mc.FilerGroup, mc.clientType, loc.Url, deletedVid)
		mc.deleteLocation(deletedVid, loc)
	}
	for _, newEcVid := range resp.VolumeLocation.NewEcVids {
		glog.V(1).Infof("%s.%s: %s masterClient adds ec volume %d", mc.FilerGroup, mc.clientType, loc.Url, newEcVid)
		glog.V(2).Infof("%s.%s: %s masterClient adds ec volume %d", mc.FilerGroup, mc.clientType, loc.Url, newEcVid)
		mc.addEcLocation(newEcVid, loc)
	}
	for _, deletedEcVid := range resp.VolumeLocation.DeletedEcVids {
		glog.V(1).Infof("%s.%s: %s masterClient removes ec volume %d", mc.FilerGroup, mc.clientType, loc.Url, deletedEcVid)
		glog.V(2).Infof("%s.%s: %s masterClient removes ec volume %d", mc.FilerGroup, mc.clientType, loc.Url, deletedEcVid)
		mc.deleteEcLocation(deletedEcVid, loc)
	}
	glog.V(1).Infof("updateVidMap(%s) %s.%s: %s volume add: %d, del: %d, add ec: %d del ec: %d",
		resp.VolumeLocation.DataCenter, mc.FilerGroup, mc.clientType, loc.Url,
		len(resp.VolumeLocation.NewVids), len(resp.VolumeLocation.DeletedVids),
		len(resp.VolumeLocation.NewEcVids), len(resp.VolumeLocation.DeletedEcVids))
}

func (mc *MasterClient) WithClient(streamingMode bool, fn func(client master_pb.SeaweedClient) error) error {

@@ -303,9 +310,12 @@ func (mc *MasterClient) resetVidMap() {
	DataCenter: mc.DataCenter,
	cache:      mc.cache,
}
mc.vidMap = newVidMap(mc.DataCenter)
mc.vidMap.cache = tail

nvm := newVidMap(mc.DataCenter)
nvm.cache = tail
mc.vidMap = nvm

// trim the cache chain down to vidMapCacheSize entries
for i := 0; i < mc.vidMapCacheSize && tail.cache != nil; i++ {
	if i == mc.vidMapCacheSize-1 {
		tail.cache = nil

@@ -43,8 +43,8 @@ type vidMap struct {
	cache *vidMap
}

func newVidMap(dataCenter string) vidMap {
	return vidMap{
func newVidMap(dataCenter string) *vidMap {
	return &vidMap{
		vid2Locations:   make(map[uint32][]Location),
		ecVid2Locations: make(map[uint32][]Location),
		DataCenter:      dataCenter,

@@ -1,15 +1,17 @@
package wdclient

import (
	"context"
	"fmt"
	"google.golang.org/grpc"
	"strconv"
	"sync"
	"testing"
	"time"
)

func TestLocationIndex(t *testing.T) {
	vm := vidMap{}
	vm := &vidMap{}
	// the lookup must fail for these lengths
	mustFailed := func(length int) {
		_, err := vm.getLocationIndex(length)

@@ -132,6 +134,43 @@ func TestLookupFileId(t *testing.T) {
	wg.Wait()
}

func TestConcurrentGetLocations(t *testing.T) {
	mc := NewMasterClient(grpc.EmptyDialOption{}, "", "", "", "", "", nil)
	location := Location{Url: "TestDataRacing"}
	mc.addLocation(1, location)

	ctx, cancel := context.WithCancel(context.Background())
	wg := sync.WaitGroup{}
	for i := 0; i < 50; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for {
				select {
				case <-ctx.Done():
					return
				default:
					_, found := mc.GetLocations(1)
					if !found {
						cancel()
						t.Error("vid map is invalid due to a data race")
						return
					}
				}
			}
		}()
	}

	// simulate a vidMap reset that preserves the cache, as happens when the leader changes
	for i := 0; i < 100; i++ {
		mc.resetVidMap()
		mc.addLocation(1, location)
		time.Sleep(1 * time.Microsecond)
	}
	cancel()
	wg.Wait()
}

func BenchmarkLocationIndex(b *testing.B) {
	b.SetParallelism(8)
	vm := vidMap{
