mirror of https://github.com/seaweedfs/seaweedfs.git
synced 2024-01-19 02:48:24 +00:00

commit 34240606f7
.github/workflows/binaries_dev.yml
vendored
13
.github/workflows/binaries_dev.yml
vendored
|
@ -3,6 +3,8 @@ name: "go: build dev binaries"
|
|||
on:
|
||||
push:
|
||||
branches: [ master ]
|
||||
pull_request:
|
||||
branches: [ master ]
|
||||
|
||||
jobs:
|
||||
|
||||
|
@ -36,8 +38,11 @@ jobs:
|
|||
- name: Set BUILD_TIME env
|
||||
run: echo BUILD_TIME=$(date -u +%Y%m%d-%H%M) >> ${GITHUB_ENV}
|
||||
|
||||
- name: disable http2 env
|
||||
run: export GODEBUG=http2client=0
|
||||
|
||||
- name: Go Release Binaries Large Disk
|
||||
uses: wangyoucao577/go-release-action@v1.20
|
||||
uses: wangyoucao577/go-release-action@bugfix/upload-fail
|
||||
with:
|
||||
github_token: ${{ secrets.GITHUB_TOKEN }}
|
||||
goos: ${{ matrix.goos }}
|
||||
|
@ -53,7 +58,7 @@ jobs:
|
|||
asset_name: "weed-large-disk-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}"
|
||||
|
||||
- name: Go Release Binaries Normal Volume Size
|
||||
uses: wangyoucao577/go-release-action@v1.20
|
||||
uses: wangyoucao577/go-release-action@bugfix/upload-fail
|
||||
with:
|
||||
github_token: ${{ secrets.GITHUB_TOKEN }}
|
||||
goos: ${{ matrix.goos }}
|
||||
|
@ -84,7 +89,7 @@ jobs:
|
|||
run: echo BUILD_TIME=$(date -u +%Y%m%d-%H%M) >> ${GITHUB_ENV}
|
||||
|
||||
- name: Go Release Binaries Large Disk
|
||||
uses: wangyoucao577/go-release-action@v1.20
|
||||
uses: wangyoucao577/go-release-action@bugfix/upload-fail
|
||||
with:
|
||||
github_token: ${{ secrets.GITHUB_TOKEN }}
|
||||
goos: ${{ matrix.goos }}
|
||||
|
@ -100,7 +105,7 @@ jobs:
|
|||
asset_name: "weed-large-disk-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}"
|
||||
|
||||
- name: Go Release Binaries Normal Volume Size
|
||||
uses: wangyoucao577/go-release-action@v1.20
|
||||
uses: wangyoucao577/go-release-action@bugfix/upload-fail
|
||||
with:
|
||||
github_token: ${{ secrets.GITHUB_TOKEN }}
|
||||
goos: ${{ matrix.goos }}
|
||||
|
|
|
@@ -5,12 +5,16 @@
 
 <h2 align="center">Generous Backers ($50+)</h2>
 
 - [4Sight Imaging](https://www.4sightimaging.com/)
 - [Evercam Camera Management Software](https://evercam.io/)
 - [Admiral](https://getadmiral.com)
 
 <h2 align="center">Backers</h2>
 
 - [ColorfulClouds Tech Co. Ltd.](https://caiyunai.com/)
 - [Haravan - Ecommerce Platform](https://www.haravan.com)
 - PeterCxy - Creator of Shelter App
 - [Hive Games](https://playhive.com/)
 - Flowm
 - Yoni Nakache
 - Catalin Constantin
 - MingLi Yuan
 - Leroy van Logchem
@@ -1,4 +1,4 @@
-FROM amd64/golang:1.17-alpine as builder
+FROM golang:1.17-alpine as builder
 RUN apk add git g++ fuse
 RUN mkdir -p /go/src/github.com/chrislusf/
 RUN git clone https://github.com/chrislusf/seaweedfs /go/src/github.com/chrislusf/seaweedfs
@@ -1,4 +1,4 @@
-FROM amd64/golang:1.17-alpine as builder
+FROM golang:1.17-alpine as builder
 RUN apk add git g++ fuse
 RUN mkdir -p /go/src/github.com/chrislusf/
 RUN git clone https://github.com/chrislusf/seaweedfs /go/src/github.com/chrislusf/seaweedfs
@@ -1,5 +1,5 @@
 apiVersion: v1
 description: SeaweedFS
 name: seaweedfs
-appVersion: "2.75"
-version: "2.75"
+appVersion: "2.77"
+version: "2.77"
@@ -133,11 +133,6 @@ spec:
           -encryptVolumeData \
           {{- end }}
           -ip=${POD_IP} \
-          {{- if .Values.filer.enable_peers }}
-          {{- if gt (.Values.filer.replicas | int) 1 }}
-          -peers=$(echo -n "{{ range $index := until (.Values.filer.replicas | int) }}${SEAWEEDFS_FULLNAME}-filer-{{ $index }}.${SEAWEEDFS_FULLNAME}-filer:{{ $.Values.filer.port }}{{ if lt $index (sub ($.Values.filer.replicas | int) 1) }},{{ end }}{{ end }}" | sed "s/$HOSTNAME.${SEAWEEDFS_FULLNAME}-filer:{{ $.Values.filer.port }}//" | sed 's/,$//; s/^,//; s/,,/,/;') \
-          {{- end }}
-          {{- end }}
           {{- if .Values.filer.s3.enabled }}
           -s3 \
           -s3.port={{ .Values.filer.s3.port }} \
@@ -246,8 +246,6 @@ filer:
   maxMB: null
   # encrypt data on volume servers
   encryptVolumeData: false
-  # enable peers sync metadata, for leveldb (localdb for filer but with sync across)
-  enable_peers: false
 
   # Whether proxy or redirect to volume server during file GET request
   redirectOnRead: false
@@ -5,7 +5,7 @@
 
   <groupId>com.github.chrislusf</groupId>
   <artifactId>seaweedfs-client</artifactId>
-  <version>1.6.9</version>
+  <version>1.7.0</version>
 
   <parent>
     <groupId>org.sonatype.oss</groupId>

@@ -60,7 +60,7 @@
     <dependency>
       <groupId>org.apache.httpcomponents</groupId>
       <artifactId>httpmime</artifactId>
-      <version>4.5.6</version>
+      <version>4.5.13</version>
     </dependency>
     <dependency>
       <groupId>junit</groupId>
@@ -5,7 +5,7 @@
 
   <groupId>com.github.chrislusf</groupId>
   <artifactId>seaweedfs-client</artifactId>
-  <version>1.6.9</version>
+  <version>1.7.0</version>
 
   <parent>
     <groupId>org.sonatype.oss</groupId>

@@ -60,7 +60,7 @@
     <dependency>
       <groupId>org.apache.httpcomponents</groupId>
       <artifactId>httpmime</artifactId>
-      <version>4.5.6</version>
+      <version>4.5.13</version>
     </dependency>
     <dependency>
       <groupId>junit</groupId>
@@ -5,7 +5,7 @@
 
   <groupId>com.github.chrislusf</groupId>
   <artifactId>seaweedfs-client</artifactId>
-  <version>1.6.9</version>
+  <version>1.7.0</version>
 
   <parent>
     <groupId>org.sonatype.oss</groupId>

@@ -60,7 +60,7 @@
     <dependency>
       <groupId>org.apache.httpcomponents</groupId>
       <artifactId>httpmime</artifactId>
-      <version>4.5.6</version>
+      <version>4.5.13</version>
     </dependency>
     <dependency>
       <groupId>junit</groupId>
@@ -14,10 +14,10 @@ public class RemoteUtil {
         String dir = SeaweedOutputStream.getParentDirectory(fullpath);
         String name = SeaweedOutputStream.getFileName(fullpath);
 
-        final FilerProto.DownloadToLocalResponse downloadToLocalResponse = filerClient.getBlockingStub()
-                .downloadToLocal(FilerProto.DownloadToLocalRequest.newBuilder()
+        final FilerProto.CacheRemoteObjectToLocalClusterResponse response = filerClient.getBlockingStub()
+                .cacheRemoteObjectToLocalCluster(FilerProto.CacheRemoteObjectToLocalClusterRequest.newBuilder()
                         .setDirectory(dir).setName(name).build());
 
-        return downloadToLocalResponse.getEntry();
+        return response.getEntry();
     }
 }
@@ -69,7 +69,7 @@ service SeaweedFiler {
     rpc KvPut (KvPutRequest) returns (KvPutResponse) {
     }
 
-    rpc DownloadToLocal (DownloadToLocalRequest) returns (DownloadToLocalResponse) {
+    rpc CacheRemoteObjectToLocalCluster (CacheRemoteObjectToLocalClusterRequest) returns (CacheRemoteObjectToLocalClusterResponse) {
     }
 }
 
@@ -403,10 +403,10 @@ message FilerConf {
 /////////////////////////
 // Remote Storage related
 /////////////////////////
-message DownloadToLocalRequest {
+message CacheRemoteObjectToLocalClusterRequest {
     string directory = 1;
     string name = 2;
 }
-message DownloadToLocalResponse {
+message CacheRemoteObjectToLocalClusterResponse {
     Entry entry = 1;
 }
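The rename is plumbing-only: the RPC keeps the same directory/name request and entry response. A minimal Go sketch of invoking the renamed RPC (not part of this commit; the dial target and the grpc.WithInsecure() choice are illustrative assumptions — in-tree callers go through filerClient.WithFilerClient instead):

package main

import (
    "context"
    "log"

    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
    "google.golang.org/grpc"
)

func main() {
    // hypothetical filer gRPC address; plain-text transport for the sketch
    conn, err := grpc.Dial("localhost:18888", grpc.WithInsecure())
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()

    client := filer_pb.NewSeaweedFilerClient(conn)
    // DownloadToLocal became CacheRemoteObjectToLocalCluster; the request
    // still addresses an entry by parent directory and name.
    resp, err := client.CacheRemoteObjectToLocalCluster(context.Background(), &filer_pb.CacheRemoteObjectToLocalClusterRequest{
        Directory: "/buckets/b1",
        Name:      "file.txt",
    })
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("cached entry: %v", resp.Entry)
}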
@@ -11,13 +11,13 @@
     <dependency>
       <groupId>com.github.chrislusf</groupId>
       <artifactId>seaweedfs-client</artifactId>
-      <version>1.6.9</version>
+      <version>1.7.0</version>
       <scope>compile</scope>
     </dependency>
     <dependency>
       <groupId>com.github.chrislusf</groupId>
       <artifactId>seaweedfs-hadoop2-client</artifactId>
-      <version>1.6.9</version>
+      <version>1.7.0</version>
       <scope>compile</scope>
     </dependency>
     <dependency>

@@ -301,7 +301,7 @@
     </snapshotRepository>
   </distributionManagement>
   <properties>
-    <seaweedfs.client.version>1.6.9</seaweedfs.client.version>
+    <seaweedfs.client.version>1.7.0</seaweedfs.client.version>
     <hadoop.version>2.9.2</hadoop.version>
   </properties>
 </project>
@@ -5,7 +5,7 @@
   <modelVersion>4.0.0</modelVersion>
 
   <properties>
-    <seaweedfs.client.version>1.6.9</seaweedfs.client.version>
+    <seaweedfs.client.version>1.7.0</seaweedfs.client.version>
     <hadoop.version>2.9.2</hadoop.version>
   </properties>
 
@@ -309,7 +309,7 @@
     </snapshotRepository>
   </distributionManagement>
   <properties>
-    <seaweedfs.client.version>1.6.9</seaweedfs.client.version>
+    <seaweedfs.client.version>1.7.0</seaweedfs.client.version>
     <hadoop.version>3.1.1</hadoop.version>
   </properties>
 </project>
@@ -5,7 +5,7 @@
   <modelVersion>4.0.0</modelVersion>
 
   <properties>
-    <seaweedfs.client.version>1.6.9</seaweedfs.client.version>
+    <seaweedfs.client.version>1.7.0</seaweedfs.client.version>
     <hadoop.version>3.1.1</hadoop.version>
   </properties>
 
@@ -2,10 +2,13 @@ BINARY = weed
 
 SOURCE_DIR = .
 
-all: debug_mount
+all: install
 
 .PHONY : clean debug_mount
 
+install:
+	go install
+
 clean:
 	go clean $(SOURCE_DIR)
 	rm -f $(BINARY)
weed/cluster/cluster.go (new file, 264 lines)

@@ -0,0 +1,264 @@
package cluster

import (
    "github.com/chrislusf/seaweedfs/weed/pb"
    "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
    "math"
    "sync"
    "time"
)

const (
    MasterType = "master"
    FilerType  = "filer"
    BrokerType = "broker"
)

type ClusterNode struct {
    Address   pb.ServerAddress
    Version   string
    counter   int
    createdTs time.Time
}

type Leaders struct {
    leaders [3]pb.ServerAddress
}

type Cluster struct {
    filers       map[pb.ServerAddress]*ClusterNode
    filersLock   sync.RWMutex
    filerLeaders *Leaders
    brokers      map[pb.ServerAddress]*ClusterNode
    brokersLock  sync.RWMutex
}

func NewCluster() *Cluster {
    return &Cluster{
        filers:       make(map[pb.ServerAddress]*ClusterNode),
        filerLeaders: &Leaders{},
        brokers:      make(map[pb.ServerAddress]*ClusterNode),
    }
}

func (cluster *Cluster) AddClusterNode(nodeType string, address pb.ServerAddress, version string) []*master_pb.KeepConnectedResponse {
    switch nodeType {
    case FilerType:
        cluster.filersLock.Lock()
        defer cluster.filersLock.Unlock()
        if existingNode, found := cluster.filers[address]; found {
            existingNode.counter++
            return nil
        }
        cluster.filers[address] = &ClusterNode{
            Address:   address,
            Version:   version,
            counter:   1,
            createdTs: time.Now(),
        }
        return cluster.ensureFilerLeaders(true, nodeType, address)
    case BrokerType:
        cluster.brokersLock.Lock()
        defer cluster.brokersLock.Unlock()
        if existingNode, found := cluster.brokers[address]; found {
            existingNode.counter++
            return nil
        }
        cluster.brokers[address] = &ClusterNode{
            Address:   address,
            Version:   version,
            counter:   1,
            createdTs: time.Now(),
        }
        return []*master_pb.KeepConnectedResponse{
            {
                ClusterNodeUpdate: &master_pb.ClusterNodeUpdate{
                    NodeType: nodeType,
                    Address:  string(address),
                    IsAdd:    true,
                },
            },
        }
    case MasterType:
    }
    return nil
}

func (cluster *Cluster) RemoveClusterNode(nodeType string, address pb.ServerAddress) []*master_pb.KeepConnectedResponse {
    switch nodeType {
    case FilerType:
        cluster.filersLock.Lock()
        defer cluster.filersLock.Unlock()
        if existingNode, found := cluster.filers[address]; !found {
            return nil
        } else {
            existingNode.counter--
            if existingNode.counter <= 0 {
                delete(cluster.filers, address)
                return cluster.ensureFilerLeaders(false, nodeType, address)
            }
        }
    case BrokerType:
        cluster.brokersLock.Lock()
        defer cluster.brokersLock.Unlock()
        if existingNode, found := cluster.brokers[address]; !found {
            return nil
        } else {
            existingNode.counter--
            if existingNode.counter <= 0 {
                delete(cluster.brokers, address)
                return []*master_pb.KeepConnectedResponse{
                    {
                        ClusterNodeUpdate: &master_pb.ClusterNodeUpdate{
                            NodeType: nodeType,
                            Address:  string(address),
                            IsAdd:    false,
                        },
                    },
                }
            }
        }
    case MasterType:
    }
    return nil
}

func (cluster *Cluster) ListClusterNode(nodeType string) (nodes []*ClusterNode) {
    switch nodeType {
    case FilerType:
        cluster.filersLock.RLock()
        defer cluster.filersLock.RUnlock()
        for _, node := range cluster.filers {
            nodes = append(nodes, node)
        }
    case BrokerType:
        cluster.brokersLock.RLock()
        defer cluster.brokersLock.RUnlock()
        for _, node := range cluster.brokers {
            nodes = append(nodes, node)
        }
    case MasterType:
    }
    return
}

func (cluster *Cluster) IsOneLeader(address pb.ServerAddress) bool {
    return cluster.filerLeaders.isOneLeader(address)
}

func (cluster *Cluster) ensureFilerLeaders(isAdd bool, nodeType string, address pb.ServerAddress) (result []*master_pb.KeepConnectedResponse) {
    if isAdd {
        if cluster.filerLeaders.addLeaderIfVacant(address) {
            // has added the address as one leader
            result = append(result, &master_pb.KeepConnectedResponse{
                ClusterNodeUpdate: &master_pb.ClusterNodeUpdate{
                    NodeType: nodeType,
                    Address:  string(address),
                    IsLeader: true,
                    IsAdd:    true,
                },
            })
        } else {
            result = append(result, &master_pb.KeepConnectedResponse{
                ClusterNodeUpdate: &master_pb.ClusterNodeUpdate{
                    NodeType: nodeType,
                    Address:  string(address),
                    IsLeader: false,
                    IsAdd:    true,
                },
            })
        }
    } else {
        if cluster.filerLeaders.removeLeaderIfExists(address) {

            result = append(result, &master_pb.KeepConnectedResponse{
                ClusterNodeUpdate: &master_pb.ClusterNodeUpdate{
                    NodeType: nodeType,
                    Address:  string(address),
                    IsLeader: true,
                    IsAdd:    false,
                },
            })

            // pick the freshest one, since it is less likely to go away
            var shortestDuration int64 = math.MaxInt64
            now := time.Now()
            var candidateAddress pb.ServerAddress
            for _, node := range cluster.filers {
                if cluster.filerLeaders.isOneLeader(node.Address) {
                    continue
                }
                duration := now.Sub(node.createdTs).Nanoseconds()
                if duration < shortestDuration {
                    shortestDuration = duration
                    candidateAddress = node.Address
                }
            }
            if candidateAddress != "" {
                cluster.filerLeaders.addLeaderIfVacant(candidateAddress)
                // added a new leader
                result = append(result, &master_pb.KeepConnectedResponse{
                    ClusterNodeUpdate: &master_pb.ClusterNodeUpdate{
                        NodeType: nodeType,
                        Address:  string(candidateAddress),
                        IsLeader: true,
                        IsAdd:    true,
                    },
                })
            }
        } else {
            result = append(result, &master_pb.KeepConnectedResponse{
                ClusterNodeUpdate: &master_pb.ClusterNodeUpdate{
                    NodeType: nodeType,
                    Address:  string(address),
                    IsLeader: false,
                    IsAdd:    false,
                },
            })
        }
    }
    return
}

func (leaders *Leaders) addLeaderIfVacant(address pb.ServerAddress) (hasChanged bool) {
    if leaders.isOneLeader(address) {
        return
    }
    for i := 0; i < len(leaders.leaders); i++ {
        if leaders.leaders[i] == "" {
            leaders.leaders[i] = address
            hasChanged = true
            return
        }
    }
    return
}
func (leaders *Leaders) removeLeaderIfExists(address pb.ServerAddress) (hasChanged bool) {
    if !leaders.isOneLeader(address) {
        return
    }
    for i := 0; i < len(leaders.leaders); i++ {
        if leaders.leaders[i] == address {
            leaders.leaders[i] = ""
            hasChanged = true
            return
        }
    }
    return
}
func (leaders *Leaders) isOneLeader(address pb.ServerAddress) bool {
    for i := 0; i < len(leaders.leaders); i++ {
        if leaders.leaders[i] == address {
            return true
        }
    }
    return false
}
func (leaders *Leaders) GetLeaders() (addresses []pb.ServerAddress) {
    for i := 0; i < len(leaders.leaders); i++ {
        if leaders.leaders[i] != "" {
            addresses = append(addresses, leaders.leaders[i])
        }
    }
    return
}
weed/cluster/cluster_test.go (new file, 47 lines)

@@ -0,0 +1,47 @@
package cluster

import (
    "github.com/chrislusf/seaweedfs/weed/pb"
    "github.com/stretchr/testify/assert"
    "testing"
)

func TestClusterAddRemoveNodes(t *testing.T) {
    c := NewCluster()

    c.AddClusterNode("filer", pb.ServerAddress("111:1"), "23.45")
    c.AddClusterNode("filer", pb.ServerAddress("111:2"), "23.45")
    assert.Equal(t, []pb.ServerAddress{
        pb.ServerAddress("111:1"),
        pb.ServerAddress("111:2"),
    }, c.filerLeaders.GetLeaders())

    c.AddClusterNode("filer", pb.ServerAddress("111:3"), "23.45")
    c.AddClusterNode("filer", pb.ServerAddress("111:4"), "23.45")
    assert.Equal(t, []pb.ServerAddress{
        pb.ServerAddress("111:1"),
        pb.ServerAddress("111:2"),
        pb.ServerAddress("111:3"),
    }, c.filerLeaders.GetLeaders())

    c.AddClusterNode("filer", pb.ServerAddress("111:5"), "23.45")
    c.AddClusterNode("filer", pb.ServerAddress("111:6"), "23.45")
    c.RemoveClusterNode("filer", pb.ServerAddress("111:4"))
    assert.Equal(t, []pb.ServerAddress{
        pb.ServerAddress("111:1"),
        pb.ServerAddress("111:2"),
        pb.ServerAddress("111:3"),
    }, c.filerLeaders.GetLeaders())

    // remove oldest
    c.RemoveClusterNode("filer", pb.ServerAddress("111:1"))
    assert.Equal(t, []pb.ServerAddress{
        pb.ServerAddress("111:6"),
        pb.ServerAddress("111:2"),
        pb.ServerAddress("111:3"),
    }, c.filerLeaders.GetLeaders())

    // remove oldest
    c.RemoveClusterNode("filer", pb.ServerAddress("111:1"))

}
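The test above exercises the three-slot leader list; what it does not show is the counter field in ClusterNode, which reference-counts duplicate registrations of the same address so a node is only dropped once every registration has a matching removal. A standalone sketch of that bookkeeping (hypothetical names, not part of this commit):

package main

import "fmt"

// registry mirrors the refcounting that AddClusterNode/RemoveClusterNode
// perform per address, stripped of locks and leader handling.
type registry struct {
    counts map[string]int
}

func (r *registry) add(addr string) (isNew bool) {
    r.counts[addr]++
    return r.counts[addr] == 1
}

func (r *registry) remove(addr string) (isGone bool) {
    if _, found := r.counts[addr]; !found {
        return false
    }
    r.counts[addr]--
    if r.counts[addr] <= 0 {
        delete(r.counts, addr)
        return true
    }
    return false
}

func main() {
    r := &registry{counts: map[string]int{}}
    fmt.Println(r.add("filer-1:8888"))    // true: first registration
    fmt.Println(r.add("filer-1:8888"))    // false: duplicate, refcount now 2
    fmt.Println(r.remove("filer-1:8888")) // false: still registered once
    fmt.Println(r.remove("filer-1:8888")) // true: last reference removed
}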
@@ -4,7 +4,6 @@ import (
 	"fmt"
 	"net/http"
 	"os"
-	"strings"
 	"time"
 
 	"google.golang.org/grpc/reflection"

@@ -46,7 +45,6 @@ type FilerOptions struct {
 	enableNotification      *bool
 	disableHttp             *bool
 	cipher                  *bool
-	peers                   *string
 	metricsHttpPort         *int
 	saveToFilerLimit        *int
 	defaultLevelDbDirectory *string

@@ -72,7 +70,6 @@ func init() {
 	f.rack = cmdFiler.Flag.String("rack", "", "prefer to write to volumes in this rack")
 	f.disableHttp = cmdFiler.Flag.Bool("disableHttp", false, "disable http request, only gRpc operations are allowed")
 	f.cipher = cmdFiler.Flag.Bool("encryptVolumeData", false, "encrypt data on volume servers")
-	f.peers = cmdFiler.Flag.String("peers", "", "all filers sharing the same filer store in comma separated ip:port list")
 	f.metricsHttpPort = cmdFiler.Flag.Int("metricsPort", 0, "Prometheus metrics listen port")
 	f.saveToFilerLimit = cmdFiler.Flag.Int("saveToFilerLimit", 0, "files smaller than this limit will be saved in filer store")
 	f.defaultLevelDbDirectory = cmdFiler.Flag.String("defaultStoreDir", ".", "if filer.toml is empty, use an embedded filer store in the directory")

@@ -186,11 +183,6 @@ func (fo *FilerOptions) startFiler() {
 
 	defaultLevelDbDirectory := util.ResolvePath(*fo.defaultLevelDbDirectory + "/filerldb2")
 
-	var peers []string
-	if *fo.peers != "" {
-		peers = strings.Split(*fo.peers, ",")
-	}
-
 	filerAddress := pb.NewServerAddress(*fo.ip, *fo.port, *fo.portGrpc)
 
 	fs, nfs_err := weed_server.NewFilerServer(defaultMux, publicVolumeMux, &weed_server.FilerOption{

@@ -207,7 +199,6 @@ func (fo *FilerOptions) startFiler() {
 		Host:                  filerAddress,
 		Cipher:                *fo.cipher,
 		SaveToFilerLimit:      int64(*fo.saveToFilerLimit),
-		Filers:                pb.FromAddressStrings(peers),
 		ConcurrentUploadLimit: int64(*fo.concurrentUploadLimitMB) * 1024 * 1024,
 	})
 	if nfs_err != nil {
@@ -30,6 +30,8 @@ var cmdFilerMetaTail = &Command{
 	weed filer.meta.tail -timeAgo=30h | jq .
 	weed filer.meta.tail -timeAgo=30h | jq .eventNotification.newEntry.name
 
+	weed filer.meta.tail -timeAgo=30h -es=http://<elasticSearchServerHost>:<port> -es.index=seaweedfs
+
   `,
 }
 
@@ -106,7 +106,6 @@ func init() {
 	filerOptions.maxMB = cmdServer.Flag.Int("filer.maxMB", 4, "split files larger than the limit")
 	filerOptions.dirListingLimit = cmdServer.Flag.Int("filer.dirListLimit", 1000, "limit sub dir listing size")
 	filerOptions.cipher = cmdServer.Flag.Bool("filer.encryptVolumeData", false, "encrypt data on volume servers")
-	filerOptions.peers = cmdServer.Flag.String("filer.peers", "", "all filers sharing the same filer store in comma separated ip:port list")
 	filerOptions.saveToFilerLimit = cmdServer.Flag.Int("filer.saveToFilerLimit", 0, "Small files smaller than this limit can be cached in filer store.")
 	filerOptions.concurrentUploadLimitMB = cmdServer.Flag.Int("filer.concurrentUploadLimitMB", 64, "limit total concurrent upload size")
 
@@ -37,7 +37,7 @@ func runShell(command *Command, args []string) bool {
 	util.LoadConfiguration("security", false)
 	shellOptions.GrpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client")
 
-	if *shellOptions.Masters == "" && *shellInitialFiler == "" {
+	if *shellOptions.Masters == "" {
 		util.LoadConfiguration("shell", false)
 		v := util.GetViper()
 		cluster := v.GetString("cluster.default")

@@ -45,15 +45,14 @@ func runShell(command *Command, args []string) bool {
 			cluster = *shellCluster
 		}
 		if cluster == "" {
-			*shellOptions.Masters, *shellInitialFiler = "localhost:9333", "localhost:8888"
+			*shellOptions.Masters = "localhost:9333"
 		} else {
 			*shellOptions.Masters = v.GetString("cluster." + cluster + ".master")
 			*shellInitialFiler = v.GetString("cluster." + cluster + ".filer")
-			fmt.Printf("master: %s filer: %s\n", *shellOptions.Masters, *shellInitialFiler)
 		}
 	}
 
+	fmt.Printf("master: %s filer: %s\n", *shellOptions.Masters, *shellInitialFiler)
+
 	shellOptions.FilerAddress = pb.ServerAddress(*shellInitialFiler)
 	shellOptions.Directory = "/"
 
@@ -6,6 +6,7 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/wdclient"
 	"io"
 	"math"
+	"math/rand"
 	"net/url"
 	"strings"
 	"time"

@@ -142,6 +143,9 @@ func retriedStreamFetchChunkData(writer io.Writer, urlStrings []string, cipherKe
 	var shouldRetry bool
 	var totalWritten int
 
+	rand.Shuffle(len(urlStrings), func(i, j int) {
+		urlStrings[i], urlStrings[j] = urlStrings[j], urlStrings[i]
+	})
 	for waitTime := time.Second; waitTime < util.RetryWaitTime; waitTime += waitTime / 2 {
 		for _, urlString := range urlStrings {
 			var localProcesed int
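The added rand.Shuffle spreads chunk reads across replica URLs instead of always hitting the first candidate, so retries do not hammer one volume server. A self-contained Go sketch of the same shuffle-then-retry pattern (not part of this commit; fetchWithRetry and the 10-second cap are illustrative):

package main

import (
    "fmt"
    "math/rand"
    "time"
)

// fetchWithRetry shuffles the candidate URLs once, then walks them in
// rounds with a growing wait between rounds, as the diff above does.
func fetchWithRetry(urls []string, fetch func(string) error) error {
    rand.Shuffle(len(urls), func(i, j int) {
        urls[i], urls[j] = urls[j], urls[i]
    })
    var lastErr error
    for waitTime := time.Second; waitTime < 10*time.Second; waitTime += waitTime / 2 {
        for _, u := range urls {
            if lastErr = fetch(u); lastErr == nil {
                return nil
            }
        }
        time.Sleep(waitTime)
    }
    return lastErr
}

func main() {
    err := fetchWithRetry([]string{"http://replica-a", "http://replica-b"}, func(u string) error {
        fmt.Println("trying", u)
        return fmt.Errorf("unreachable")
    })
    fmt.Println(err)
}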
@@ -40,7 +40,7 @@ func readResolvedChunks(chunks []*filer_pb.FileChunk) (visibles []VisibleInterval) {
 	for _, point := range points {
 		if point.isStart {
 			if len(queue) > 0 {
-				lastIndex := len(queue) -1
+				lastIndex := len(queue) - 1
 				lastPoint := queue[lastIndex]
 				if point.x != prevX && lastPoint.ts < point.ts {
 					visibles = addToVisibles(visibles, prevX, lastPoint, point)
@@ -52,7 +52,7 @@ func TestReadResolvedChunks(t *testing.T) {
 
 func TestRandomizedReadResolvedChunks(t *testing.T) {
 
-	var limit int64 = 1024*1024
+	var limit int64 = 1024 * 1024
 	array := make([]int64, limit)
 	var chunks []*filer_pb.FileChunk
 	for ts := int64(0); ts < 1024; ts++ {

@@ -75,7 +75,7 @@ func TestRandomizedReadResolvedChunks(t *testing.T) {
 	visibles := readResolvedChunks(chunks)
 
 	for _, visible := range visibles {
-		for i := visible.start; i<visible.stop;i++{
+		for i := visible.start; i < visible.stop; i++ {
 			if array[i] != visible.modifiedTime {
 				t.Errorf("position %d expected ts %d actual ts %d", i, array[i], visible.modifiedTime)
 			}

@@ -101,12 +101,12 @@ func randomWrite(array []int64, start int64, size int64, ts int64) *filer_pb.FileChunk {
 
 func TestSequentialReadResolvedChunks(t *testing.T) {
 
-	var chunkSize int64 = 1024*1024*2
+	var chunkSize int64 = 1024 * 1024 * 2
 	var chunks []*filer_pb.FileChunk
 	for ts := int64(0); ts < 13; ts++ {
 		chunks = append(chunks, &filer_pb.FileChunk{
 			FileId: "",
-			Offset: chunkSize*ts,
+			Offset: chunkSize * ts,
 			Size:   uint64(chunkSize),
 			Mtime:  1,
 		})
@@ -3,7 +3,9 @@ package filer
 import (
 	"context"
 	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/cluster"
 	"github.com/chrislusf/seaweedfs/weed/pb"
+	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
 	"os"
 	"strings"
 	"time"

@@ -50,7 +52,7 @@ type Filer struct {
 func NewFiler(masters []pb.ServerAddress, grpcDialOption grpc.DialOption,
 	filerHost pb.ServerAddress, collection string, replication string, dataCenter string, notifyFn func()) *Filer {
 	f := &Filer{
-		MasterClient:        wdclient.NewMasterClient(grpcDialOption, "filer", filerHost, dataCenter, masters),
+		MasterClient:        wdclient.NewMasterClient(grpcDialOption, cluster.FilerType, filerHost, dataCenter, masters),
 		fileIdDeletionQueue: util.NewUnboundedQueue(),
 		GrpcDialOption:      grpcDialOption,
 		FilerConf:           NewFilerConf(),

@@ -66,22 +68,38 @@ func NewFiler(masters []pb.ServerAddress, grpcDialOption grpc.DialOption,
 	return f
 }
 
-func (f *Filer) AggregateFromPeers(self pb.ServerAddress, filers []pb.ServerAddress) {
-
-	// set peers
-	found := false
-	for _, peer := range filers {
-		if peer == self {
-			found = true
-		}
-	}
-	if !found {
-		filers = append(filers, self)
-	}
-
-	f.MetaAggregator = NewMetaAggregator(filers, f.GrpcDialOption)
-	f.MetaAggregator.StartLoopSubscribe(f, self)
+func (f *Filer) AggregateFromPeers(self pb.ServerAddress) {
+
+	f.MetaAggregator = NewMetaAggregator(f, self, f.GrpcDialOption)
+	f.MasterClient.OnPeerUpdate = f.MetaAggregator.OnPeerUpdate
+
+	for _, peerUpdate := range f.ListExistingPeerUpdates() {
+		f.MetaAggregator.OnPeerUpdate(peerUpdate)
+	}
+
+}
+
+func (f *Filer) ListExistingPeerUpdates() (existingNodes []*master_pb.ClusterNodeUpdate) {
 
+	if grpcErr := pb.WithMasterClient(f.MasterClient.GetMaster(), f.GrpcDialOption, func(client master_pb.SeaweedClient) error {
+		resp, err := client.ListClusterNodes(context.Background(), &master_pb.ListClusterNodesRequest{
+			ClientType: cluster.FilerType,
+		})
+
+		glog.V(0).Infof("the cluster has %d filers\n", len(resp.ClusterNodes))
+		for _, node := range resp.ClusterNodes {
+			existingNodes = append(existingNodes, &master_pb.ClusterNodeUpdate{
+				NodeType: cluster.FilerType,
+				Address:  node.Address,
+				IsLeader: node.IsLeader,
+				IsAdd:    true,
+			})
+		}
+		return err
+	}); grpcErr != nil {
+		glog.V(0).Infof("connect to %s: %v", f.MasterClient.GetMaster(), grpcErr)
+	}
+	return
 }
 
 func (f *Filer) SetStore(store FilerStore) {

@@ -117,7 +135,7 @@ func (fs *Filer) GetMaster() pb.ServerAddress {
 	return fs.MasterClient.GetMaster()
 }
 
-func (fs *Filer) KeepConnectedToMaster() {
+func (fs *Filer) KeepMasterClientConnected() {
 	fs.MasterClient.KeepConnectedToMaster()
 }
 
@@ -15,6 +15,9 @@ const (
 	MsgFailDelNonEmptyFolder = "fail to delete non-empty folder"
 )
 
+type OnChunksFunc func([]*filer_pb.FileChunk) error
+type OnHardLinkIdsFunc func([]HardLinkId) error
+
 func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p util.FullPath, isRecursive, ignoreRecursiveError, shouldDeleteChunks, isFromOtherCluster bool, signatures []int32) (err error) {
 	if p == "/" {
 		return nil

@@ -27,20 +30,29 @@ func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p util.FullPath, isRecursive,
 
 	isDeleteCollection := f.isBucket(entry)
 
-	var chunks []*filer_pb.FileChunk
-	var hardLinkIds []HardLinkId
-	chunks = append(chunks, entry.Chunks...)
 	if entry.IsDirectory() {
 		// delete the folder children, not including the folder itself
-		var dirChunks []*filer_pb.FileChunk
-		var dirHardLinkIds []HardLinkId
-		dirChunks, dirHardLinkIds, err = f.doBatchDeleteFolderMetaAndData(ctx, entry, isRecursive, ignoreRecursiveError, shouldDeleteChunks && !isDeleteCollection, isDeleteCollection, isFromOtherCluster, signatures)
+		err = f.doBatchDeleteFolderMetaAndData(ctx, entry, isRecursive, ignoreRecursiveError, shouldDeleteChunks && !isDeleteCollection, isDeleteCollection, isFromOtherCluster, signatures, func(chunks []*filer_pb.FileChunk) error {
+			if shouldDeleteChunks && !isDeleteCollection {
+				f.DirectDeleteChunks(chunks)
+			}
+			return nil
+		}, func(hardLinkIds []HardLinkId) error {
+			// A case not handled:
+			// what if the chunk is in a different collection?
+			if shouldDeleteChunks {
+				f.maybeDeleteHardLinks(hardLinkIds)
+			}
+			return nil
+		})
 		if err != nil {
 			glog.V(0).Infof("delete directory %s: %v", p, err)
 			return fmt.Errorf("delete directory %s: %v", p, err)
 		}
-		chunks = append(chunks, dirChunks...)
-		hardLinkIds = append(hardLinkIds, dirHardLinkIds...)
 	}
 
+	if shouldDeleteChunks && !isDeleteCollection {
+		f.DirectDeleteChunks(entry.Chunks)
+	}
+
 	// delete the file or folder

@@ -49,15 +61,6 @@ func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p util.FullPath, isRecursive,
 		return fmt.Errorf("delete file %s: %v", p, err)
 	}
 
-	if shouldDeleteChunks && !isDeleteCollection {
-		f.DirectDeleteChunks(chunks)
-	}
-	// A case not handled:
-	// what if the chunk is in a different collection?
-	if shouldDeleteChunks {
-		f.maybeDeleteHardLinks(hardLinkIds)
-	}
-
 	if isDeleteCollection {
 		collectionName := entry.Name()
 		f.doDeleteCollection(collectionName)

@@ -67,7 +70,7 @@ func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p util.FullPath, isRecursive,
 	return nil
 }
 
-func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry, isRecursive, ignoreRecursiveError, shouldDeleteChunks, isDeletingBucket, isFromOtherCluster bool, signatures []int32) (chunks []*filer_pb.FileChunk, hardlinkIds []HardLinkId, err error) {
+func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry, isRecursive, ignoreRecursiveError, shouldDeleteChunks, isDeletingBucket, isFromOtherCluster bool, signatures []int32, onChunksFn OnChunksFunc, onHardLinkIdsFn OnHardLinkIdsFunc) (err error) {
 
 	lastFileName := ""
 	includeLastFile := false

@@ -76,34 +79,30 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry,
 		entries, _, err := f.ListDirectoryEntries(ctx, entry.FullPath, lastFileName, includeLastFile, PaginationSize, "", "", "")
 		if err != nil {
 			glog.Errorf("list folder %s: %v", entry.FullPath, err)
-			return nil, nil, fmt.Errorf("list folder %s: %v", entry.FullPath, err)
+			return fmt.Errorf("list folder %s: %v", entry.FullPath, err)
 		}
 		if lastFileName == "" && !isRecursive && len(entries) > 0 {
 			// only for first iteration in the loop
-			glog.Errorf("deleting a folder %s has children: %+v ...", entry.FullPath, entries[0].Name())
-			return nil, nil, fmt.Errorf("%s: %s", MsgFailDelNonEmptyFolder, entry.FullPath)
+			glog.V(0).Infof("deleting a folder %s has children: %+v ...", entry.FullPath, entries[0].Name())
+			return fmt.Errorf("%s: %s", MsgFailDelNonEmptyFolder, entry.FullPath)
 		}
 
 		for _, sub := range entries {
 			lastFileName = sub.Name()
-			var dirChunks []*filer_pb.FileChunk
-			var dirHardLinkIds []HardLinkId
 			if sub.IsDirectory() {
 				subIsDeletingBucket := f.isBucket(sub)
-				dirChunks, dirHardLinkIds, err = f.doBatchDeleteFolderMetaAndData(ctx, sub, isRecursive, ignoreRecursiveError, shouldDeleteChunks, subIsDeletingBucket, false, nil)
-				chunks = append(chunks, dirChunks...)
-				hardlinkIds = append(hardlinkIds, dirHardLinkIds...)
+				err = f.doBatchDeleteFolderMetaAndData(ctx, sub, isRecursive, ignoreRecursiveError, shouldDeleteChunks, subIsDeletingBucket, false, nil, onChunksFn, onHardLinkIdsFn)
 			} else {
 				f.NotifyUpdateEvent(ctx, sub, nil, shouldDeleteChunks, isFromOtherCluster, nil)
 				if len(sub.HardLinkId) != 0 {
 					// hard link chunk data are deleted separately
-					hardlinkIds = append(hardlinkIds, sub.HardLinkId)
+					err = onHardLinkIdsFn([]HardLinkId{sub.HardLinkId})
 				} else {
-					chunks = append(chunks, sub.Chunks...)
+					err = onChunksFn(sub.Chunks)
 				}
 			}
 			if err != nil && !ignoreRecursiveError {
-				return nil, nil, err
+				return err
 			}
 		}
 

@@ -113,15 +112,15 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry,
 		}
 	}
 
-	glog.V(3).Infof("deleting directory %v delete %d chunks: %v", entry.FullPath, len(chunks), shouldDeleteChunks)
+	glog.V(3).Infof("deleting directory %v delete chunks: %v", entry.FullPath, shouldDeleteChunks)
 
 	if storeDeletionErr := f.Store.DeleteFolderChildren(ctx, entry.FullPath); storeDeletionErr != nil {
-		return nil, nil, fmt.Errorf("filer store delete: %v", storeDeletionErr)
+		return fmt.Errorf("filer store delete: %v", storeDeletionErr)
 	}
 
 	f.NotifyUpdateEvent(ctx, entry, nil, shouldDeleteChunks, isFromOtherCluster, signatures)
 
-	return chunks, hardlinkIds, nil
+	return nil
 }
 
 func (f *Filer) doDeleteEntryMetaAndData(ctx context.Context, entry *Entry, shouldDeleteChunks bool, isFromOtherCluster bool, signatures []int32) (err error) {
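The refactor above stops accumulating every chunk and hard-link id of a recursive delete into slices and instead streams each batch to callbacks (OnChunksFunc, OnHardLinkIdsFunc) as directory entries are listed, which bounds memory on huge trees. A reduced Go sketch of the shape of that change (hypothetical types, not the filer's real ones):

package main

import "fmt"

type chunk string

type dir struct {
    chunks []chunk
    subs   []*dir
}

type onChunksFunc func([]chunk) error

// deleteFolder hands each folder's chunks to the callback immediately
// rather than returning an ever-growing result slice from the recursion.
func deleteFolder(d *dir, onChunks onChunksFunc) error {
    for _, sub := range d.subs {
        if err := deleteFolder(sub, onChunks); err != nil {
            return err
        }
    }
    return onChunks(d.chunks)
}

func main() {
    root := &dir{
        chunks: []chunk{"c1"},
        subs:   []*dir{{chunks: []chunk{"c2", "c3"}}},
    }
    deleteFolder(root, func(cs []chunk) error {
        fmt.Println("deleting", cs) // delete as found, nothing retained
        return nil
    })
}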
@@ -3,6 +3,8 @@ package filer
 import (
 	"context"
 	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/cluster"
+	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
 	"github.com/chrislusf/seaweedfs/weed/util"
 	"io"
 	"sync"

@@ -18,9 +20,13 @@ import (
 )
 
 type MetaAggregator struct {
-	filers         []pb.ServerAddress
-	grpcDialOption grpc.DialOption
-	MetaLogBuffer  *log_buffer.LogBuffer
+	filer           *Filer
+	self            pb.ServerAddress
+	isLeader        bool
+	grpcDialOption  grpc.DialOption
+	MetaLogBuffer   *log_buffer.LogBuffer
+	peerStatues     map[pb.ServerAddress]struct{}
+	peerStatuesLock sync.Mutex
 	// notifying clients
 	ListenersLock sync.Mutex
 	ListenersCond *sync.Cond

@@ -28,10 +34,12 @@ type MetaAggregator struct {
 
 // MetaAggregator only aggregates data "on the fly". The logs are not re-persisted to disk.
 // The old data comes from what each LocalMetadata persisted on disk.
-func NewMetaAggregator(filers []pb.ServerAddress, grpcDialOption grpc.DialOption) *MetaAggregator {
+func NewMetaAggregator(filer *Filer, self pb.ServerAddress, grpcDialOption grpc.DialOption) *MetaAggregator {
 	t := &MetaAggregator{
-		filers:         filers,
+		filer:          filer,
+		self:           self,
 		grpcDialOption: grpcDialOption,
+		peerStatues:    make(map[pb.ServerAddress]struct{}),
 	}
 	t.ListenersCond = sync.NewCond(&t.ListenersLock)
 	t.MetaLogBuffer = log_buffer.NewLogBuffer("aggr", LogFlushInterval, nil, func() {

@@ -40,10 +48,35 @@ func NewMetaAggregator(filer *Filer, self pb.ServerAddress, grpcDialOption grpc.DialOption) *MetaAggregator {
 	return t
 }
 
-func (ma *MetaAggregator) StartLoopSubscribe(f *Filer, self pb.ServerAddress) {
-	for _, filer := range ma.filers {
-		go ma.subscribeToOneFiler(f, self, filer)
+func (ma *MetaAggregator) OnPeerUpdate(update *master_pb.ClusterNodeUpdate) {
+	if update.NodeType != cluster.FilerType {
+		return
+	}
+
+	address := pb.ServerAddress(update.Address)
+	if update.IsAdd {
+		// every filer should subscribe to a new filer
+		ma.setActive(address, true)
+		go ma.subscribeToOneFiler(ma.filer, ma.self, address)
+	} else {
+		ma.setActive(address, false)
+	}
+}
+
+func (ma *MetaAggregator) setActive(address pb.ServerAddress, isActive bool) {
+	ma.peerStatuesLock.Lock()
+	defer ma.peerStatuesLock.Unlock()
+	if isActive {
+		ma.peerStatues[address] = struct{}{}
+	} else {
+		delete(ma.peerStatues, address)
+	}
+}
+func (ma *MetaAggregator) isActive(address pb.ServerAddress) (isActive bool) {
+	ma.peerStatuesLock.Lock()
+	defer ma.peerStatuesLock.Unlock()
+	_, isActive = ma.peerStatues[address]
+	return
 }
 
 func (ma *MetaAggregator) subscribeToOneFiler(f *Filer, self pb.ServerAddress, peer pb.ServerAddress) {

@@ -149,6 +182,10 @@ func (ma *MetaAggregator) subscribeToOneFiler(f *Filer, self pb.ServerAddress, peer pb.ServerAddress) {
 
 			}
 		})
+		if !ma.isActive(peer) {
+			glog.V(0).Infof("stop subscribing remote %s meta change", peer)
+			return
+		}
 		if err != nil {
 			glog.V(0).Infof("subscribing remote %s meta change: %v", peer, err)
 			time.Sleep(1733 * time.Millisecond)
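With this change a filer no longer subscribes to a static -peers list: the master pushes ClusterNodeUpdate events, OnPeerUpdate starts a subscription goroutine per added filer, and the peerStatues set is the liveness flag that lets a goroutine exit once its peer is removed. A stripped-down Go sketch of that start/stop lifecycle (hypothetical names, not part of this commit):

package main

import (
    "fmt"
    "sync"
    "time"
)

type watcher struct {
    mu     sync.Mutex
    active map[string]bool
}

func (w *watcher) setActive(peer string, on bool) {
    w.mu.Lock()
    defer w.mu.Unlock()
    if on {
        w.active[peer] = true
    } else {
        delete(w.active, peer)
    }
}

func (w *watcher) isActive(peer string) bool {
    w.mu.Lock()
    defer w.mu.Unlock()
    return w.active[peer]
}

// onPeerUpdate mirrors the add/remove handling above: an add marks the
// peer active and starts a loop; a removal clears the flag, and the loop
// notices and returns.
func (w *watcher) onPeerUpdate(peer string, isAdd bool) {
    if isAdd {
        w.setActive(peer, true)
        go func() {
            for w.isActive(peer) {
                fmt.Println("subscribed to", peer)
                time.Sleep(100 * time.Millisecond)
            }
            fmt.Println("stopped subscribing to", peer)
        }()
    } else {
        w.setActive(peer, false)
    }
}

func main() {
    w := &watcher{active: map[string]bool{}}
    w.onPeerUpdate("filer-2:8888", true)
    time.Sleep(250 * time.Millisecond)
    w.onPeerUpdate("filer-2:8888", false)
    time.Sleep(200 * time.Millisecond)
}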
@@ -25,9 +25,9 @@ func MapRemoteStorageLocationPathToFullPath(localMountedDir util.FullPath, remoteMountedLocation *remote_pb.RemoteStorageLocation,
 	return localMountedDir.Child(remoteLocationPath[len(remoteMountedLocation.Path):])
 }
 
-func DownloadToLocal(filerClient filer_pb.FilerClient, remoteConf *remote_pb.RemoteConf, remoteLocation *remote_pb.RemoteStorageLocation, parent util.FullPath, entry *filer_pb.Entry) error {
+func CacheRemoteObjectToLocalCluster(filerClient filer_pb.FilerClient, remoteConf *remote_pb.RemoteConf, remoteLocation *remote_pb.RemoteStorageLocation, parent util.FullPath, entry *filer_pb.Entry) error {
 	return filerClient.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
-		_, err := client.DownloadToLocal(context.Background(), &filer_pb.DownloadToLocalRequest{
+		_, err := client.CacheRemoteObjectToLocalCluster(context.Background(), &filer_pb.CacheRemoteObjectToLocalClusterRequest{
 			Directory: string(parent),
 			Name:      entry.Name,
 		})
@@ -134,8 +134,8 @@ func (store *UniversalRedis2Store) DeleteFolderChildren(ctx context.Context, fullpath util.FullPath) (err error) {
 	}
 
 	members, err := store.Client.ZRangeByLex(ctx, genDirectoryListKey(string(fullpath)), &redis.ZRangeBy{
-		Min:    "-",
-		Max:    "+",
+		Min: "-",
+		Max: "+",
 	}).Result()
 	if err != nil {
 		return fmt.Errorf("DeleteFolderChildren %s : %v", fullpath, err)
@@ -67,7 +67,6 @@ func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirectory fs.Node) error {
 	return nil
 }
 
-
 func (dir *Dir) handleRenameResponse(ctx context.Context, resp *filer_pb.StreamRenameEntryResponse) error {
 	// comes from filer StreamRenameEntry, can only be create or delete entry
 

@@ -119,7 +118,7 @@ func (dir *Dir) handleRenameResponse(ctx context.Context, resp *filer_pb.StreamRenameEntryResponse) error {
 		}
 		dir.wfs.handlesLock.Unlock()
 
-	}else if resp.EventNotification.OldEntry != nil {
+	} else if resp.EventNotification.OldEntry != nil {
 		// without new entry, only old entry name exists. This is the second step to delete old entry
 		if err := dir.wfs.metaCache.AtomicUpdateEntryFromFiler(ctx, util.NewFullPath(resp.Directory, resp.EventNotification.OldEntry.Name), nil); err != nil {
 			return err
@@ -364,15 +364,15 @@ func (file *File) getEntry() *filer_pb.Entry {
 func (file *File) downloadRemoteEntry(entry *filer_pb.Entry) (*filer_pb.Entry, error) {
 	err := file.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
 
-		request := &filer_pb.DownloadToLocalRequest{
+		request := &filer_pb.CacheRemoteObjectToLocalClusterRequest{
 			Directory: file.dir.FullPath(),
 			Name:      entry.Name,
 		}
 
 		glog.V(4).Infof("download entry: %v", request)
-		resp, err := client.DownloadToLocal(context.Background(), request)
+		resp, err := client.CacheRemoteObjectToLocalCluster(context.Background(), request)
 		if err != nil {
-			glog.Errorf("DownloadToLocal file %s/%s: %v", file.dir.FullPath(), file.Name, err)
+			glog.Errorf("CacheRemoteObjectToLocalCluster file %s/%s: %v", file.dir.FullPath(), file.Name, err)
 			return fuse.EIO
 		}
 
@@ -8,7 +8,7 @@ import (
 	"net/http"
 )
 
-func writeIamErrorResponse(w http.ResponseWriter, err error, object string, value string, msg error) {
+func writeIamErrorResponse(w http.ResponseWriter, r *http.Request, err error, object string, value string, msg error) {
 	errCode := err.Error()
 	errorResp := ErrorResponse{}
 	errorResp.Error.Type = "Sender"

@@ -22,10 +22,10 @@ func writeIamErrorResponse(w http.ResponseWriter, r *http.Request, err error, object string, value string, msg error) {
 	case iam.ErrCodeNoSuchEntityException:
 		msg := fmt.Sprintf("The %s with name %s cannot be found.", object, value)
 		errorResp.Error.Message = &msg
-		s3err.WriteXMLResponse(w, http.StatusNotFound, errorResp)
+		s3err.WriteXMLResponse(w, r, http.StatusNotFound, errorResp)
 	case iam.ErrCodeServiceFailureException:
-		s3err.WriteXMLResponse(w, http.StatusInternalServerError, errorResp)
+		s3err.WriteXMLResponse(w, r, http.StatusInternalServerError, errorResp)
 	default:
-		s3err.WriteXMLResponse(w, http.StatusInternalServerError, errorResp)
+		s3err.WriteXMLResponse(w, r, http.StatusInternalServerError, errorResp)
 	}
 }
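Threading *http.Request through writeIamErrorResponse and the s3err writers gives the shared response helpers access to request-scoped data; this commit only changes the plumbing, and what the s3err package does with the extra parameter is not shown here. A generic Go sketch of the pattern (hypothetical names; the access-logging use is an assumption):

package main

import (
    "fmt"
    "log"
    "net/http"
)

// writeXMLResponse receives the request alongside the writer so a shared
// helper can log or tag output with request-scoped details.
func writeXMLResponse(w http.ResponseWriter, r *http.Request, status int, body string) {
    log.Printf("%s %s -> %d", r.Method, r.URL.Path, status) // e.g. access logging
    w.WriteHeader(status)
    fmt.Fprint(w, body)
}

func handler(w http.ResponseWriter, r *http.Request) {
    writeXMLResponse(w, r, http.StatusOK, "<ok/>")
}

func main() {
    http.HandleFunc("/", handler)
    log.Fatal(http.ListenAndServe(":8080", nil))
}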
@@ -362,7 +362,7 @@ func (iama *IamApiServer) DeleteAccessKey(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) {
 
 func (iama *IamApiServer) DoActions(w http.ResponseWriter, r *http.Request) {
 	if err := r.ParseForm(); err != nil {
-		s3err.WriteErrorResponse(w, s3err.ErrInvalidRequest, r)
+		s3err.WriteErrorResponse(w, r, s3err.ErrInvalidRequest)
 		return
 	}
 	values := r.PostForm

@@ -370,7 +370,7 @@ func (iama *IamApiServer) DoActions(w http.ResponseWriter, r *http.Request) {
 	s3cfgLock.RLock()
 	s3cfg := &iam_pb.S3ApiConfiguration{}
 	if err := iama.s3ApiConfig.GetS3ApiConfiguration(s3cfg); err != nil {
-		s3err.WriteErrorResponse(w, s3err.ErrInternalError, r)
+		s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
 		return
 	}
 	s3cfgLock.RUnlock()

@@ -392,7 +392,7 @@ func (iama *IamApiServer) DoActions(w http.ResponseWriter, r *http.Request) {
 		userName := values.Get("UserName")
 		response, err = iama.GetUser(s3cfg, userName)
 		if err != nil {
-			writeIamErrorResponse(w, err, "user", userName, nil)
+			writeIamErrorResponse(w, r, err, "user", userName, nil)
 			return
 		}
 		changed = false

@@ -400,7 +400,7 @@ func (iama *IamApiServer) DoActions(w http.ResponseWriter, r *http.Request) {
 		userName := values.Get("UserName")
 		response, err = iama.DeleteUser(s3cfg, userName)
 		if err != nil {
-			writeIamErrorResponse(w, err, "user", userName, nil)
+			writeIamErrorResponse(w, r, err, "user", userName, nil)
 			return
 		}
 	case "CreateAccessKey":

@@ -411,33 +411,33 @@ func (iama *IamApiServer) DoActions(w http.ResponseWriter, r *http.Request) {
 		response, err = iama.CreatePolicy(s3cfg, values)
 		if err != nil {
 			glog.Errorf("CreatePolicy: %+v", err)
-			s3err.WriteErrorResponse(w, s3err.ErrInvalidRequest, r)
+			s3err.WriteErrorResponse(w, r, s3err.ErrInvalidRequest)
 			return
 		}
 	case "PutUserPolicy":
 		response, err = iama.PutUserPolicy(s3cfg, values)
 		if err != nil {
 			glog.Errorf("PutUserPolicy: %+v", err)
-			s3err.WriteErrorResponse(w, s3err.ErrInvalidRequest, r)
+			s3err.WriteErrorResponse(w, r, s3err.ErrInvalidRequest)
 			return
 		}
 	case "GetUserPolicy":
 		response, err = iama.GetUserPolicy(s3cfg, values)
 		if err != nil {
-			writeIamErrorResponse(w, err, "user", values.Get("UserName"), nil)
+			writeIamErrorResponse(w, r, err, "user", values.Get("UserName"), nil)
 			return
 		}
 		changed = false
 	case "DeleteUserPolicy":
 		if response, err = iama.DeleteUserPolicy(s3cfg, values); err != nil {
-			writeIamErrorResponse(w, err, "user", values.Get("UserName"), nil)
+			writeIamErrorResponse(w, r, err, "user", values.Get("UserName"), nil)
 		}
 	default:
 		errNotImplemented := s3err.GetAPIError(s3err.ErrNotImplemented)
 		errorResponse := ErrorResponse{}
 		errorResponse.Error.Code = &errNotImplemented.Code
 		errorResponse.Error.Message = &errNotImplemented.Description
-		s3err.WriteXMLResponse(w, errNotImplemented.HTTPStatusCode, errorResponse)
+		s3err.WriteXMLResponse(w, r, errNotImplemented.HTTPStatusCode, errorResponse)
 		return
 	}
 	if changed {

@@ -445,9 +445,9 @@ func (iama *IamApiServer) DoActions(w http.ResponseWriter, r *http.Request) {
 		err := iama.s3ApiConfig.PutS3ApiConfiguration(s3cfg)
 		s3cfgLock.Unlock()
 		if err != nil {
-			writeIamErrorResponse(w, fmt.Errorf(iam.ErrCodeServiceFailureException), "", "", err)
+			writeIamErrorResponse(w, r, fmt.Errorf(iam.ErrCodeServiceFailureException), "", "", err)
 			return
 		}
 	}
-	s3err.WriteXMLResponse(w, http.StatusOK, response)
+	s3err.WriteXMLResponse(w, r, http.StatusOK, response)
 }
@@ -49,7 +49,7 @@ var s3ApiConfigure IamS3ApiConfig
 func NewIamApiServer(router *mux.Router, option *IamServerOption) (iamApiServer *IamApiServer, err error) {
 	s3ApiConfigure = IamS3ApiConfigure{
 		option:       option,
-		masterClient: wdclient.NewMasterClient(option.GrpcDialOption, pb.AdminShellClient, "", "", option.Masters),
+		masterClient: wdclient.NewMasterClient(option.GrpcDialOption, "iam", "", "", option.Masters),
 	}
 	s3Option := s3api.S3ApiServerOption{Filer: option.Filer}
 	iamApiServer = &IamApiServer{
@@ -3,6 +3,7 @@ package broker
 import (
 	"context"
 	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/cluster"
 	"github.com/chrislusf/seaweedfs/weed/pb"
 	"time"
 

@@ -93,14 +94,16 @@ func (broker *MessageBroker) checkFilers() {
 	for !found {
 		for _, master := range masters {
 			err := broker.withMasterClient(master, func(client master_pb.SeaweedClient) error {
-				resp, err := client.ListMasterClients(context.Background(), &master_pb.ListMasterClientsRequest{
-					ClientType: "filer",
+				resp, err := client.ListClusterNodes(context.Background(), &master_pb.ListClusterNodesRequest{
+					ClientType: cluster.FilerType,
 				})
 				if err != nil {
 					return err
 				}
 
-				filers = append(filers, pb.FromAddressStrings(resp.GrpcAddresses)...)
+				for _, clusterNode := range resp.ClusterNodes {
+					filers = append(filers, pb.ServerAddress(clusterNode.Address))
+				}
 
 				return nil
 			})
@@ -69,7 +69,7 @@ service SeaweedFiler {
     rpc KvPut (KvPutRequest) returns (KvPutResponse) {
     }
 
-    rpc DownloadToLocal (DownloadToLocalRequest) returns (DownloadToLocalResponse) {
+    rpc CacheRemoteObjectToLocalCluster (CacheRemoteObjectToLocalClusterRequest) returns (CacheRemoteObjectToLocalClusterResponse) {
     }
 }
 
@@ -403,10 +403,10 @@ message FilerConf {
 /////////////////////////
 // Remote Storage related
 /////////////////////////
-message DownloadToLocalRequest {
+message CacheRemoteObjectToLocalClusterRequest {
    string directory = 1;
    string name = 2;
 }
-message DownloadToLocalResponse {
+message CacheRemoteObjectToLocalClusterResponse {
    Entry entry = 1;
 }
@@ -3334,7 +3334,7 @@ func (x *FilerConf) GetLocations() []*FilerConf_PathConf {
 /////////////////////////
 // Remote Storage related
 /////////////////////////
-type DownloadToLocalRequest struct {
+type CacheRemoteObjectToLocalClusterRequest struct {
 	state         protoimpl.MessageState
 	sizeCache     protoimpl.SizeCache
 	unknownFields protoimpl.UnknownFields

@@ -3343,8 +3343,8 @@ type CacheRemoteObjectToLocalClusterRequest struct {
 	Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
 }
 
-func (x *DownloadToLocalRequest) Reset() {
-	*x = DownloadToLocalRequest{}
+func (x *CacheRemoteObjectToLocalClusterRequest) Reset() {
+	*x = CacheRemoteObjectToLocalClusterRequest{}
 	if protoimpl.UnsafeEnabled {
 		mi := &file_filer_proto_msgTypes[51]
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))

@@ -3352,13 +3352,13 @@ func (x *CacheRemoteObjectToLocalClusterRequest) Reset() {
 	}
 }
 
-func (x *DownloadToLocalRequest) String() string {
+func (x *CacheRemoteObjectToLocalClusterRequest) String() string {
 	return protoimpl.X.MessageStringOf(x)
 }
 
-func (*DownloadToLocalRequest) ProtoMessage() {}
+func (*CacheRemoteObjectToLocalClusterRequest) ProtoMessage() {}
 
-func (x *DownloadToLocalRequest) ProtoReflect() protoreflect.Message {
+func (x *CacheRemoteObjectToLocalClusterRequest) ProtoReflect() protoreflect.Message {
 	mi := &file_filer_proto_msgTypes[51]
 	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))

@@ -3370,26 +3370,26 @@ func (x *CacheRemoteObjectToLocalClusterRequest) ProtoReflect() protoreflect.Message {
 	return mi.MessageOf(x)
 }
 
-// Deprecated: Use DownloadToLocalRequest.ProtoReflect.Descriptor instead.
-func (*DownloadToLocalRequest) Descriptor() ([]byte, []int) {
+// Deprecated: Use CacheRemoteObjectToLocalClusterRequest.ProtoReflect.Descriptor instead.
+func (*CacheRemoteObjectToLocalClusterRequest) Descriptor() ([]byte, []int) {
 	return file_filer_proto_rawDescGZIP(), []int{51}
 }
 
-func (x *DownloadToLocalRequest) GetDirectory() string {
+func (x *CacheRemoteObjectToLocalClusterRequest) GetDirectory() string {
 	if x != nil {
 		return x.Directory
 	}
 	return ""
 }
 
-func (x *DownloadToLocalRequest) GetName() string {
+func (x *CacheRemoteObjectToLocalClusterRequest) GetName() string {
 	if x != nil {
 		return x.Name
 	}
 	return ""
 }
 
-type DownloadToLocalResponse struct {
+type CacheRemoteObjectToLocalClusterResponse struct {
 	state         protoimpl.MessageState
 	sizeCache     protoimpl.SizeCache
 	unknownFields protoimpl.UnknownFields

@@ -3397,8 +3397,8 @@ type CacheRemoteObjectToLocalClusterResponse struct {
 	Entry *Entry `protobuf:"bytes,1,opt,name=entry,proto3" json:"entry,omitempty"`
 }
 
-func (x *DownloadToLocalResponse) Reset() {
-	*x = DownloadToLocalResponse{}
+func (x *CacheRemoteObjectToLocalClusterResponse) Reset() {
+	*x = CacheRemoteObjectToLocalClusterResponse{}
 	if protoimpl.UnsafeEnabled {
 		mi := &file_filer_proto_msgTypes[52]
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))

@@ -3406,13 +3406,13 @@ func (x *CacheRemoteObjectToLocalClusterResponse) Reset() {
 	}
 }
 
-func (x *DownloadToLocalResponse) String() string {
+func (x *CacheRemoteObjectToLocalClusterResponse) String() string {
 	return protoimpl.X.MessageStringOf(x)
 }
 
-func (*DownloadToLocalResponse) ProtoMessage() {}
+func (*CacheRemoteObjectToLocalClusterResponse) ProtoMessage() {}
 
-func (x *DownloadToLocalResponse) ProtoReflect() protoreflect.Message {
+func (x *CacheRemoteObjectToLocalClusterResponse) ProtoReflect() protoreflect.Message {
 	mi := &file_filer_proto_msgTypes[52]
 	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))

@@ -3424,12 +3424,12 @@ func (x *CacheRemoteObjectToLocalClusterResponse) ProtoReflect() protoreflect.Message {
 	return mi.MessageOf(x)
 }
 
-// Deprecated: Use DownloadToLocalResponse.ProtoReflect.Descriptor instead.
-func (*DownloadToLocalResponse) Descriptor() ([]byte, []int) {
+// Deprecated: Use CacheRemoteObjectToLocalClusterResponse.ProtoReflect.Descriptor instead.
+func (*CacheRemoteObjectToLocalClusterResponse) Descriptor() ([]byte, []int) {
 	return file_filer_proto_rawDescGZIP(), []int{52}
 }
 
-func (x *DownloadToLocalResponse) GetEntry() *Entry {
+func (x *CacheRemoteObjectToLocalClusterResponse) GetEntry() *Entry {
 	if x != nil {
 		return x.Entry
 	}

@@ -4042,16 +4042,18 @@ var file_filer_proto_rawDesc = []byte{
 	0x68, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x11, 0x76,
 	0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x47, 0x72, 0x6f, 0x77, 0x74, 0x68, 0x43, 0x6f, 0x75, 0x6e, 0x74,
 	0x12, 0x1b, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x08, 0x20,
-	0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x22, 0x4a, 0x0a,
-	0x16, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x54, 0x6f, 0x4c, 0x6f, 0x63, 0x61, 0x6c,
+	0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x22, 0x5a, 0x0a,
+	0x26, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65,
+	0x63, 0x74, 0x54, 0x6f, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
 	0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63,
 	0x74, 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x69, 0x72, 0x65,
 	0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20,
-	0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x40, 0x0a, 0x17, 0x44, 0x6f, 0x77,
-	0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x54, 0x6f, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x70,
+	0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x50, 0x0a, 0x27, 0x43, 0x61, 0x63,
+	0x68, 0x65, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x6f,
+	0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70,
 	0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x01, 0x20,
 	0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x45,
-	0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x32, 0x98, 0x0e, 0x0a, 0x0c,
+	0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x32, 0xc9, 0x0e, 0x0a, 0x0c,
 	0x53, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x46, 0x69, 0x6c, 0x65, 0x72, 0x12, 0x67, 0x0a, 0x14,
 	0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x45,
 	0x6e, 0x74, 0x72, 0x79, 0x12, 0x25, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e,

@@ -4159,18 +4161,21 @@ var file_filer_proto_rawDesc = []byte{
 	0x12, 0x3a, 0x0a, 0x05, 0x4b, 0x76, 0x50, 0x75, 0x74, 0x12, 0x16, 0x2e, 0x66, 0x69, 0x6c, 0x65,
 	0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4b, 0x76, 0x50, 0x75, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
 	0x74, 0x1a, 0x17, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4b, 0x76, 0x50,
-	0x75, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x58, 0x0a, 0x0f,
-	0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x54, 0x6f, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x12,
-	0x20, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x6f, 0x77, 0x6e, 0x6c,
-	0x6f, 0x61, 0x64, 0x54, 0x6f, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
-	0x74, 0x1a, 0x21, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x6f, 0x77,
-	0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x54, 0x6f, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x70,
-	0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x4f, 0x0a, 0x10, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65,
-	0x64, 0x66, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x42, 0x0a, 0x46, 0x69, 0x6c, 0x65,
-	0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x5a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63,
-	0x6f, 0x6d, 0x2f, 0x63, 0x68, 0x72, 0x69, 0x73, 0x6c, 0x75, 0x73, 0x66, 0x2f, 0x73, 0x65, 0x61,
-	0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2f, 0x77, 0x65, 0x65, 0x64, 0x2f, 0x70, 0x62, 0x2f, 0x66,
-	0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+	0x75, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x88, 0x01, 0x0a,
+	0x1f, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65,
+	0x63, 0x74, 0x54, 0x6f, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
+	0x12, 0x30, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x61, 0x63, 0x68,
+	0x65, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x6f, 0x4c,
+	0x6f, 0x63, 0x61, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65,
+	0x73, 0x74, 0x1a, 0x31, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x61,
+	0x63, 0x68, 0x65, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54,
+	0x6f, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73,
+	0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x4f, 0x0a, 0x10, 0x73, 0x65, 0x61, 0x77, 0x65,
+	0x65, 0x64, 0x66, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x42, 0x0a, 0x46, 0x69, 0x6c,
+	0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x5a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e,
+	0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x68, 0x72, 0x69, 0x73, 0x6c, 0x75, 0x73, 0x66, 0x2f, 0x73, 0x65,
|
||||
0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2f, 0x77, 0x65, 0x65, 0x64, 0x2f, 0x70, 0x62, 0x2f,
|
||||
0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
|
@ -4187,59 +4192,59 @@ func file_filer_proto_rawDescGZIP() []byte {
|
|||
|
||||
var file_filer_proto_msgTypes = make([]protoimpl.MessageInfo, 57)
|
||||
var file_filer_proto_goTypes = []interface{}{
|
||||
(*LookupDirectoryEntryRequest)(nil), // 0: filer_pb.LookupDirectoryEntryRequest
|
||||
(*LookupDirectoryEntryResponse)(nil), // 1: filer_pb.LookupDirectoryEntryResponse
|
||||
(*ListEntriesRequest)(nil), // 2: filer_pb.ListEntriesRequest
|
||||
(*ListEntriesResponse)(nil), // 3: filer_pb.ListEntriesResponse
|
||||
(*RemoteEntry)(nil), // 4: filer_pb.RemoteEntry
|
||||
(*Entry)(nil), // 5: filer_pb.Entry
|
||||
(*FullEntry)(nil), // 6: filer_pb.FullEntry
|
||||
(*EventNotification)(nil), // 7: filer_pb.EventNotification
|
||||
(*FileChunk)(nil), // 8: filer_pb.FileChunk
|
||||
(*FileChunkManifest)(nil), // 9: filer_pb.FileChunkManifest
|
||||
(*FileId)(nil), // 10: filer_pb.FileId
|
||||
(*FuseAttributes)(nil), // 11: filer_pb.FuseAttributes
|
||||
(*CreateEntryRequest)(nil), // 12: filer_pb.CreateEntryRequest
|
||||
(*CreateEntryResponse)(nil), // 13: filer_pb.CreateEntryResponse
|
||||
(*UpdateEntryRequest)(nil), // 14: filer_pb.UpdateEntryRequest
|
||||
(*UpdateEntryResponse)(nil), // 15: filer_pb.UpdateEntryResponse
|
||||
(*AppendToEntryRequest)(nil), // 16: filer_pb.AppendToEntryRequest
|
||||
(*AppendToEntryResponse)(nil), // 17: filer_pb.AppendToEntryResponse
|
||||
(*DeleteEntryRequest)(nil), // 18: filer_pb.DeleteEntryRequest
|
||||
(*DeleteEntryResponse)(nil), // 19: filer_pb.DeleteEntryResponse
|
||||
(*AtomicRenameEntryRequest)(nil), // 20: filer_pb.AtomicRenameEntryRequest
|
||||
(*AtomicRenameEntryResponse)(nil), // 21: filer_pb.AtomicRenameEntryResponse
|
||||
(*StreamRenameEntryRequest)(nil), // 22: filer_pb.StreamRenameEntryRequest
|
||||
(*StreamRenameEntryResponse)(nil), // 23: filer_pb.StreamRenameEntryResponse
|
||||
(*AssignVolumeRequest)(nil), // 24: filer_pb.AssignVolumeRequest
|
||||
(*AssignVolumeResponse)(nil), // 25: filer_pb.AssignVolumeResponse
|
||||
(*LookupVolumeRequest)(nil), // 26: filer_pb.LookupVolumeRequest
|
||||
(*Locations)(nil), // 27: filer_pb.Locations
|
||||
(*Location)(nil), // 28: filer_pb.Location
|
||||
(*LookupVolumeResponse)(nil), // 29: filer_pb.LookupVolumeResponse
|
||||
(*Collection)(nil), // 30: filer_pb.Collection
|
||||
(*CollectionListRequest)(nil), // 31: filer_pb.CollectionListRequest
|
||||
(*CollectionListResponse)(nil), // 32: filer_pb.CollectionListResponse
|
||||
(*DeleteCollectionRequest)(nil), // 33: filer_pb.DeleteCollectionRequest
|
||||
(*DeleteCollectionResponse)(nil), // 34: filer_pb.DeleteCollectionResponse
|
||||
(*StatisticsRequest)(nil), // 35: filer_pb.StatisticsRequest
|
||||
(*StatisticsResponse)(nil), // 36: filer_pb.StatisticsResponse
|
||||
(*GetFilerConfigurationRequest)(nil), // 37: filer_pb.GetFilerConfigurationRequest
|
||||
(*GetFilerConfigurationResponse)(nil), // 38: filer_pb.GetFilerConfigurationResponse
|
||||
(*SubscribeMetadataRequest)(nil), // 39: filer_pb.SubscribeMetadataRequest
|
||||
(*SubscribeMetadataResponse)(nil), // 40: filer_pb.SubscribeMetadataResponse
|
||||
(*LogEntry)(nil), // 41: filer_pb.LogEntry
|
||||
(*KeepConnectedRequest)(nil), // 42: filer_pb.KeepConnectedRequest
|
||||
(*KeepConnectedResponse)(nil), // 43: filer_pb.KeepConnectedResponse
|
||||
(*LocateBrokerRequest)(nil), // 44: filer_pb.LocateBrokerRequest
|
||||
(*LocateBrokerResponse)(nil), // 45: filer_pb.LocateBrokerResponse
|
||||
(*KvGetRequest)(nil), // 46: filer_pb.KvGetRequest
|
||||
(*KvGetResponse)(nil), // 47: filer_pb.KvGetResponse
|
||||
(*KvPutRequest)(nil), // 48: filer_pb.KvPutRequest
|
||||
(*KvPutResponse)(nil), // 49: filer_pb.KvPutResponse
|
||||
(*FilerConf)(nil), // 50: filer_pb.FilerConf
|
||||
(*DownloadToLocalRequest)(nil), // 51: filer_pb.DownloadToLocalRequest
|
||||
(*DownloadToLocalResponse)(nil), // 52: filer_pb.DownloadToLocalResponse
|
||||
(*LookupDirectoryEntryRequest)(nil), // 0: filer_pb.LookupDirectoryEntryRequest
|
||||
(*LookupDirectoryEntryResponse)(nil), // 1: filer_pb.LookupDirectoryEntryResponse
|
||||
(*ListEntriesRequest)(nil), // 2: filer_pb.ListEntriesRequest
|
||||
(*ListEntriesResponse)(nil), // 3: filer_pb.ListEntriesResponse
|
||||
(*RemoteEntry)(nil), // 4: filer_pb.RemoteEntry
|
||||
(*Entry)(nil), // 5: filer_pb.Entry
|
||||
(*FullEntry)(nil), // 6: filer_pb.FullEntry
|
||||
(*EventNotification)(nil), // 7: filer_pb.EventNotification
|
||||
(*FileChunk)(nil), // 8: filer_pb.FileChunk
|
||||
(*FileChunkManifest)(nil), // 9: filer_pb.FileChunkManifest
|
||||
(*FileId)(nil), // 10: filer_pb.FileId
|
||||
(*FuseAttributes)(nil), // 11: filer_pb.FuseAttributes
|
||||
(*CreateEntryRequest)(nil), // 12: filer_pb.CreateEntryRequest
|
||||
(*CreateEntryResponse)(nil), // 13: filer_pb.CreateEntryResponse
|
||||
(*UpdateEntryRequest)(nil), // 14: filer_pb.UpdateEntryRequest
|
||||
(*UpdateEntryResponse)(nil), // 15: filer_pb.UpdateEntryResponse
|
||||
(*AppendToEntryRequest)(nil), // 16: filer_pb.AppendToEntryRequest
|
||||
(*AppendToEntryResponse)(nil), // 17: filer_pb.AppendToEntryResponse
|
||||
(*DeleteEntryRequest)(nil), // 18: filer_pb.DeleteEntryRequest
|
||||
(*DeleteEntryResponse)(nil), // 19: filer_pb.DeleteEntryResponse
|
||||
(*AtomicRenameEntryRequest)(nil), // 20: filer_pb.AtomicRenameEntryRequest
|
||||
(*AtomicRenameEntryResponse)(nil), // 21: filer_pb.AtomicRenameEntryResponse
|
||||
(*StreamRenameEntryRequest)(nil), // 22: filer_pb.StreamRenameEntryRequest
|
||||
(*StreamRenameEntryResponse)(nil), // 23: filer_pb.StreamRenameEntryResponse
|
||||
(*AssignVolumeRequest)(nil), // 24: filer_pb.AssignVolumeRequest
|
||||
(*AssignVolumeResponse)(nil), // 25: filer_pb.AssignVolumeResponse
|
||||
(*LookupVolumeRequest)(nil), // 26: filer_pb.LookupVolumeRequest
|
||||
(*Locations)(nil), // 27: filer_pb.Locations
|
||||
(*Location)(nil), // 28: filer_pb.Location
|
||||
(*LookupVolumeResponse)(nil), // 29: filer_pb.LookupVolumeResponse
|
||||
(*Collection)(nil), // 30: filer_pb.Collection
|
||||
(*CollectionListRequest)(nil), // 31: filer_pb.CollectionListRequest
|
||||
(*CollectionListResponse)(nil), // 32: filer_pb.CollectionListResponse
|
||||
(*DeleteCollectionRequest)(nil), // 33: filer_pb.DeleteCollectionRequest
|
||||
(*DeleteCollectionResponse)(nil), // 34: filer_pb.DeleteCollectionResponse
|
||||
(*StatisticsRequest)(nil), // 35: filer_pb.StatisticsRequest
|
||||
(*StatisticsResponse)(nil), // 36: filer_pb.StatisticsResponse
|
||||
(*GetFilerConfigurationRequest)(nil), // 37: filer_pb.GetFilerConfigurationRequest
|
||||
(*GetFilerConfigurationResponse)(nil), // 38: filer_pb.GetFilerConfigurationResponse
|
||||
(*SubscribeMetadataRequest)(nil), // 39: filer_pb.SubscribeMetadataRequest
|
||||
(*SubscribeMetadataResponse)(nil), // 40: filer_pb.SubscribeMetadataResponse
|
||||
(*LogEntry)(nil), // 41: filer_pb.LogEntry
|
||||
(*KeepConnectedRequest)(nil), // 42: filer_pb.KeepConnectedRequest
|
||||
(*KeepConnectedResponse)(nil), // 43: filer_pb.KeepConnectedResponse
|
||||
(*LocateBrokerRequest)(nil), // 44: filer_pb.LocateBrokerRequest
|
||||
(*LocateBrokerResponse)(nil), // 45: filer_pb.LocateBrokerResponse
|
||||
(*KvGetRequest)(nil), // 46: filer_pb.KvGetRequest
|
||||
(*KvGetResponse)(nil), // 47: filer_pb.KvGetResponse
|
||||
(*KvPutRequest)(nil), // 48: filer_pb.KvPutRequest
|
||||
(*KvPutResponse)(nil), // 49: filer_pb.KvPutResponse
|
||||
(*FilerConf)(nil), // 50: filer_pb.FilerConf
|
||||
(*CacheRemoteObjectToLocalClusterRequest)(nil), // 51: filer_pb.CacheRemoteObjectToLocalClusterRequest
|
||||
(*CacheRemoteObjectToLocalClusterResponse)(nil), // 52: filer_pb.CacheRemoteObjectToLocalClusterResponse
|
||||
nil, // 53: filer_pb.Entry.ExtendedEntry
|
||||
nil, // 54: filer_pb.LookupVolumeResponse.LocationsMapEntry
|
||||
(*LocateBrokerResponse_Resource)(nil), // 55: filer_pb.LocateBrokerResponse.Resource
|
||||
|
@ -4269,7 +4274,7 @@ var file_filer_proto_depIdxs = []int32{
|
|||
7, // 20: filer_pb.SubscribeMetadataResponse.event_notification:type_name -> filer_pb.EventNotification
|
||||
55, // 21: filer_pb.LocateBrokerResponse.resources:type_name -> filer_pb.LocateBrokerResponse.Resource
|
||||
56, // 22: filer_pb.FilerConf.locations:type_name -> filer_pb.FilerConf.PathConf
|
||||
5, // 23: filer_pb.DownloadToLocalResponse.entry:type_name -> filer_pb.Entry
|
||||
5, // 23: filer_pb.CacheRemoteObjectToLocalClusterResponse.entry:type_name -> filer_pb.Entry
|
||||
27, // 24: filer_pb.LookupVolumeResponse.LocationsMapEntry.value:type_name -> filer_pb.Locations
|
||||
0, // 25: filer_pb.SeaweedFiler.LookupDirectoryEntry:input_type -> filer_pb.LookupDirectoryEntryRequest
|
||||
2, // 26: filer_pb.SeaweedFiler.ListEntries:input_type -> filer_pb.ListEntriesRequest
|
||||
|
@ -4291,7 +4296,7 @@ var file_filer_proto_depIdxs = []int32{
|
|||
44, // 42: filer_pb.SeaweedFiler.LocateBroker:input_type -> filer_pb.LocateBrokerRequest
|
||||
46, // 43: filer_pb.SeaweedFiler.KvGet:input_type -> filer_pb.KvGetRequest
|
||||
48, // 44: filer_pb.SeaweedFiler.KvPut:input_type -> filer_pb.KvPutRequest
|
||||
51, // 45: filer_pb.SeaweedFiler.DownloadToLocal:input_type -> filer_pb.DownloadToLocalRequest
|
||||
51, // 45: filer_pb.SeaweedFiler.CacheRemoteObjectToLocalCluster:input_type -> filer_pb.CacheRemoteObjectToLocalClusterRequest
|
||||
1, // 46: filer_pb.SeaweedFiler.LookupDirectoryEntry:output_type -> filer_pb.LookupDirectoryEntryResponse
|
||||
3, // 47: filer_pb.SeaweedFiler.ListEntries:output_type -> filer_pb.ListEntriesResponse
|
||||
13, // 48: filer_pb.SeaweedFiler.CreateEntry:output_type -> filer_pb.CreateEntryResponse
|
||||
|
@ -4312,7 +4317,7 @@ var file_filer_proto_depIdxs = []int32{
|
|||
45, // 63: filer_pb.SeaweedFiler.LocateBroker:output_type -> filer_pb.LocateBrokerResponse
|
||||
47, // 64: filer_pb.SeaweedFiler.KvGet:output_type -> filer_pb.KvGetResponse
|
||||
49, // 65: filer_pb.SeaweedFiler.KvPut:output_type -> filer_pb.KvPutResponse
|
||||
52, // 66: filer_pb.SeaweedFiler.DownloadToLocal:output_type -> filer_pb.DownloadToLocalResponse
|
||||
52, // 66: filer_pb.SeaweedFiler.CacheRemoteObjectToLocalCluster:output_type -> filer_pb.CacheRemoteObjectToLocalClusterResponse
|
||||
46, // [46:67] is the sub-list for method output_type
|
||||
25, // [25:46] is the sub-list for method input_type
|
||||
25, // [25:25] is the sub-list for extension type_name
|
||||
|
@ -4939,7 +4944,7 @@ func file_filer_proto_init() {
|
|||
}
|
||||
}
|
||||
file_filer_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*DownloadToLocalRequest); i {
|
||||
switch v := v.(*CacheRemoteObjectToLocalClusterRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
|
@ -4951,7 +4956,7 @@ func file_filer_proto_init() {
|
|||
}
|
||||
}
|
||||
file_filer_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*DownloadToLocalResponse); i {
|
||||
switch v := v.(*CacheRemoteObjectToLocalClusterResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
|
@ -5039,7 +5044,7 @@ type SeaweedFilerClient interface {
|
|||
LocateBroker(ctx context.Context, in *LocateBrokerRequest, opts ...grpc.CallOption) (*LocateBrokerResponse, error)
|
||||
KvGet(ctx context.Context, in *KvGetRequest, opts ...grpc.CallOption) (*KvGetResponse, error)
|
||||
KvPut(ctx context.Context, in *KvPutRequest, opts ...grpc.CallOption) (*KvPutResponse, error)
|
||||
DownloadToLocal(ctx context.Context, in *DownloadToLocalRequest, opts ...grpc.CallOption) (*DownloadToLocalResponse, error)
|
||||
CacheRemoteObjectToLocalCluster(ctx context.Context, in *CacheRemoteObjectToLocalClusterRequest, opts ...grpc.CallOption) (*CacheRemoteObjectToLocalClusterResponse, error)
|
||||
}
|
||||
|
||||
type seaweedFilerClient struct {
|
||||
|
@ -5344,9 +5349,9 @@ func (c *seaweedFilerClient) KvPut(ctx context.Context, in *KvPutRequest, opts .
|
|||
return out, nil
|
||||
}
|
||||
|
||||
func (c *seaweedFilerClient) DownloadToLocal(ctx context.Context, in *DownloadToLocalRequest, opts ...grpc.CallOption) (*DownloadToLocalResponse, error) {
|
||||
out := new(DownloadToLocalResponse)
|
||||
err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/DownloadToLocal", in, out, opts...)
|
||||
func (c *seaweedFilerClient) CacheRemoteObjectToLocalCluster(ctx context.Context, in *CacheRemoteObjectToLocalClusterRequest, opts ...grpc.CallOption) (*CacheRemoteObjectToLocalClusterResponse, error) {
|
||||
out := new(CacheRemoteObjectToLocalClusterResponse)
|
||||
err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/CacheRemoteObjectToLocalCluster", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -5375,7 +5380,7 @@ type SeaweedFilerServer interface {
|
|||
LocateBroker(context.Context, *LocateBrokerRequest) (*LocateBrokerResponse, error)
|
||||
KvGet(context.Context, *KvGetRequest) (*KvGetResponse, error)
|
||||
KvPut(context.Context, *KvPutRequest) (*KvPutResponse, error)
|
||||
DownloadToLocal(context.Context, *DownloadToLocalRequest) (*DownloadToLocalResponse, error)
|
||||
CacheRemoteObjectToLocalCluster(context.Context, *CacheRemoteObjectToLocalClusterRequest) (*CacheRemoteObjectToLocalClusterResponse, error)
|
||||
}
|
||||
|
||||
// UnimplementedSeaweedFilerServer can be embedded to have forward compatible implementations.
|
||||
|
@ -5442,8 +5447,8 @@ func (*UnimplementedSeaweedFilerServer) KvGet(context.Context, *KvGetRequest) (*
|
|||
func (*UnimplementedSeaweedFilerServer) KvPut(context.Context, *KvPutRequest) (*KvPutResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method KvPut not implemented")
|
||||
}
|
||||
func (*UnimplementedSeaweedFilerServer) DownloadToLocal(context.Context, *DownloadToLocalRequest) (*DownloadToLocalResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method DownloadToLocal not implemented")
|
||||
func (*UnimplementedSeaweedFilerServer) CacheRemoteObjectToLocalCluster(context.Context, *CacheRemoteObjectToLocalClusterRequest) (*CacheRemoteObjectToLocalClusterResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method CacheRemoteObjectToLocalCluster not implemented")
|
||||
}
|
||||
|
||||
func RegisterSeaweedFilerServer(s *grpc.Server, srv SeaweedFilerServer) {
|
||||
|
@ -5830,20 +5835,20 @@ func _SeaweedFiler_KvPut_Handler(srv interface{}, ctx context.Context, dec func(
|
|||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _SeaweedFiler_DownloadToLocal_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(DownloadToLocalRequest)
|
||||
func _SeaweedFiler_CacheRemoteObjectToLocalCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(CacheRemoteObjectToLocalClusterRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(SeaweedFilerServer).DownloadToLocal(ctx, in)
|
||||
return srv.(SeaweedFilerServer).CacheRemoteObjectToLocalCluster(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/filer_pb.SeaweedFiler/DownloadToLocal",
|
||||
FullMethod: "/filer_pb.SeaweedFiler/CacheRemoteObjectToLocalCluster",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(SeaweedFilerServer).DownloadToLocal(ctx, req.(*DownloadToLocalRequest))
|
||||
return srv.(SeaweedFilerServer).CacheRemoteObjectToLocalCluster(ctx, req.(*CacheRemoteObjectToLocalClusterRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
@ -5913,8 +5918,8 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{
|
|||
Handler: _SeaweedFiler_KvPut_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "DownloadToLocal",
|
||||
Handler: _SeaweedFiler_DownloadToLocal_Handler,
|
||||
MethodName: "CacheRemoteObjectToLocalCluster",
|
||||
Handler: _SeaweedFiler_CacheRemoteObjectToLocalCluster_Handler,
|
||||
},
|
||||
},
|
||||
Streams: []grpc.StreamDesc{
|
||||
|
|
|
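The hunks above are regenerated protobuf and gRPC plumbing for renaming the filer RPC DownloadToLocal to CacheRemoteObjectToLocalCluster; message index 52 and the entry payload are unchanged, so only the type and method names move. Below is a minimal client-side sketch of calling the renamed RPC; the filer gRPC address and the directory/name values are illustrative assumptions, not taken from this commit.

// cache_example.go: a minimal sketch, assuming a filer gRPC endpoint at
// localhost:18888 and a mounted remote entry under /buckets/b1.
package main

import (
    "context"
    "log"

    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
    "google.golang.org/grpc"
)

func main() {
    conn, err := grpc.Dial("localhost:18888", grpc.WithInsecure())
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()

    client := filer_pb.NewSeaweedFilerClient(conn)
    // The request and response types match the renamed messages in this diff.
    resp, err := client.CacheRemoteObjectToLocalCluster(context.Background(),
        &filer_pb.CacheRemoteObjectToLocalClusterRequest{
            Directory: "/buckets/b1", // hypothetical directory
            Name:      "notes.txt",   // hypothetical file name
        })
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("cached entry: %v", resp.GetEntry())
}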
@ -7,209 +7,222 @@ option go_package = "github.com/chrislusf/seaweedfs/weed/pb/master_pb";
//////////////////////////////////////////////////

service Seaweed {
  rpc SendHeartbeat (stream Heartbeat) returns (stream HeartbeatResponse) {
  }
  rpc KeepConnected (stream KeepConnectedRequest) returns (stream VolumeLocation) {
  rpc KeepConnected (stream KeepConnectedRequest) returns (stream KeepConnectedResponse) {
  }
  rpc LookupVolume (LookupVolumeRequest) returns (LookupVolumeResponse) {
  }
  rpc Assign (AssignRequest) returns (AssignResponse) {
  }
  rpc Statistics (StatisticsRequest) returns (StatisticsResponse) {
  }
  rpc CollectionList (CollectionListRequest) returns (CollectionListResponse) {
  }
  rpc CollectionDelete (CollectionDeleteRequest) returns (CollectionDeleteResponse) {
  }
  rpc VolumeList (VolumeListRequest) returns (VolumeListResponse) {
  }
  rpc LookupEcVolume (LookupEcVolumeRequest) returns (LookupEcVolumeResponse) {
  }
  rpc VacuumVolume (VacuumVolumeRequest) returns (VacuumVolumeResponse) {
  }
  rpc GetMasterConfiguration (GetMasterConfigurationRequest) returns (GetMasterConfigurationResponse) {
  }
  rpc ListMasterClients (ListMasterClientsRequest) returns (ListMasterClientsResponse) {
  rpc ListClusterNodes (ListClusterNodesRequest) returns (ListClusterNodesResponse) {
  }
  rpc LeaseAdminToken (LeaseAdminTokenRequest) returns (LeaseAdminTokenResponse) {
  }
  rpc ReleaseAdminToken (ReleaseAdminTokenRequest) returns (ReleaseAdminTokenResponse) {
  }

}

//////////////////////////////////////////////////

message Heartbeat {
  string ip = 1;
  uint32 port = 2;
  string public_url = 3;
  uint64 max_file_key = 5;
  string data_center = 6;
  string rack = 7;
  uint32 admin_port = 8;
  repeated VolumeInformationMessage volumes = 9;
  // delta volumes
  repeated VolumeShortInformationMessage new_volumes = 10;
  repeated VolumeShortInformationMessage deleted_volumes = 11;
  bool has_no_volumes = 12;

  // erasure coding
  repeated VolumeEcShardInformationMessage ec_shards = 16;
  // delta erasure coding shards
  repeated VolumeEcShardInformationMessage new_ec_shards = 17;
  repeated VolumeEcShardInformationMessage deleted_ec_shards = 18;
  bool has_no_ec_shards = 19;

  map<string, uint32> max_volume_counts = 4;
  uint32 grpc_port = 20;

}

message HeartbeatResponse {
  uint64 volume_size_limit = 1;
  string leader = 2;
  string metrics_address = 3;
  uint32 metrics_interval_seconds = 4;
  repeated StorageBackend storage_backends = 5;
}

message VolumeInformationMessage {
  uint32 id = 1;
  uint64 size = 2;
  string collection = 3;
  uint64 file_count = 4;
  uint64 delete_count = 5;
  uint64 deleted_byte_count = 6;
  bool read_only = 7;
  uint32 replica_placement = 8;
  uint32 version = 9;
  uint32 ttl = 10;
  uint32 compact_revision = 11;
  int64 modified_at_second = 12;
  string remote_storage_name = 13;
  string remote_storage_key = 14;
  string disk_type = 15;
}

message VolumeShortInformationMessage {
  uint32 id = 1;
  string collection = 3;
  uint32 replica_placement = 8;
  uint32 version = 9;
  uint32 ttl = 10;
  string disk_type = 15;
}

message VolumeEcShardInformationMessage {
  uint32 id = 1;
  string collection = 2;
  uint32 ec_index_bits = 3;
  string disk_type = 4;
}

message StorageBackend {
  string type = 1;
  string id = 2;
  map<string, string> properties = 3;
}

message Empty {
}

message SuperBlockExtra {
  message ErasureCoding {
    uint32 data = 1;
    uint32 parity = 2;
    repeated uint32 volume_ids = 3;
  }
  ErasureCoding erasure_coding = 1;
}

message KeepConnectedRequest {
  string name = 1;
  string client_type = 1;
  string client_address = 3;
  string version = 4;
}

message VolumeLocation {
  string url = 1;
  string public_url = 2;
  repeated uint32 new_vids = 3;
  repeated uint32 deleted_vids = 4;
  string leader = 5; // optional when leader is not itself
  string data_center = 6; // optional when DataCenter is in use
  uint32 grpc_port = 7;
}

message ClusterNodeUpdate {
  string node_type = 1;
  string address = 2;
  bool is_leader = 3;
  bool is_add = 4;
}

message KeepConnectedResponse {
  VolumeLocation volume_location = 1;
  ClusterNodeUpdate cluster_node_update = 2;
}

message LookupVolumeRequest {
  repeated string volume_or_file_ids = 1;
  string collection = 2; // optional, a bit faster if provided.
}
message LookupVolumeResponse {
  message VolumeIdLocation {
    string volume_or_file_id = 1;
    repeated Location locations = 2;
    string error = 3;
    string auth = 4;
  }
  repeated VolumeIdLocation volume_id_locations = 1;
}

message Location {
  string url = 1;
  string public_url = 2;
  uint32 grpc_port = 3;
}

message AssignRequest {
  uint64 count = 1;
  string replication = 2;
  string collection = 3;
  string ttl = 4;
  string data_center = 5;
  string rack = 6;
  string data_node = 7;
  uint32 memory_map_max_size_mb = 8;
  uint32 Writable_volume_count = 9;
  string disk_type = 10;
}
message AssignResponse {
  string fid = 1;
  uint64 count = 4;
  string error = 5;
  string auth = 6;
  repeated Location replicas = 7;
  Location location = 8;
}

message StatisticsRequest {
  string replication = 1;
  string collection = 2;
  string ttl = 3;
  string disk_type = 4;
}
message StatisticsResponse {
  uint64 total_size = 4;
  uint64 used_size = 5;
  uint64 file_count = 6;
}

//
// collection related
//
message Collection {
  string name = 1;
}
message CollectionListRequest {
  bool include_normal_volumes = 1;
  bool include_ec_volumes = 2;
}
message CollectionListResponse {
  repeated Collection collections = 1;
}

message CollectionDeleteRequest {
  string name = 1;
}
message CollectionDeleteResponse {
}

@ -218,56 +231,56 @@ message CollectionDeleteResponse {
// volume related
//
message DiskInfo {
  string type = 1;
  int64 volume_count = 2;
  int64 max_volume_count = 3;
  int64 free_volume_count = 4;
  int64 active_volume_count = 5;
  repeated VolumeInformationMessage volume_infos = 6;
  repeated VolumeEcShardInformationMessage ec_shard_infos = 7;
  int64 remote_volume_count = 8;
}
message DataNodeInfo {
  string id = 1;
  map<string, DiskInfo> diskInfos = 2;
  uint32 grpc_port = 3;
}
message RackInfo {
  string id = 1;
  repeated DataNodeInfo data_node_infos = 2;
  map<string, DiskInfo> diskInfos = 3;
}
message DataCenterInfo {
  string id = 1;
  repeated RackInfo rack_infos = 2;
  map<string, DiskInfo> diskInfos = 3;
}
message TopologyInfo {
  string id = 1;
  repeated DataCenterInfo data_center_infos = 2;
  map<string, DiskInfo> diskInfos = 3;
}
message VolumeListRequest {
}
message VolumeListResponse {
  TopologyInfo topology_info = 1;
  uint64 volume_size_limit_mb = 2;
}

message LookupEcVolumeRequest {
  uint32 volume_id = 1;
}
message LookupEcVolumeResponse {
  uint32 volume_id = 1;
  message EcShardIdLocation {
    uint32 shard_id = 1;
    repeated Location locations = 2;
  }
  repeated EcShardIdLocation shard_id_locations = 2;
}

message VacuumVolumeRequest {
  float garbage_threshold = 1;
}
message VacuumVolumeResponse {
}

@ -275,37 +288,42 @@ message VacuumVolumeResponse {
message GetMasterConfigurationRequest {
}
message GetMasterConfigurationResponse {
  string metrics_address = 1;
  uint32 metrics_interval_seconds = 2;
  repeated StorageBackend storage_backends = 3;
  string default_replication = 4;
  string leader = 5;
  uint32 volume_size_limit_m_b = 6;
  bool volume_preallocate = 7;
}

message ListMasterClientsRequest {
  string client_type = 1;
message ListClusterNodesRequest {
  string client_type = 1;
}
message ListMasterClientsResponse {
  repeated string grpc_addresses = 1;
message ListClusterNodesResponse {
  message ClusterNode {
    string address = 1;
    string version = 2;
    bool is_leader = 3;
  }
  repeated ClusterNode cluster_nodes = 1;
}

message LeaseAdminTokenRequest {
  int64 previous_token = 1;
  int64 previous_lock_time = 2;
  string lock_name = 3;
  string client_name = 4;
}
message LeaseAdminTokenResponse {
  int64 token = 1;
  int64 lock_ts_ns = 2;
}

message ReleaseAdminTokenRequest {
  int64 previous_token = 1;
  int64 previous_lock_time = 2;
  string lock_name = 3;
}
message ReleaseAdminTokenResponse {
}

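The proto changes above reshape the master's KeepConnected stream: instead of raw VolumeLocation messages it now returns KeepConnectedResponse, which carries either a volume location delta or a ClusterNodeUpdate, and ListMasterClients becomes ListClusterNodes with structured ClusterNode entries. A minimal consumer sketch follows, assuming a master gRPC endpoint at localhost:19333; the addresses and client identity are placeholders.

// keepconnected_example.go: a sketch of consuming the reshaped stream.
package main

import (
    "context"
    "log"

    "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
    "google.golang.org/grpc"
)

func main() {
    conn, err := grpc.Dial("localhost:19333", grpc.WithInsecure())
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()

    stream, err := master_pb.NewSeaweedClient(conn).KeepConnected(context.Background())
    if err != nil {
        log.Fatal(err)
    }
    if err := stream.Send(&master_pb.KeepConnectedRequest{
        ClientType:    "filer", // renamed from `name` in the old schema
        ClientAddress: "localhost:8888",
    }); err != nil {
        log.Fatal(err)
    }
    for {
        resp, err := stream.Recv()
        if err != nil {
            log.Fatal(err)
        }
        // Each response now carries either a volume location delta or a
        // cluster membership update.
        if loc := resp.GetVolumeLocation(); loc != nil {
            log.Printf("volume location %s new=%v deleted=%v", loc.Url, loc.NewVids, loc.DeletedVids)
        }
        if upd := resp.GetClusterNodeUpdate(); upd != nil {
            log.Printf("%s %s is_add=%v is_leader=%v", upd.NodeType, upd.Address, upd.IsAdd, upd.IsLeader)
        }
    }
}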
File diff suppressed because it is too large
@ -83,6 +83,7 @@ func (s *s3RemoteStorageClient) Traverse(remote *remote_pb.RemoteStorageLocation
    }
    isLastPage := false
    for !isLastPage && err == nil {
        var localErr error
        listErr := s.conn.ListObjectsV2Pages(listInput, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
            for _, content := range page.Contents {
                key := *content.Key

@ -94,6 +95,7 @@ func (s *s3RemoteStorageClient) Traverse(remote *remote_pb.RemoteStorageLocation
                    RemoteETag:  *content.ETag,
                    StorageName: s.conf.Name,
                }); err != nil {
                    localErr = err
                    return false
                }
            }

@ -104,6 +106,9 @@ func (s *s3RemoteStorageClient) Traverse(remote *remote_pb.RemoteStorageLocation
        if listErr != nil {
            err = fmt.Errorf("list %v: %v", remote, listErr)
        }
        if localErr != nil {
            err = fmt.Errorf("process %v: %v", remote, localErr)
        }
    }
    return
}

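The Traverse hunks above fix silent error loss: the ListObjectsV2Pages callback can only return a bool, so an error raised inside the page callback is captured in localErr and merged into err once paging stops. A standalone sketch of the same pattern, with an assumed bucket name and a stub process function:

// pagination_err_example.go: the page-callback error capture, in isolation.
package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3"
)

func process(key string) error { fmt.Println(key); return nil }

func main() {
    svc := s3.New(session.Must(session.NewSession()))

    var localErr error
    listErr := svc.ListObjectsV2Pages(&s3.ListObjectsV2Input{
        Bucket: aws.String("example-bucket"), // hypothetical bucket
    }, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
        for _, content := range page.Contents {
            if err := process(*content.Key); err != nil {
                localErr = err
                return false // stop paging, but remember why
            }
        }
        return true
    })

    var err error
    if listErr != nil {
        err = fmt.Errorf("list: %v", listErr)
    }
    if localErr != nil {
        err = fmt.Errorf("process: %v", localErr)
    }
    if err != nil {
        log.Fatal(err)
    }
}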
@ -5,6 +5,7 @@ import (
    "net/http"
    "os"
    "strings"
    "sync"

    "github.com/chrislusf/seaweedfs/weed/filer"
    "github.com/chrislusf/seaweedfs/weed/glog"

@ -23,6 +24,8 @@ type Iam interface {
}

type IdentityAccessManagement struct {
    m sync.RWMutex

    identities []*Identity
    domain     string
}

@ -131,31 +134,38 @@ func (iam *IdentityAccessManagement) loadS3ApiConfiguration(config *iam_pb.S3Api
        }
        identities = append(identities, t)
    }

    iam.m.Lock()
    // atomically switch
    iam.identities = identities
    iam.m.Unlock()
    return nil
}

func (iam *IdentityAccessManagement) isEnabled() bool {

    iam.m.RLock()
    defer iam.m.RUnlock()
    return len(iam.identities) > 0
}

func (iam *IdentityAccessManagement) lookupByAccessKey(accessKey string) (identity *Identity, cred *Credential, found bool) {

    iam.m.RLock()
    defer iam.m.RUnlock()
    for _, ident := range iam.identities {
        for _, cred := range ident.Credentials {
            // println("checking", ident.Name, cred.AccessKey)
            if cred.AccessKey == accessKey {
                return ident, cred, true
            }
        }
    }
    glog.V(1).Infof("could not find accessKey %s", accessKey)
    return nil, nil, false
}

func (iam *IdentityAccessManagement) lookupAnonymous() (identity *Identity, found bool) {

    iam.m.RLock()
    defer iam.m.RUnlock()
    for _, ident := range iam.identities {
        if ident.Name == "anonymous" {
            return ident, true

@ -177,12 +187,14 @@ func (iam *IdentityAccessManagement) Auth(f http.HandlerFunc, action Action) htt
            r.Header.Set(xhttp.AmzIdentityId, identity.Name)
            if identity.isAdmin() {
                r.Header.Set(xhttp.AmzIsAdmin, "true")
            } else if _, ok := r.Header[xhttp.AmzIsAdmin]; ok {
                r.Header.Del(xhttp.AmzIsAdmin)
            }
        }
        f(w, r)
        return
    }
    s3err.WriteErrorResponse(w, errCode, r)
    s3err.WriteErrorResponse(w, r, errCode)
    }
}

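The auth_credentials hunks above make identity reloads safe under concurrency: IdentityAccessManagement gains a sync.RWMutex, writers build a complete identities slice and swap it in under the write lock, and every reader path takes the read lock. A simplified standalone sketch of the same discipline; the struct here is a stand-in, not the real SeaweedFS type:

// iam_lock_example.go: build-then-swap under Lock, lookups under RLock.
package main

import (
    "fmt"
    "sync"
)

type identity struct{ name, accessKey string }

type iam struct {
    m          sync.RWMutex
    identities []*identity
}

// reload mirrors loadS3ApiConfiguration: nothing is mutated in place,
// the fully built slice is atomically switched under the write lock.
func (a *iam) reload(newIdentities []*identity) {
    a.m.Lock()
    a.identities = newIdentities
    a.m.Unlock()
}

// lookupByAccessKey mirrors the read path: a shared lock suffices.
func (a *iam) lookupByAccessKey(key string) (*identity, bool) {
    a.m.RLock()
    defer a.m.RUnlock()
    for _, id := range a.identities {
        if id.accessKey == key {
            return id, true
        }
    }
    return nil, false
}

func main() {
    a := &iam{}
    a.reload([]*identity{{name: "admin", accessKey: "AKIAEXAMPLE"}})
    if id, ok := a.lookupByAccessKey("AKIAEXAMPLE"); ok {
        fmt.Println("found", id.name)
    }
}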
@ -36,7 +36,7 @@ func (s3a *S3ApiServer) ListBucketsHandler(w http.ResponseWriter, r *http.Reques
    if s3a.iam.isEnabled() {
        identity, s3Err = s3a.iam.authUser(r)
        if s3Err != s3err.ErrNone {
            s3err.WriteErrorResponse(w, s3Err, r)
            s3err.WriteErrorResponse(w, r, s3Err)
            return
        }
    }

@ -46,7 +46,7 @@ func (s3a *S3ApiServer) ListBucketsHandler(w http.ResponseWriter, r *http.Reques
    entries, _, err := s3a.list(s3a.option.BucketsPath, "", "", false, math.MaxInt32)

    if err != nil {
        s3err.WriteErrorResponse(w, s3err.ErrInternalError, r)
        s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
        return
    }

@ -73,7 +73,7 @@ func (s3a *S3ApiServer) ListBucketsHandler(w http.ResponseWriter, r *http.Reques
        Buckets: buckets,
    }

    writeSuccessResponseXML(w, response)
    writeSuccessResponseXML(w, r, response)
}

func (s3a *S3ApiServer) PutBucketHandler(w http.ResponseWriter, r *http.Request) {

@ -100,17 +100,24 @@ func (s3a *S3ApiServer) PutBucketHandler(w http.ResponseWriter, r *http.Request)
        }
        return nil
    }); err != nil {
        s3err.WriteErrorResponse(w, s3err.ErrInternalError, r)
        s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
        return
    }
    if exist, err := s3a.exists(s3a.option.BucketsPath, bucket, true); err == nil && exist {
        errCode = s3err.ErrBucketAlreadyExists
    }
    if errCode != s3err.ErrNone {
        s3err.WriteErrorResponse(w, errCode, r)
        s3err.WriteErrorResponse(w, r, errCode)
        return
    }

    if s3a.iam.isEnabled() {
        if _, errCode = s3a.iam.authRequest(r, s3_constants.ACTION_ADMIN); errCode != s3err.ErrNone {
            s3err.WriteErrorResponse(w, r, errCode)
            return
        }
    }

    fn := func(entry *filer_pb.Entry) {
        if identityId := r.Header.Get(xhttp.AmzIdentityId); identityId != "" {
            if entry.Extended == nil {

@ -123,11 +130,11 @@ func (s3a *S3ApiServer) PutBucketHandler(w http.ResponseWriter, r *http.Request)
    // create the folder for bucket, but lazily create actual collection
    if err := s3a.mkdir(s3a.option.BucketsPath, bucket, fn); err != nil {
        glog.Errorf("PutBucketHandler mkdir: %v", err)
        s3err.WriteErrorResponse(w, s3err.ErrInternalError, r)
        s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
        return
    }

    writeSuccessResponseEmpty(w)
    writeSuccessResponseEmpty(w, r)
}

func (s3a *S3ApiServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) {

@ -136,7 +143,7 @@ func (s3a *S3ApiServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Reque
    glog.V(3).Infof("DeleteBucketHandler %s", bucket)

    if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone {
        s3err.WriteErrorResponse(w, err, r)
        s3err.WriteErrorResponse(w, r, err)
        return
    }

@ -158,11 +165,11 @@ func (s3a *S3ApiServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Reque
    err = s3a.rm(s3a.option.BucketsPath, bucket, false, true)

    if err != nil {
        s3err.WriteErrorResponse(w, s3err.ErrInternalError, r)
        s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
        return
    }

    s3err.WriteEmptyResponse(w, http.StatusNoContent)
    s3err.WriteEmptyResponse(w, r, http.StatusNoContent)
}

func (s3a *S3ApiServer) HeadBucketHandler(w http.ResponseWriter, r *http.Request) {

@ -171,11 +178,11 @@ func (s3a *S3ApiServer) HeadBucketHandler(w http.ResponseWriter, r *http.Request
    glog.V(3).Infof("HeadBucketHandler %s", bucket)

    if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone {
        s3err.WriteErrorResponse(w, err, r)
        s3err.WriteErrorResponse(w, r, err)
        return
    }

    writeSuccessResponseEmpty(w)
    writeSuccessResponseEmpty(w, r)
}

func (s3a *S3ApiServer) checkBucket(r *http.Request, bucket string) s3err.ErrorCode {

@ -216,7 +223,7 @@ func (s3a *S3ApiServer) GetBucketAclHandler(w http.ResponseWriter, r *http.Reque
    glog.V(3).Infof("GetBucketAclHandler %s", bucket)

    if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone {
        s3err.WriteErrorResponse(w, err, r)
        s3err.WriteErrorResponse(w, r, err)
        return
    }

@ -245,7 +252,7 @@ func (s3a *S3ApiServer) GetBucketAclHandler(w http.ResponseWriter, r *http.Reque
            })
        }
    }
    writeSuccessResponseXML(w, response)
    writeSuccessResponseXML(w, r, response)
}

// GetBucketLifecycleConfigurationHandler Get Bucket Lifecycle configuration

@ -256,18 +263,18 @@ func (s3a *S3ApiServer) GetBucketLifecycleConfigurationHandler(w http.ResponseWr
    glog.V(3).Infof("GetBucketAclHandler %s", bucket)

    if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone {
        s3err.WriteErrorResponse(w, err, r)
        s3err.WriteErrorResponse(w, r, err)
        return
    }
    fc, err := filer.ReadFilerConf(s3a.option.Filer, s3a.option.GrpcDialOption, nil)
    if err != nil {
        glog.Errorf("GetBucketLifecycleConfigurationHandler: %s", err)
        s3err.WriteErrorResponse(w, s3err.ErrInternalError, r)
        s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
        return
    }
    ttls := fc.GetCollectionTtls(bucket)
    if len(ttls) == 0 {
        s3err.WriteErrorResponse(w, s3err.ErrNoSuchLifecycleConfiguration, r)
        s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchLifecycleConfiguration)
        return
    }
    response := Lifecycle{}

@ -285,14 +292,14 @@ func (s3a *S3ApiServer) GetBucketLifecycleConfigurationHandler(w http.ResponseWr
            Expiration: Expiration{Days: days, set: true},
        })
    }
    writeSuccessResponseXML(w, response)
    writeSuccessResponseXML(w, r, response)
}

// PutBucketLifecycleConfigurationHandler Put Bucket Lifecycle configuration
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html
func (s3a *S3ApiServer) PutBucketLifecycleConfigurationHandler(w http.ResponseWriter, r *http.Request) {

    s3err.WriteErrorResponse(w, s3err.ErrNotImplemented, r)
    s3err.WriteErrorResponse(w, r, s3err.ErrNotImplemented)

}

@ -300,6 +307,6 @@ func (s3a *S3ApiServer) PutBucketLifecycleConfigurationHandler(w http.ResponseWr
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html
func (s3a *S3ApiServer) DeleteBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {

    s3err.WriteEmptyResponse(w, http.StatusNoContent)
    s3err.WriteEmptyResponse(w, r, http.StatusNoContent)

}

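Beyond the writer-signature churn, PutBucketHandler gains two behaviors above: an existing bucket now maps to ErrBucketAlreadyExists, and when IAM is enabled, creation is gated on an admin-level auth check before the mkdir. A condensed sketch of that ordering with simplified stand-in helpers, not the real SeaweedFS signatures:

// put_bucket_flow_example.go: existence check first, then the admin gate.
package main

import (
    "fmt"
    "net/http"
    "net/http/httptest"
)

type errCode int

const (
    errNone errCode = iota
    errBucketAlreadyExists
)

func putBucket(w http.ResponseWriter, r *http.Request, exists func(string) bool, authAdmin func(*http.Request) errCode, bucket string) {
    code := errNone
    if exists(bucket) {
        code = errBucketAlreadyExists
    }
    if code != errNone {
        http.Error(w, "BucketAlreadyExists", http.StatusConflict)
        return
    }
    // New in this commit: admin permission is verified before mkdir.
    if code = authAdmin(r); code != errNone {
        http.Error(w, "AccessDenied", http.StatusForbidden)
        return
    }
    w.WriteHeader(http.StatusOK) // bucket folder would be created here
}

func main() {
    rec := httptest.NewRecorder()
    req := httptest.NewRequest("PUT", "/existing-bucket", nil)
    putBucket(rec, req, func(string) bool { return true },
        func(*http.Request) errCode { return errNone }, "existing-bucket")
    fmt.Println(rec.Code) // 409: the bucket already exists
}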
@ -26,12 +26,12 @@ func (s3a *S3ApiServer) AdjustedUrl(location *filer_pb.Location) string {
    return location.Url
}

func writeSuccessResponseXML(w http.ResponseWriter, response interface{}) {
    s3err.WriteXMLResponse(w, http.StatusOK, response)
func writeSuccessResponseXML(w http.ResponseWriter, r *http.Request, response interface{}) {
    s3err.WriteXMLResponse(w, r, http.StatusOK, response)
}

func writeSuccessResponseEmpty(w http.ResponseWriter) {
    s3err.WriteEmptyResponse(w, http.StatusOK)
func writeSuccessResponseEmpty(w http.ResponseWriter, r *http.Request) {
    s3err.WriteEmptyResponse(w, r, http.StatusOK)
}

func validateContentMd5(h http.Header) ([]byte, error) {

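This small file is the pivot for most hunks in the commit: the success and error writers now also receive the *http.Request, presumably so the s3err layer can attach request-scoped context such as access logging. A stand-in sketch of the threaded signature; the writer body is an assumption, not the real s3err implementation:

// response_helpers_example.go: threading r through the response writers.
package main

import (
    "encoding/xml"
    "fmt"
    "net/http"
    "net/http/httptest"
)

// writeXMLResponse stands in for s3err.WriteXMLResponse; having r in scope
// is what lets the real writer record method and path alongside the status.
func writeXMLResponse(w http.ResponseWriter, r *http.Request, status int, response interface{}) {
    w.Header().Set("Content-Type", "application/xml")
    w.WriteHeader(status)
    _ = xml.NewEncoder(w).Encode(response)
    _ = r // the real implementation would log r.Method and r.URL here
}

func writeSuccessResponseXML(w http.ResponseWriter, r *http.Request, response interface{}) {
    writeXMLResponse(w, r, http.StatusOK, response)
}

func main() {
    rec := httptest.NewRecorder()
    req := httptest.NewRequest("GET", "/bucket", nil)
    writeSuccessResponseXML(rec, req, struct {
        XMLName xml.Name `xml:"Ok"`
    }{})
    fmt.Println(rec.Code, rec.Body.String())
}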
@ -34,16 +34,16 @@ func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request
        dir, name := fullPath.DirAndName()
        entry, err := s3a.getEntry(dir, name)
        if err != nil {
            s3err.WriteErrorResponse(w, s3err.ErrInvalidCopySource, r)
            s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource)
            return
        }
        entry.Extended = weed_server.SaveAmzMetaData(r, entry.Extended, isReplace(r))
        err = s3a.touch(dir, name, entry)
        if err != nil {
            s3err.WriteErrorResponse(w, s3err.ErrInvalidCopySource, r)
            s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource)
            return
        }
        writeSuccessResponseXML(w, CopyObjectResult{
        writeSuccessResponseXML(w, r, CopyObjectResult{
            ETag:         fmt.Sprintf("%x", entry.Attributes.Md5),
            LastModified: time.Now().UTC(),
        })

@ -52,19 +52,19 @@ func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request

    // If source object is empty or bucket is empty, reply back invalid copy source.
    if srcObject == "" || srcBucket == "" {
        s3err.WriteErrorResponse(w, s3err.ErrInvalidCopySource, r)
        s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource)
        return
    }
    srcPath := util.FullPath(fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, srcBucket, srcObject))
    dir, name := srcPath.DirAndName()
    _, err = s3a.getEntry(dir, name)
    if err != nil {
        s3err.WriteErrorResponse(w, s3err.ErrInvalidCopySource, r)
        s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource)
        return
    }

    if srcBucket == dstBucket && srcObject == dstObject {
        s3err.WriteErrorResponse(w, s3err.ErrInvalidCopyDest, r)
        s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopyDest)
        return
    }

@ -75,7 +75,7 @@ func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request

    _, _, resp, err := util.DownloadFile(srcUrl, "")
    if err != nil {
        s3err.WriteErrorResponse(w, s3err.ErrInvalidCopySource, r)
        s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource)
        return
    }
    defer util.CloseResponse(resp)

@ -84,7 +84,7 @@ func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request
    etag, errCode := s3a.putToFiler(r, dstUrl, resp.Body)

    if errCode != s3err.ErrNone {
        s3err.WriteErrorResponse(w, errCode, r)
        s3err.WriteErrorResponse(w, r, errCode)
        return
    }

@ -95,7 +95,7 @@ func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request
        LastModified: time.Now().UTC(),
    }

    writeSuccessResponseXML(w, response)
    writeSuccessResponseXML(w, r, response)

}

@ -128,7 +128,7 @@ func (s3a *S3ApiServer) CopyObjectPartHandler(w http.ResponseWriter, r *http.Req
    srcBucket, srcObject := pathToBucketAndObject(cpSrcPath)
    // If source object is empty or bucket is empty, reply back invalid copy source.
    if srcObject == "" || srcBucket == "" {
        s3err.WriteErrorResponse(w, s3err.ErrInvalidCopySource, r)
        s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource)
        return
    }

@ -137,7 +137,7 @@ func (s3a *S3ApiServer) CopyObjectPartHandler(w http.ResponseWriter, r *http.Req

    partID, err := strconv.Atoi(partIDString)
    if err != nil {
        s3err.WriteErrorResponse(w, s3err.ErrInvalidPart, r)
        s3err.WriteErrorResponse(w, r, s3err.ErrInvalidPart)
        return
    }

@ -145,7 +145,7 @@ func (s3a *S3ApiServer) CopyObjectPartHandler(w http.ResponseWriter, r *http.Req

    // check partID with maximum part ID for multipart objects
    if partID > globalMaxPartID {
        s3err.WriteErrorResponse(w, s3err.ErrInvalidMaxParts, r)
        s3err.WriteErrorResponse(w, r, s3err.ErrInvalidMaxParts)
        return
    }

@ -158,7 +158,7 @@ func (s3a *S3ApiServer) CopyObjectPartHandler(w http.ResponseWriter, r *http.Req

    dataReader, err := util.ReadUrlAsReaderCloser(srcUrl, rangeHeader)
    if err != nil {
        s3err.WriteErrorResponse(w, s3err.ErrInvalidCopySource, r)
        s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource)
        return
    }
    defer dataReader.Close()

@ -167,7 +167,7 @@ func (s3a *S3ApiServer) CopyObjectPartHandler(w http.ResponseWriter, r *http.Req
    etag, errCode := s3a.putToFiler(r, dstUrl, dataReader)

    if errCode != s3err.ErrNone {
        s3err.WriteErrorResponse(w, errCode, r)
        s3err.WriteErrorResponse(w, r, errCode)
        return
    }

@ -178,7 +178,7 @@ func (s3a *S3ApiServer) CopyObjectPartHandler(w http.ResponseWriter, r *http.Req
        LastModified: time.Now().UTC(),
    }

    writeSuccessResponseXML(w, response)
    writeSuccessResponseXML(w, r, response)

}

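CopyObjectPartHandler above serves S3 UploadPartCopy by turning the part's CopySourceRange into a Range read against the source object via ReadUrlAsReaderCloser. A client-side sketch of exercising it with the AWS SDK for Go; the endpoint, bucket names, and upload ID are illustrative assumptions:

// upload_part_copy_example.go: ranged part copy against an S3 gateway.
package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3"
)

func main() {
    sess := session.Must(session.NewSession(&aws.Config{
        Endpoint:         aws.String("http://localhost:8333"), // assumed weed s3 endpoint
        Region:           aws.String("us-east-1"),
        S3ForcePathStyle: aws.Bool(true),
    }))
    svc := s3.New(sess)

    out, err := svc.UploadPartCopy(&s3.UploadPartCopyInput{
        Bucket:          aws.String("dst-bucket"),
        Key:             aws.String("big-object"),
        CopySource:      aws.String("/src-bucket/src-object"),
        CopySourceRange: aws.String("bytes=0-5242879"), // first 5 MiB part
        PartNumber:      aws.Int64(1),
        UploadId:        aws.String("example-upload-id"), // from CreateMultipartUpload
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("part etag:", aws.StringValue(out.CopyPartResult.ETag))
}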
@ -56,20 +56,20 @@ func (s3a *S3ApiServer) PutObjectHandler(w http.ResponseWriter, r *http.Request)
|
|||
|
||||
_, err := validateContentMd5(r.Header)
|
||||
if err != nil {
|
||||
s3err.WriteErrorResponse(w, s3err.ErrInvalidDigest, r)
|
||||
s3err.WriteErrorResponse(w, r, s3err.ErrInvalidDigest)
|
||||
return
|
||||
}
|
||||
|
||||
if r.Header.Get("Cache-Control") != "" {
|
||||
if _, err = cacheobject.ParseRequestCacheControl(r.Header.Get("Cache-Control")); err != nil {
|
||||
s3err.WriteErrorResponse(w, s3err.ErrInvalidDigest, r)
|
||||
s3err.WriteErrorResponse(w, r, s3err.ErrInvalidDigest)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if r.Header.Get("Expires") != "" {
|
||||
if _, err = time.Parse(http.TimeFormat, r.Header.Get("Expires")); err != nil {
    s3err.WriteErrorResponse(w, s3err.ErrInvalidDigest, r)
    s3err.WriteErrorResponse(w, r, s3err.ErrInvalidDigest)
    return
}
}

@ -87,12 +87,12 @@ func (s3a *S3ApiServer) PutObjectHandler(w http.ResponseWriter, r *http.Request)
_, s3ErrCode = s3a.iam.reqSignatureV4Verify(r)
}
if s3ErrCode != s3err.ErrNone {
    s3err.WriteErrorResponse(w, s3ErrCode, r)
    s3err.WriteErrorResponse(w, r, s3ErrCode)
    return
}
} else {
if authTypeStreamingSigned == rAuthType {
    s3err.WriteErrorResponse(w, s3err.ErrAuthNotSetup, r)
    s3err.WriteErrorResponse(w, r, s3err.ErrAuthNotSetup)
    return
}
}

@ -100,7 +100,7 @@ func (s3a *S3ApiServer) PutObjectHandler(w http.ResponseWriter, r *http.Request)

if strings.HasSuffix(object, "/") {
    if err := s3a.mkdir(s3a.option.BucketsPath, bucket+object, nil); err != nil {
        s3err.WriteErrorResponse(w, s3err.ErrInternalError, r)
        s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
        return
    }
} else {

@ -113,14 +113,14 @@ func (s3a *S3ApiServer) PutObjectHandler(w http.ResponseWriter, r *http.Request)
etag, errCode := s3a.putToFiler(r, uploadUrl, dataReader)

if errCode != s3err.ErrNone {
    s3err.WriteErrorResponse(w, errCode, r)
    s3err.WriteErrorResponse(w, r, errCode)
    return
}

setEtag(w, etag)
}

writeSuccessResponseEmpty(w)
writeSuccessResponseEmpty(w, r)
}

func urlPathEscape(object string) string {

@ -137,7 +137,7 @@ func (s3a *S3ApiServer) GetObjectHandler(w http.ResponseWriter, r *http.Request)
glog.V(3).Infof("GetObjectHandler %s %s", bucket, object)

if strings.HasSuffix(r.URL.Path, "/") {
    s3err.WriteErrorResponse(w, s3err.ErrNotImplemented, r)
    s3err.WriteErrorResponse(w, r, s3err.ErrNotImplemented)
    return
}

@ -215,13 +215,13 @@ func (s3a *S3ApiServer) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *h

deleteXMLBytes, err := io.ReadAll(r.Body)
if err != nil {
    s3err.WriteErrorResponse(w, s3err.ErrInternalError, r)
    s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
    return
}

deleteObjects := &DeleteObjectsRequest{}
if err := xml.Unmarshal(deleteXMLBytes, deleteObjects); err != nil {
    s3err.WriteErrorResponse(w, s3err.ErrMalformedXML, r)
    s3err.WriteErrorResponse(w, r, s3err.ErrMalformedXML)
    return
}

@ -273,7 +273,7 @@ func (s3a *S3ApiServer) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *h
}
deleteResp.Errors = deleteErrors

writeSuccessResponseXML(w, deleteResp)
writeSuccessResponseXML(w, r, deleteResp)

}

@ -317,7 +317,7 @@ func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, des

if err != nil {
    glog.Errorf("NewRequest %s: %v", destUrl, err)
    s3err.WriteErrorResponse(w, s3err.ErrInternalError, r)
    s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
    return
}

@ -346,19 +346,19 @@ func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, des

if postErr != nil {
    glog.Errorf("post to filer: %v", postErr)
    s3err.WriteErrorResponse(w, s3err.ErrInternalError, r)
    s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
    return
}
defer util.CloseResponse(resp)

if resp.StatusCode == http.StatusPreconditionFailed {
    s3err.WriteErrorResponse(w, s3err.ErrPreconditionFailed, r)
    s3err.WriteErrorResponse(w, r, s3err.ErrPreconditionFailed)
    return
}

if (resp.ContentLength == -1 || resp.StatusCode == 404) && resp.StatusCode != 304 {
    if r.Method != "DELETE" {
        s3err.WriteErrorResponse(w, s3err.ErrNoSuchKey, r)
        s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchKey)
        return
    }
}

@ -29,23 +29,23 @@ func (s3a *S3ApiServer) PostPolicyBucketHandler(w http.ResponseWriter, r *http.R

reader, err := r.MultipartReader()
if err != nil {
    s3err.WriteErrorResponse(w, s3err.ErrMalformedPOSTRequest, r)
    s3err.WriteErrorResponse(w, r, s3err.ErrMalformedPOSTRequest)
    return
}
form, err := reader.ReadForm(int64(5 * humanize.MiByte))
if err != nil {
    s3err.WriteErrorResponse(w, s3err.ErrMalformedPOSTRequest, r)
    s3err.WriteErrorResponse(w, r, s3err.ErrMalformedPOSTRequest)
    return
}
defer form.RemoveAll()

fileBody, fileName, fileSize, formValues, err := extractPostPolicyFormValues(form)
if err != nil {
    s3err.WriteErrorResponse(w, s3err.ErrMalformedPOSTRequest, r)
    s3err.WriteErrorResponse(w, r, s3err.ErrMalformedPOSTRequest)
    return
}
if fileBody == nil {
    s3err.WriteErrorResponse(w, s3err.ErrPOSTFileRequired, r)
    s3err.WriteErrorResponse(w, r, s3err.ErrPOSTFileRequired)
    return
}
defer fileBody.Close()

@ -63,7 +63,7 @@ func (s3a *S3ApiServer) PostPolicyBucketHandler(w http.ResponseWriter, r *http.R
if successRedirect != "" {
    redirectURL, err = url.Parse(successRedirect)
    if err != nil {
        s3err.WriteErrorResponse(w, s3err.ErrMalformedPOSTRequest, r)
        s3err.WriteErrorResponse(w, r, s3err.ErrMalformedPOSTRequest)
        return
    }
}

@ -71,13 +71,13 @@ func (s3a *S3ApiServer) PostPolicyBucketHandler(w http.ResponseWriter, r *http.R
// Verify policy signature.
errCode := s3a.iam.doesPolicySignatureMatch(formValues)
if errCode != s3err.ErrNone {
    s3err.WriteErrorResponse(w, errCode, r)
    s3err.WriteErrorResponse(w, r, errCode)
    return
}

policyBytes, err := base64.StdEncoding.DecodeString(formValues.Get("Policy"))
if err != nil {
    s3err.WriteErrorResponse(w, s3err.ErrMalformedPOSTRequest, r)
    s3err.WriteErrorResponse(w, r, s3err.ErrMalformedPOSTRequest)
    return
}

@ -86,7 +86,7 @@ func (s3a *S3ApiServer) PostPolicyBucketHandler(w http.ResponseWriter, r *http.R

postPolicyForm, err := policy.ParsePostPolicyForm(string(policyBytes))
if err != nil {
    s3err.WriteErrorResponse(w, s3err.ErrPostPolicyConditionInvalidFormat, r)
    s3err.WriteErrorResponse(w, r, s3err.ErrPostPolicyConditionInvalidFormat)
    return
}

@ -102,12 +102,12 @@ func (s3a *S3ApiServer) PostPolicyBucketHandler(w http.ResponseWriter, r *http.R
lengthRange := postPolicyForm.Conditions.ContentLengthRange
if lengthRange.Valid {
    if fileSize < lengthRange.Min {
        s3err.WriteErrorResponse(w, s3err.ErrEntityTooSmall, r)
        s3err.WriteErrorResponse(w, r, s3err.ErrEntityTooSmall)
        return
    }

    if fileSize > lengthRange.Max {
        s3err.WriteErrorResponse(w, s3err.ErrEntityTooLarge, r)
        s3err.WriteErrorResponse(w, r, s3err.ErrEntityTooLarge)
        return
    }
}

@ -118,7 +118,7 @@ func (s3a *S3ApiServer) PostPolicyBucketHandler(w http.ResponseWriter, r *http.R
etag, errCode := s3a.putToFiler(r, uploadUrl, fileBody)

if errCode != s3err.ErrNone {
    s3err.WriteErrorResponse(w, errCode, r)
    s3err.WriteErrorResponse(w, r, errCode)
    return
}

@ -126,7 +126,7 @@ func (s3a *S3ApiServer) PostPolicyBucketHandler(w http.ResponseWriter, r *http.R
// Replace raw query params..
redirectURL.RawQuery = getRedirectPostRawQuery(bucket, object, etag)
w.Header().Set("Location", redirectURL.String())
s3err.WriteEmptyResponse(w, http.StatusSeeOther)
s3err.WriteEmptyResponse(w, r, http.StatusSeeOther)
return
}

@ -141,11 +141,11 @@ func (s3a *S3ApiServer) PostPolicyBucketHandler(w http.ResponseWriter, r *http.R
    ETag: `"` + etag + `"`,
    Location: w.Header().Get("Location"),
}
s3err.WriteXMLResponse(w, http.StatusCreated, resp)
s3err.WriteXMLResponse(w, r, http.StatusCreated, resp)
case "200":
    s3err.WriteEmptyResponse(w, http.StatusOK)
    s3err.WriteEmptyResponse(w, r, http.StatusOK)
default:
    writeSuccessResponseEmpty(w)
    writeSuccessResponseEmpty(w, r)
}

}

@ -45,11 +45,11 @@ func (s3a *S3ApiServer) NewMultipartUploadHandler(w http.ResponseWriter, r *http
glog.V(2).Info("NewMultipartUploadHandler", string(s3err.EncodeXMLResponse(response)), errCode)

if errCode != s3err.ErrNone {
    s3err.WriteErrorResponse(w, errCode, r)
    s3err.WriteErrorResponse(w, r, errCode)
    return
}

writeSuccessResponseXML(w, response)
writeSuccessResponseXML(w, r, response)

}

@ -69,11 +69,11 @@ func (s3a *S3ApiServer) CompleteMultipartUploadHandler(w http.ResponseWriter, r
glog.V(2).Info("CompleteMultipartUploadHandler", string(s3err.EncodeXMLResponse(response)), errCode)

if errCode != s3err.ErrNone {
    s3err.WriteErrorResponse(w, errCode, r)
    s3err.WriteErrorResponse(w, r, errCode)
    return
}

writeSuccessResponseXML(w, response)
writeSuccessResponseXML(w, r, response)

}

@ -91,13 +91,13 @@ func (s3a *S3ApiServer) AbortMultipartUploadHandler(w http.ResponseWriter, r *ht
})

if errCode != s3err.ErrNone {
    s3err.WriteErrorResponse(w, errCode, r)
    s3err.WriteErrorResponse(w, r, errCode)
    return
}

glog.V(2).Info("AbortMultipartUploadHandler", string(s3err.EncodeXMLResponse(response)))

writeSuccessResponseXML(w, response)
writeSuccessResponseXML(w, r, response)

}

@ -107,13 +107,13 @@ func (s3a *S3ApiServer) ListMultipartUploadsHandler(w http.ResponseWriter, r *ht

prefix, keyMarker, uploadIDMarker, delimiter, maxUploads, encodingType := getBucketMultipartResources(r.URL.Query())
if maxUploads < 0 {
    s3err.WriteErrorResponse(w, s3err.ErrInvalidMaxUploads, r)
    s3err.WriteErrorResponse(w, r, s3err.ErrInvalidMaxUploads)
    return
}
if keyMarker != "" {
    // Marker not common with prefix is not implemented.
    if !strings.HasPrefix(keyMarker, prefix) {
        s3err.WriteErrorResponse(w, s3err.ErrNotImplemented, r)
        s3err.WriteErrorResponse(w, r, s3err.ErrNotImplemented)
        return
    }
}

@ -131,13 +131,13 @@ func (s3a *S3ApiServer) ListMultipartUploadsHandler(w http.ResponseWriter, r *ht
glog.V(2).Infof("ListMultipartUploadsHandler %s errCode=%d", string(s3err.EncodeXMLResponse(response)), errCode)

if errCode != s3err.ErrNone {
    s3err.WriteErrorResponse(w, errCode, r)
    s3err.WriteErrorResponse(w, r, errCode)
    return
}

// TODO handle encodingType

writeSuccessResponseXML(w, response)
writeSuccessResponseXML(w, r, response)
}

// ListObjectPartsHandler - Lists object parts in a multipart upload.

@ -146,11 +146,11 @@ func (s3a *S3ApiServer) ListObjectPartsHandler(w http.ResponseWriter, r *http.Re

uploadID, partNumberMarker, maxParts, _ := getObjectResources(r.URL.Query())
if partNumberMarker < 0 {
    s3err.WriteErrorResponse(w, s3err.ErrInvalidPartNumberMarker, r)
    s3err.WriteErrorResponse(w, r, s3err.ErrInvalidPartNumberMarker)
    return
}
if maxParts < 0 {
    s3err.WriteErrorResponse(w, s3err.ErrInvalidMaxParts, r)
    s3err.WriteErrorResponse(w, r, s3err.ErrInvalidMaxParts)
    return
}

@ -165,11 +165,11 @@ func (s3a *S3ApiServer) ListObjectPartsHandler(w http.ResponseWriter, r *http.Re
glog.V(2).Infof("ListObjectPartsHandler %s count=%d", string(s3err.EncodeXMLResponse(response)), len(response.Part))

if errCode != s3err.ErrNone {
    s3err.WriteErrorResponse(w, errCode, r)
    s3err.WriteErrorResponse(w, r, errCode)
    return
}

writeSuccessResponseXML(w, response)
writeSuccessResponseXML(w, r, response)

}

@ -180,18 +180,18 @@ func (s3a *S3ApiServer) PutObjectPartHandler(w http.ResponseWriter, r *http.Requ
uploadID := r.URL.Query().Get("uploadId")
exists, err := s3a.exists(s3a.genUploadsFolder(bucket), uploadID, true)
if !exists {
    s3err.WriteErrorResponse(w, s3err.ErrNoSuchUpload, r)
    s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchUpload)
    return
}

partIDString := r.URL.Query().Get("partNumber")
partID, err := strconv.Atoi(partIDString)
if err != nil {
    s3err.WriteErrorResponse(w, s3err.ErrInvalidPart, r)
    s3err.WriteErrorResponse(w, r, s3err.ErrInvalidPart)
    return
}
if partID > globalMaxPartID {
    s3err.WriteErrorResponse(w, s3err.ErrInvalidMaxParts, r)
    s3err.WriteErrorResponse(w, r, s3err.ErrInvalidMaxParts)
    return
}

@ -208,7 +208,7 @@ func (s3a *S3ApiServer) PutObjectPartHandler(w http.ResponseWriter, r *http.Requ
_, s3ErrCode = s3a.iam.reqSignatureV4Verify(r)
}
if s3ErrCode != s3err.ErrNone {
    s3err.WriteErrorResponse(w, s3ErrCode, r)
    s3err.WriteErrorResponse(w, r, s3ErrCode)
    return
}
}

@ -225,13 +225,13 @@ func (s3a *S3ApiServer) PutObjectPartHandler(w http.ResponseWriter, r *http.Requ

etag, errCode := s3a.putToFiler(r, uploadUrl, dataReader)
if errCode != s3err.ErrNone {
    s3err.WriteErrorResponse(w, errCode, r)
    s3err.WriteErrorResponse(w, r, errCode)
    return
}

setEtag(w, etag)

writeSuccessResponseEmpty(w)
writeSuccessResponseEmpty(w, r)

}

@ -26,15 +26,15 @@ func (s3a *S3ApiServer) GetObjectTaggingHandler(w http.ResponseWriter, r *http.R
if err != nil {
    if err == filer_pb.ErrNotFound {
        glog.Errorf("GetObjectTaggingHandler %s: %v", r.URL, err)
        s3err.WriteErrorResponse(w, s3err.ErrNoSuchKey, r)
        s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchKey)
    } else {
        glog.Errorf("GetObjectTaggingHandler %s: %v", r.URL, err)
        s3err.WriteErrorResponse(w, s3err.ErrInternalError, r)
        s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
    }
    return
}

writeSuccessResponseXML(w, FromTags(tags))
writeSuccessResponseXML(w, r, FromTags(tags))

}

@ -52,29 +52,29 @@ func (s3a *S3ApiServer) PutObjectTaggingHandler(w http.ResponseWriter, r *http.R
input, err := io.ReadAll(io.LimitReader(r.Body, r.ContentLength))
if err != nil {
    glog.Errorf("PutObjectTaggingHandler read input %s: %v", r.URL, err)
    s3err.WriteErrorResponse(w, s3err.ErrInternalError, r)
    s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
    return
}
if err = xml.Unmarshal(input, tagging); err != nil {
    glog.Errorf("PutObjectTaggingHandler Unmarshal %s: %v", r.URL, err)
    s3err.WriteErrorResponse(w, s3err.ErrMalformedXML, r)
    s3err.WriteErrorResponse(w, r, s3err.ErrMalformedXML)
    return
}
tags := tagging.ToTags()
if len(tags) > 10 {
    glog.Errorf("PutObjectTaggingHandler tags %s: %d tags more than 10", r.URL, len(tags))
    s3err.WriteErrorResponse(w, s3err.ErrInvalidTag, r)
    s3err.WriteErrorResponse(w, r, s3err.ErrInvalidTag)
    return
}
for k, v := range tags {
    if len(k) > 128 {
        glog.Errorf("PutObjectTaggingHandler tags %s: tag key %s longer than 128", r.URL, k)
        s3err.WriteErrorResponse(w, s3err.ErrInvalidTag, r)
        s3err.WriteErrorResponse(w, r, s3err.ErrInvalidTag)
        return
    }
    if len(v) > 256 {
        glog.Errorf("PutObjectTaggingHandler tags %s: tag value %s longer than 256", r.URL, v)
        s3err.WriteErrorResponse(w, s3err.ErrInvalidTag, r)
        s3err.WriteErrorResponse(w, r, s3err.ErrInvalidTag)
        return
    }
}

@ -82,10 +82,10 @@ func (s3a *S3ApiServer) PutObjectTaggingHandler(w http.ResponseWriter, r *http.R
if err = s3a.setTags(dir, name, tagging.ToTags()); err != nil {
    if err == filer_pb.ErrNotFound {
        glog.Errorf("PutObjectTaggingHandler setTags %s: %v", r.URL, err)
        s3err.WriteErrorResponse(w, s3err.ErrNoSuchKey, r)
        s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchKey)
    } else {
        glog.Errorf("PutObjectTaggingHandler setTags %s: %v", r.URL, err)
        s3err.WriteErrorResponse(w, s3err.ErrInternalError, r)
        s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
    }
    return
}

@ -108,10 +108,10 @@ func (s3a *S3ApiServer) DeleteObjectTaggingHandler(w http.ResponseWriter, r *htt
if err != nil {
    if err == filer_pb.ErrNotFound {
        glog.Errorf("DeleteObjectTaggingHandler %s: %v", r.URL, err)
        s3err.WriteErrorResponse(w, s3err.ErrNoSuchKey, r)
        s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchKey)
    } else {
        glog.Errorf("DeleteObjectTaggingHandler %s: %v", r.URL, err)
        s3err.WriteErrorResponse(w, s3err.ErrInternalError, r)
        s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
    }
    return
}

@ -45,11 +45,11 @@ func (s3a *S3ApiServer) ListObjectsV2Handler(w http.ResponseWriter, r *http.Requ
originalPrefix, continuationToken, startAfter, delimiter, _, maxKeys := getListObjectsV2Args(r.URL.Query())

if maxKeys < 0 {
    s3err.WriteErrorResponse(w, s3err.ErrInvalidMaxKeys, r)
    s3err.WriteErrorResponse(w, r, s3err.ErrInvalidMaxKeys)
    return
}
if delimiter != "" && delimiter != "/" {
    s3err.WriteErrorResponse(w, s3err.ErrNotImplemented, r)
    s3err.WriteErrorResponse(w, r, s3err.ErrNotImplemented)
    return
}

@ -61,13 +61,13 @@ func (s3a *S3ApiServer) ListObjectsV2Handler(w http.ResponseWriter, r *http.Requ
response, err := s3a.listFilerEntries(bucket, originalPrefix, maxKeys, marker, delimiter)

if err != nil {
    s3err.WriteErrorResponse(w, s3err.ErrInternalError, r)
    s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
    return
}

if len(response.Contents) == 0 {
    if exists, existErr := s3a.exists(s3a.option.BucketsPath, bucket, true); existErr == nil && !exists {
        s3err.WriteErrorResponse(w, s3err.ErrNoSuchBucket, r)
        s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchBucket)
        return
    }
}

@ -87,7 +87,7 @@ func (s3a *S3ApiServer) ListObjectsV2Handler(w http.ResponseWriter, r *http.Requ
    StartAfter: startAfter,
}

writeSuccessResponseXML(w, responseV2)
writeSuccessResponseXML(w, r, responseV2)
}

func (s3a *S3ApiServer) ListObjectsV1Handler(w http.ResponseWriter, r *http.Request) {

@ -101,29 +101,29 @@ func (s3a *S3ApiServer) ListObjectsV1Handler(w http.ResponseWriter, r *http.Requ
originalPrefix, marker, delimiter, maxKeys := getListObjectsV1Args(r.URL.Query())

if maxKeys < 0 {
    s3err.WriteErrorResponse(w, s3err.ErrInvalidMaxKeys, r)
    s3err.WriteErrorResponse(w, r, s3err.ErrInvalidMaxKeys)
    return
}
if delimiter != "" && delimiter != "/" {
    s3err.WriteErrorResponse(w, s3err.ErrNotImplemented, r)
    s3err.WriteErrorResponse(w, r, s3err.ErrNotImplemented)
    return
}

response, err := s3a.listFilerEntries(bucket, originalPrefix, maxKeys, marker, delimiter)

if err != nil {
    s3err.WriteErrorResponse(w, s3err.ErrInternalError, r)
    s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
    return
}

if len(response.Contents) == 0 {
    if exists, existErr := s3a.exists(s3a.option.BucketsPath, bucket, true); existErr == nil && !exists {
        s3err.WriteErrorResponse(w, s3err.ErrNoSuchBucket, r)
        s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchBucket)
        return
    }
}

writeSuccessResponseXML(w, response)
writeSuccessResponseXML(w, r, response)
}

func (s3a *S3ApiServer) listFilerEntries(bucket string, originalPrefix string, maxKeys int, marker string, delimiter string) (response ListBucketResult, err error) {

@ -220,12 +220,16 @@ func (s3a *S3ApiServer) doListFilerEntries(client filer_pb.SeaweedFilerClient, d
    err = subErr
    return
}
counter += subCounter
isTruncated = isTruncated || subIsTruncated
maxKeys -= subCounter
nextMarker = subDir + "/" + subNextMarker
// finished processing this sub directory
marker = subDir
}
if maxKeys <= 0 {
    return
}

// now marker is also a direct child of dir
request := &filer_pb.ListEntriesRequest{

@ -66,7 +66,7 @@ func (s3a *S3ApiServer) registerRouter(router *mux.Router) {
// HeadObject
bucket.Methods("HEAD").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.HeadObjectHandler, ACTION_READ), "GET"))
// HeadBucket
bucket.Methods("HEAD").HandlerFunc(track(s3a.iam.Auth(s3a.HeadBucketHandler, ACTION_ADMIN), "GET"))
bucket.Methods("HEAD").HandlerFunc(track(s3a.iam.Auth(s3a.HeadBucketHandler, ACTION_READ), "GET"))

// CopyObjectPart
bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", `.*?(\/|%2F).*?`).HandlerFunc(track(s3a.iam.Auth(s3a.CopyObjectPartHandler, ACTION_WRITE), "PUT")).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")

@ -137,7 +137,7 @@ func (s3a *S3ApiServer) registerRouter(router *mux.Router) {
bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.ListObjectsV1Handler, ACTION_LIST), "LIST"))

// PutBucket
bucket.Methods("PUT").HandlerFunc(track(s3a.iam.Auth(s3a.PutBucketHandler, ACTION_ADMIN), "PUT"))
bucket.Methods("PUT").HandlerFunc(track(s3a.PutBucketHandler, "PUT"))

// DeleteBucket
bucket.Methods("DELETE").HandlerFunc(track(s3a.iam.Auth(s3a.DeleteBucketHandler, ACTION_WRITE), "DELETE"))

@ -4,5 +4,5 @@ import "net/http"

func (s3a *S3ApiServer) StatusHandler(w http.ResponseWriter, r *http.Request) {
    // write out the response code and content type header
    writeSuccessResponseEmpty(w)
    writeSuccessResponseEmpty(w, r)
}

@ -19,15 +19,15 @@ const (
    MimeXML mimeType = "application/xml"
)

func WriteXMLResponse(w http.ResponseWriter, statusCode int, response interface{}) {
    WriteResponse(w, statusCode, EncodeXMLResponse(response), MimeXML)
func WriteXMLResponse(w http.ResponseWriter, r *http.Request, statusCode int, response interface{}) {
    WriteResponse(w, r, statusCode, EncodeXMLResponse(response), MimeXML)
}

func WriteEmptyResponse(w http.ResponseWriter, statusCode int) {
    WriteResponse(w, statusCode, []byte{}, mimeNone)
func WriteEmptyResponse(w http.ResponseWriter, r *http.Request, statusCode int) {
    WriteResponse(w, r, statusCode, []byte{}, mimeNone)
}

func WriteErrorResponse(w http.ResponseWriter, errorCode ErrorCode, r *http.Request) {
func WriteErrorResponse(w http.ResponseWriter, r *http.Request, errorCode ErrorCode) {
    vars := mux.Vars(r)
    bucket := vars["bucket"]
    object := vars["object"]

@ -38,7 +38,7 @@ func WriteErrorResponse(w http.ResponseWriter, errorCode ErrorCode, r *http.Requ
apiError := GetAPIError(errorCode)
errorResponse := getRESTErrorResponse(apiError, r.URL.Path, bucket, object)
encodedErrorResponse := EncodeXMLResponse(errorResponse)
WriteResponse(w, apiError.HTTPStatusCode, encodedErrorResponse, MimeXML)
WriteResponse(w, r, apiError.HTTPStatusCode, encodedErrorResponse, MimeXML)
}

func getRESTErrorResponse(err APIError, resource string, bucket, object string) RESTErrorResponse {

@ -61,13 +61,17 @@ func EncodeXMLResponse(response interface{}) []byte {
    return bytesBuffer.Bytes()
}

func setCommonHeaders(w http.ResponseWriter) {
func setCommonHeaders(w http.ResponseWriter, r *http.Request) {
    w.Header().Set("x-amz-request-id", fmt.Sprintf("%d", time.Now().UnixNano()))
    w.Header().Set("Accept-Ranges", "bytes")
    if r.Header.Get("Origin") != "" {
        w.Header().Set("Access-Control-Allow-Origin", "*")
        w.Header().Set("Access-Control-Allow-Credentials", "true")
    }
}

func WriteResponse(w http.ResponseWriter, statusCode int, response []byte, mType mimeType) {
    setCommonHeaders(w)
func WriteResponse(w http.ResponseWriter, r *http.Request, statusCode int, response []byte, mType mimeType) {
    setCommonHeaders(w, r)
    if response != nil {
        w.Header().Set("Content-Length", strconv.Itoa(len(response)))
    }

@ -88,5 +92,5 @@ func WriteResponse(w http.ResponseWriter, statusCode int, response []byte, mType
// If none of the http routes match respond with MethodNotAllowed
func NotFoundHandler(w http.ResponseWriter, r *http.Request) {
    glog.V(0).Infof("unsupported %s %s", r.Method, r.RequestURI)
    WriteErrorResponse(w, ErrMethodNotAllowed, r)
    WriteErrorResponse(w, r, ErrMethodNotAllowed)
}

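The recurring change in the S3 handlers above is purely mechanical: every response helper now takes the *http.Request right after the ResponseWriter, so setCommonHeaders can emit CORS headers when the request carries an Origin. A minimal sketch of a caller under the new signatures (exampleHandler is hypothetical; the s3err import path is assumed to be the package edited above):

    package s3api

    import (
        "net/http"

        "github.com/chrislusf/seaweedfs/weed/s3api/s3err"
    )

    // exampleHandler illustrates the reordered helper signatures: the
    // request travels with the writer, so an Origin header on r turns on
    // Access-Control-Allow-* headers in the response.
    func exampleHandler(w http.ResponseWriter, r *http.Request) {
        if r.Header.Get("Content-MD5") == "" {
            s3err.WriteErrorResponse(w, r, s3err.ErrInvalidDigest)
            return
        }
        writeSuccessResponseEmpty(w, r)
    }
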
@ -2,7 +2,6 @@ package s3api

import (
    stats_collect "github.com/chrislusf/seaweedfs/weed/stats"
    "github.com/chrislusf/seaweedfs/weed/util"
    "net/http"
    "strconv"
    "time"

@ -28,7 +27,7 @@ func (r *StatusRecorder) Flush() {

func track(f http.HandlerFunc, action string) http.HandlerFunc {
    return func(w http.ResponseWriter, r *http.Request) {
        w.Header().Set("Server", "SeaweedFS S3 "+util.VERSION)
        w.Header().Set("Server", "SeaweedFS S3")
        recorder := NewStatusResponseWriter(w)
        start := time.Now()
        f(recorder, r)

@ -17,7 +17,7 @@ import (
    "time"
)

func (fs *FilerServer) DownloadToLocal(ctx context.Context, req *filer_pb.DownloadToLocalRequest) (*filer_pb.DownloadToLocalResponse, error) {
func (fs *FilerServer) CacheRemoteObjectToLocalCluster(ctx context.Context, req *filer_pb.CacheRemoteObjectToLocalClusterRequest) (*filer_pb.CacheRemoteObjectToLocalClusterResponse, error) {

// load all mappings
mappingEntry, err := fs.filer.FindEntry(ctx, util.JoinPath(filer.DirectoryEtcRemote, filer.REMOTE_STORAGE_MOUNT_FILE))

@ -57,7 +57,7 @@ func (fs *FilerServer) DownloadToLocal(ctx context.Context, req *filer_pb.Downlo
    return nil, err
}

resp := &filer_pb.DownloadToLocalResponse{}
resp := &filer_pb.CacheRemoteObjectToLocalClusterResponse{}
if entry.Remote == nil || entry.Remote.RemoteSize == 0 {
    return resp, nil
}

@ -161,7 +161,7 @@ func (fs *FilerServer) moveSelfEntry(ctx context.Context, stream filer_pb.Seawee
if err := stream.Send(&filer_pb.StreamRenameEntryResponse{
    Directory: string(newParent),
    EventNotification: &filer_pb.EventNotification{
        OldEntry: &filer_pb.Entry{
        OldEntry: &filer_pb.Entry{
            Name: entry.Name(),
        },
        NewEntry: newEntry.ToProtoEntry(),

@ -61,7 +61,6 @@ type FilerOption struct {
    recursiveDelete bool
    Cipher bool
    SaveToFilerLimit int64
    Filers []pb.ServerAddress
    ConcurrentUploadLimit int64
}

@ -108,7 +107,7 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption)
fs.checkWithMaster()

go stats.LoopPushingMetric("filer", string(fs.option.Host), fs.metricsAddress, fs.metricsIntervalSec)
go fs.filer.KeepConnectedToMaster()
go fs.filer.KeepMasterClientConnected()

v := util.GetViper()
if !util.LoadConfiguration("filer", false) {

@ -143,7 +142,7 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption)
    readonlyMux.HandleFunc("/", fs.readonlyFilerHandler)
}

fs.filer.AggregateFromPeers(option.Host, option.Filers)
fs.filer.AggregateFromPeers(option.Host)

fs.filer.LoadBuckets()

@ -169,11 +169,11 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
chunks := entry.Chunks
if entry.IsInRemoteOnly() {
    dir, name := entry.FullPath.DirAndName()
    if resp, err := fs.DownloadToLocal(context.Background(), &filer_pb.DownloadToLocalRequest{
    if resp, err := fs.CacheRemoteObjectToLocalCluster(context.Background(), &filer_pb.CacheRemoteObjectToLocalClusterRequest{
        Directory: dir,
        Name: name,
    }); err != nil {
        glog.Errorf("DownloadToLocal %s: %v", entry.FullPath, err)
        glog.Errorf("CacheRemoteObjectToLocalCluster %s: %v", entry.FullPath, err)
        return fmt.Errorf("cache %s: %v", entry.FullPath, err)
    } else {
        chunks = resp.Entry.Chunks

@ -6,7 +6,6 @@ import (
    "github.com/chrislusf/seaweedfs/weed/storage/backend"
    "github.com/chrislusf/seaweedfs/weed/util"
    "net"
    "strings"
    "time"

    "github.com/chrislusf/raft"

@ -45,11 +44,7 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ
}

if len(message.DeletedVids) > 0 {
    ms.clientChansLock.RLock()
    for _, ch := range ms.clientChans {
        ch <- message
    }
    ms.clientChansLock.RUnlock()
    ms.broadcastToClients(&master_pb.KeepConnectedResponse{VolumeLocation: message})
}
}
}()

@ -154,12 +149,7 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ

}
if len(message.NewVids) > 0 || len(message.DeletedVids) > 0 {
    ms.clientChansLock.RLock()
    for host, ch := range ms.clientChans {
        glog.V(0).Infof("master send to %s: %s", host, message.String())
        ch <- message
    }
    ms.clientChansLock.RUnlock()
    ms.broadcastToClients(&master_pb.KeepConnectedResponse{VolumeLocation: message})
}

// tell the volume servers about the leader

@ -195,12 +185,20 @@ func (ms *MasterServer) KeepConnected(stream master_pb.Seaweed_KeepConnectedServ
// buffer by 1 so we don't end up getting stuck writing to stopChan forever
stopChan := make(chan bool, 1)

clientName, messageChan := ms.addClient(req.Name, peerAddress)
clientName, messageChan := ms.addClient(req.ClientType, peerAddress)
for _, update := range ms.Cluster.AddClusterNode(req.ClientType, peerAddress, req.Version) {
    ms.broadcastToClients(update)
}

defer ms.deleteClient(clientName)
defer func() {
    for _, update := range ms.Cluster.RemoveClusterNode(req.ClientType, peerAddress) {
        ms.broadcastToClients(update)
    }
    ms.deleteClient(clientName)
}()

for _, message := range ms.Topo.ToVolumeLocations() {
    if sendErr := stream.Send(message); sendErr != nil {
    if sendErr := stream.Send(&master_pb.KeepConnectedResponse{VolumeLocation: message}); sendErr != nil {
        return sendErr
    }
}

@ -235,21 +233,31 @@ func (ms *MasterServer) KeepConnected(stream master_pb.Seaweed_KeepConnectedServ

}

func (ms *MasterServer) broadcastToClients(message *master_pb.KeepConnectedResponse) {
    ms.clientChansLock.RLock()
    for _, ch := range ms.clientChans {
        ch <- message
    }
    ms.clientChansLock.RUnlock()
}

func (ms *MasterServer) informNewLeader(stream master_pb.Seaweed_KeepConnectedServer) error {
    leader, err := ms.Topo.Leader()
    if err != nil {
        glog.Errorf("topo leader: %v", err)
        return raft.NotLeaderError
    }
    if err := stream.Send(&master_pb.VolumeLocation{
        Leader: string(leader),
    if err := stream.Send(&master_pb.KeepConnectedResponse{
        VolumeLocation: &master_pb.VolumeLocation{
            Leader: string(leader),
        },
    }); err != nil {
        return err
    }
    return nil
}

func (ms *MasterServer) addClient(clientType string, clientAddress pb.ServerAddress) (clientName string, messageChan chan *master_pb.VolumeLocation) {
func (ms *MasterServer) addClient(clientType string, clientAddress pb.ServerAddress) (clientName string, messageChan chan *master_pb.KeepConnectedResponse) {
    clientName = clientType + "@" + string(clientAddress)
    glog.V(0).Infof("+ client %v", clientName)

@ -258,7 +266,7 @@ func (ms *MasterServer) addClient(clientType string, clientAddress pb.ServerAddr
// trying to send to it in SendHeartbeat and so we can't lock the
// clientChansLock to remove the channel and we're stuck writing to it
// 100 is probably overkill
messageChan = make(chan *master_pb.VolumeLocation, 100)
messageChan = make(chan *master_pb.KeepConnectedResponse, 100)

ms.clientChansLock.Lock()
ms.clientChans[clientName] = messageChan

@ -295,19 +303,6 @@ func findClientAddress(ctx context.Context, grpcPort uint32) string {

}

func (ms *MasterServer) ListMasterClients(ctx context.Context, req *master_pb.ListMasterClientsRequest) (*master_pb.ListMasterClientsResponse, error) {
    resp := &master_pb.ListMasterClientsResponse{}
    ms.clientChansLock.RLock()
    defer ms.clientChansLock.RUnlock()

    for k := range ms.clientChans {
        if strings.HasPrefix(k, req.ClientType+"@") {
            resp.GrpcAddresses = append(resp.GrpcAddresses, k[len(req.ClientType)+1:])
        }
    }
    return resp, nil
}

func (ms *MasterServer) GetMasterConfiguration(ctx context.Context, req *master_pb.GetMasterConfigurationRequest) (*master_pb.GetMasterConfigurationResponse, error) {

// tell the volume servers about the leader

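The commit collapses the two copy-pasted fan-out loops in SendHeartbeat into the new broadcastToClients helper: a read-locked walk over the per-client buffered channels. A self-contained sketch of that pattern (Message stands in for *master_pb.KeepConnectedResponse; names here are illustrative, not from the commit):

    package sketch

    import "sync"

    type Message struct{ Payload string }

    type broadcaster struct {
        mu    sync.RWMutex
        chans map[string]chan *Message
    }

    // broadcast delivers one message to every registered client channel.
    // RLock suffices because the map is only read here; each channel is
    // buffered (the commit uses a depth of 100) so a slow client does not
    // stall the walk until its buffer fills.
    func (b *broadcaster) broadcast(m *Message) {
        b.mu.RLock()
        defer b.mu.RUnlock()
        for _, ch := range b.chans {
            ch <- m
        }
    }
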
21
weed/server/master_grpc_server_cluster.go
Normal file

@ -0,0 +1,21 @@
package weed_server

import (
    "context"
    "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
)

func (ms *MasterServer) ListClusterNodes(ctx context.Context, req *master_pb.ListClusterNodesRequest) (*master_pb.ListClusterNodesResponse, error) {
    resp := &master_pb.ListClusterNodesResponse{}

    clusterNodes := ms.Cluster.ListClusterNode(req.ClientType)

    for _, node := range clusterNodes {
        resp.ClusterNodes = append(resp.ClusterNodes, &master_pb.ListClusterNodesResponse_ClusterNode{
            Address: string(node.Address),
            Version: node.Version,
            IsLeader: ms.Cluster.IsOneLeader(node.Address),
        })
    }
    return resp, nil
}

@ -2,6 +2,7 @@ package weed_server

import (
    "fmt"
    "github.com/chrislusf/seaweedfs/weed/cluster"
    "github.com/chrislusf/seaweedfs/weed/pb"
    "net/http"
    "net/http/httputil"

@ -60,13 +61,15 @@ type MasterServer struct {

// notifying clients
clientChansLock sync.RWMutex
clientChans map[string]chan *master_pb.VolumeLocation
clientChans map[string]chan *master_pb.KeepConnectedResponse

grpcDialOption grpc.DialOption

MasterClient *wdclient.MasterClient

adminLocks *AdminLocks

Cluster *cluster.Cluster
}

func NewMasterServer(r *mux.Router, option *MasterOption, peers []pb.ServerAddress) *MasterServer {

@ -99,10 +102,11 @@ func NewMasterServer(r *mux.Router, option *MasterOption, peers []pb.ServerAddre
    option: option,
    preallocateSize: preallocateSize,
    vgCh: make(chan *topology.VolumeGrowRequest, 1<<6),
    clientChans: make(map[string]chan *master_pb.VolumeLocation),
    clientChans: make(map[string]chan *master_pb.KeepConnectedResponse),
    grpcDialOption: grpcDialOption,
    MasterClient: wdclient.NewMasterClient(grpcDialOption, "master", option.Master, "", peers),
    MasterClient: wdclient.NewMasterClient(grpcDialOption, cluster.MasterType, option.Master, "", peers),
    adminLocks: NewAdminLocks(),
    Cluster: cluster.NewCluster(),
}
ms.boundedLeaderChan = make(chan int, 16)

@ -80,7 +80,7 @@ func (vs *VolumeServer) VolumeCopy(req *volume_server_pb.VolumeCopyRequest, stre

// println("source:", volFileInfoResp.String())
copyResponse := &volume_server_pb.VolumeCopyResponse{}
reportInterval := int64(1024*1024*128)
reportInterval := int64(1024 * 1024 * 128)
nextReportTarget := reportInterval
var modifiedTsNs int64
var sendErr error

@ -62,13 +62,8 @@ func (vs *VolumeServer) VolumeTierMoveDatToRemote(req *volume_server_pb.VolumeTi
    })
}

// remember the file original source
attributes := make(map[string]string)
attributes["volumeId"] = v.Id.String()
attributes["collection"] = v.Collection
attributes["ext"] = ".dat"
// copy the data file
key, size, err := backendStorage.CopyFile(diskFile.File, attributes, fn)
key, size, err := backendStorage.CopyFile(diskFile.File, fn)
if err != nil {
    return fmt.Errorf("backend %s copy file %s: %v", req.DestinationBackendName, diskFile.Name(), err)
}

@ -27,7 +27,7 @@ func (vs *VolumeServer) VacuumVolumeCheck(ctx context.Context, req *volume_serve
func (vs *VolumeServer) VacuumVolumeCompact(req *volume_server_pb.VacuumVolumeCompactRequest, stream volume_server_pb.VolumeServer_VacuumVolumeCompactServer) error {

resp := &volume_server_pb.VacuumVolumeCompactResponse{}
reportInterval := int64(1024*1024*128)
reportInterval := int64(1024 * 1024 * 128)
nextReportTarget := reportInterval

var sendErr error

55
weed/shell/command_cluster_ps.go
Normal file

@ -0,0 +1,55 @@
package shell

import (
    "context"
    "flag"
    "fmt"
    "github.com/chrislusf/seaweedfs/weed/cluster"
    "io"

    "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
)

func init() {
    Commands = append(Commands, &commandClusterPs{})
}

type commandClusterPs struct {
}

func (c *commandClusterPs) Name() string {
    return "cluster.ps"
}

func (c *commandClusterPs) Help() string {
    return `check current cluster process status

    cluster.ps

`
}

func (c *commandClusterPs) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {

    clusterPsCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
    if err = clusterPsCommand.Parse(args); err != nil {
        return nil
    }

    err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
        resp, err := client.ListClusterNodes(context.Background(), &master_pb.ListClusterNodesRequest{
            ClientType: cluster.FilerType,
        })

        fmt.Fprintf(writer, "the cluster has %d filers\n", len(resp.ClusterNodes))
        for _, node := range resp.ClusterNodes {
            fmt.Fprintf(writer, " * %s (%v)\n", node.Address, node.Version)
        }
        return err
    })
    if err != nil {
        return
    }

    return nil
}

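A hedged usage sketch for the new command (output format inferred from the Fprintf calls above; addresses and version strings below are hypothetical examples, not from the commit):

    > cluster.ps
    the cluster has 2 filers
     * 192.168.1.10:8888 (30GB 2.77)
     * 192.168.1.11:8888 (30GB 2.77)

Each node's version is whatever that node reported in KeepConnectedRequest.Version when it registered with the master, so mixed-version clusters become visible at a glance.
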
@ -6,6 +6,7 @@ import (
    "fmt"
    "github.com/chrislusf/seaweedfs/weed/pb"
    "io"
    "math/rand"
    "sync"
    "time"

@ -33,7 +34,7 @@ func (c *commandEcEncode) Name() string {
func (c *commandEcEncode) Help() string {
    return `apply erasure coding to a volume

    ec.encode [-collection=""] [-fullPercent=95] [-quietFor=1h]
    ec.encode [-collection=""] [-fullPercent=95 -quietFor=1h]
    ec.encode [-collection=""] [-volumeId=<volume_id>]

    This command will:

@ -248,7 +249,7 @@ func parallelCopyEcShardsFromSource(grpcDialOption grpc.DialOption, targetServer
func balancedEcDistribution(servers []*EcNode) (allocated [][]uint32) {
    allocated = make([][]uint32, len(servers))
    allocatedShardIdIndex := uint32(0)
    serverIndex := 0
    serverIndex := rand.Intn(len(servers))
    for allocatedShardIdIndex < erasure_coding.TotalShardsCount {
        if servers[serverIndex].freeEcSlot > 0 {
            allocated[serverIndex] = append(allocated[serverIndex], allocatedShardIdIndex)

@ -26,7 +26,7 @@ func (c *commandRemoteCache) Help() string {
    return `cache the file content for mounted directories or files

    # assume a remote storage is configured to name "cloud1"
    remote.configure -name=cloud1 -type=s3 -access_key=xxx -secret_key=yyy
    remote.configure -name=cloud1 -type=s3 -s3.access_key=xxx -s3.secret_key=yyy
    # mount and pull one bucket
    remote.mount -dir=/xxx -remote=cloud1/bucket

@ -163,10 +163,10 @@ func (c *commandRemoteCache) cacheContentData(commandEnv *CommandEnv, writer io.

remoteLocation := filer.MapFullPathToRemoteStorageLocation(localMountedDir, remoteMountedLocation, dir.Child(entry.Name))

if err := filer.DownloadToLocal(commandEnv, remoteConf, remoteLocation, dir, entry); err != nil {
    fmt.Fprintf(writer, "DownloadToLocal %+v: %v\n", remoteLocation, err)
if err := filer.CacheRemoteObjectToLocalCluster(commandEnv, remoteConf, remoteLocation, dir, entry); err != nil {
    fmt.Fprintf(writer, "CacheRemoteObjectToLocalCluster %+v: %v\n", remoteLocation, err)
    if executionErr == nil {
        executionErr = fmt.Errorf("DownloadToLocal %+v: %v\n", remoteLocation, err)
        executionErr = fmt.Errorf("CacheRemoteObjectToLocalCluster %+v: %v\n", remoteLocation, err)
    }
    return
}

@ -27,7 +27,7 @@ func (c *commandRemoteMetaSync) Help() string {
    return `synchronize the local file meta data with the remote file metadata

    # assume a remote storage is configured to name "cloud1"
    remote.configure -name=cloud1 -type=s3 -access_key=xxx -secret_key=yyy
    remote.configure -name=cloud1 -type=s3 -s3.access_key=xxx -s3.secret_key=yyy
    # mount and pull one bucket
    remote.mount -dir=/xxx -remote=cloud1/bucket

@ -32,7 +32,7 @@ func (c *commandRemoteMount) Help() string {
    return `mount remote storage and pull its metadata

    # assume a remote storage is configured to name "cloud1"
    remote.configure -name=cloud1 -type=s3 -access_key=xxx -secret_key=yyy
    remote.configure -name=cloud1 -type=s3 -s3.access_key=xxx -s3.secret_key=yyy

    # mount and pull one bucket
    remote.mount -dir=/xxx -remote=cloud1/bucket

@ -27,7 +27,7 @@ func (c *commandRemoteMountBuckets) Help() string {
    return `mount all buckets in remote storage and pull its metadata

    # assume a remote storage is configured to name "cloud1"
    remote.configure -name=cloud1 -type=s3 -access_key=xxx -secret_key=yyy
    remote.configure -name=cloud1 -type=s3 -s3.access_key=xxx -s3.secret_key=yyy

    # mount all buckets
    remote.mount.buckets -remote=cloud1

@ -27,7 +27,7 @@ func (c *commandRemoteUnmount) Help() string {
    return `unmount remote storage

    # assume a remote storage is configured to name "s3_1"
    remote.configure -name=s3_1 -type=s3 -access_key=xxx -secret_key=yyy
    remote.configure -name=s3_1 -type=s3 -s3.access_key=xxx -s3.secret_key=yyy
    # mount and pull one bucket
    remote.mount -dir=/xxx -remote=s3_1/bucket

@ -261,6 +261,29 @@ func TestSatisfyReplicaPlacement00x(t *testing.T) {

}

func TestSatisfyReplicaPlacement100(t *testing.T) {

    var tests = []testcase{
        {
            name: "test 100",
            replication: "100",
            replicas: []*VolumeReplica{
                {
                    location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
                },
                {
                    location: &location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
                },
            },
            possibleLocation: location{"dc2", "r3", &master_pb.DataNodeInfo{Id: "dn3"}},
            expected: true,
        },
    }

    runTests(tests, t)

}

func runTests(tests []testcase, t *testing.T) {
    for _, tt := range tests {
        replicaPlacement, _ := super_block.NewReplicaPlacementFromString(tt.replication)

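For context on the new test: in a SeaweedFS replication string the three digits count extra copies on other data centers, other racks, and other servers respectively, so "100" asks for one extra copy in a different data center, and the candidate dn3 in dc2 satisfies that. A tiny illustrative helper (not part of the commit, names hypothetical):

    package sketch

    import "fmt"

    // explainReplication spells out the xyz digits of a replication string;
    // this mirrors the documented SeaweedFS convention, not code from the commit.
    func explainReplication(s string) string {
        if len(s) != 3 {
            return "invalid replication string"
        }
        return fmt.Sprintf("%c copies on other DCs, %c on other racks, %c on other servers",
            s[0], s[1], s[2])
    }
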
@ -3,9 +3,13 @@ package shell
import (
    "context"
    "fmt"
    "github.com/chrislusf/seaweedfs/weed/cluster"
    "github.com/chrislusf/seaweedfs/weed/pb"
    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
    "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
    "github.com/chrislusf/seaweedfs/weed/util/grace"
    "io"
    "math/rand"
    "os"
    "path"
    "regexp"

@ -47,6 +51,29 @@ func RunShell(options ShellOptions) {
go commandEnv.MasterClient.KeepConnectedToMaster()
commandEnv.MasterClient.WaitUntilConnected()

if commandEnv.option.FilerAddress == "" {
    var filers []pb.ServerAddress
    commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
        resp, err := client.ListClusterNodes(context.Background(), &master_pb.ListClusterNodesRequest{
            ClientType: cluster.FilerType,
        })
        if err != nil {
            return err
        }

        for _, clusterNode := range resp.ClusterNodes {
            filers = append(filers, pb.ServerAddress(clusterNode.Address))
        }
        return nil
    })
    fmt.Printf("master: %s ", *options.Masters)
    if len(filers) > 0 {
        fmt.Printf("filers: %v", filers)
        commandEnv.option.FilerAddress = filers[rand.Intn(len(filers))]
    }
    fmt.Println()
}

if commandEnv.option.FilerAddress != "" {
    commandEnv.WithFilerClient(func(filerClient filer_pb.SeaweedFilerClient) error {
        resp, err := filerClient.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})

@ -25,7 +25,7 @@ type BackendStorageFile interface {
type BackendStorage interface {
    ToProperties() map[string]string
    NewStorageFile(key string, tierInfo *volume_server_pb.VolumeInfo) BackendStorageFile
    CopyFile(f *os.File, attributes map[string]string, fn func(progressed int64, percentage float32) error) (key string, size int64, err error)
    CopyFile(f *os.File, fn func(progressed int64, percentage float32) error) (key string, size int64, err error)
    DownloadFile(fileName string, key string, fn func(progressed int64, percentage float32) error) (size int64, err error)
    DeleteFile(key string) (err error)
}

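With the attributes map dropped from the interface, only the source file and a progress callback remain. A minimal, self-contained sketch of an implementation against the slimmed-down CopyFile contract (memBackend is hypothetical, not a backend in the commit):

    package sketch

    import "os"

    // memBackend keeps copied files in memory; it only illustrates the
    // post-commit CopyFile shape, not a real SeaweedFS backend.
    type memBackend struct{ blobs map[string][]byte }

    func (m *memBackend) CopyFile(f *os.File, fn func(progressed int64, percentage float32) error) (key string, size int64, err error) {
        data, err := os.ReadFile(f.Name())
        if err != nil {
            return "", 0, err
        }
        key = f.Name()
        m.blobs[key] = data
        size = int64(len(data))
        // report completion once; real backends invoke fn repeatedly while copying
        if err = fn(size, 100); err != nil {
            return "", 0, err
        }
        return key, size, nil
    }
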
@ -79,13 +79,13 @@ func (s *S3BackendStorage) NewStorageFile(key string, tierInfo *volume_server_pb
    return f
}

func (s *S3BackendStorage) CopyFile(f *os.File, attributes map[string]string, fn func(progressed int64, percentage float32) error) (key string, size int64, err error) {
func (s *S3BackendStorage) CopyFile(f *os.File, fn func(progressed int64, percentage float32) error) (key string, size int64, err error) {
    randomUuid, _ := uuid.NewRandom()
    key = randomUuid.String()

    glog.V(1).Infof("copying dat file of %s to remote s3.%s as %s", f.Name(), s.id, key)

    size, err = uploadToS3(s.conn, f.Name(), s.bucket, key, attributes, fn)
    size, err = uploadToS3(s.conn, f.Name(), s.bucket, key, fn)

    return
}

@ -2,6 +2,8 @@ package s3_backend

import (
    "fmt"
    "github.com/aws/aws-sdk-go/aws/request"
    "github.com/chrislusf/seaweedfs/weed/util"
    "sync"

    "github.com/aws/aws-sdk-go/aws"

@ -47,6 +49,9 @@ func createSession(awsAccessKeyId, awsSecretAccessKey, region, endpoint string)
if err != nil {
    return nil, fmt.Errorf("create aws session in region %s: %v", region, err)
}
sess.Handlers.Build.PushBack(func(r *request.Request) {
    r.HTTPRequest.Header.Set("User-Agent", "SeaweedFS/"+util.VERSION_NUMBER)
})

t := s3.New(sess)

@ -12,9 +12,7 @@ import (
    "github.com/chrislusf/seaweedfs/weed/glog"
)

func uploadToS3(sess s3iface.S3API, filename string, destBucket string, destKey string,
    attributes map[string]string,
    fn func(progressed int64, percentage float32) error) (fileSize int64, err error) {
func uploadToS3(sess s3iface.S3API, filename string, destBucket string, destKey string, fn func(progressed int64, percentage float32) error) (fileSize int64, err error) {

//open the file
f, err := os.Open(filename)

@ -48,25 +46,13 @@ func uploadToS3(sess s3iface.S3API, filename string, destBucket string, destKey
    fn: fn,
}

// process tagging
tags := ""
for k, v := range attributes {
    if len(tags) > 0 {
        tags = tags + "&"
    }
    tags = tags + k + "=" + v
}

// Upload the file to S3.
var result *s3manager.UploadOutput
result, err = uploader.Upload(&s3manager.UploadInput{
    Bucket: aws.String(destBucket),
    Key: aws.String(destKey),
    Body: fileReader,
    ACL: aws.String("private"),
    ServerSideEncryption: aws.String("AES256"),
    StorageClass: aws.String("STANDARD_IA"),
    Tagging: aws.String(tags),
    Bucket: aws.String(destBucket),
    Key: aws.String(destKey),
    Body: fileReader,
    StorageClass: aws.String("STANDARD_IA"),
})

//in case it fails to upload

@ -5,7 +5,7 @@ import (
)

var (
    VERSION_NUMBER = fmt.Sprintf("%.02f", 2.75)
    VERSION_NUMBER = fmt.Sprintf("%.02f", 2.77)
    VERSION = sizeLimit + " " + VERSION_NUMBER
    COMMIT = ""
)

@ -21,6 +21,8 @@ type MasterClient struct {
    grpcDialOption grpc.DialOption

    vidMap

    OnPeerUpdate func(update *master_pb.ClusterNodeUpdate)
}

func NewMasterClient(grpcDialOption grpc.DialOption, clientType string, clientHost pb.ServerAddress, clientDataCenter string, masters []pb.ServerAddress) *MasterClient {

@ -93,7 +95,7 @@ func (mc *MasterClient) tryAllMasters() {
}

func (mc *MasterClient) tryConnectToMaster(master pb.ServerAddress) (nextHintedLeader pb.ServerAddress) {
    glog.V(0).Infof("%s masterClient Connecting to master %v", mc.clientType, master)
    glog.V(1).Infof("%s masterClient Connecting to master %v", mc.clientType, master)
    gprcErr := pb.WithMasterClient(master, mc.grpcDialOption, func(client master_pb.SeaweedClient) error {

        ctx, cancel := context.WithCancel(context.Background())

@ -105,7 +107,11 @@ func (mc *MasterClient) tryConnectToMaster(master pb.ServerAddress) (nextHintedL
    return err
}

if err = stream.Send(&master_pb.KeepConnectedRequest{Name: mc.clientType, ClientAddress: string(mc.clientHost)}); err != nil {
if err = stream.Send(&master_pb.KeepConnectedRequest{
    ClientType: mc.clientType,
    ClientAddress: string(mc.clientHost),
    Version: util.Version(),
}); err != nil {
    glog.V(0).Infof("%s masterClient failed to send to %s: %v", mc.clientType, master, err)
    return err
}

@ -114,34 +120,49 @@ func (mc *MasterClient) tryConnectToMaster(master pb.ServerAddress) (nextHintedL
mc.currentMaster = master

for {
    volumeLocation, err := stream.Recv()
    resp, err := stream.Recv()
    if err != nil {
        glog.V(0).Infof("%s masterClient failed to receive from %s: %v", mc.clientType, master, err)
        return err
    }

    // maybe the leader is changed
    if volumeLocation.Leader != "" {
        glog.V(0).Infof("redirected to leader %v", volumeLocation.Leader)
        nextHintedLeader = pb.ServerAddress(volumeLocation.Leader)
        return nil
    if resp.VolumeLocation != nil {
        // maybe the leader is changed
        if resp.VolumeLocation.Leader != "" {
            glog.V(0).Infof("redirected to leader %v", resp.VolumeLocation.Leader)
            nextHintedLeader = pb.ServerAddress(resp.VolumeLocation.Leader)
            return nil
        }

        // process new volume location
        loc := Location{
            Url: resp.VolumeLocation.Url,
            PublicUrl: resp.VolumeLocation.PublicUrl,
            DataCenter: resp.VolumeLocation.DataCenter,
            GrpcPort: int(resp.VolumeLocation.GrpcPort),
        }
        for _, newVid := range resp.VolumeLocation.NewVids {
            glog.V(1).Infof("%s: %s masterClient adds volume %d", mc.clientType, loc.Url, newVid)
            mc.addLocation(newVid, loc)
        }
        for _, deletedVid := range resp.VolumeLocation.DeletedVids {
            glog.V(1).Infof("%s: %s masterClient removes volume %d", mc.clientType, loc.Url, deletedVid)
            mc.deleteLocation(deletedVid, loc)
        }
    }

    // process new volume location
    loc := Location{
        Url: volumeLocation.Url,
        PublicUrl: volumeLocation.PublicUrl,
        DataCenter: volumeLocation.DataCenter,
        GrpcPort: int(volumeLocation.GrpcPort),
    }
    for _, newVid := range volumeLocation.NewVids {
        glog.V(1).Infof("%s: %s masterClient adds volume %d", mc.clientType, loc.Url, newVid)
        mc.addLocation(newVid, loc)
    }
    for _, deletedVid := range volumeLocation.DeletedVids {
        glog.V(1).Infof("%s: %s masterClient removes volume %d", mc.clientType, loc.Url, deletedVid)
        mc.deleteLocation(deletedVid, loc)
    if resp.ClusterNodeUpdate != nil {
        update := resp.ClusterNodeUpdate
        if mc.OnPeerUpdate != nil {
            if update.IsAdd {
                glog.V(0).Infof("+ %s %s leader:%v\n", update.NodeType, update.Address, update.IsLeader)
            } else {
                glog.V(0).Infof("- %s %s leader:%v\n", update.NodeType, update.Address, update.IsLeader)
            }
            mc.OnPeerUpdate(update)
        }
    }

}
})

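The new OnPeerUpdate hook fires once per ClusterNodeUpdate the master streams back over KeepConnected. A hedged sketch of how a caller might register it (watchPeers is hypothetical; the import paths and field names are the ones used above, assuming the repo layout of this commit):

    package sketch

    import (
        "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
        "github.com/chrislusf/seaweedfs/weed/wdclient"
    )

    // watchPeers installs a callback that logs cluster membership changes;
    // set it before KeepConnectedToMaster starts so no update is missed.
    func watchPeers(mc *wdclient.MasterClient) {
        mc.OnPeerUpdate = func(update *master_pb.ClusterNodeUpdate) {
            if update.IsAdd {
                println("node joined:", update.NodeType, update.Address)
            } else {
                println("node left:", update.NodeType, update.Address)
            }
        }
    }
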