package command

import (
	"fmt"
	"net/http"
	"os"
	"runtime"
	"runtime/pprof"
	"strconv"
	"strings"
	"time"

	"github.com/spf13/viper"
	"google.golang.org/grpc"
	"google.golang.org/grpc/reflection"

	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
	"github.com/chrislusf/seaweedfs/weed/security"
	"github.com/chrislusf/seaweedfs/weed/server"
	"github.com/chrislusf/seaweedfs/weed/storage"
	"github.com/chrislusf/seaweedfs/weed/util"
	"github.com/chrislusf/seaweedfs/weed/util/httpdown"
)

var (
	v VolumeServerOptions
)

type VolumeServerOptions struct {
	port                  *int
	publicPort            *int
	folders               []string
	folderMaxLimits       []int
	ip                    *string
	publicUrl             *string
	bindIp                *string
	masters               *string
	pulseSeconds          *int
	idleConnectionTimeout *int
	dataCenter            *string
	rack                  *string
	whiteList             []string
	indexType             *string
	fixJpgOrientation     *bool
	readRedirect          *bool
	cpuProfile            *string
	memProfile            *string
	compactionMBPerSecond *int
}

func init() {
	cmdVolume.Run = runVolume // break init cycle
	v.port = cmdVolume.Flag.Int("port", 8080, "http listen port")
	v.publicPort = cmdVolume.Flag.Int("port.public", 0, "port opened to public")
	v.ip = cmdVolume.Flag.String("ip", "", "ip or server name")
	v.publicUrl = cmdVolume.Flag.String("publicUrl", "", "Publicly accessible address")
	v.bindIp = cmdVolume.Flag.String("ip.bind", "0.0.0.0", "ip address to bind to")
	v.masters = cmdVolume.Flag.String("mserver", "localhost:9333", "comma-separated master servers")
	v.pulseSeconds = cmdVolume.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats, must be smaller than or equal to the master's setting")
	v.idleConnectionTimeout = cmdVolume.Flag.Int("idleTimeout", 30, "connection idle seconds")
	v.dataCenter = cmdVolume.Flag.String("dataCenter", "", "current volume server's data center name")
	v.rack = cmdVolume.Flag.String("rack", "", "current volume server's rack name")
	v.indexType = cmdVolume.Flag.String("index", "memory", "Choose [memory|leveldb|leveldbMedium|leveldbLarge] mode for memory~performance balance.")
	v.fixJpgOrientation = cmdVolume.Flag.Bool("images.fix.orientation", false, "Adjust jpg orientation when uploading.")
	v.readRedirect = cmdVolume.Flag.Bool("read.redirect", true, "Redirect moved or non-local volumes.")
	v.cpuProfile = cmdVolume.Flag.String("cpuprofile", "", "cpu profile output file")
	v.memProfile = cmdVolume.Flag.String("memprofile", "", "memory profile output file")
	v.compactionMBPerSecond = cmdVolume.Flag.Int("compactionMBps", 0, "limit background compaction or copying speed in megabytes per second")
}

var cmdVolume = &Command{
	UsageLine: "volume -port=8080 -dir=/tmp -max=5 -ip=server_name -mserver=localhost:9333",
	Short:     "start a volume server",
	Long: `start a volume server to provide storage spaces

  `,
}

var (
	volumeFolders         = cmdVolume.Flag.String("dir", os.TempDir(), "directories to store data files. dir[,dir]...")
	maxVolumeCounts       = cmdVolume.Flag.String("max", "7", "maximum number of volumes, count[,count]...")
	volumeWhiteListOption = cmdVolume.Flag.String("whiteList", "", "comma-separated IP addresses with write permission. No limit if empty.")
)
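
// Example invocation (the directories, counts, and master address below are
// hypothetical; each -dir entry is paired positionally with a -max count):
//
//	weed volume -dir=/data/vol1,/data/vol2 -max=30,30 -mserver=localhost:9333 -port=8080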

func runVolume(cmd *Command, args []string) bool {

	util.LoadConfiguration("security", false)

	runtime.GOMAXPROCS(runtime.NumCPU())
	util.SetupProfiling(*v.cpuProfile, *v.memProfile)

	v.startVolumeServer(*volumeFolders, *maxVolumeCounts, *volumeWhiteListOption)

	return true
}
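
// startVolumeServer validates the folder and max-count settings, builds the HTTP
// muxes, constructs the volume server, and then starts the gRPC, public HTTP, and
// cluster HTTP services. It blocks until an interrupt triggers a graceful shutdown.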
func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, volumeWhiteListOption string) {

	// set the folders and each folder's max volume count limit
	v.folders = strings.Split(volumeFolders, ",")
	maxCountStrings := strings.Split(maxVolumeCounts, ",")
	for _, maxString := range maxCountStrings {
		if max, e := strconv.Atoi(maxString); e == nil {
			v.folderMaxLimits = append(v.folderMaxLimits, max)
		} else {
			glog.Fatalf("The max specified in -max is not a valid number: %s", maxString)
		}
	}
	if len(v.folders) != len(v.folderMaxLimits) {
		glog.Fatalf("%d directories by -dir, but only %d max values set by -max", len(v.folders), len(v.folderMaxLimits))
	}
	for _, folder := range v.folders {
		if err := util.TestFolderWritable(folder); err != nil {
			glog.Fatalf("Check Data Folder(-dir) Writable %s : %s", folder, err)
		}
	}

	// security related white list configuration
	if volumeWhiteListOption != "" {
		v.whiteList = strings.Split(volumeWhiteListOption, ",")
	}

	if *v.ip == "" {
		*v.ip = "127.0.0.1"
	}

	if *v.publicPort == 0 {
		*v.publicPort = *v.port
	}
	if *v.publicUrl == "" {
		*v.publicUrl = *v.ip + ":" + strconv.Itoa(*v.publicPort)
	}

	volumeMux := http.NewServeMux()
	publicVolumeMux := volumeMux
	if v.isSeparatedPublicPort() {
		publicVolumeMux = http.NewServeMux()
	}
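
	// The -index flag trades memory for speed: the default in-memory needle map
	// keeps the whole index in RAM, while the leveldb variants keep most of the
	// index on disk at the cost of some lookup latency.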
	volumeNeedleMapKind := storage.NeedleMapInMemory
	switch *v.indexType {
	case "leveldb":
		volumeNeedleMapKind = storage.NeedleMapLevelDb
	case "leveldbMedium":
		volumeNeedleMapKind = storage.NeedleMapLevelDbMedium
	case "leveldbLarge":
		volumeNeedleMapKind = storage.NeedleMapLevelDbLarge
	}

	masters := *v.masters

	volumeServer := weed_server.NewVolumeServer(volumeMux, publicVolumeMux,
		*v.ip, *v.port, *v.publicUrl,
		v.folders, v.folderMaxLimits,
		volumeNeedleMapKind,
		strings.Split(masters, ","), *v.pulseSeconds, *v.dataCenter, *v.rack,
		v.whiteList,
		*v.fixJpgOrientation, *v.readRedirect,
		*v.compactionMBPerSecond,
	)

	// starting grpc server
	grpcS := v.startGrpcService(volumeServer)

	// starting public http server
	var publicHttpDown httpdown.Server
	if v.isSeparatedPublicPort() {
		publicHttpDown = v.startPublicHttpService(publicVolumeMux)
		if publicHttpDown == nil {
			glog.Fatalf("start public http service failed")
		}
	}

	// starting the cluster http server
	clusterHttpServer := v.startClusterHttpService(volumeMux)

	stopChain := make(chan struct{})
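
	// On interrupt, shut down in dependency order: stop accepting new public
	// traffic first, then the cluster HTTP server, then gRPC, and finally the
	// volume server itself, before stopping any CPU profiling.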
	util.OnInterrupt(func() {
		fmt.Println("volume server has been killed")
		var startTime time.Time

		// first, stop the public http service so that no new user requests are accepted
		if publicHttpDown != nil {
			startTime = time.Now()
			if err := publicHttpDown.Stop(); err != nil {
				glog.Warningf("stop the public http server failed, %v", err)
			}
			delta := time.Now().Sub(startTime).Nanoseconds() / 1e6
			glog.V(0).Infof("stop public http server, elapsed %dms", delta)
		}

		startTime = time.Now()
		if err := clusterHttpServer.Stop(); err != nil {
			glog.Warningf("stop the cluster http server failed, %v", err)
		}
		delta := time.Now().Sub(startTime).Nanoseconds() / 1e6
		glog.V(0).Infof("graceful stop cluster http server, elapsed %dms", delta)

		startTime = time.Now()
		grpcS.GracefulStop()
		delta = time.Now().Sub(startTime).Nanoseconds() / 1e6
		glog.V(0).Infof("graceful stop gRPC, elapsed %dms", delta)

		startTime = time.Now()
		volumeServer.Shutdown()
		delta = time.Now().Sub(startTime).Nanoseconds() / 1e6
		glog.V(0).Infof("stop volume server, elapsed %dms", delta)

		pprof.StopCPUProfile()

		close(stopChain) // notify exit
	})

	select {
	case <-stopChain:
	}
	glog.Warningf("the volume server has exited")
}

// isSeparatedPublicPort reports whether a separate public port is configured.
func (v VolumeServerOptions) isSeparatedPublicPort() bool {
	return *v.publicPort != *v.port
}
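
// startGrpcService serves the volume server gRPC API on a listener bound to the
// HTTP port plus 10000, using the server TLS settings loaded for the volume
// component from the security configuration.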
func (v VolumeServerOptions) startGrpcService(vs volume_server_pb.VolumeServerServer) *grpc.Server {
	grpcPort := *v.port + 10000
	grpcL, err := util.NewListener(*v.bindIp+":"+strconv.Itoa(grpcPort), 0)
	if err != nil {
		glog.Fatalf("failed to listen on grpc port %d: %v", grpcPort, err)
	}
	grpcS := util.NewGrpcServer(security.LoadServerTLS(viper.Sub("grpc"), "volume"))
	volume_server_pb.RegisterVolumeServerServer(grpcS, vs)
	reflection.Register(grpcS)
	go func() {
		if err := grpcS.Serve(grpcL); err != nil {
			glog.Fatalf("start gRPC service failed, %s", err)
		}
	}()
	return grpcS
}
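
// startPublicHttpService serves the public-facing HTTP endpoint on the public
// port and returns an httpdown handle so it can be stopped gracefully later.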
func (v VolumeServerOptions) startPublicHttpService(handler http.Handler) httpdown.Server {
	publicListeningAddress := *v.bindIp + ":" + strconv.Itoa(*v.publicPort)
	glog.V(0).Infoln("Start Seaweed volume server", util.VERSION, "public at", publicListeningAddress)
	publicListener, e := util.NewListener(publicListeningAddress, time.Duration(*v.idleConnectionTimeout)*time.Second)
	if e != nil {
		glog.Fatalf("Volume server listener error:%v", e)
	}

	pubHttp := httpdown.HTTP{StopTimeout: 5 * time.Minute, KillTimeout: 5 * time.Minute}
	publicHttpDown := pubHttp.Serve(&http.Server{Handler: handler}, publicListener)
	go func() {
		if err := publicHttpDown.Wait(); err != nil {
			glog.Errorf("public http down wait failed, %v", err)
		}
	}()

	return publicHttpDown
}
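
// startClusterHttpService serves the main (cluster-facing) HTTP endpoint on the
// primary port, optionally over TLS when https.volume.cert and https.volume.key
// are set in the security configuration.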
func (v VolumeServerOptions) startClusterHttpService(handler http.Handler) httpdown.Server {
	var (
		certFile, keyFile string
	)
	if viper.GetString("https.volume.key") != "" {
		certFile = viper.GetString("https.volume.cert")
		keyFile = viper.GetString("https.volume.key")
	}

	listeningAddress := *v.bindIp + ":" + strconv.Itoa(*v.port)
	glog.V(0).Infof("Start Seaweed volume server %s at %s", util.VERSION, listeningAddress)
	listener, e := util.NewListener(listeningAddress, time.Duration(*v.idleConnectionTimeout)*time.Second)
	if e != nil {
		glog.Fatalf("Volume server listener error:%v", e)
	}

	httpDown := httpdown.HTTP{
		KillTimeout: 5 * time.Minute,
		StopTimeout: 5 * time.Minute,
		CertFile:    certFile,
		KeyFile:     keyFile,
	}
	clusterHttpServer := httpDown.Serve(&http.Server{Handler: handler}, listener)
	go func() {
		if e := clusterHttpServer.Wait(); e != nil {
			glog.Fatalf("Volume server failed to serve: %v", e)
		}
	}()
	return clusterHttpServer
}