Mirror of https://github.com/seaweedfs/seaweedfs.git (synced 2024-01-19 02:48:24 +00:00)

commit 6bf3eb69cb (parent 78afb8bf46): async chan write read, no write for closed chan
@@ -1,11 +1,15 @@
package broker

import (
	"crypto/md5"
	"fmt"
	"io"

	"github.com/golang/protobuf/proto"

	"github.com/chrislusf/seaweedfs/weed/filer2"
	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
)
@@ -44,9 +48,19 @@ func (broker *MessageBroker) Publish(stream messaging_pb.SeaweedMessaging_Publis
		Topic:     in.Init.Topic,
		Partition: in.Init.Partition,
	}

	tpDir := fmt.Sprintf("%s/%s/%s", filer2.TopicsDir, tp.Namespace, tp.Topic)
	md5File := fmt.Sprintf("p%02d.md5", tp.Partition)
	// println("chan data stored under", tpDir, "as", md5File)

	if exists, err := filer_pb.Exists(broker, tpDir, md5File, false); err == nil && exists {
		return fmt.Errorf("channel is already closed")
	}

	tl := broker.topicLocks.RequestLock(tp, topicConfig, true)
	defer broker.topicLocks.ReleaseLock(tp, true)

	md5hash := md5.New()
	// process each message
	for {
		// println("recv")
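The Publish handler now derives a per-partition marker path (tpDir plus a pNN.md5 file) and refuses the publish when that marker already exists, treating the channel as closed. Below is a minimal sketch of that guard, using the local filesystem as a stand-in for the filer_pb.Exists lookup; the directory layout and helper name (publishAllowed) are illustrative only, not the broker's actual API.

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// publishAllowed reports whether a hypothetical channel may still accept writes:
// once a close-marker file exists for the partition, further publishes are refused.
func publishAllowed(topicsDir, namespace, topic string, partition int32) error {
	marker := filepath.Join(topicsDir, namespace, topic, fmt.Sprintf("p%02d.md5", partition))
	if _, err := os.Stat(marker); err == nil {
		return fmt.Errorf("channel is already closed")
	} else if !os.IsNotExist(err) {
		return err // an unexpected stat error, not just "marker absent"
	}
	return nil
}

func main() {
	dir, _ := os.MkdirTemp("", "topics")
	defer os.RemoveAll(dir)

	fmt.Println(publishAllowed(dir, "chat", "room1", 0)) // <nil>: no marker yet

	// Simulate a closed channel by creating the marker file.
	os.MkdirAll(filepath.Join(dir, "chat", "room1"), 0o755)
	os.WriteFile(filepath.Join(dir, "chat", "room1", "p00.md5"), nil, 0o644)

	fmt.Println(publishAllowed(dir, "chat", "room1", 0)) // channel is already closed
}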
@@ -78,8 +92,16 @@ func (broker *MessageBroker) Publish(stream messaging_pb.SeaweedMessaging_Publis
			break
		}

		md5hash.Write(in.Data.Value)

	}

	if err := broker.appendToFile(tpDir+"/"+md5File, topicConfig, md5hash.Sum(nil)); err != nil {
		glog.V(0).Infof("err writing %s: %v", md5File, err)
	}

	// fmt.Printf("received md5 %X\n", md5hash.Sum(nil))

	// send the close ack
	// println("server send ack closing")
	if err := stream.Send(&messaging_pb.PublishResponse{IsClosed: true}); err != nil {
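After the receive loop, the handler appends the accumulated md5 digest to the marker file and sends a PublishResponse with IsClosed set. The sketch below covers only the hashing part: how crypto/md5 accumulates over values written one at a time, matching a one-shot hash of the concatenated payload; the payloads themselves are made up for illustration.

package main

import (
	"crypto/md5"
	"fmt"
)

func main() {
	h := md5.New()

	// Stand-in for message values arriving one at a time on the publish stream.
	for _, value := range [][]byte{[]byte("hello "), []byte("world")} {
		h.Write(value) // hash.Hash.Write never returns an error
	}

	// The streamed digest equals hashing the concatenated payload in one shot.
	oneShot := md5.Sum([]byte("hello world"))
	fmt.Printf("streamed: %X\n", h.Sum(nil))
	fmt.Printf("one-shot: %X\n", oneShot[:])
}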
@@ -57,6 +57,7 @@ func (broker *MessageBroker) Subscribe(stream messaging_pb.SeaweedMessaging_Subs
		lastReadTime = time.Unix(0, in.Init.TimestampNs)
	case messaging_pb.SubscriberMessage_InitMessage_LATEST:
	case messaging_pb.SubscriberMessage_InitMessage_EARLIEST:
		lastReadTime = time.Unix(0, 0)
	}
	var processedTsNs int64
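The Subscribe handler maps the requested start position onto a lastReadTime: an explicit timestamp uses the supplied nanoseconds, EARLIEST resets to the Unix epoch, and LATEST keeps the default (presumably "now"). A minimal sketch of that mapping follows; the StartPosition type and its constants are stand-ins for the generated messaging_pb enum, not the real types.

package main

import (
	"fmt"
	"time"
)

// StartPosition is an illustrative stand-in for the messaging_pb init enum.
type StartPosition int

const (
	Latest StartPosition = iota
	Earliest
	Timestamp
)

// startTime picks where a subscription begins reading.
func startTime(pos StartPosition, tsNs int64, now time.Time) time.Time {
	switch pos {
	case Timestamp:
		return time.Unix(0, tsNs) // resume from an explicit point in time
	case Earliest:
		return time.Unix(0, 0) // replay everything from the Unix epoch
	default: // Latest
		return now // only messages published from now on
	}
}

func main() {
	now := time.Now()
	fmt.Println(startTime(Earliest, 0, now))
	fmt.Println(startTime(Timestamp, now.Add(-time.Hour).UnixNano(), now))
	fmt.Println(startTime(Latest, 0, now))
}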
@@ -17,7 +17,7 @@ type TopicPartition struct {
 	Partition int32
 }
 const (
-	TopicPartitionFmt = "%s/%s_%2d"
+	TopicPartitionFmt = "%s/%s_%02d"
 )
 func (tp *TopicPartition) String() string {
 	return fmt.Sprintf(TopicPartitionFmt, tp.Namespace, tp.Topic, tp.Partition)
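The only change in this hunk is the partition format verb: "%2d" pads with a space while "%02d" pads with a zero, so the partition suffix of the key never contains a space. A tiny sketch showing the difference (topic names are made up):

package main

import "fmt"

func main() {
	// "%2d" pads the partition number with a space; "%02d" pads it with a zero.
	fmt.Printf("%q\n", fmt.Sprintf("%s/%s_%2d", "chat", "room1", 3))  // "chat/room1_ 3"
	fmt.Printf("%q\n", fmt.Sprintf("%s/%s_%02d", "chat", "room1", 3)) // "chat/room1_03"
}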
@@ -106,6 +106,7 @@ func (tl *TopicLocks) ReleaseLock(partition TopicPartition, isPublisher bool) {
 	}
 	if lock.subscriberCount <= 0 && lock.publisherCount <= 0 {
 		delete(tl.locks, partition)
+		lock.logBuffer.Shutdown()
 	}
 }
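ReleaseLock now also shuts down the partition's log buffer once the last publisher and subscriber are gone. A minimal sketch of that reference-counted teardown follows; the types and the shutdown hook are illustrative stand-ins, not the broker's actual TopicLocks implementation.

package main

import (
	"fmt"
	"sync"
)

// partitionLock and locks are illustrative stand-ins, not the broker's real types.
type partitionLock struct {
	publisherCount  int
	subscriberCount int
	shutdown        func() // stands in for lock.logBuffer.Shutdown()
}

type locks struct {
	sync.Mutex
	m map[string]*partitionLock
}

// release decrements the matching counter and tears the entry down
// once no publisher or subscriber is using the partition any more.
func (l *locks) release(key string, isPublisher bool) {
	l.Lock()
	defer l.Unlock()

	lock, found := l.m[key]
	if !found {
		return
	}
	if isPublisher {
		lock.publisherCount--
	} else {
		lock.subscriberCount--
	}
	if lock.subscriberCount <= 0 && lock.publisherCount <= 0 {
		delete(l.m, key)
		lock.shutdown() // flush and stop the buffer only when the last user leaves
	}
}

func main() {
	l := &locks{m: map[string]*partitionLock{
		"chat/room1_00": {
			publisherCount:  1,
			subscriberCount: 1,
			shutdown:        func() { fmt.Println("buffer shut down") },
		},
	}}
	l.release("chat/room1_00", true)  // publisher leaves; a subscriber is still attached
	l.release("chat/room1_00", false) // last user leaves -> "buffer shut down"
}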
@@ -55,8 +55,8 @@ func (mc *MessagingClient) NewSubChannel(chanName string) (*SubChannel, error) {
 				close(t.ch)
 				return
 			}
-			t.md5hash.Write(resp.Data.Value)
 			t.ch <- resp.Data.Value
+			t.md5hash.Write(resp.Data.Value)
 		}
 	}()
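In the subscriber's receive goroutine the md5 write moves to after the value has been delivered on t.ch, and the goroutine returns immediately after close(t.ch). The sketch below illustrates the Go rule that makes care around close() necessary here: sending on a channel that has already been closed panics.

package main

import "fmt"

func main() {
	ch := make(chan []byte, 1)
	close(ch)

	defer func() {
		if r := recover(); r != nil {
			fmt.Println("recovered:", r) // "send on closed channel"
		}
	}()
	ch <- []byte("too late") // panics: the channel has already been closed
}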
@@ -98,13 +98,14 @@ func (m *LogBuffer) AddToBuffer(partitionKey, data []byte) {
 }
 
 func (m *LogBuffer) Shutdown() {
+	m.Lock()
+	defer m.Unlock()
+
 	if m.isStopping {
 		return
 	}
 	m.isStopping = true
-	m.Lock()
 	toFlush := m.copyToFlush()
-	m.Unlock()
 	m.flushChan <- toFlush
 	close(m.flushChan)
 }
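Shutdown now holds the buffer lock for the whole method and uses the isStopping flag to make repeated calls a no-op before handing the final batch to flushChan and closing it. A minimal sketch of an idempotent shutdown in that spirit; the buffer type and its fields are illustrative, not the real LogBuffer.

package main

import (
	"fmt"
	"sync"
)

// buffer is an illustrative stand-in for LogBuffer, not the real type.
type buffer struct {
	sync.Mutex
	isStopping bool
	pending    []byte
	flushChan  chan []byte
}

// shutdown flushes whatever is buffered and closes flushChan exactly once,
// no matter how many times it is called.
func (b *buffer) shutdown() {
	b.Lock()
	defer b.Unlock()

	if b.isStopping {
		return // already shutting down; further calls are no-ops
	}
	b.isStopping = true

	toFlush := b.pending
	b.pending = nil
	b.flushChan <- toFlush // hand the final batch to the flusher
	close(b.flushChan)     // signal that no more batches will come
}

func main() {
	b := &buffer{pending: []byte("tail"), flushChan: make(chan []byte, 1)}
	b.shutdown()
	b.shutdown() // the second call returns early
	for batch := range b.flushChan {
		fmt.Printf("flushed %q\n", batch)
	}
}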
@@ -123,10 +124,14 @@ func (m *LogBuffer) loopInterval() {
 	for !m.isStopping {
 		time.Sleep(m.flushInterval)
 		m.Lock()
+		if m.isStopping {
+			m.Unlock()
+			return
+		}
 		// println("loop interval")
 		toFlush := m.copyToFlush()
+		m.Unlock()
 		m.flushChan <- toFlush
-		m.Unlock()
 	}
 }
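The flush loop now re-checks isStopping under the lock after each sleep and returns instead of producing another batch, presumably so it never races with Shutdown over the already-closed flushChan; the unlock also appears to move ahead of the channel send. Below is a sketch of a periodic flush loop with that re-check; the field and method names are illustrative only.

package main

import (
	"fmt"
	"sync"
	"time"
)

// buffer is an illustrative stand-in for LogBuffer, not the real type.
type buffer struct {
	sync.Mutex
	isStopping bool
	pending    []byte
	flushChan  chan []byte
}

// loop wakes up on an interval and hands the current batch to flushChan,
// but bails out once the buffer is stopping so it never sends after shutdown.
func (b *buffer) loop(interval time.Duration) {
	for {
		time.Sleep(interval)

		b.Lock()
		if b.isStopping {
			b.Unlock()
			return // shutdown has taken over flushing and closing the channel
		}
		toFlush := b.pending
		b.pending = nil
		b.Unlock()

		b.flushChan <- toFlush
	}
}

func main() {
	b := &buffer{flushChan: make(chan []byte, 4)}
	go b.loop(10 * time.Millisecond)

	b.Lock()
	b.pending = []byte("batch-1")
	b.Unlock()

	time.Sleep(30 * time.Millisecond)
	fmt.Printf("%q\n", <-b.flushChan) // "batch-1"

	b.Lock()
	b.isStopping = true // ask the loop to stop on its next wake-up
	b.Unlock()
}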