// seaweedfs/weed/operation/delete_content.go
package operation

import (
	"context"
	"errors"
	"fmt"
	"net/http"
	"strings"
	"sync"

	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
)
// DeleteResult reports the outcome of deleting a single file id.
type DeleteResult struct {
	Fid    string `json:"fid"`             // file id that was targeted
	Size   int    `json:"size"`            // size in bytes of the deleted needle
	Status int    `json:"status"`          // HTTP-style status code
	Error  string `json:"error,omitempty"` // error message; empty on success
}
// ParseFileId splits a file id of the form "volumeId,key_cookie" into the
// volume id part and the remaining key+cookie part.
// It returns an error when fid has no comma or the volume id part is empty.
func ParseFileId(fid string) (vid string, keyCookie string, err error) {
	commaIndex := strings.Index(fid, ",")
	if commaIndex <= 0 {
		// <= 0 also rejects a leading comma (empty volume id).
		return "", "", errors.New("wrong fid format")
	}
	return fid[:commaIndex], fid[commaIndex+1:], nil
}
// DeleteFiles batch deletes a list of fileIds
func DeleteFiles(master string, fileIds []string) ([]*volume_server_pb.DeleteResult, error) {
var ret []*volume_server_pb.DeleteResult
2014-04-15 16:09:40 +00:00
vid_to_fileIds := make(map[string][]string)
var vids []string
for _, fileId := range fileIds {
vid, _, err := ParseFileId(fileId)
if err != nil {
ret = append(ret, &volume_server_pb.DeleteResult{
FileId: vid,
Status: http.StatusBadRequest,
Error: err.Error()},
)
2014-04-15 16:09:40 +00:00
continue
}
if _, ok := vid_to_fileIds[vid]; !ok {
vid_to_fileIds[vid] = make([]string, 0)
vids = append(vids, vid)
}
vid_to_fileIds[vid] = append(vid_to_fileIds[vid], fileId)
}
lookupResults, err := LookupVolumeIds(master, vids)
if err != nil {
return ret, err
}
server_to_fileIds := make(map[string][]string)
for vid, result := range lookupResults {
if result.Error != "" {
ret = append(ret, &volume_server_pb.DeleteResult{
FileId: vid,
Status: http.StatusBadRequest,
Error: err.Error()},
)
2014-04-15 16:09:40 +00:00
continue
}
for _, location := range result.Locations {
if _, ok := server_to_fileIds[location.Url]; !ok {
server_to_fileIds[location.Url] = make([]string, 0)
2014-04-15 16:09:40 +00:00
}
server_to_fileIds[location.Url] = append(
server_to_fileIds[location.Url], vid_to_fileIds[vid]...)
2014-04-15 16:09:40 +00:00
}
}
var wg sync.WaitGroup
2014-04-15 16:09:40 +00:00
for server, fidList := range server_to_fileIds {
wg.Add(1)
go func(server string, fidList []string) {
defer wg.Done()
if deleteResults, deleteErr := DeleteFilesAtOneVolumeServer(server, fidList); deleteErr != nil {
err = deleteErr
} else {
ret = append(ret, deleteResults...)
2014-04-15 16:09:40 +00:00
}
2014-04-15 16:09:40 +00:00
}(server, fidList)
}
wg.Wait()
return ret, err
}
// DeleteFilesAtOneVolumeServer deletes a list of files that is on one volume server via gRpc
func DeleteFilesAtOneVolumeServer(volumeServer string, fileIds []string) (ret []*volume_server_pb.DeleteResult, err error) {
2018-10-15 06:12:43 +00:00
err = WithVolumeServerClient(volumeServer, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
req := &volume_server_pb.BatchDeleteRequest{
FileIds: fileIds,
}
resp, err := volumeServerClient.BatchDelete(context.Background(), req)
2018-11-18 19:51:38 +00:00
// fmt.Printf("deleted %v %v: %v\n", fileIds, err, resp)
if err != nil {
return err
}
ret = append(ret, resp.Results...)
return nil
})
if err != nil {
return
}
for _, result := range ret {
2018-11-18 19:51:38 +00:00
if result.Error != "" && result.Error != "Not Found" {
return nil, fmt.Errorf("delete fileId %s: %v", result.FileId, result.Error)
}
}
return
2014-04-15 16:09:40 +00:00
}