package erasureencode

import (
	"bytes"
	"fmt"
	"io"
)
// ReadPlanner produces the ordered list of chunk/shard reads required to
// reconstruct the file described by meta.
type ReadPlanner func(meta *EEMeta) []ChunkShardMeta

// ReadHandler consumes one shard's worth of bytes. data is a reusable
// scratch buffer valid only for the duration of the call; plan describes the
// read that produced it and readNum is its index in the full plan.
type ReadHandler func(data []byte, plan ChunkShardMeta, readNum int) error
func decodeFn(inputs []io.ReadSeeker, file io.Writer, meta *EEMeta, getPlan ReadPlanner, handleRead ReadHandler) error {
|
||
|
raw := []byte{}
|
||
|
rawLen := int32(0)
|
||
|
fullPlan := getPlan(meta)
|
||
|
|
||
|
// we only need to seek once as the rest of the reads should be linear
|
||
|
for _, plan := range fullPlan[0:min(uint64(meta.Params.Shards), uint64(len(fullPlan)))] {
|
||
|
if _, err := inputs[plan.Chunk].Seek(int64(plan.ChunkOffset), io.SeekStart); err != nil {
|
||
|
return err
|
||
|
}
|
||
|
}
|
||
|
|
||
|
for i, plan := range fullPlan {
|
||
|
if rawLen != plan.Size {
|
||
|
raw = make([]byte, plan.Size)
|
||
|
rawLen = plan.Size
|
||
|
}
|
||
|
|
||
|
if _, err := io.ReadFull(inputs[plan.Chunk], raw); err != nil {
|
||
|
return err
|
||
|
}
|
||
|
if err := handleRead(raw, plan, i); err != nil {
|
||
|
return err
|
||
|
}
|
||
|
}
|
||
|
return nil
|
||
|
}
func Decode(inputs []io.ReadSeeker, file io.Writer, meta *EEMeta) error {
|
||
|
return decodeFn(inputs, file, meta, func(meta *EEMeta) []ChunkShardMeta {
|
||
|
return meta.Params.Plan(0, meta.Params.Size)
|
||
|
}, func(data []byte, _ ChunkShardMeta, _ int) error {
|
||
|
if _, err := file.Write(data); err != nil {
|
||
|
return err
|
||
|
}
|
||
|
return nil
|
||
|
})
|
||
|
}
func DecodeAndValidate(inputs []io.ReadSeeker, file io.Writer, meta *EEMeta) error {
|
||
|
size := uint64(meta.Params.Size)
|
||
|
shards := uint64(meta.Params.Shards)
|
||
|
|
||
|
// get set up to read meta including the padding
|
||
|
validateParams := *meta
|
||
|
if size%shards > 0 {
|
||
|
validateParams.Size = (size / shards) * (shards + 1)
|
||
|
}
|
||
|
|
||
|
return decodeFn(inputs, file, meta, func(_ *EEMeta) []ChunkShardMeta {
|
||
|
return validateParams.Plan(0, validateParams.Size)
|
||
|
}, func(data []byte, read ChunkShardMeta, i int) error {
|
||
|
actual := sha256sum(data)
|
||
|
if !bytes.Equal(actual, meta.ShardHashes[i]) {
|
||
|
return fmt.Errorf("shard hash mismatch")
|
||
|
}
|
||
|
dataLen := uint64(len(data))
|
||
|
writeData := data
|
||
|
if read.GlobalOffset > size {
|
||
|
writeData = nil
|
||
|
} else if read.GlobalOffset+dataLen > size {
|
||
|
writeData = data[0 : read.GlobalOffset-size]
|
||
|
}
|
||
|
if writeData != nil {
|
||
|
if _, err := file.Write(writeData); err != nil {
|
||
|
return err
|
||
|
}
|
||
|
}
|
||
|
return nil
|
||
|
})
|
||
|
|
||
|
raw := make([]byte, meta.Params.Stride)
|
||
|
rawLen := int32(len(raw))
|
||
|
for _, plan := range meta.Params.Plan(0, meta.Params.Size) {
|
||
|
if rawLen != plan.Size {
|
||
|
raw = make([]byte, plan.Size)
|
||
|
rawLen = plan.Size
|
||
|
}
|
||
|
|
||
|
// We can be particularly lazy and ignore Offset since we are reading the full file from the chunks
|
||
|
if _, err := io.ReadFull(inputs[plan.Chunk], raw); err != nil {
|
||
|
return err
|
||
|
}
|
||
|
if _, err := file.Write(raw); err != nil {
|
||
|
return err
|
||
|
}
|
||
|
}
|
||
|
return nil
|
||
|
}