file-store/pkg/erasureencode/params.go

package erasureencode

import (
	"fmt"
)
// Params describes how a file is split into erasure-coded shards.
type Params struct {
	Size   uint64 `json:"s,string"` // total file size in bytes
	Stride int32  `json:"t"`        // bytes written to one shard per stripe
	Shards uint16 `json:"h"`        // number of data shards
	Parity uint16 `json:"p"`        // number of parity shards
}
// ChunkShardMeta describes a single contiguous read within one chunk.
type ChunkShardMeta struct {
	Chunk        uint16 // which chunk the data is in
	ChunkOffset  uint64 // the location within the chunk at which the data begins
	GlobalOffset uint64 // the location within the whole file at which the data begins
	Size         int32  // the number of bytes to read
}
// Plan maps a read of size bytes starting at offset onto the per-chunk reads
// needed to satisfy it.
func (params Params) Plan(offset, size uint64) []ChunkShardMeta {
	outputs := []ChunkShardMeta{}
	end := offset + size
	if end > params.Size {
		panic(fmt.Errorf("attempted read beyond end of file"))
	}
	// geometry constants
	shards := uint64(params.Shards)
	baseStride := uint64(params.Stride)
	baseStripeWidth := baseStride * shards

	// the data past the last full stripe forms a shorter "odd" stripe whose
	// shards use a smaller stride, rounded up so it still spans every shard
	oddStripeOffset := (params.Size / baseStripeWidth) * baseStripeWidth
	oddChunkOffset := oddStripeOffset / shards
	oddSize := params.Size - oddStripeOffset
	oddStride := oddSize / shards
	if oddSize%shards > 0 {
		oddStride++
	}
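
	// Worked example with assumed (illustrative) values: for Size = 10_000,
	// Stride = 256 and Shards = 4, baseStripeWidth = 1024,
	// oddStripeOffset = 9216, oddChunkOffset = 2304, oddSize = 784 and
	// oddStride = 784/4 = 196 (no remainder, so no rounding up).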
	for offset < end {
		output := ChunkShardMeta{GlobalOffset: offset}
		if offset >= oddStripeOffset {
			// the read starts within the odd (short) final stripe
			localOffset := offset - oddStripeOffset // location relative to the odd data at which the desired data begins
			output.Chunk = uint16(localOffset / oddStride)
			shardOffset := localOffset % oddStride // the location within the shard at which the data begins
			output.ChunkOffset = oddChunkOffset + shardOffset
			output.Size = int32(min(end-offset, oddStride-shardOffset))
		} else {
			shardNum := offset / baseStride // which shard the data is in
			output.Chunk = uint16(shardNum % shards)
			shardOffset := offset % baseStride // the location within the shard at which the data begins
			output.ChunkOffset = ((shardNum / shards) * baseStride) + shardOffset
			output.Size = int32(min(end-offset, baseStride-shardOffset))
		}
		if output.Size <= 0 {
			panic(fmt.Errorf("invalid read size"))
		}
		outputs = append(outputs, output)
		offset += uint64(output.Size)
	}
	return outputs
}
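
The min helper used above is not defined in this file: on Go 1.21+ it is the builtin, and on older toolchains the package presumably supplies an equivalent min(a, b uint64) uint64 elsewhere. As a rough sketch of how a caller might drive Plan (the module path and all parameter values below are assumptions for illustration, not taken from the repository):

package erasureencode_test

import (
	"fmt"

	"example.com/file-store/pkg/erasureencode" // hypothetical module path
)

func ExampleParams_Plan() {
	// Illustrative parameters: a 1 MiB file striped across 4 data shards with
	// a 256-byte stride and 2 parity shards.
	params := erasureencode.Params{
		Size:   1 << 20,
		Stride: 256,
		Shards: 4,
		Parity: 2,
	}
	// Plan a 1000-byte read starting at byte offset 500; each entry names the
	// chunk to read from, the offset within that chunk, and the read length.
	for _, read := range params.Plan(500, 1000) {
		fmt.Printf("chunk %d: read %d bytes at chunk offset %d\n",
			read.Chunk, read.Size, read.ChunkOffset)
	}
}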