package erasureencode

import (
	"fmt"
)

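// Params describes the layout of erasure-encoded data: the total size of the
// original data, how many bytes each chunk holds per stripe (the stride), and
// how many data and parity shards the data is split into.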
type Params struct {
	Size   int64  `json:"s,string"` // total size of the original data, in bytes
	Stride int32  `json:"t"`        // number of bytes each chunk holds per stripe
	Shards uint16 `json:"h"`        // number of data shards
	Parity uint16 `json:"p"`        // number of parity shards
}

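// ChunkShardMeta describes one contiguous piece of a planned read: which chunk
// holds it, where it begins within the shard, the chunk, and the original
// data, and how many bytes to read.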
type ChunkShardMeta struct {
	Shard        uint16 // index of the shard the data falls in
	ShardOffset  int64  // the location within the shard at which the data begins
	Chunk        uint16 // which chunk the shard is in
	ChunkOffset  int64  // the location within the chunk at which the shard begins
	GlobalOffset int64  // the location within the original data at which this piece begins
	Size         int32  // the number of bytes to read from the shard
}

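// Plan maps the byte range [offset, offset+size) of the original data onto the
// chunks that store it, returning one ChunkShardMeta per contiguous piece. It
// panics if the requested range extends past the end of the data.
//
// A hypothetical usage sketch (parameter values chosen purely for
// illustration):
//
//	p := Params{Size: 1 << 20, Stride: 4096, Shards: 3, Parity: 2}
//	for _, m := range p.Plan(5000, 10000) {
//		fmt.Printf("chunk %d: %d bytes at chunk offset %d\n", m.Chunk, m.Size, m.ChunkOffset)
//	}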
func (params Params) Plan(offset, size int64) []ChunkShardMeta {
	outputs := []ChunkShardMeta{}
	end := offset + size
	// Refuse to plan a read that extends past the end of the data.
	if end > params.Size {
		panic(fmt.Errorf("attempted read beyond end of file"))
	}

	// Layout constants derived from the parameters.
	shards := int64(params.Shards)
	baseStride := int64(params.Stride)
	baseStripeWidth := baseStride * shards

	// The data past the last full stripe forms a final "odd" stripe, split
	// across the chunks with its own, rounded-up stride.
	oddStripeOffset := (params.Size / baseStripeWidth) * baseStripeWidth
	oddChunkOffset := oddStripeOffset / shards // offset within each chunk at which the odd stripe begins
	oddSize := params.Size - oddStripeOffset
	oddStride := oddSize / shards
	if oddSize%shards > 0 {
		oddStride += 1
	}

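	// Walk the requested range, emitting one ChunkShardMeta for each run of
	// bytes that stays within a single shard.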
	for offset < end {
		output := ChunkShardMeta{GlobalOffset: offset}

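		// Data at or past oddStripeOffset lives in the narrower odd stripe
		// and is addressed with oddStride; everything before it lives in a
		// regular stripe of baseStride-sized shards.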
		if offset >= oddStripeOffset {
			localOffset := offset - oddStripeOffset // the location relative to the odd data at which the desired data begins
			output.Chunk = uint16(localOffset / oddStride)
			output.Shard = uint16(oddStripeOffset/baseStride) + output.Chunk
			output.ShardOffset = localOffset % oddStride
			output.ChunkOffset = oddChunkOffset + output.ShardOffset
			output.Size = int32(min(end-offset, oddStride-output.ShardOffset))
		} else {
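			// Regular stripes: every shard is exactly baseStride bytes wide.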
			shard := offset / baseStride
			output.Shard = uint16(shard) // which shard the data is in
			output.Chunk = uint16(shard % shards)
			output.ShardOffset = offset % baseStride
			output.ChunkOffset = ((shard / shards) * baseStride) + output.ShardOffset
			output.Size = int32(min(end-offset, baseStride-output.ShardOffset))
		}
		if output.Size <= 0 {
			panic(fmt.Errorf("invalid read size"))
		}
		outputs = append(outputs, output)
		offset += int64(output.Size)
	}
	return outputs
}