package tdb

import (
	"bytes"

	bolt "go.etcd.io/bbolt"
)

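// Separators used when composing index keys: IndexKeySeparator joins an
// indexed value to the record's key value (see indexishKeys), while
// IndexFieldSeparator is available for separating field names from field
// values in index entries.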
var (
	IndexKeySeparator   = []byte("&")
	IndexFieldSeparator = []byte("=")
)

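// indexish is the set of operations shared by index implementations: setup
// and validation, write paths that take decoded values (put, update, delete)
// or precomputed raw index keys (putRaw, updateRaw, deleteRaw), and helpers
// for deriving index keys and iterating over the backing bucket.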
type indexish interface {
	debugLogger
	count(tx *Tx) int
	initialize(tx *Tx) error
	validate(tx *Tx, val dbPtrValue) error
	update(tx *Tx, old, new dbPtrValue)
	updateRaw(tx *Tx, write, delete [][]byte)
	put(tx *Tx, val dbPtrValue)
	putRaw(tx *Tx, val [][]byte)
	delete(tx *Tx, val dbPtrValue)
	deleteRaw(tx *Tx, val [][]byte)
	bucket(tx *Tx) *bolt.Bucket
	iteratePrefixed(tx *Tx, prefix []byte, i KeyIterator) error
	indexedValues(val dbPtrValue) [][]byte
	keyValue(val dbPtrValue) []byte
	indexKeys(val dbPtrValue) [][]byte
	shouldUpdate(tx *Tx, oldVal, newVal dbPtrValue) (needsUpdate bool, oldKeys, newKeys, writes, deletes [][]byte)
}

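// indexishKeys builds the composite index keys for a value by joining each
// indexed value to the record's key value with IndexKeySeparator. The result
// reuses the slice returned by indexedValues.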
func indexishKeys(i indexish, pv dbPtrValue) [][]byte {
	vals := i.indexedValues(pv)

	if len(vals) == 0 {
		return vals
	}

	keyVal := i.keyValue(pv)
	for n, val := range vals {
		bb := &bytes.Buffer{}
		bb.Write(val)
		bb.Write(IndexKeySeparator)
		bb.Write(keyVal)
		vals[n] = bb.Bytes()
	}
	return vals
}

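// indexishPutRaw writes precomputed index keys into the index's bucket. Each
// entry is stored with an empty value because all of the information lives in
// the key itself; a bbolt Put error is logged and escalated to a panic.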
func indexishPutRaw(i indexish, tx *Tx, writes [][]byte) {
	lenWrites := len(writes)
	i.debugLogf("[indexishPutRaw] putting %d keys", lenWrites)
	if lenWrites == 0 {
		return
	}

	b := i.bucket(tx)
	for _, entry := range writes {
		err := b.Put(entry, []byte{})
		if err != nil {
			i.debugLogf("%s", err)
			panic(err)
		}
	}
}

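// indexishDeleteRaw removes the given precomputed index keys from the index's
// bucket, logging and panicking on any bbolt error.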
func indexishDeleteRaw(i indexish, tx *Tx, deletes [][]byte) {
	lenDeletes := len(deletes)
	i.debugLogf("[indexishDeleteRaw] deleting %d keys", lenDeletes)
	if lenDeletes == 0 {
		return
	}

	b := i.bucket(tx)
	for _, entry := range deletes {
		err := b.Delete(entry)
		if err != nil {
			i.debugLogf("%s", err)
			panic(err)
		}
	}
}

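// indexishUpdateRaw applies a precomputed index delta: stale keys are deleted
// first, then new keys are written.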
func indexishUpdateRaw(i indexish, tx *Tx, writes, deletes [][]byte) {
	i.deleteRaw(tx, deletes)
	i.putRaw(tx, writes)
}

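// indexishShouldUpdate diffs the index keys derived from the old and new
// values and reports whether the index needs to change, along with the full
// old and new key sets and the minimal sets of keys to write and delete. For
// example, if the old value indexes under keys {a, b} and the new value under
// {b, c}, it returns needsUpdate=true with writes={c} and deletes={a}.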
func indexishShouldUpdate(i indexish, oldVal, newVal dbPtrValue) (bool, [][]byte, [][]byte, [][]byte, [][]byte) {
	oldKeys := i.indexKeys(oldVal)
	lenOldKeys := len(oldKeys)
	newKeys := i.indexKeys(newVal)
	lenNewKeys := len(newKeys)

	// no keys before or after, nothing to do
	if lenOldKeys == 0 && lenNewKeys == 0 {
		return false, nil, nil, nil, nil
	}

	// either everything is new or everything was removed, so write/delete it all
	if lenNewKeys == 0 || lenOldKeys == 0 {
		return true, oldKeys, newKeys, newKeys, oldKeys
	}

	// with exactly one key on each side we can compare directly; this is a fairly common case
	if lenNewKeys == 1 && lenOldKeys == 1 {
		// if the keys are the same then we don't need to do anything
		if bytes.Equal(oldKeys[0], newKeys[0]) {
			return false, nil, nil, nil, nil
		}
		// otherwise delete the old key and write the new one
		return true, oldKeys, newKeys, newKeys, oldKeys
	}

	// the real meat and potatoes starts here
	// we need a lookup table for one of the slices; old was chosen for no particular reason
	oldMap := make(map[string]bool)
	for _, oldKey := range oldKeys {
		oldMap[string(oldKey)] = true
	}
	i.debugLogf("[indexishShouldUpdate] indexed old with %d entries", len(oldMap))

	writes := make([][]byte, 0, lenNewKeys)
	// then we look at the other (new) slice
	for _, newKey := range newKeys {
		// and check whether it is missing from the lookup and therefore needs to be created
		if _, has := oldMap[string(newKey)]; !has {
			i.debugLogf("[indexishShouldUpdate] old does not have new key %s", newKey)
			writes = append(writes, newKey)
		} else {
			// present on both sides, so it must not be deleted
			i.debugLogf("[indexishShouldUpdate] old already has new key %s", newKey)
			delete(oldMap, string(newKey))
		}
	}

	// before doing more work we can check a few optimized paths
	lenWrites := len(writes)
	i.debugLogf("[indexishShouldUpdate] found %d writes", lenWrites)
	// if every new key needs writing, nothing matched an old key, so every old key must be deleted
	if lenWrites == lenNewKeys {
		return true, oldKeys, newKeys, newKeys, oldKeys
	}
	// if there are no writes and old and new have the same length, the key sets must be equal
	if lenWrites == 0 && lenOldKeys == lenNewKeys {
		i.debugLog("[indexishShouldUpdate] found no changes")
		return false, nil, nil, nil, nil
	}

	lenDeletes := len(oldMap)
	i.debugLogf("[indexishShouldUpdate] found %d deletes", lenDeletes)
	// finally, we can skip building the deletion slice if there is nothing to delete
	if lenDeletes == 0 {
		return true, oldKeys, newKeys, writes, [][]byte{}
	}

	deletes := make([][]byte, 0, lenDeletes)
	// anything still in the lookup table was not seen in new, so it must be deleted
	for oldKey := range oldMap {
		deletes = append(deletes, []byte(oldKey))
	}

	// this case _should_ be unreachable due to the optimized cases above, but it is safer to keep it
	if len(writes) == 0 && len(deletes) == 0 {
		return false, nil, nil, nil, nil
	}

	return true, oldKeys, newKeys, writes, deletes
}
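
// updateIndexish is a hypothetical helper, included only as a sketch of how
// the pieces above can compose: compute the key delta with shouldUpdate and,
// only when something actually changed, apply it with updateRaw (which
// deletes stale keys before writing new ones).
func updateIndexish(i indexish, tx *Tx, oldVal, newVal dbPtrValue) {
	needsUpdate, _, _, writes, deletes := i.shouldUpdate(tx, oldVal, newVal)
	if !needsUpdate {
		return
	}
	i.updateRaw(tx, writes, deletes)
}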