Index: impl/memory/datastore_index.go
diff --git a/impl/memory/datastore_index.go b/impl/memory/datastore_index.go
index 37200d9c22fde7df6a3d3a14795f615ec3c947a9..59b9c405d3a82bde2decf1b3a3896bc808e0a5be 100644
--- a/impl/memory/datastore_index.go
+++ b/impl/memory/datastore_index.go
@@ -14,8 +14,6 @@ import (
	"github.com/luci/gkvlite"
)
-var indexCreationDeterministic = false
-
type qIndexSlice []*ds.IndexDefinition
func (s qIndexSlice) Len() int { return len(s) }
@@ -39,15 +37,15 @@ func defaultIndicies(kind string, pmap ds.PropertyMap) []*ds.IndexDefinition {
		ret = append(ret, &ds.IndexDefinition{Kind: kind, SortBy: []ds.IndexColumn{{Property: name}}})
		ret = append(ret, &ds.IndexDefinition{Kind: kind, SortBy: []ds.IndexColumn{{Property: name, Direction: ds.DESCENDING}}})
	}
-	if indexCreationDeterministic {
+	if serializationDeterministic {
		sort.Sort(ret)
	}
	return ret
}
func indexEntriesWithBuiltins(k ds.Key, pm ds.PropertyMap, complexIdxs []*ds.IndexDefinition) *memStore {
-	sip := partiallySerialize(pm)
-	return sip.indexEntries(k, append(defaultIndicies(k.Kind(), pm), complexIdxs...))
+	sip := partiallySerialize(k, pm)
+	return sip.indexEntries(k.Namespace(), append(defaultIndicies(k.Kind(), pm), complexIdxs...))
}
// serializedPvals is all of the serialized DSProperty values in qASC order.
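For reference, the loop above gives every indexed property a pair of built-in single-property indexes, one per direction. A hypothetical illustration of what that expands to (kind and property names made up, not part of the patch):

// Illustrative: the two built-in IndexDefinitions the loop would append
// for a kind "Foo" with a single indexed property "Field".
asc := &ds.IndexDefinition{Kind: "Foo", SortBy: []ds.IndexColumn{{Property: "Field"}}}
desc := &ds.IndexDefinition{Kind: "Foo", SortBy: []ds.IndexColumn{{Property: "Field", Direction: ds.DESCENDING}}}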
@@ -58,26 +56,24 @@ func (s serializedPvals) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s serializedPvals) Less(i, j int) bool { return bytes.Compare(s[i], s[j]) < 0 }
// prop name -> [<serialized DSProperty>, ...]
+// includes the special values '__key__' and '__ancestor__'; '__ancestor__'
+// contains all of the ancestor entries for this key.
type serializedIndexablePmap map[string]serializedPvals
-func partiallySerialize(pm ds.PropertyMap) (ret serializedIndexablePmap) {
-	if len(pm) == 0 {
-		return
+func partiallySerialize(k ds.Key, pm ds.PropertyMap) (ret serializedIndexablePmap) {
+	ret = make(serializedIndexablePmap, len(pm)+2)
+	ret["__key__"] = [][]byte{serialize.ToBytes(ds.MkProperty(k))}
+	for k != nil {
+		ret["__ancestor__"] = append(ret["__ancestor__"], serialize.ToBytes(ds.MkProperty(k)))
+		k = k.Parent()
	}
-
-	buf := &bytes.Buffer{}
-	ret = make(serializedIndexablePmap, len(pm))
	for k, vals := range pm {
		newVals := make(serializedPvals, 0, len(vals))
		for _, v := range vals {
			if v.IndexSetting() == ds.NoIndex {
				continue
			}
-			buf.Reset()
-			serialize.WriteProperty(buf, serialize.WithoutContext, v)
-			newVal := make([]byte, buf.Len())
-			copy(newVal, buf.Bytes())
-			newVals = append(newVals, newVal)
+			newVals = append(newVals, serialize.ToBytes(v))
		}
		if len(newVals) > 0 {
			sort.Sort(newVals)
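Net effect of the new partiallySerialize: the key itself and its whole ancestor chain ride along as pseudo-properties, so later matching can treat '__key__' and '__ancestor__' like any other column. For a hypothetical key Parent,1 / Child,2 with one indexed property "Field", the result has roughly this shape (the placeholder byte slices stand in for serialize-encoded values; this is an illustration, not code from the patch):

// Rough shape only; childKeyBytes / parentKeyBytes / fieldBytes are
// hypothetical placeholders for serialize-encoded property values.
sip := serializedIndexablePmap{
	"__key__":      {childKeyBytes},                 // serialize.ToBytes(ds.MkProperty(k))
	"__ancestor__": {childKeyBytes, parentKeyBytes}, // k itself first, then each k.Parent()
	"Field":        {fieldBytes},                    // NoIndex values skipped, remainder sorted
}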
@@ -95,7 +91,7 @@ type indexRowGen struct {
}
// permute calls cb for each index row, in the sorted order of the rows.
-func (s indexRowGen) permute(cb func([]byte)) {
+func (s indexRowGen) permute(collSetFn func(k, v []byte)) {
	iVec := make([]int, len(s.propVec))
	iVecLim := make([]int, len(s.propVec))
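iVec/iVecLim together with the incPos helper (outside this hunk) drive an odometer-style walk over every combination of one serialized value per column. A self-contained sketch of that iteration pattern, written independently of the datastore types and only meant to show the shape of the loop:

package main

import "fmt"

// cartesian visits every combination of one element per inner slice in
// lexicographic ("odometer") order — the same style of iteration that
// permute's iVec/iVecLim counters suggest.
func cartesian(vals [][]string, visit func([]string)) {
	if len(vals) == 0 {
		return
	}
	idx := make([]int, len(vals))
	for {
		row := make([]string, len(vals))
		for i, j := range idx {
			row[i] = vals[i][j]
		}
		visit(row)

		// Increment like an odometer: bump the last counter, carrying left.
		i := len(idx) - 1
		for ; i >= 0; i-- {
			idx[i]++
			if idx[i] < len(vals[i]) {
				break
			}
			idx[i] = 0
		}
		if i < 0 {
			return // every counter wrapped: all combinations visited
		}
	}
}

func main() {
	cartesian([][]string{{"a", "b"}, {"1", "2"}}, func(row []string) {
		fmt.Println(row) // prints [a 1], [a 2], [b 1], [b 2], one per line
	})
}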
@@ -137,18 +133,13 @@ func (s indexRowGen) permute(cb func([]byte)) {
		for pvalSliceIdx, pvalIdx := range iVec {
			bufsiz += len(s.propVec[pvalSliceIdx][pvalIdx])
		}
-		buf := bytes.NewBuffer(make([]byte, 0, bufsiz))
+		buf := serialize.Invertible(bytes.NewBuffer(make([]byte, 0, bufsiz)))
		for pvalSliceIdx, pvalIdx := range iVec {
			data := s.propVec[pvalSliceIdx][pvalIdx]
-			if s.orders[pvalSliceIdx] == ds.ASCENDING {
-				buf.Write(data)
-			} else {
-				for _, b := range data {
-					buf.WriteByte(b ^ 0xFF)
-				}
-			}
+			buf.SetInvert(s.orders[pvalSliceIdx] == ds.DESCENDING)
+			buf.Write(data)
		}
-		cb(buf.Bytes())
+		collSetFn(buf.Bytes(), []byte{})
		if !incPos() {
			break
		}
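The removed branch spelled the descending-order trick out inline: bytes of a DESCENDING column are XORed with 0xFF so that plain byte-wise comparison sorts those rows in reverse. serialize.Invertible presumably packages the same idea behind a buffer with a SetInvert switch; below is a minimal, self-contained sketch of that idea, not the actual serialize implementation:

package main

import (
	"bytes"
	"fmt"
)

// invBuffer is a toy "invertible" buffer: while invert is set, every written
// byte is XORed with 0xFF, flipping the byte-wise sort order of whatever is
// written — the same trick the removed code performed inline.
type invBuffer struct {
	bytes.Buffer
	invert bool
}

func (b *invBuffer) SetInvert(invert bool) { b.invert = invert }

func (b *invBuffer) Write(data []byte) (int, error) {
	if !b.invert {
		return b.Buffer.Write(data)
	}
	for _, c := range data {
		b.Buffer.WriteByte(c ^ 0xFF)
	}
	return len(data), nil
}

func main() {
	buf := &invBuffer{}
	buf.Write([]byte{0x01}) // ascending column: stored as-is
	buf.SetInvert(true)
	buf.Write([]byte{0x01}) // descending column: stored as 0xFE
	fmt.Printf("% x\n", buf.Bytes()) // 01 fe
}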
@@ -162,13 +153,10 @@ type matcher struct {
// matcher.match checks to see if the mapped, serialized property values
// match the index. If they do, it returns an indexRowGen. Do not write or modify
// the data in the indexRowGen.
-func (m *matcher) match(idx *ds.IndexDefinition, sip serializedIndexablePmap) (indexRowGen, bool) {
+func (m *matcher) match(sortBy []ds.IndexColumn, sip serializedIndexablePmap) (indexRowGen, bool) {
	m.buf.propVec = m.buf.propVec[:0]
	m.buf.orders = m.buf.orders[:0]
-	for _, sb := range idx.SortBy {
-		if sb.Property == "__key__" {
-			panic("don't know how to build compound index on __key__")
-		}
+	for _, sb := range sortBy {
		if pv, ok := sip[sb.Property]; ok {
			m.buf.propVec = append(m.buf.propVec, pv)
			m.buf.orders = append(m.buf.orders, sb.Direction)
@@ -179,41 +167,17 @@ func (m *matcher) match(idx *ds.IndexDefinition, sip serializedIndexablePmap) (i
	return m.buf, true
}
-func (sip serializedIndexablePmap) indexEntries(k ds.Key, idxs []*ds.IndexDefinition) *memStore {
+func (sip serializedIndexablePmap) indexEntries(ns string, idxs []*ds.IndexDefinition) *memStore {
	ret := newMemStore()
	idxColl := ret.SetCollection("idx", nil)
-	// getIdxEnts retrieves an index collection or adds it if it's not there.
-	getIdxEnts := func(qi *ds.IndexDefinition) *memCollection {
-		b := serialize.ToBytes(*qi)
-		idxColl.Set(b, []byte{})
-		return ret.SetCollection(fmt.Sprintf("idx:%s:%s", k.Namespace(), b), nil)
-	}
-
-	keyData := serialize.ToBytes(k)
-
-	walkPermutations := func(prefix []byte, irg indexRowGen, ents *memCollection) {
-		irg.permute(func(data []byte) {
-			buf := bytes.NewBuffer(make([]byte, 0, len(prefix)+len(data)+len(keyData)))
-			buf.Write(prefix)
-			buf.Write(data)
-			buf.Write(keyData)
-			ents.Set(buf.Bytes(), []byte{})
-		})
-	}
	mtch := matcher{}
	for _, idx := range idxs {
-		if irg, ok := mtch.match(idx, sip); ok {
-			idxEnts := getIdxEnts(idx)
-			if len(irg.propVec) == 0 {
-				idxEnts.Set(keyData, []byte{}) // propless index, e.g. kind -> key = nil
-			} else if idx.Ancestor {
-				for ancKey := k; ancKey != nil; ancKey = ancKey.Parent() {
-					walkPermutations(serialize.ToBytes(ancKey), irg, idxEnts)
-				}
-			} else {
-				walkPermutations(nil, irg, idxEnts)
-			}
+		if irg, ok := mtch.match(idx.NormalizeOrder(), sip); ok {
+			idxBin := serialize.ToBytes(*idx)
+			idxColl.Set(idxBin, []byte{})
+			coll := ret.SetCollection(fmt.Sprintf("idx:%s:%s", ns, idxBin), nil)
+			irg.permute(coll.Set)
		}
	}
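The explicit keyData suffix, the ancestor loop, and walkPermutations disappear because the key material now flows through the same path as every other column: partiallySerialize publishes '__key__' and '__ancestor__', and matching against idx.NormalizeOrder() (presumably a normalized column list that maps Ancestor to an '__ancestor__' column and ends in '__key__'; that helper is not shown in this file) lets permute emit complete rows directly through coll.Set. Roughly, assuming a normalized order of __ancestor__, Field, __key__, one emitted row would look like:

// Hypothetical layout of one row written by irg.permute(coll.Set) into the
// "idx:<ns>:<serialized IndexDefinition>" collection:
//
//   key:   <ancestor key bytes> + <Field bytes, 0xFF-inverted if DESCENDING> + <entity key bytes>
//   value: []byte{}  (all of the information lives in the key)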
@@ -229,9 +193,7 @@ func getCompIdxs(idxColl *memCollection) []*ds.IndexDefinition {
			return false
		}
		qi, err := serialize.ReadIndexDefinition(bytes.NewBuffer(i.Key))
-		if err != nil {
-			panic(err) // memory corruption
-		}
+		memoryCorruption(err)
		compIdx = append(compIdx, &qi)
		return true
	})
@@ -284,7 +246,7 @@ func mergeIndexes(ns string, store, oldIdx, newIdx *memStore) {
				}
			})
		default:
-			panic("impossible")
+			impossible(fmt.Errorf("both values from gkvCollide were nil?"))
		}
		// TODO(riannucci): remove entries from idxColl and remove index collections
		// when there are no index entries for that index any more.
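memoryCorruption and impossible are not defined in this file; presumably they are small helpers that panic on a non-nil error, replacing the repeated inline "if err != nil { panic(err) }" blocks. A guess at their shape, for reading purposes only (the real definitions live elsewhere in this CL and may wrap the error differently):

package memory

import "fmt"

// Hypothetical shapes of the helpers used in the hunks above.
func memoryCorruption(err error) {
	if err != nil {
		panic(fmt.Errorf("memory corruption: %s", err))
	}
}

func impossible(err error) {
	if err != nil {
		panic(fmt.Errorf("impossible state: %s", err))
	}
}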
@@ -294,15 +256,13 @@
func addIndex(store *memStore, ns string, compIdx []*ds.IndexDefinition) {
	store.GetCollection("ents:"+ns).VisitItemsAscend(nil, true, func(i *gkvlite.Item) bool {
		pm, err := rpmWoCtx(i.Val, ns)
-		if err != nil {
-			panic(err) // memory corruption
-		}
+		memoryCorruption(err)
+
		k, err := serialize.ReadKey(bytes.NewBuffer(i.Key), serialize.WithoutContext, globalAppID, ns)
-		if err != nil {
-			panic(err)
-		}
-		sip := partiallySerialize(pm)
-		mergeIndexes(ns, store, newMemStore(), sip.indexEntries(k, compIdx))
+		memoryCorruption(err)
+
+		sip := partiallySerialize(k, pm)
+		mergeIndexes(ns, store, newMemStore(), sip.indexEntries(k.Namespace(), compIdx))
		return true
	})
}