Chromium Code Reviews | Index: impl/memory/datastore_query.go |
| diff --git a/impl/memory/datastore_query.go b/impl/memory/datastore_query.go |
| index 65b6b311d44a01bc1f0c5f29e9c6969118b28592..3ec1e8223e4239ead53c48039e08bc95b1d5d97a 100644 |
| --- a/impl/memory/datastore_query.go |
| +++ b/impl/memory/datastore_query.go |
| @@ -9,8 +9,6 @@ import ( |
| "encoding/base64" |
| "errors" |
| "fmt" |
| - "math" |
| - "strings" |
| ds "github.com/luci/gae/service/datastore" |
| "github.com/luci/gae/service/datastore/serialize" |
| @@ -25,46 +23,6 @@ const MaxQueryComponents = 100 |
| var errQueryDone = errors.New("query is done") |
| -type queryOp int |
| - |
| -const ( |
| - qInvalid queryOp = iota |
| - qEqual |
| - qLessThan |
| - qLessEq |
| - qGreaterEq |
| - qGreaterThan |
| -) |
| - |
| -var queryOpMap = map[string]queryOp{ |
| - "=": qEqual, |
| - "<": qLessThan, |
| - "<=": qLessEq, |
| - ">=": qGreaterEq, |
| - ">": qGreaterThan, |
| -} |
| - |
| -type queryFilter struct { |
| - prop string |
| - op queryOp |
| - value interface{} |
| -} |
| - |
| -func parseFilter(f string) (prop string, op queryOp, err error) { |
| - toks := strings.SplitN(strings.TrimSpace(f), " ", 2) |
| - if len(toks) != 2 { |
| - err = errors.New("datastore: invalid filter: " + f) |
| - } else { |
| - op = queryOpMap[toks[1]] |
| - if op == qInvalid { |
| - err = fmt.Errorf("datastore: invalid operator %q in filter %q", toks[1], f) |
| - } else { |
| - prop = toks[0] |
| - } |
| - } |
| - return |
| -} |
| - |
| // A queryCursor is: |
| // {#orders} ++ IndexColumn* ++ RawRowData |
| // IndexColumn will always contain __key__ as the last column, and so #orders |
| @@ -116,78 +74,6 @@ func (q queryCursor) decode() ([]ds.IndexColumn, []byte, error) { |
| return cols, buf.Bytes(), nil |
| } |
| -type queryIneqFilter struct { |
| - prop string |
| - |
| - start []byte |
| - end []byte |
| -} |
| - |
| -// constrain 'folds' a new inequality into the current inequality filter. |
| -// |
| -// It will bump the end bound down, or the start bound up, assuming the incoming |
| -// constraint does so. |
| -// |
| -// It returns true iff the filter is overconstrained (i.e. start > end) |
| -func (q *queryIneqFilter) constrain(op queryOp, val []byte) bool { |
| - switch op { |
| - case qLessEq: |
| - val = increment(val) |
| - fallthrough |
| - case qLessThan: |
| - // adjust upper bound downwards |
| - if q.end == nil || bytes.Compare(q.end, val) > 0 { |
| - q.end = val |
| - } |
| - |
| - case qGreaterThan: |
| - val = increment(val) |
| - fallthrough |
| - case qGreaterEq: |
| - // adjust lower bound upwards |
| - if q.start == nil || bytes.Compare(q.start, val) < 0 { |
| - q.start = val |
| - } |
| - |
| - default: |
| - impossible(fmt.Errorf("constrain cannot handle filter op %d", op)) |
| - } |
| - |
| - if q.start != nil && q.end != nil { |
| - return bytes.Compare(q.start, q.end) >= 0 |
| - } |
| - return false |
| -} |
| - |
| -type queryImpl struct { |
| - ns string |
| - |
| - kind string |
| - |
| - // prop -> encoded values (which are ds.Property objects) |
| - // "__ancestor__" is the key for Ancestor queries. |
| - eqFilters map[string]stringset.Set |
| - ineqFilter queryIneqFilter |
| - order []ds.IndexColumn |
| - startCursor []byte |
| - startCursorColumns []ds.IndexColumn |
| - endCursor []byte |
| - endCursorColumns []ds.IndexColumn |
| - |
| - // All of these are applied in post (e.g. not during the native index scan). |
| - distinct bool |
| - eventualConsistency bool |
| - keysOnly bool |
| - limitSet bool |
| - limit int32 |
| - offset int32 |
| - project []string |
| - |
| - err error |
| -} |
| - |
| -var _ ds.Query = (*queryImpl)(nil) |
| - |
| func sortOrdersEqual(as, bs []ds.IndexColumn) bool { |
| if len(as) != len(bs) { |
| return false |
| @@ -200,121 +86,88 @@ func sortOrdersEqual(as, bs []ds.IndexColumn) bool { |
| return true |
| } |
| -func (q *queryImpl) reduce(ns string, isTxn bool) (*reducedQuery, error) { |
| - if q.err != nil { |
| - return nil, q.err |
| +func numComponents(fq *ds.FinalizedQuery) int { |
| + numComponents := len(fq.Orders()) |
| + if p, _, _ := fq.IneqFilterLow(); p != "" { |
| + numComponents++ |
| } |
| - if ns != q.ns { |
| - return nil, errors.New( |
| - "gae/memory: Namespace mismatched. Query and Datastore don't agree " + |
| - "on the current namespace") |
| + if p, _, _ := fq.IneqFilterHigh(); p != "" { |
| + numComponents++ |
| + } |
| + for _, v := range fq.EqFilters() { |
| + numComponents += v.Len() |
| } |
| - if isTxn && q.eqFilters["__ancestor__"] == nil { |
| - return nil, errors.New( |
| - "gae/memory: Only ancestor queries are allowed inside transactions") |
| + return numComponents |
| +} |
| + |
| +func reduce(fq *ds.FinalizedQuery, ns string, isTxn bool) (*reducedQuery, error) { |
| + if err := fq.Valid(globalAppID, ns); err != nil { |
| + return nil, err |
| } |
| - if q.numComponents() > MaxQueryComponents { |
| + if isTxn && fq.Ancestor() == nil { |
| + return nil, fmt.Errorf("queries within a transaction must include an Ancestor filter") |
| + } |
| + if num := numComponents(fq); num > MaxQueryComponents { |
| return nil, fmt.Errorf( |
| "gae/memory: query is too large. may not have more than "+ |
| "%d filters + sort orders + ancestor total: had %d", |
| - MaxQueryComponents, q.numComponents()) |
| - } |
| - if len(q.project) == 0 && q.distinct { |
| - // This must be delayed, because q.Distinct().Project("foo") is a valid |
| - // construction. If we checked this in Distinct, it could be too early, and |
| - // checking it in Project doesn't matter. |
| - return nil, errors.New( |
| - "gae/memory: Distinct() only makes sense on projection queries.") |
| - } |
| - if q.eqFilters["__ancestor__"] != nil && q.ineqFilter.prop == "__key__" { |
| - ancS, _ := q.eqFilters["__ancestor__"].Peek() |
| - anc := []byte(ancS[:len(ancS)-1]) |
| - if q.ineqFilter.start != nil && !bytes.HasPrefix(q.ineqFilter.start, anc) { |
| - return nil, errors.New( |
| - "gae/memory: __key__ inequality filter has a value outside of Ancestor()") |
| - } |
| - if q.ineqFilter.end != nil && !bytes.HasPrefix(q.ineqFilter.end, anc) { |
| - return nil, errors.New( |
| - "gae/memory: __key__ inequality filter has a value outside of Ancestor()") |
| - } |
| + MaxQueryComponents, num) |
| } |
| ret := &reducedQuery{ |
| - ns: q.ns, |
| - kind: q.kind, |
| - eqFilters: q.eqFilters, |
| - suffixFormat: q.order, |
| + ns: ns, |
| + kind: fq.Kind(), |
| + suffixFormat: fq.Orders(), |
| } |
| - // if len(q.suffixFormat) > 0, queryImpl already enforces that the first order |
| - // is the same as the inequality. Otherwise we need to add it. |
| - if len(ret.suffixFormat) == 0 && q.ineqFilter.prop != "" { |
| - ret.suffixFormat = []ds.IndexColumn{{Property: q.ineqFilter.prop}} |
| - } |
| - |
| - // The inequality is specified in natural (ascending) order in the query's |
| - // Filter syntax, but the order information may indicate to use a descending |
| - // index column for it. If that's the case, then we must invert, swap and |
| - // increment the inequality endpoints. |
| - // |
| - // Invert so that the desired numbers are represented correctly in the index. |
| - // Swap so that our iterators still go from >= start to < end. |
| - // Increment so that >= and < get correctly bounded (since the iterator is |
| - // still using natural bytes ordering) |
| - if q.ineqFilter.prop != "" && ret.suffixFormat[0].Direction == ds.DESCENDING { |
| - hi, lo := []byte(nil), []byte(nil) |
| - if len(q.ineqFilter.end) > 0 { |
| - hi = increment(invert(q.ineqFilter.end)) |
| - } |
| - if len(q.ineqFilter.start) > 0 { |
| - lo = increment(invert(q.ineqFilter.start)) |
| + eqFilts := fq.EqFilters() |
| + ret.eqFilters = make(map[string]stringset.Set, len(eqFilts)) |
| + for prop, vals := range eqFilts { |
| + sVals := stringset.New(len(vals)) |
| + for _, v := range vals { |
| + sVals.Add(string(serialize.ToBytes(v))) |
| } |
| - q.ineqFilter.end, q.ineqFilter.start = lo, hi |
| + ret.eqFilters[prop] = sVals |
| } |
| - // Add any projection columns not mentioned in the user-defined order as |
| - // ASCENDING orders. Technically we could be smart and automatically use |
| - // a DESCENDING ordered index, if it fit, but the logic gets insane, since all |
| - // suffixes of all used indexes need to be PRECISELY equal (and so you'd have |
| - // to hunt/invalidate/something to find the combination of indexes that are |
| - // compatible with each other as well as the query). If you want to use |
| - // a DESCENDING column, just add it to the user sort order, and this loop will |
| - // not synthesize a new suffix entry for it. |
| - // |
| - // NOTE: if you want to use an index that sorts by -__key__, you MUST |
| - // include all of the projected fields for that index in the order explicitly. |
| - // Otherwise the generated suffixFormat will be wacky. So: |
| - // Query("Foo").Project("A", "B").Order("A").Order("-__key__") |
| - // |
| - // will turn into a suffixFormat of: |
| - // A, ASCENDING |
| - // __key__, DESCENDING |
| - // B, ASCENDING |
| - // __key__, ASCENDING |
| - // |
| - // To prevent this, your query should have another Order("B") clause before |
| - // the -__key__ clause. |
| - originalStop := len(ret.suffixFormat) |
| - for _, p := range q.project { |
| - needAdd := true |
| - // originalStop prevents this loop from getting longer every time we add |
| - // a projected property. |
| - for _, col := range ret.suffixFormat[:originalStop] { |
| - if col.Property == p { |
| - needAdd = false |
| - break |
| + startD := []byte(nil) |
| + endD := []byte(nil) |
| + if ineqProp := fq.IneqFilterProp(); ineqProp != "" { |
| + _, startOp, startV := fq.IneqFilterLow() |
|
dnj
2015/09/18 16:47:57
Maybe comment that the goal here is to bound >=, <
iannucci
2015/09/18 22:25:47
Done I think
|
| + if startOp != "" { |
| + startD = serialize.ToBytes(startV) |
| + if startOp == ">" { |
| + startD = increment(startD) |
| } |
| } |
| - if needAdd { |
| - ret.suffixFormat = append(ret.suffixFormat, ds.IndexColumn{Property: p}) |
| + |
| + _, endOp, endV := fq.IneqFilterHigh() |
| + if endOp != "" { |
| + endD = serialize.ToBytes(endV) |
| + if endOp == "<=" { |
| + endD = increment(endD) |
| + } |
| } |
| - } |
| - // If the suffix format ends with __key__ already (e.g. .Order("__key__")), |
| - // then we're good to go. Otherwise we need to add it as the last bit of the |
| - // suffix, since all indexes implicitly have it as the last column. |
| - if len(ret.suffixFormat) == 0 || ret.suffixFormat[len(ret.suffixFormat)-1].Property != "__key__" { |
| - ret.suffixFormat = append(ret.suffixFormat, ds.IndexColumn{Property: "__key__"}) |
| + // The inequality is specified in natural (ascending) order in the query's |
| + // Filter syntax, but the order information may indicate to use a descending |
| + // index column for it. If that's the case, then we must invert, swap and |
| + // increment the inequality endpoints. |
| + // |
| + // Invert so that the desired numbers are represented correctly in the index. |
| + // Swap so that our iterators still go from >= start to < end. |
| + // Increment so that >= and < get correctly bounded (since the iterator is |
| + // still using natural bytes ordering) |
|
dnj
2015/09/18 16:47:57
Spaces at beginning of comment.
iannucci
2015/09/18 22:25:47
done
|
| + if ret.suffixFormat[0].Direction == ds.DESCENDING { |
| + hi, lo := []byte(nil), []byte(nil) |
| + if len(startD) > 0 { |
| + lo = increment(invert(startD)) |
| + } |
| + if len(endD) > 0 { |
| + hi = increment(invert(endD)) |
| + } |
| + endD, startD = lo, hi |
| + } |
| } |
|
iannucci
2015/09/18 04:31:52
all this missing stuff is now common code in "service/datastore" [comment truncated in extraction — presumably referring to the shared query code in the service/datastore package]
|
| // Now we check the start and end cursors. |
| @@ -322,23 +175,35 @@ func (q *queryImpl) reduce(ns string, isTxn bool) (*reducedQuery, error) { |
| // Cursors are composed of a list of IndexColumns at the beginning, followed |
| // by the raw bytes to use for the suffix. The cursor is only valid if all of |
| // its IndexColumns match our proposed suffixFormat, as calculated above. |
| - ret.start = q.ineqFilter.start |
| - if q.startCursor != nil { |
| - if !sortOrdersEqual(q.startCursorColumns, ret.suffixFormat) { |
| - return nil, errors.New("gae/memory: start cursor is invalid for this query.") |
| - } |
| - if ret.start == nil || bytes.Compare(ret.start, q.startCursor) < 0 { |
| - ret.start = q.startCursor |
| - } |
| - } |
| + ret.start = startD |
| + ret.end = endD |
| + if start, end := fq.Bounds(); start != nil || end != nil { |
| + if start != nil { |
| + startCols, startD, err := start.(queryCursor).decode() |
| + if err != nil { |
| + return nil, err |
| + } |
| - ret.end = q.ineqFilter.end |
| - if q.endCursor != nil { |
| - if !sortOrdersEqual(q.endCursorColumns, ret.suffixFormat) { |
| - return nil, errors.New("gae/memory: end cursor is invalid for this query.") |
| + if !sortOrdersEqual(startCols, ret.suffixFormat) { |
| + return nil, errors.New("gae/memory: start cursor is invalid for this query.") |
| + } |
| + if ret.start == nil || bytes.Compare(ret.start, startD) < 0 { |
| + ret.start = startD |
| + } |
| } |
| - if ret.end == nil || bytes.Compare(q.endCursor, ret.end) < 0 { |
| - ret.end = q.endCursor |
| + |
| + if end != nil { |
| + endCols, endD, err := end.(queryCursor).decode() |
| + if err != nil { |
| + return nil, err |
| + } |
| + |
| + if !sortOrdersEqual(endCols, ret.suffixFormat) { |
| + return nil, errors.New("gae/memory: end cursor is invalid for this query.") |
| + } |
| + if ret.end == nil || bytes.Compare(endD, ret.end) < 0 { |
| + ret.end = endD |
| + } |
|
iannucci
2015/09/18 04:31:52
This changed because we don't serialize properties
|
| } |
| } |
| @@ -358,345 +223,3 @@ func (q *queryImpl) reduce(ns string, isTxn bool) (*reducedQuery, error) { |
| return ret, nil |
| } |
| - |
| -func (q *queryImpl) numComponents() int { |
| - numComponents := len(q.order) |
| - if q.ineqFilter.prop != "" { |
| - if q.ineqFilter.start != nil { |
| - numComponents++ |
| - } |
| - if q.ineqFilter.end != nil { |
| - numComponents++ |
| - } |
| - } |
| - for _, v := range q.eqFilters { |
| - numComponents += v.Len() |
| - } |
| - return numComponents |
| -} |
| - |
| -// checkMutateClone sees if the query has an error. If not, it clones the query, |
| -// and assigns the output of `check` to the query error slot. If check returns |
| -// nil, it calls `mutate` on the cloned query. The (possibly new) query is then |
| -// returned. |
| -func (q *queryImpl) checkMutateClone(check func() error, mutate func(*queryImpl)) *queryImpl { |
| - if q.err != nil { |
| - return q |
| - } |
| - nq := *q |
| - nq.eqFilters = make(map[string]stringset.Set, len(q.eqFilters)) |
| - for prop, vals := range q.eqFilters { |
| - nq.eqFilters[prop] = vals.Dup() |
| - } |
| - nq.order = make([]ds.IndexColumn, len(q.order)) |
| - copy(nq.order, q.order) |
| - nq.project = make([]string, len(q.project)) |
| - copy(nq.project, q.project) |
| - if check != nil { |
| - nq.err = check() |
| - } |
| - if nq.err == nil { |
| - mutate(&nq) |
| - } |
| - return &nq |
| -} |
| - |
| -func (q *queryImpl) Ancestor(k ds.Key) ds.Query { |
| - return q.checkMutateClone( |
| - func() error { |
| - if k == nil { |
| - // SDK has an explicit nil-check |
| - return errors.New("datastore: nil query ancestor") |
| - } |
| - if k.Namespace() != q.ns { |
| - return fmt.Errorf("bad namespace: %q (expected %q)", k.Namespace(), q.ns) |
| - } |
| - if !k.Valid(false, globalAppID, q.ns) { |
| - // technically the SDK implementation does a Weird Thing (tm) if both the |
| - // stringID and intID are set on a key; it only serializes the stringID in |
| - // the proto. This means that if you set the Ancestor to an invalid key, |
| - // you'll never actually hear about it. Instead of doing that insanity, we |
| - // just swap to an error here. |
| - return ds.ErrInvalidKey |
| - } |
| - if q.eqFilters["__ancestor__"] != nil { |
| - return errors.New("cannot have more than one ancestor") |
| - } |
| - return nil |
| - }, |
| - func(q *queryImpl) { |
| - q.addEqFilt("__ancestor__", ds.MkProperty(k)) |
| - }) |
| -} |
| - |
| -func (q *queryImpl) Distinct() ds.Query { |
| - return q.checkMutateClone(nil, func(q *queryImpl) { |
| - q.distinct = true |
| - }) |
| -} |
| - |
| -func (q *queryImpl) addEqFilt(prop string, p ds.Property) { |
| - binVal := string(serialize.ToBytes(p)) |
| - if cur, ok := q.eqFilters[prop]; !ok { |
| - s := stringset.New(1) |
| - s.Add(binVal) |
| - q.eqFilters[prop] = s |
| - } else { |
| - cur.Add(binVal) |
| - } |
| -} |
| - |
| -func (q *queryImpl) Filter(fStr string, val interface{}) ds.Query { |
| - prop := "" |
| - op := qInvalid |
| - p := ds.Property{} |
| - return q.checkMutateClone( |
| - func() error { |
| - var err error |
| - prop, op, err = parseFilter(fStr) |
| - if err != nil { |
| - return err |
| - } |
| - |
| - if q.kind == "" && prop != "__key__" { |
| - // https://cloud.google.com/appengine/docs/go/datastore/queries#Go_Kindless_queries |
| - return fmt.Errorf( |
| - "kindless queries can only filter on __key__, got %q", fStr) |
| - } |
| - |
| - err = p.SetValue(val, ds.ShouldIndex) |
| - if err != nil { |
| - return err |
| - } |
| - |
| - if p.Type() == ds.PTKey { |
| - if !p.Value().(ds.Key).Valid(false, globalAppID, q.ns) { |
| - return ds.ErrInvalidKey |
| - } |
| - } |
| - |
| - if prop == "__key__" { |
| - if op == qEqual { |
| - return fmt.Errorf( |
| - "query equality filter on __key__ is silly: %q", fStr) |
| - } |
| - if p.Type() != ds.PTKey { |
| - return fmt.Errorf("__key__ filter value is not a key: %T", val) |
| - } |
| - } else if strings.HasPrefix(prop, "__") && strings.HasSuffix(prop, "__") { |
| - return fmt.Errorf("filter on reserved property: %q", prop) |
| - } |
| - |
| - if op != qEqual { |
| - if q.ineqFilter.prop != "" && q.ineqFilter.prop != prop { |
| - return fmt.Errorf( |
| - "inequality filters on multiple properties: %q and %q", |
| - q.ineqFilter.prop, prop) |
| - } |
| - if len(q.order) > 0 && q.order[0].Property != prop { |
| - return fmt.Errorf( |
| - "first sort order must match inequality filter: %q v %q", |
| - q.order[0].Property, prop) |
| - } |
| - } else { |
| - for _, p := range q.project { |
| - if p == prop { |
| - return fmt.Errorf( |
| - "cannot project on field which is used in an equality filter: %q", |
| - prop) |
| - } |
| - } |
| - } |
| - return err |
| - }, |
| - func(q *queryImpl) { |
| - if op == qEqual { |
| - // add it to eq filters |
| - q.addEqFilt(prop, p) |
| - |
| - // remove it from sort orders. |
| - // https://cloud.google.com/appengine/docs/go/datastore/queries#sort_orders_are_ignored_on_properties_with_equality_filters |
| - toRm := -1 |
| - for i, o := range q.order { |
| - if o.Property == prop { |
| - toRm = i |
| - break |
| - } |
| - } |
| - if toRm >= 0 { |
| - q.order = append(q.order[:toRm], q.order[toRm+1:]...) |
| - } |
| - } else { |
| - q.ineqFilter.prop = prop |
| - if q.ineqFilter.constrain(op, serialize.ToBytes(p)) { |
| - q.err = errQueryDone |
| - } |
| - } |
| - }) |
| -} |
| - |
| -func (q *queryImpl) Order(prop string) ds.Query { |
| - col := ds.IndexColumn{} |
| - return q.checkMutateClone( |
| - func() error { |
| - // check that first order == first inequality. |
| - // if order is an equality already, ignore it |
| - col.Property = strings.TrimSpace(prop) |
| - if strings.HasPrefix(prop, "-") { |
| - col.Direction = ds.DESCENDING |
| - col.Property = strings.TrimSpace(prop[1:]) |
| - } else if strings.HasPrefix(prop, "+") { |
| - return fmt.Errorf("datastore: invalid order: %q", prop) |
| - } |
| - if len(col.Property) == 0 { |
| - return errors.New("datastore: empty order") |
| - } |
| - if len(q.order) == 0 && q.ineqFilter.prop != "" && q.ineqFilter.prop != col.Property { |
| - return fmt.Errorf( |
| - "first sort order must match inequality filter: %q v %q", |
| - prop, q.ineqFilter.prop) |
| - } |
| - if q.kind == "" && (col.Property != "__key__" || col.Direction != ds.ASCENDING) { |
| - return fmt.Errorf("invalid order for kindless query: %#v", col) |
| - } |
| - return nil |
| - }, |
| - func(q *queryImpl) { |
| - if _, ok := q.eqFilters[col.Property]; ok { |
| - // skip it if it's an equality filter |
| - // https://cloud.google.com/appengine/docs/go/datastore/queries#sort_orders_are_ignored_on_properties_with_equality_filters |
| - return |
| - } |
| - for _, order := range q.order { |
| - if order.Property == col.Property { |
| - // can't sort by the same order twice |
| - return |
| - } |
| - } |
| - q.order = append(q.order, col) |
| - }) |
| -} |
| - |
| -func (q *queryImpl) Project(fieldName ...string) ds.Query { |
| - return q.checkMutateClone( |
| - func() error { |
| - if q.keysOnly { |
| - return errors.New("cannot project a keysOnly query") |
| - } |
| - dupCheck := stringset.New(len(fieldName) + len(q.project)) |
| - for _, f := range fieldName { |
| - if !dupCheck.Add(f) { |
| - return fmt.Errorf("cannot project on the same field twice: %q", f) |
| - } |
| - if f == "" { |
| - return errors.New("cannot project on an empty field name") |
| - } |
| - if f == "__key__" { |
| - return fmt.Errorf("cannot project on __key__") |
| - } |
| - if _, ok := q.eqFilters[f]; ok { |
| - return fmt.Errorf( |
| - "cannot project on field which is used in an equality filter: %q", f) |
| - } |
| - for _, p := range q.project { |
| - if p == f { |
| - return fmt.Errorf("cannot project on the same field twice: %q", f) |
| - } |
| - } |
| - } |
| - return nil |
| - }, |
| - func(q *queryImpl) { |
| - q.project = append(q.project, fieldName...) |
| - }) |
| -} |
| - |
| -func (q *queryImpl) KeysOnly() ds.Query { |
| - return q.checkMutateClone( |
| - func() error { |
| - if len(q.project) != 0 { |
| - return errors.New("cannot project a keysOnly query") |
| - } |
| - return nil |
| - }, |
| - func(q *queryImpl) { |
| - q.keysOnly = true |
| - }) |
| -} |
| - |
| -func (q *queryImpl) Limit(limit int) ds.Query { |
| - return q.checkMutateClone( |
| - func() error { |
| - // nonsensically... ANY negative value means 'unlimited'. *shakes head* |
| - if limit < math.MinInt32 || limit > math.MaxInt32 { |
| - return errors.New("datastore: query limit overflow") |
| - } |
| - return nil |
| - }, |
| - func(q *queryImpl) { |
| - q.limitSet = true |
| - q.limit = int32(limit) |
| - }) |
| -} |
| - |
| -func (q *queryImpl) Offset(offset int) ds.Query { |
| - return q.checkMutateClone( |
| - func() error { |
| - if offset < 0 { |
| - return errors.New("datastore: negative query offset") |
| - } |
| - if offset > math.MaxInt32 { |
| - return errors.New("datastore: query offset overflow") |
| - } |
| - return nil |
| - }, |
| - func(q *queryImpl) { |
| - q.offset = int32(offset) |
| - }) |
| -} |
| - |
| -func queryCursorCheck(ns, flavor string, current []byte, newCursor ds.Cursor) ([]ds.IndexColumn, []byte, error) { |
| - if current != nil { |
| - return nil, nil, fmt.Errorf("%s cursor is multiply defined", flavor) |
| - } |
| - curs, ok := newCursor.(queryCursor) |
| - if !ok { |
| - return nil, nil, fmt.Errorf("%s cursor is unknown type: %T", flavor, curs) |
| - } |
| - return curs.decode() |
| -} |
| - |
| -func (q *queryImpl) Start(c ds.Cursor) ds.Query { |
| - cols := []ds.IndexColumn(nil) |
| - curs := []byte(nil) |
| - return q.checkMutateClone( |
| - func() (err error) { |
| - cols, curs, err = queryCursorCheck(q.ns, "start", q.startCursor, c) |
| - return |
| - }, |
| - func(q *queryImpl) { |
| - q.startCursorColumns = cols |
| - q.startCursor = curs |
| - }) |
| -} |
| - |
| -func (q *queryImpl) End(c ds.Cursor) ds.Query { |
| - cols := []ds.IndexColumn(nil) |
| - curs := queryCursor(nil) |
| - return q.checkMutateClone( |
| - func() (err error) { |
| - cols, curs, err = queryCursorCheck(q.ns, "end", q.endCursor, c) |
| - return |
| - }, |
| - func(q *queryImpl) { |
| - q.endCursorColumns = cols |
| - q.endCursor = curs |
| - }) |
| -} |
| - |
| -func (q *queryImpl) EventualConsistency() ds.Query { |
| - return q.checkMutateClone( |
| - nil, func(q *queryImpl) { |
| - q.eventualConsistency = true |
| - }) |
| -} |