Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 package memory | 5 package memory |
| 6 | 6 |
| 7 import ( | 7 import ( |
| 8 "bytes" | |
| 8 "encoding/base64" | 9 "encoding/base64" |
| 9 "errors" | 10 "errors" |
| 10 "fmt" | 11 "fmt" |
| 11 "math" | 12 "math" |
| 12 "strings" | 13 "strings" |
| 13 | 14 |
| 14 ds "github.com/luci/gae/service/datastore" | 15 ds "github.com/luci/gae/service/datastore" |
| 15 "github.com/luci/gae/service/datastore/serialize" | 16 "github.com/luci/gae/service/datastore/serialize" |
| 17 "github.com/luci/luci-go/common/cmpbin" | |
| 16 ) | 18 ) |
| 17 | 19 |
| 18 // MaxQueryComponents was lifted from a hard-coded constant in dev_appserver. | 20 // MaxQueryComponents was lifted from a hard-coded constant in dev_appserver. |
| 19 // No idea if it's a real limit or just a convenience in the current dev | 21 // No idea if it's a real limit or just a convenience in the current dev |
| 20 // appserver implementation. | 22 // appserver implementation. |
| 21 const MaxQueryComponents = 100 | 23 const MaxQueryComponents = 100 |
| 22 | 24 |
| 23 var errQueryDone = errors.New("query is done") | 25 var errQueryDone = errors.New("query is done") |
| 24 | 26 |
| 25 type queryOp int | 27 type queryOp int |
| (...skipping 29 matching lines...) | |
| 55 op = queryOpMap[toks[1]] | 57 op = queryOpMap[toks[1]] |
| 56 if op == qInvalid { | 58 if op == qInvalid { |
| 57 err = fmt.Errorf("datastore: invalid operator %q in filter %q", toks[1], f) | 59 err = fmt.Errorf("datastore: invalid operator %q in filter %q", toks[1], f) |
| 58 } else { | 60 } else { |
| 59 prop = toks[0] | 61 prop = toks[0] |
| 60 } | 62 } |
| 61 } | 63 } |
| 62 return | 64 return |
| 63 } | 65 } |
| 64 | 66 |
| 65 type queryCursor string | 67 // A queryCursor is: |
| 68 // {#orders} ++ IndexColumn* ++ RawRowData | |
| 69 // IndexColumn will always contain __key__ as the last column, and so #orders | |
| 70 // must always be >= 1 | |
| 71 type queryCursor []byte | |
| 72 | |
| 73 func newCursor(s string) (ds.Cursor, error) { | |
| 74 » d, err := base64.URLEncoding.DecodeString(s) | |
| 75 » if err != nil { | |
| 76 » » return nil, fmt.Errorf("Failed to Base64-decode cursor: %s", err) | |
| 77 » } | |
| 78 » c := queryCursor(d) | |
| 79 » if _, _, err := c.decode(); err != nil { | |
| 80 » » return nil, err | |
| 81 » } | |
| 82 » return c, nil | |
| 83 } | |
| 66 | 84 |
| 67 func (q queryCursor) String() string { return base64.URLEncoding.EncodeToString([]byte(q)) } | 85 func (q queryCursor) String() string { return base64.URLEncoding.EncodeToString([]byte(q)) } |
| 68 func (q queryCursor) Valid() bool { return q != "" } | 86 |
| 87 // decode returns the encoded IndexColumns, the raw row (cursor) data, or an | |
| 88 // error. | |
| 89 func (q queryCursor) decode() ([]ds.IndexColumn, []byte, error) { | |
| 90 » buf := bytes.NewBuffer([]byte(q)) | |
| 91 » count, _, err := cmpbin.ReadUint(buf) | |
| 92 » if err != nil { | |
| 93 » » return nil, nil, fmt.Errorf("invalid cursor: bad prefix number") | |
| 94 » } | |
| 95 | |
| 96 » if count == 0 || count > ds.MaxIndexColumns { | |
| 97 » » return nil, nil, fmt.Errorf("invalid cursor: bad column count %d", count) | |
| 98 » } | |
| 99 | |
| 100 » if count == 0 { | |
| 101 » » return nil, nil, fmt.Errorf("invalid cursor: zero prefix number") | |
| 102 » } | |
| 103 | |
| 104 » cols := make([]ds.IndexColumn, count) | |
| 105 » for i := range cols { | |
| 106 » » if cols[i], err = serialize.ReadIndexColumn(buf); err != nil { | |
| 107 » » » return nil, nil, fmt.Errorf("invalid cursor: unable to decode IndexColumn %d: %s", i, err) | |
| 108 » » } | |
| 109 » } | |
| 110 | |
| 111 » if cols[len(cols)-1].Property != "__key__" { | |
| 112 » » return nil, nil, fmt.Errorf("invalid cursor: last column was not __key__: %v", cols[len(cols)-1]) | |
| 113 » } | |
| 114 | |
| 115 » return cols, buf.Bytes(), nil | |
| 116 } | |
| 69 | 117 |
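The cursor layout described above ({#orders} ++ IndexColumn* ++ RawRowData) can be pictured with a small standalone sketch. This is an illustration only: it uses encoding/binary varints and a made-up column descriptor instead of the real cmpbin/serialize encodings, so the bytes it produces will not match an actual queryCursor.

```go
// Illustrative stand-in for the queryCursor layout: a varint count of sort
// orders, one descriptor per IndexColumn, then the raw row data the iterator
// should resume from. Real cursors use cmpbin/serialize encodings instead.
package main

import (
	"bytes"
	"encoding/base64"
	"encoding/binary"
	"fmt"
)

type indexColumn struct {
	Property   string
	Descending bool
}

func buildToyCursor(cols []indexColumn, rawRow []byte) string {
	buf := &bytes.Buffer{}
	var tmp [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(tmp[:], uint64(len(cols))) // the {#orders} prefix
	buf.Write(tmp[:n])
	for _, c := range cols { // one descriptor per column; __key__ must be last
		dir := byte(0)
		if c.Descending {
			dir = 1
		}
		buf.WriteByte(dir)
		buf.WriteString(c.Property)
		buf.WriteByte(0) // toy terminator, not the real wire format
	}
	buf.Write(rawRow) // RawRowData suffix
	return base64.URLEncoding.EncodeToString(buf.Bytes())
}

func main() {
	cols := []indexColumn{{Property: "A"}, {Property: "__key__"}}
	fmt.Println(buildToyCursor(cols, []byte{0xde, 0xad, 0xbe, 0xef}))
}
```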
| 70 type queryIneqFilter struct { | 118 type queryIneqFilter struct { |
| 71 prop string | 119 prop string |
| 72 | 120 |
| 73 » low *string | 121 » start []byte |
| 74 » high *string | 122 » end []byte |
| 75 } | |
| 76 | |
| 77 func decodeCursor(s string) (ds.Cursor, error) { | |
| 78 » d, err := base64.URLEncoding.DecodeString(s) | |
| 79 » if err != nil { | |
| 80 » » return nil, fmt.Errorf("Failed to Base64-decode cursor: %s", err) | |
| 81 » } | |
| 82 » c := queryCursor(string(d)) | |
| 83 » if !c.Valid() { | |
| 84 » » return nil, errors.New("Decoded cursor is not valid.") | |
| 85 » } | |
| 86 » return c, nil | |
| 87 } | |
| 88 | |
| 89 func increment(bstr string, positive bool) string { | |
| 90 » lastIdx := len(bstr) - 1 | |
| 91 » last := bstr[lastIdx] | |
| 92 » if positive { | |
| 93 » » if last == 0xFF { | |
| 94 » » » return bstr + "\x00" | |
| 95 » » } | |
| 96 » » return bstr[:lastIdx-1] + string(last+1) | |
| 97 » } else { | |
| 98 » » if last == 0 { | |
| 99 » » » return bstr[:lastIdx-1] | |
| 100 » » } | |
| 101 » » return bstr[:lastIdx-1] + string(last-1) | |
| 102 » } | |
| 103 } | 123 } |
| 104 | 124 |
| 105 // constrain 'folds' a new inequality into the current inequality filter. | 125 // constrain 'folds' a new inequality into the current inequality filter. |
| 106 // | 126 // |
| 107 // It will bump the high bound down, or the low bound up, assuming the incoming | 127 // It will bump the end bound down, or the start bound up, assuming the incoming |
| 108 // constraint does so. | 128 // constraint does so. |
| 109 // | 129 // |
| 110 // It returns true iff the filter is overconstrained (i.e. low > high) | 130 // It returns true iff the filter is overconstrained (i.e. start > end) |
| 111 func (q *queryIneqFilter) constrain(op queryOp, val string) bool { | 131 func (q *queryIneqFilter) constrain(op queryOp, val []byte) bool { |
| 112 switch op { | 132 switch op { |
| 133 case qLessEq: | |
| 134 val = increment(val) | |
| 135 fallthrough | |
| 113 case qLessThan: | 136 case qLessThan: |
| 114 val = increment(val, true) | |
| 115 fallthrough | |
| 116 case qLessEq: | |
| 117 // adjust upper bound downwards | 137 // adjust upper bound downwards |
| 118 » » if q.high == nil || *q.high > val { | 138 » » if q.end == nil || bytes.Compare(q.end, val) > 0 { |
| 119 » » » q.high = &val | 139 » » » q.end = val |
| 120 } | 140 } |
| 121 | 141 |
| 122 case qGreaterThan: | 142 case qGreaterThan: |
| 123 » » val = increment(val, false) | 143 » » val = increment(val) |
| 124 fallthrough | 144 fallthrough |
| 125 case qGreaterEq: | 145 case qGreaterEq: |
| 126 // adjust lower bound upwards | 146 // adjust lower bound upwards |
| 127 » » if q.low == nil || *q.low < val { | 147 » » if q.start == nil || bytes.Compare(q.start, val) < 0 { |
| 128 » » » q.low = &val | 148 » » » q.start = val |
| 129 } | 149 } |
| 130 | 150 |
| 131 default: | 151 default: |
| 132 » » panic(fmt.Errorf("constrain cannot handle filter op %d", op)) | 152 » » impossible(fmt.Errorf("constrain cannot handle filter op %d", op)) |
| 133 } | 153 } |
| 134 | 154 |
| 135 » if q.low != nil && q.high != nil { | 155 » if q.start != nil && q.end != nil { |
| 136 » » return *q.low > *q.high | 156 » » return bytes.Compare(q.start, q.end) >= 0 |
| 137 } | 157 } |
| 138 return false | 158 return false |
| 139 } | 159 } |
| 140 | 160 |
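The fold that constrain performs can be reproduced in a self-contained sketch. The types and the successor helper below are assumptions for illustration only (the CL's own increment helper lives elsewhere in this change); the point is how <= / < and >= / > collapse into a half-open [start, end) byte range via bytes.Compare.

```go
// Minimal sketch of folding inequalities into a [start, end) byte range,
// mirroring queryIneqFilter.constrain. Standalone types; not the package's.
package main

import (
	"bytes"
	"fmt"
)

type ineq struct {
	start, end []byte // nil means unbounded on that side
}

// successor returns the smallest byte string strictly greater than b under
// lexicographic ordering, so "x <= b" can be rewritten as "x < successor(b)".
// (Illustrative only; the real CL uses its own increment helper.)
func successor(b []byte) []byte {
	return append(append([]byte(nil), b...), 0x00)
}

// overconstrained reports whether the range has collapsed to nothing.
func (q *ineq) overconstrained() bool {
	return q.start != nil && q.end != nil && bytes.Compare(q.start, q.end) >= 0
}

// constrainLess tightens the end bound (x < val); constrainGreaterEq tightens
// the start bound (x >= val). Both only ever narrow the range, never widen it.
func (q *ineq) constrainLess(val []byte) bool {
	if q.end == nil || bytes.Compare(q.end, val) > 0 {
		q.end = val
	}
	return q.overconstrained()
}

func (q *ineq) constrainGreaterEq(val []byte) bool {
	if q.start == nil || bytes.Compare(q.start, val) < 0 {
		q.start = val
	}
	return q.overconstrained()
}

func main() {
	q := &ineq{}
	q.constrainGreaterEq([]byte("bbb"))       // x >= "bbb"
	q.constrainLess(successor([]byte("bbz"))) // x <= "bbz"  =>  x < "bbz\x00"
	fmt.Printf("start=%q end=%q\n", q.start, q.end)
	fmt.Println("empty:", q.constrainGreaterEq([]byte("zzz"))) // start > end now
}
```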
| 141 type queryImpl struct { | 161 type queryImpl struct { |
| 142 ns string | 162 ns string |
| 143 | 163 |
| 144 » kind string | 164 » kind string |
| 145 » ancestor ds.Key | 165 |
| 146 | 166 » // prop -> encoded values (which are ds.Property objects) |
| 147 » // prop -> encoded values | 167 » // "__ancestor__" is the key for Ancestor queries. |
| 148 » eqFilters map[string]map[string]struct{} | 168 » eqFilters map[string]map[string]struct{} |
| 149 » ineqFilter queryIneqFilter | 169 » ineqFilter queryIneqFilter |
| 150 » order []ds.IndexColumn | 170 » order []ds.IndexColumn |
| 151 » project map[string]struct{} | 171 » startCursor []byte |
| 152 | 172 » startCursorColumns []ds.IndexColumn |
| 173 » endCursor []byte | |
| 174 » endCursorColumns []ds.IndexColumn | |
| 175 | |
| 176 » // All of these are applied in post (e.g. not during the native index scan). | |
| 153 distinct bool | 177 distinct bool |
| 154 eventualConsistency bool | 178 eventualConsistency bool |
| 155 keysOnly bool | 179 keysOnly bool |
| 180 limitSet bool | |
| 156 limit int32 | 181 limit int32 |
| 157 offset int32 | 182 offset int32 |
| 158 | 183 » project []string |
| 159 » start queryCursor | |
| 160 » end queryCursor | |
| 161 | 184 |
| 162 err error | 185 err error |
| 163 } | 186 } |
| 164 | 187 |
| 165 var _ ds.Query = (*queryImpl)(nil) | 188 var _ ds.Query = (*queryImpl)(nil) |
| 166 | 189 |
| 167 func (q *queryImpl) valid(ns string, isTxn bool) (done bool, err error) { | 190 func sortOrdersEqual(as, bs []ds.IndexColumn) bool { |
| 168 » if q.err == errQueryDone { | 191 » if len(as) != len(bs) { |
| 169 » » done = true | 192 » » return false |
| 170 » } else if q.err != nil { | 193 » } |
| 171 » » err = q.err | 194 » for i, a := range as { |
| 172 » } else if ns != q.ns { | 195 » » if a != bs[i] { |
| 173 » » err = errors.New( | 196 » » » return false |
| 197 » » } | |
| 198 » } | |
| 199 » return true | |
| 200 } | |
| 201 | |
| 202 func (q *queryImpl) reduce(ns string, isTxn bool) (*reducedQuery, error) { | |
| 203 » if q.err != nil { | |
| 204 » » return nil, q.err | |
| 205 » } | |
| 206 » if ns != q.ns { | |
| 207 » » return nil, errors.New( | |
| 174 "gae/memory: Namespace mismatched. Query and Datastore don't agree " + | 208 "gae/memory: Namespace mismatched. Query and Datastore don't agree " + |
| 175 "on the current namespace") | 209 "on the current namespace") |
| 176 » } else if isTxn && q.ancestor == nil { | 210 » } |
| 177 » » err = errors.New( | 211 » if isTxn && q.eqFilters["__ancestor__"] == nil { |
| 212 » » return nil, errors.New( | |
| 178 "gae/memory: Only ancestor queries are allowed inside transactions") | 213 » } |
| 179 » } else if q.numComponents() > MaxQueryComponents { | 214 » } |
| 180 » » err = fmt.Errorf( | 215 » if q.numComponents() > MaxQueryComponents { |
| 216 » » return nil, fmt.Errorf( | |
| 181 "gae/memory: query is too large. may not have more than "+ | 217 "gae/memory: query is too large. may not have more than "+ |
| 182 "%d filters + sort orders + ancestor total: had %d", | 218 "%d filters + sort orders + ancestor total: had %d", |
| 183 MaxQueryComponents, q.numComponents()) | 219 MaxQueryComponents, q.numComponents()) |
| 184 » } else if len(q.project) == 0 && q.distinct { | 220 » } |
| 221 » if len(q.project) == 0 && q.distinct { | |
| 185 // This must be delayed, because q.Distinct().Project("foo") is a valid | 222 // This must be delayed, because q.Distinct().Project("foo") is a valid |
| 186 // construction. If we checked this in Distinct, it could be too early, and | 223 // construction. If we checked this in Distinct, it could be too early, and |
| 187 // checking it in Project doesn't matter. | 224 // checking it in Project doesn't matter. |
| 188 » » err = errors.New( | 225 » » return nil, errors.New( |
| 189 "gae/memory: Distinct() only makes sense on projection queries.") | 226 "gae/memory: Distinct() only makes sense on projection queries.") |
| 190 } | 227 } |
| 191 » return | 228 » if q.eqFilters["__ancestor__"] != nil && q.ineqFilter.prop == "__key__" { |
| 229 » » anc := []byte(nil) | |
| 230 » » for k := range q.eqFilters["__ancestor__"] { | |
| 231 » » » anc = []byte(k) | |
| 232 » » » break | |
| 233 » » } | |
| 234 » » anc = anc[:len(anc)-1] | |
| 235 » » if q.ineqFilter.start != nil && !bytes.HasPrefix(q.ineqFilter.start, anc) { | |
| 236 » » » return nil, errors.New( | |
| 237 » » » » "gae/memory: __key__ inequality filter has a value outside of Ancestor()") | |
| 238 » » } | |
| 239 » » if q.ineqFilter.end != nil && !bytes.HasPrefix(q.ineqFilter.end, anc) { | |
| 240 » » » return nil, errors.New( | |
| 241 » » » » "gae/memory: __key__ inequality filter has a value outside of Ancestor()") | |
| 242 » » } | |
| 243 » } | |
| 244 | |
| 245 » ret := &reducedQuery{ | |
| 246 » » ns: q.ns, | |
| 247 » » kind: q.kind, | |
| 248 » » eqFilters: q.eqFilters, | |
| 249 » » suffixFormat: q.order, | |
| 250 » } | |
| 251 | |
| 252 » // if len(q.suffixFormat) > 0, queryImpl already enforces that the first order | |
| 253 » // is the same as the inequality. Otherwise we need to add it. | |
| 254 » if len(ret.suffixFormat) == 0 && q.ineqFilter.prop != "" { | |
| 255 » » ret.suffixFormat = []ds.IndexColumn{{Property: q.ineqFilter.prop}} | |
| 256 » } | |
| 257 | |
| 258 » // The inequality is specified in natural (ascending) order in the query's | |
| 259 » // Filter syntax, but the order information may indicate to use a descending | |
| 260 » // index column for it. If that's the case, then we must invert, swap and | |
| 261 » // increment the inequality endpoints. | |
| 262 » // | |
| 263 » // Invert so that the desired numbers are represented correctly in the index. | |
| 264 » // Swap so that our iterators still go from >= start to < end. | |
| 265 » // Increment so that >= and < get correctly bounded (since the iterator is | |
| 266 » // still using natural bytes ordering) | |
| 267 » if q.ineqFilter.prop != "" && ret.suffixFormat[0].Direction == ds.DESCENDING { | |
| 268 » » hi, lo := increment(invert(q.ineqFilter.end)), increment(invert(q.ineqFilter.start)) | |
| 269 » » q.ineqFilter.end, q.ineqFilter.start = lo, hi | |
| 270 » } | |
| 271 | |
| 272 » // Add any projection columns not mentioned in the user-defined order as | |
| 273 » // ASCENDING orders. Technically we could be smart and automatically use | |
| 274 » // a DESCENDING ordered index, if it fit, but the logic gets insane, since all | |
| 275 » // suffixes of all used indexes need to be PRECISELY equal (and so you'd have | |
| 276 » // to hunt/invalidate/something to find the combination of indexes that are | |
| 277 » // compatible with each other as well as the query). If you want to use | |
| 278 » // a DESCENDING column, just add it to the user sort order, and this loop will | |
| 279 » // not synthesize a new suffix entry for it. | |
| 280 » // | |
| 281 » // NOTE: if you want to use an index that sorts by -__key__, you MUST | |
| 282 » // include all of the projected fields for that index in the order explicitly. | |
| 283 » // Otherwise the generated suffixFormat will be wacky. So: | |
| 284 » // Query("Foo").Project("A", "B").Order("A").Order("-__key__") | |
| 285 » // | |
| 286 » // will turn into a suffixFormat of: | |
| 287 » // A, ASCENDING | |
| 288 » // __key__, DESCENDING | |
| 289 » // B, ASCENDING | |
| 290 » // __key__, ASCENDING | |
| 291 » // | |
| 292 » // To prevent this, your query should have another Order("B") clause before | |
| 293 » // the -__key__ clause. | |
| 294 » originalStop := len(ret.suffixFormat) | |
| 295 » for _, p := range q.project { | |
| 296 » » needAdd := true | |
| 297 » » // originalStop prevents this loop from getting longer every time we add | |
| 298 » » // a projected property. | |
| 299 » » for _, col := range ret.suffixFormat[:originalStop] { | |
| 300 » » » if col.Property == p { | |
| 301 » » » » needAdd = false | |
| 302 » » » » break | |
| 303 » » » } | |
| 304 » » } | |
| 305 » » if needAdd { | |
| 306 » » » ret.suffixFormat = append(ret.suffixFormat, ds.IndexColumn{Property: p}) | |
| 307 » » } | |
| 308 » } | |
| 309 | |
| 310 » // If the suffix format ends with __key__ already (e.g. .Order("__key__")), | |
| 311 » // then we're good to go. Otherwise we need to add it as the last bit of the | |
| 312 » // suffix, since all indexes implicitly have it as the last column. | |
| 313 » if len(ret.suffixFormat) == 0 || ret.suffixFormat[len(ret.suffixFormat)-1].Property != "__key__" { | |
| 314 » » ret.suffixFormat = append(ret.suffixFormat, ds.IndexColumn{Property: "__key__"}) | |
| 315 » } | |
| 316 | |
| 317 » // Now we check the start and end cursors. | |
| 318 » // | |
| 319 » // Cursors are composed of a list of IndexColumns at the beginning, followed | |
| 320 » // by the raw bytes to use for the suffix. The cursor is only valid if all of | |
| 321 » // its IndexColumns match our proposed suffixFormat, as calculated above. | |
| 322 » ret.start = q.ineqFilter.start | |
| 323 » if q.startCursor != nil { | |
| 324 » » if !sortOrdersEqual(q.startCursorColumns, ret.suffixFormat) { | |
| 325 » » » return nil, errors.New("gae/memory: start cursor is invalid for this query.") | |
| 326 » » } | |
| 327 » » if ret.start == nil || bytes.Compare(ret.start, q.startCursor) < 0 { | |
| 328 » » » ret.start = q.startCursor | |
| 329 » » } | |
| 330 » } | |
| 331 | |
| 332 » ret.end = q.ineqFilter.end | |
| 333 » if q.endCursor != nil { | |
| 334 » » if !sortOrdersEqual(q.endCursorColumns, ret.suffixFormat) { | |
| 335 » » » return nil, errors.New("gae/memory: end cursor is invalid for this query.") | |
| 336 » » } | |
| 337 » » if ret.end == nil || bytes.Compare(q.endCursor, ret.end) < 0 { | |
| 338 » » » ret.end = q.endCursor | |
| 339 » » } | |
| 340 » } | |
| 341 | |
| 342 » // Finally, verify that we could even /potentially/ do work. If we have | |
| 343 » // overlapping range ends, then we don't have anything to do. | |
| 344 » if ret.end != nil && bytes.Compare(ret.start, ret.end) >= 0 { | |
| 345 » » return nil, errQueryDone | |
| 346 » } | |
| 347 | |
| 348 » ret.numCols = len(ret.suffixFormat) | |
| 349 » for prop, vals := range ret.eqFilters { | |
| 350 » » if len(ret.suffixFormat) == 1 && prop == "__ancestor__" { | |
| 351 » » » continue | |
| 352 » » } | |
| 353 » » ret.numCols += len(vals) | |
| 354 » } | |
| 355 | |
| 356 » return ret, nil | |
| 192 } | 357 } |
| 193 | 358 |
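One of the checks reduce performs is that any __key__ inequality bound still falls under the Ancestor() key, done with a byte-prefix test after trimming the encoded ancestor's trailing terminator byte. A standalone sketch of that check (the key encodings below are invented, not the real serialize format):

```go
// Sketch of the ancestor/range containment check in reduce(): a __key__ bound
// is acceptable only if it shares the encoded ancestor's prefix. The byte
// strings here are made up; real keys use the serialize package's encoding.
package main

import (
	"bytes"
	"fmt"
)

func boundWithinAncestor(bound, encodedAncestor []byte) bool {
	if bound == nil {
		return true // an unbounded side can never escape the ancestor
	}
	// Mirror reduce(): drop the ancestor's trailing terminator byte, then
	// require the bound to start with what remains.
	anc := encodedAncestor[:len(encodedAncestor)-1]
	return bytes.HasPrefix(bound, anc)
}

func main() {
	anc := []byte("App|ns|Parent,1\x00") // hypothetical encoded ancestor key
	inside := []byte("App|ns|Parent,1|Child,7")
	outside := []byte("App|ns|Other,2|Child,7")
	fmt.Println(boundWithinAncestor(inside, anc))  // true
	fmt.Println(boundWithinAncestor(outside, anc)) // false
}
```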
| 194 func (q *queryImpl) numComponents() int { | 359 func (q *queryImpl) numComponents() int { |
| 195 numComponents := len(q.order) | 360 numComponents := len(q.order) |
| 196 if q.ineqFilter.prop != "" { | 361 if q.ineqFilter.prop != "" { |
| 197 » » if q.ineqFilter.low != nil { | 362 » » if q.ineqFilter.start != nil { |
| 198 numComponents++ | 363 numComponents++ |
| 199 } | 364 } |
| 200 » » if q.ineqFilter.high != nil { | 365 » » if q.ineqFilter.end != nil { |
| 201 numComponents++ | 366 numComponents++ |
| 202 } | 367 } |
| 203 } | 368 } |
| 204 for _, v := range q.eqFilters { | 369 for _, v := range q.eqFilters { |
| 205 numComponents += len(v) | 370 numComponents += len(v) |
| 206 } | 371 } |
| 207 if q.ancestor != nil { | |
| 208 numComponents++ | |
| 209 } | |
| 210 return numComponents | 372 return numComponents |
| 211 } | 373 } |
| 212 | 374 |
| 213 func (q *queryImpl) calculateIndex() *ds.IndexDefinition { | |
| 214 // as a nod to simplicity in this code, we'll require that a single index | |
| 215 // is able to service the entire query. E.g. no zigzag merge joins or | |
| 216 // multiqueries. This will mean that the user will need to rely on | |
| 217 // dev_appserver to tell them what indices they need for real, and for their | |
| 218 // tests they'll need to specify the missing composite indices manually. | |
| 219 // | |
| 220 // This COULD lead to an exploding indices problem, but we can fix that when | |
| 221 // we get to it. | |
| 222 | |
| 223 //sortOrders := []qSortBy{} | |
| 224 | |
| 225 return nil | |
| 226 } | |
| 227 | |
| 228 // checkMutateClone sees if the query has an error. If not, it clones the query, | 375 // checkMutateClone sees if the query has an error. If not, it clones the query, |
| 229 // and assigns the output of `check` to the query error slot. If check returns | 376 // and assigns the output of `check` to the query error slot. If check returns |
| 230 // nil, it calls `mutate` on the cloned query. The (possibly new) query is then | 377 // nil, it calls `mutate` on the cloned query. The (possibly new) query is then |
| 231 // returned. | 378 // returned. |
| 232 func (q *queryImpl) checkMutateClone(check func() error, mutate func(*queryImpl)) *queryImpl { | 379 func (q *queryImpl) checkMutateClone(check func() error, mutate func(*queryImpl)) *queryImpl { |
| 233 if q.err != nil { | 380 if q.err != nil { |
| 234 return q | 381 return q |
| 235 } | 382 } |
| 236 nq := *q | 383 nq := *q |
| 237 nq.eqFilters = make(map[string]map[string]struct{}, len(q.eqFilters)) | 384 nq.eqFilters = make(map[string]map[string]struct{}, len(q.eqFilters)) |
| 238 for prop, vals := range q.eqFilters { | 385 for prop, vals := range q.eqFilters { |
| 239 nq.eqFilters[prop] = make(map[string]struct{}, len(vals)) | 386 nq.eqFilters[prop] = make(map[string]struct{}, len(vals)) |
| 240 for v := range vals { | 387 for v := range vals { |
| 241 nq.eqFilters[prop][v] = struct{}{} | 388 nq.eqFilters[prop][v] = struct{}{} |
| 242 } | 389 } |
| 243 } | 390 } |
| 244 nq.order = make([]ds.IndexColumn, len(q.order)) | 391 nq.order = make([]ds.IndexColumn, len(q.order)) |
| 245 copy(nq.order, q.order) | 392 copy(nq.order, q.order) |
| 246 » nq.project = make(map[string]struct{}, len(q.project)) | 393 » nq.project = make([]string, len(q.project)) |
| 247 » for f := range q.project { | 394 » copy(nq.project, q.project) |
| 248 » » nq.project[f] = struct{}{} | |
| 249 » } | |
| 250 if check != nil { | 395 if check != nil { |
| 251 nq.err = check() | 396 nq.err = check() |
| 252 } | 397 } |
| 253 if nq.err == nil { | 398 if nq.err == nil { |
| 254 mutate(&nq) | 399 mutate(&nq) |
| 255 } | 400 } |
| 256 return &nq | 401 return &nq |
| 257 } | 402 } |
| 258 | 403 |
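checkMutateClone is the backbone of every builder method below: clone the query, run the validation, latch the first error, and only mutate the clone if nothing failed. A reduced, standalone sketch of the pattern (the type, fields, and method names here are invented for the example):

```go
// Standalone sketch of the clone/check/mutate pattern. The real queryImpl
// also deep-copies its maps and slices before mutating; this toy type has
// only value fields, so a plain struct copy is enough.
package main

import (
	"errors"
	"fmt"
)

type query struct {
	offset int
	err    error // first error is latched here and short-circuits later calls
}

func (q *query) checkMutateClone(check func() error, mutate func(*query)) *query {
	if q.err != nil {
		return q // already broken: later calls are no-ops
	}
	nq := *q
	if check != nil {
		nq.err = check()
	}
	if nq.err == nil {
		mutate(&nq)
	}
	return &nq
}

func (q *query) Offset(n int) *query {
	return q.checkMutateClone(
		func() error {
			if n < 0 {
				return errors.New("datastore: negative query offset")
			}
			return nil
		},
		func(q *query) { q.offset = n })
}

func main() {
	base := &query{}
	good := base.Offset(10)
	bad := good.Offset(-1)
	fmt.Println(base.offset, good.offset, bad.err) // 0 10 datastore: negative query offset
}
```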
| 259 func (q *queryImpl) Ancestor(k ds.Key) ds.Query { | 404 func (q *queryImpl) Ancestor(k ds.Key) ds.Query { |
| 260 return q.checkMutateClone( | 405 return q.checkMutateClone( |
| 261 func() error { | 406 func() error { |
| 262 if k == nil { | 407 if k == nil { |
| 263 // SDK has an explicit nil-check | 408 // SDK has an explicit nil-check |
| 264 return errors.New("datastore: nil query ancestor") | 409 return errors.New("datastore: nil query ancestor") |
| 265 } | 410 } |
| 411 if k.Namespace() != q.ns { | |
| 412 return fmt.Errorf("bad namespace: %q (expected %q)", k.Namespace(), q.ns) | |
| 413 } | |
| 266 if !k.Valid(false, globalAppID, q.ns) { | 414 if !k.Valid(false, globalAppID, q.ns) { |
| 267 // technically the SDK implementation does a Weird Thing (tm) if both the | 415 // technically the SDK implementation does a Weird Thing (tm) if both the |
| 268 // stringID and intID are set on a key; it only serializes the stringID in | 416 // stringID and intID are set on a key; it only serializes the stringID in |
| 269 // the proto. This means that if you set the Ancestor to an invalid key, | 417 // the proto. This means that if you set the Ancestor to an invalid key, |
| 270 // you'll never actually hear about it. Instead of doing that insanity, we | 418 // you'll never actually hear about it. Instead of doing that insanity, we |
| 271 // just swap to an error here. | 419 // just swap to an error here. |
| 272 return ds.ErrInvalidKey | 420 return ds.ErrInvalidKey |
| 273 } | 421 } |
| 274 » » » if k.Namespace() != q.ns { | 422 » » » if q.eqFilters["__ancestor__"] != nil { |
| 275 » » » » return fmt.Errorf("bad namespace: %q (expected %q)", k.Namespace(), q.ns) | |
| 276 » » » } | |
| 277 » » » if q.ancestor != nil { | |
| 278 return errors.New("cannot have more than one ancestor") | 423 return errors.New("cannot have more than one ancestor") |
| 279 } | 424 } |
| 280 return nil | 425 return nil |
| 281 }, | 426 }, |
| 282 func(q *queryImpl) { | 427 func(q *queryImpl) { |
| 283 » » » q.ancestor = k | 428 » » » q.addEqFilt("__ancestor__", ds.MkProperty(k)) |
| 284 }) | 429 }) |
| 285 } | 430 } |
| 286 | 431 |
| 287 func (q *queryImpl) Distinct() ds.Query { | 432 func (q *queryImpl) Distinct() ds.Query { |
| 288 return q.checkMutateClone(nil, func(q *queryImpl) { | 433 return q.checkMutateClone(nil, func(q *queryImpl) { |
| 289 q.distinct = true | 434 q.distinct = true |
| 290 }) | 435 }) |
| 291 } | 436 } |
| 292 | 437 |
| 438 func (q *queryImpl) addEqFilt(prop string, p ds.Property) { | |
| 439 binVal := string(serialize.ToBytes(p)) | |
| 440 if cur, ok := q.eqFilters[prop]; !ok { | |
| 441 q.eqFilters[prop] = map[string]struct{}{binVal: {}} | |
| 442 } else { | |
| 443 cur[binVal] = struct{}{} | |
| 444 } | |
| 445 } | |
| 446 | |
| 293 func (q *queryImpl) Filter(fStr string, val interface{}) ds.Query { | 447 func (q *queryImpl) Filter(fStr string, val interface{}) ds.Query { |
| 294 prop := "" | 448 prop := "" |
| 295 op := qInvalid | 449 op := qInvalid |
| 296 » binVal := "" | 450 » p := ds.Property{} |
| 297 return q.checkMutateClone( | 451 return q.checkMutateClone( |
| 298 func() error { | 452 func() error { |
| 299 var err error | 453 var err error |
| 300 prop, op, err = parseFilter(fStr) | 454 prop, op, err = parseFilter(fStr) |
| 301 if err != nil { | 455 if err != nil { |
| 302 return err | 456 return err |
| 303 } | 457 } |
| 304 | 458 |
| 305 if q.kind == "" && prop != "__key__" { | 459 if q.kind == "" && prop != "__key__" { |
| 306 // https://cloud.google.com/appengine/docs/go/datastore/queries#Go_Kindless_queries | 460 // https://cloud.google.com/appengine/docs/go/datastore/queries#Go_Kindless_queries |
| 307 return fmt.Errorf( | 461 return fmt.Errorf( |
| 308 "kindless queries can only filter on __key__, got %q", fStr) | 462 "kindless queries can only filter on __key__, got %q", fStr) |
| 309 } | 463 } |
| 310 | 464 |
| 311 » » » p := ds.Property{} | 465 » » » err = p.SetValue(val, ds.ShouldIndex) |
| 312 » » » err = p.SetValue(val, ds.NoIndex) | |
| 313 if err != nil { | 466 if err != nil { |
| 314 return err | 467 return err |
| 315 } | 468 } |
| 316 | 469 |
| 317 if p.Type() == ds.PTKey { | 470 if p.Type() == ds.PTKey { |
| 318 if !p.Value().(ds.Key).Valid(false, globalAppID, q.ns) { | 471 if !p.Value().(ds.Key).Valid(false, globalAppID, q.ns) { |
| 319 return ds.ErrInvalidKey | 472 return ds.ErrInvalidKey |
| 320 } | 473 } |
| 321 } | 474 } |
| 322 | 475 |
| 323 if prop == "__key__" { | 476 if prop == "__key__" { |
| 324 if op == qEqual { | 477 if op == qEqual { |
| 325 return fmt.Errorf( | 478 return fmt.Errorf( |
| 326 "query equality filter on __key__ is silly: %q", fStr) | 479 "query equality filter on __key__ is silly: %q", fStr) |
| 327 } | 480 } |
| 328 if p.Type() != ds.PTKey { | 481 if p.Type() != ds.PTKey { |
| 329 return fmt.Errorf("__key__ filter value is not a key: %T", val) | 482 return fmt.Errorf("__key__ filter value is not a key: %T", val) |
| 330 } | 483 } |
| 484 » » » } else if strings.HasPrefix(prop, "__") && strings.HasSuffix(prop, "__") { | |
| 485 return fmt.Errorf("filter on reserved property: %q", prop) | |
| 331 } | 486 } |
| 332 | 487 |
| 333 if op != qEqual { | 488 if op != qEqual { |
| 334 if q.ineqFilter.prop != "" && q.ineqFilter.prop != prop { | 489 if q.ineqFilter.prop != "" && q.ineqFilter.prop != prop { |
| 335 return fmt.Errorf( | 490 return fmt.Errorf( |
| 336 "inequality filters on multiple properties: %q and %q", | 491 "inequality filters on multiple properties: %q and %q", |
| 337 q.ineqFilter.prop, prop) | 492 q.ineqFilter.prop, prop) |
| 338 } | 493 } |
| 339 if len(q.order) > 0 && q.order[0].Property != prop { | 494 if len(q.order) > 0 && q.order[0].Property != prop { |
| 340 return fmt.Errorf( | 495 return fmt.Errorf( |
| 341 "first sort order must match inequality filter: %q v %q", | 496 "first sort order must match inequality filter: %q v %q", |
| 342 q.order[0].Property, prop) | 497 q.order[0].Property, prop) |
| 343 } | 498 } |
| 344 » » » } else if _, ok := q.project[prop]; ok { | 499 » » » } else { |
| 345 » » » » return fmt.Errorf( | 500 » » » » for _, p := range q.project { |
| 346 » » » » » "cannot project on field which is used in an equality filter: %q", | 501 » » » » » if p == prop { |
| 347 » » » » » prop) | 502 » » » » » » return fmt.Errorf( |
| 503 » » » » » » » "cannot project on field which is used in an equality filter: %q", | |
| 504 » » » » » » » prop) | |
| 505 » » » » » } | |
| 506 » » » » } | |
| 348 } | 507 } |
| 349 binVal = string(serialize.ToBytes(p)) | |
| 350 return err | 508 return err |
| 351 }, | 509 }, |
| 352 func(q *queryImpl) { | 510 func(q *queryImpl) { |
| 353 if op == qEqual { | 511 if op == qEqual { |
| 354 // add it to eq filters | 512 // add it to eq filters |
| 355 » » » » if _, ok := q.eqFilters[prop]; !ok { | 513 » » » » q.addEqFilt(prop, p) |
| 356 » » » » » q.eqFilters[prop] = map[string]struct{}{binVal: {}} | |
| 357 » » » » } else { | |
| 358 » » » » » q.eqFilters[prop][binVal] = struct{}{} | |
| 359 » » » » } | |
| 360 | 514 |
| 361 // remove it from sort orders. | 515 // remove it from sort orders. |
| 362 // https://cloud.google.com/appengine/docs/go/datastore/queries#sort_orders_are_ignored_on_properties_with_equality_filters | 516 // https://cloud.google.com/appengine/docs/go/datastore/queries#sort_orders_are_ignored_on_properties_with_equality_filters |
| 363 toRm := -1 | 517 toRm := -1 |
| 364 for i, o := range q.order { | 518 for i, o := range q.order { |
| 365 if o.Property == prop { | 519 if o.Property == prop { |
| 366 toRm = i | 520 toRm = i |
| 367 break | 521 break |
| 368 } | 522 } |
| 369 } | 523 } |
| 370 if toRm >= 0 { | 524 if toRm >= 0 { |
| 371 q.order = append(q.order[:toRm], q.order[toRm+1:]...) | 525 q.order = append(q.order[:toRm], q.order[toRm+1:]...) |
| 372 } | 526 } |
| 373 } else { | 527 } else { |
| 374 q.ineqFilter.prop = prop | 528 q.ineqFilter.prop = prop |
| 375 » » » » if q.ineqFilter.constrain(op, binVal) { | 529 » » » » if q.ineqFilter.constrain(op, serialize.ToBytes(p)) { |
| 376 q.err = errQueryDone | 530 q.err = errQueryDone |
| 377 } | 531 } |
| 378 } | 532 } |
| 379 }) | 533 }) |
| 380 } | 534 } |
| 381 | 535 |
| 382 func (q *queryImpl) Order(prop string) ds.Query { | 536 func (q *queryImpl) Order(prop string) ds.Query { |
| 383 col := ds.IndexColumn{} | 537 col := ds.IndexColumn{} |
| 384 return q.checkMutateClone( | 538 return q.checkMutateClone( |
| 385 func() error { | 539 func() error { |
| 386 // check that first order == first inequality. | 540 // check that first order == first inequality. |
| 387 // if order is an equality already, ignore it | 541 // if order is an equality already, ignore it |
| 388 col.Property = strings.TrimSpace(prop) | 542 col.Property = strings.TrimSpace(prop) |
| 389 if strings.HasPrefix(prop, "-") { | 543 if strings.HasPrefix(prop, "-") { |
| 390 col.Direction = ds.DESCENDING | 544 col.Direction = ds.DESCENDING |
| 391 col.Property = strings.TrimSpace(prop[1:]) | 545 col.Property = strings.TrimSpace(prop[1:]) |
| 392 } else if strings.HasPrefix(prop, "+") { | 546 } else if strings.HasPrefix(prop, "+") { |
| 393 return fmt.Errorf("datastore: invalid order: %q", prop) | 547 return fmt.Errorf("datastore: invalid order: %q", prop) |
| 394 } | 548 } |
| 395 if len(col.Property) == 0 { | 549 if len(col.Property) == 0 { |
| 396 return errors.New("datastore: empty order") | 550 return errors.New("datastore: empty order") |
| 397 } | 551 } |
| 398 » » » if q.ineqFilter.prop != "" && q.ineqFilter.prop != col.Property { | 552 » » » if len(q.order) == 0 && q.ineqFilter.prop != "" && q.ineqFilter.prop != col.Property { |
| 399 return fmt.Errorf( | 553 return fmt.Errorf( |
| 400 "first sort order must match inequality filter: %q v %q", | 554 "first sort order must match inequality filter: %q v %q", |
| 401 prop, q.ineqFilter.prop) | 555 prop, q.ineqFilter.prop) |
| 402 } | 556 } |
| 403 if q.kind == "" && (col.Property != "__key__" || col.Direction != ds.ASCENDING) { | 557 if q.kind == "" && (col.Property != "__key__" || col.Direction != ds.ASCENDING) { |
| 404 return fmt.Errorf("invalid order for kindless query: %#v", col) | 558 return fmt.Errorf("invalid order for kindless query: %#v", col) |
| 405 } | 559 } |
| 406 return nil | 560 return nil |
| 407 }, | 561 }, |
| 408 func(q *queryImpl) { | 562 func(q *queryImpl) { |
| 409 if _, ok := q.eqFilters[col.Property]; ok { | 563 if _, ok := q.eqFilters[col.Property]; ok { |
| 410 // skip it if it's an equality filter | 564 // skip it if it's an equality filter |
| 411 // https://cloud.google.com/appengine/docs/go/datastore/queries#sort_orders_are_ignored_on_properties_with_equality_filters | 565 // https://cloud.google.com/appengine/docs/go/datastore/queries#sort_orders_are_ignored_on_properties_with_equality_filters |
| 412 return | 566 return |
| 413 } | 567 } |
| 414 for _, order := range q.order { | 568 for _, order := range q.order { |
| 415 if order.Property == col.Property { | 569 if order.Property == col.Property { |
| 416 // can't sort by the same order twice | 570 // can't sort by the same order twice |
| 417 return | 571 return |
| 418 } | 572 } |
| 419 } | 573 } |
| 420 » » » if col.Property == "__key__" { | 574 » » » q.order = append(q.order, col) |
| 421 » » » » // __key__ order dominates all other orders | |
| 422 » » » » q.order = []ds.IndexColumn{col} | |
| 423 » » » } else { | |
| 424 » » » » q.order = append(q.order, col) | |
| 425 » » » } | |
| 426 }) | 575 }) |
| 427 } | 576 } |
| 428 | 577 |
| 429 func (q *queryImpl) Project(fieldName ...string) ds.Query { | 578 func (q *queryImpl) Project(fieldName ...string) ds.Query { |
| 430 return q.checkMutateClone( | 579 return q.checkMutateClone( |
| 431 func() error { | 580 func() error { |
| 432 if q.keysOnly { | 581 if q.keysOnly { |
| 433 return errors.New("cannot project a keysOnly query") | 582 return errors.New("cannot project a keysOnly query") |
| 434 } | 583 } |
| 584 dupCheck := map[string]struct{}{} | |
| 435 for _, f := range fieldName { | 585 for _, f := range fieldName { |
| 586 if _, ok := dupCheck[f]; ok { | |
| 587 return fmt.Errorf("cannot project on the same field twice: %q", f) | |
| 588 } | |
| 589 dupCheck[f] = struct{}{} | |
| 436 if f == "" { | 590 if f == "" { |
| 437 return errors.New("cannot project on an empty field name") | 591 return errors.New("cannot project on an empty field name") |
| 438 } | 592 } |
| 439 » » » » if strings.HasPrefix(f, "__") && strings.HasSuffix(f, "__") { | 593 » » » » if f == "__key__" { |
| 440 » » » » » return fmt.Errorf("cannot project on %q", f) | 594 » » » » » return fmt.Errorf("cannot project on __key__") |
| 441 } | 595 } |
| 442 if _, ok := q.eqFilters[f]; ok { | 596 if _, ok := q.eqFilters[f]; ok { |
| 443 return fmt.Errorf( | 597 return fmt.Errorf( |
| 444 "cannot project on field which i s used in an equality filter: %q", f) | 598 "cannot project on field which i s used in an equality filter: %q", f) |
| 445 } | 599 } |
| 600 for _, p := range q.project { | |
| 601 if p == f { | |
| 602 return fmt.Errorf("cannot projec t on the same field twice: %q", f) | |
| 603 } | |
| 604 } | |
| 446 } | 605 } |
| 447 return nil | 606 return nil |
| 448 }, | 607 }, |
| 449 func(q *queryImpl) { | 608 func(q *queryImpl) { |
| 450 » » » for _, f := range fieldName { | 609 » » » q.project = append(q.project, fieldName...) |
| 451 » » » » q.project[f] = struct{}{} | |
| 452 » » » } | |
| 453 }) | 610 }) |
| 454 } | 611 } |
| 455 | 612 |
| 456 func (q *queryImpl) KeysOnly() ds.Query { | 613 func (q *queryImpl) KeysOnly() ds.Query { |
| 457 return q.checkMutateClone( | 614 return q.checkMutateClone( |
| 458 func() error { | 615 func() error { |
| 459 if len(q.project) != 0 { | 616 if len(q.project) != 0 { |
| 460 return errors.New("cannot project a keysOnly query") | 617 return errors.New("cannot project a keysOnly query") |
| 461 } | 618 } |
| 462 return nil | 619 return nil |
| 463 }, | 620 }, |
| 464 func(q *queryImpl) { | 621 func(q *queryImpl) { |
| 465 q.keysOnly = true | 622 q.keysOnly = true |
| 466 }) | 623 }) |
| 467 } | 624 } |
| 468 | 625 |
| 469 func (q *queryImpl) Limit(limit int) ds.Query { | 626 func (q *queryImpl) Limit(limit int) ds.Query { |
| 470 return q.checkMutateClone( | 627 return q.checkMutateClone( |
| 471 func() error { | 628 func() error { |
| 629 if q.limitSet { | |
dnj (Google) 2015/08/28 17:54:22
nit: worth checking to see if the value is the sam…

iannucci 2015/08/28 19:48:56
If they set it in a loop then they're doing it wei…

dnj 2015/08/28 19:57:53
One use case might be starting with a standard que…
| 630 return errors.New("cannot set limit twice") | |
| 631 } | |
| 632 // nonsensically... ANY negative value means 'unlimited'. *shakes head* | |
| 472 if limit < math.MinInt32 || limit > math.MaxInt32 { | 633 if limit < math.MinInt32 || limit > math.MaxInt32 { |
| 473 return errors.New("datastore: query limit overflow") | 634 return errors.New("datastore: query limit overflow") |
| 474 } | 635 } |
| 475 return nil | 636 return nil |
| 476 }, | 637 }, |
| 477 func(q *queryImpl) { | 638 func(q *queryImpl) { |
| 639 q.limitSet = true | |
| 478 q.limit = int32(limit) | 640 q.limit = int32(limit) |
| 479 }) | 641 }) |
| 480 } | 642 } |
| 481 | 643 |
| 482 func (q *queryImpl) Offset(offset int) ds.Query { | 644 func (q *queryImpl) Offset(offset int) ds.Query { |
| 483 return q.checkMutateClone( | 645 return q.checkMutateClone( |
| 484 func() error { | 646 func() error { |
| 485 if offset < 0 { | 647 if offset < 0 { |
| 486 return errors.New("datastore: negative query offset") | 648 return errors.New("datastore: negative query offset") |
| 487 } | 649 } |
| 488 if offset > math.MaxInt32 { | 650 if offset > math.MaxInt32 { |
| 489 return errors.New("datastore: query offset overflow") | 651 return errors.New("datastore: query offset overflow") |
| 490 } | 652 } |
| 491 return nil | 653 return nil |
| 492 }, | 654 }, |
| 493 func(q *queryImpl) { | 655 func(q *queryImpl) { |
| 494 q.offset = int32(offset) | 656 q.offset = int32(offset) |
| 495 }) | 657 }) |
| 496 } | 658 } |
| 497 | 659 |
| 660 func queryCursorCheck(ns, flavor string, current []byte, newCursor ds.Cursor) ([]ds.IndexColumn, []byte, error) { | |
| 661 if current != nil { | |
| 662 return nil, nil, fmt.Errorf("%s cursor is multiply defined", fla vor) | |
| 663 } | |
| 664 curs, ok := newCursor.(queryCursor) | |
| 665 if !ok { | |
| 666 return nil, nil, fmt.Errorf("%s cursor is unknown type: %T", fla vor, curs) | |
| 667 } | |
| 668 return curs.decode() | |
| 669 } | |
| 670 | |
| 498 func (q *queryImpl) Start(c ds.Cursor) ds.Query { | 671 func (q *queryImpl) Start(c ds.Cursor) ds.Query { |
| 499 » curs := queryCursor("") | 672 » cols := []ds.IndexColumn(nil) |
| 673 » curs := []byte(nil) | |
| 500 return q.checkMutateClone( | 674 return q.checkMutateClone( |
| 501 » » func() error { | 675 » » func() (err error) { |
| 502 » » » ok := false | 676 » » » cols, curs, err = queryCursorCheck(q.ns, "start", q.startCursor, c) |
| 503 » » » if curs, ok = c.(queryCursor); !ok { | 677 » » » return |
| 504 » » » » return fmt.Errorf("start cursor is unknown type: %T", c) | |
| 505 » » » } | |
| 506 » » » if !curs.Valid() { | |
| 507 » » » » return errors.New("datastore: invalid cursor") | |
| 508 » » » } | |
| 509 » » » return nil | |
| 510 }, | 678 }, |
| 511 func(q *queryImpl) { | 679 func(q *queryImpl) { |
| 512 » » » q.start = curs | 680 » » » q.startCursorColumns = cols |
| 681 » » » q.startCursor = curs | |
| 513 }) | 682 }) |
| 514 } | 683 } |
| 515 | 684 |
| 516 func (q *queryImpl) End(c ds.Cursor) ds.Query { | 685 func (q *queryImpl) End(c ds.Cursor) ds.Query { |
| 517 » curs := queryCursor("") | 686 » cols := []ds.IndexColumn(nil) |
| 687 » curs := queryCursor(nil) | |
| 518 return q.checkMutateClone( | 688 return q.checkMutateClone( |
| 519 » » func() error { | 689 » » func() (err error) { |
| 520 » » » ok := false | 690 » » » cols, curs, err = queryCursorCheck(q.ns, "end", q.endCursor, c) |
| 521 » » » if curs, ok = c.(queryCursor); !ok { | 691 » » » return |
| 522 » » » » return fmt.Errorf("end cursor is unknown type: %T", c) | |
| 523 » » » } | |
| 524 » » » if !curs.Valid() { | |
| 525 » » » » return errors.New("datastore: invalid cursor") | |
| 526 » » » } | |
| 527 » » » return nil | |
| 528 }, | 692 }, |
| 529 func(q *queryImpl) { | 693 func(q *queryImpl) { |
| 530 » » » q.end = curs | 694 » » » q.endCursorColumns = cols |
| 695 » » » q.endCursor = curs | |
| 531 }) | 696 }) |
| 532 } | 697 } |
| 533 | 698 |
| 534 func (q *queryImpl) EventualConsistency() ds.Query { | 699 func (q *queryImpl) EventualConsistency() ds.Query { |
| 535 return q.checkMutateClone( | 700 return q.checkMutateClone( |
| 536 nil, func(q *queryImpl) { | 701 nil, func(q *queryImpl) { |
| 537 q.eventualConsistency = true | 702 q.eventualConsistency = true |
| 538 }) | 703 }) |
| 539 } | 704 } |
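Since every builder method returns a fresh ds.Query, callers can chain them freely and any validation error is latched until the query is reduced or run. A hypothetical usage sketch against the interface shown above (the entity fields, values, and the narrow helper are invented for illustration):

```go
package example

import (
	ds "github.com/luci/gae/service/datastore"
)

// narrow shows the immutable builder in use. Ancestor becomes an
// "__ancestor__" equality filter, the two Age filters fold into one
// inequality range via queryIneqFilter.constrain, and Start is validated
// against the query's suffixFormat when reduce() runs.
func narrow(base ds.Query, parent ds.Key, cursor ds.Cursor) ds.Query {
	q := base.
		Ancestor(parent).
		Filter("Age >=", 18).
		Filter("Age <", 65).
		Order("Age"). // first sort order must match the inequality property
		Project("Name", "Age").
		Limit(50)
	if cursor != nil {
		q = q.Start(cursor)
	}
	return q
}
```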