| OLD | NEW |
| 1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 package memory | 5 package memory |
| 6 | 6 |
| 7 import ( | 7 import ( |
| 8 "bytes" | |
| 9 "errors" | 8 "errors" |
| 10 "fmt" | 9 "fmt" |
| 11 "math" | 10 "math" |
| 12 "strings" | 11 "strings" |
| 13 | 12 |
| 14 ds "github.com/luci/gae/service/datastore" | 13 ds "github.com/luci/gae/service/datastore" |
| 15 "github.com/luci/gkvlite" | 14 "github.com/luci/gkvlite" |
| 16 "github.com/luci/luci-go/common/cmpbin" | |
| 17 ) | 15 ) |
| 18 | 16 |
| 19 type qDirection bool | |
| 20 | |
| 21 const ( | |
| 22 qASC qDirection = true | |
| 23 qDEC = false | |
| 24 ) | |
| 25 | |
| 26 var builtinQueryPrefix = []byte{0} | |
| 27 var complexQueryPrefix = []byte{1} | |
| 28 | |
| 29 type qSortBy struct { | |
| 30 prop string | |
| 31 dir qDirection | |
| 32 } | |
| 33 | |
| 34 func (q qSortBy) WriteBinary(buf *bytes.Buffer) { | |
| 35 if q.dir == qASC { | |
| 36 buf.WriteByte(0) | |
| 37 } else { | |
| 38 buf.WriteByte(1) | |
| 39 } | |
| 40 cmpbin.WriteString(buf, q.prop) | |
| 41 } | |
| 42 | |
| 43 func (q *qSortBy) ReadBinary(buf *bytes.Buffer) error { | |
| 44 dir, err := buf.ReadByte() | |
| 45 if err != nil { | |
| 46 return err | |
| 47 } | |
| 48 q.dir = dir == 0 | |
| 49 q.prop, _, err = cmpbin.ReadString(buf) | |
| 50 return err | |
| 51 } | |
| 52 | |
| 53 type qIndex struct { | |
| 54 kind string | |
| 55 ancestor bool | |
| 56 sortby []qSortBy | |
| 57 } | |
| 58 | |
| 59 func (i *qIndex) Builtin() bool { | |
| 60 return !i.ancestor && len(i.sortby) <= 1 | |
| 61 } | |
| 62 | |
| 63 func (i *qIndex) Less(o *qIndex) bool { | |
| 64 ibuf, obuf := &bytes.Buffer{}, &bytes.Buffer{} | |
| 65 i.WriteBinary(ibuf) | |
| 66 o.WriteBinary(obuf) | |
| 67 return i.String() < o.String() | |
| 68 } | |
| 69 | |
| 70 // Valid verifies that this qIndex doesn't have duplicate sortBy fields. | |
| 71 func (i *qIndex) Valid() bool { | |
| 72 names := map[string]bool{} | |
| 73 for _, sb := range i.sortby { | |
| 74 if names[sb.prop] { | |
| 75 return false | |
| 76 } | |
| 77 names[sb.prop] = true | |
| 78 } | |
| 79 return true | |
| 80 } | |
| 81 | |
| 82 func (i *qIndex) WriteBinary(buf *bytes.Buffer) { | |
| 83 // TODO(riannucci): do a Grow call here? | |
| 84 if i.Builtin() { | |
| 85 buf.Write(builtinQueryPrefix) | |
| 86 } else { | |
| 87 buf.Write(complexQueryPrefix) | |
| 88 } | |
| 89 cmpbin.WriteString(buf, i.kind) | |
| 90 if i.ancestor { | |
| 91 buf.WriteByte(0) | |
| 92 } else { | |
| 93 buf.WriteByte(1) | |
| 94 } | |
| 95 cmpbin.WriteUint(buf, uint64(len(i.sortby))) | |
| 96 for _, sb := range i.sortby { | |
| 97 sb.WriteBinary(buf) | |
| 98 } | |
| 99 } | |
| 100 | |
| 101 func (i *qIndex) String() string { | |
| 102 ret := &bytes.Buffer{} | |
| 103 if i.Builtin() { | |
| 104 ret.WriteRune('B') | |
| 105 } else { | |
| 106 ret.WriteRune('C') | |
| 107 } | |
| 108 ret.WriteRune(':') | |
| 109 ret.WriteString(i.kind) | |
| 110 if i.ancestor { | |
| 111 ret.WriteString("|A") | |
| 112 } | |
| 113 for _, sb := range i.sortby { | |
| 114 ret.WriteRune('/') | |
| 115 if sb.dir == qDEC { | |
| 116 ret.WriteRune('-') | |
| 117 } | |
| 118 ret.WriteString(sb.prop) | |
| 119 } | |
| 120 return ret.String() | |
| 121 } | |
| 122 | |
| 123 func (i *qIndex) ReadBinary(buf *bytes.Buffer) error { | |
| 124 // discard builtin/complex byte | |
| 125 _, err := buf.ReadByte() | |
| 126 if err != nil { | |
| 127 return err | |
| 128 } | |
| 129 | |
| 130 i.kind, _, err = cmpbin.ReadString(buf) | |
| 131 if err != nil { | |
| 132 return err | |
| 133 } | |
| 134 anc, err := buf.ReadByte() | |
| 135 if err != nil { | |
| 136 return err | |
| 137 } | |
| 138 i.ancestor = anc == 1 | |
| 139 | |
| 140 numSorts, _, err := cmpbin.ReadUint(buf) | |
| 141 if err != nil { | |
| 142 return err | |
| 143 } | |
| 144 if numSorts > 64 { | |
| 145 		return fmt.Errorf("qIndex.ReadBinary: Got over 64 sort orders: %d", numSorts) | |
| 146 } | |
| 147 i.sortby = make([]qSortBy, numSorts) | |
| 148 for idx := range i.sortby { | |
| 149 err = (&i.sortby[idx]).ReadBinary(buf) | |
| 150 if err != nil { | |
| 151 return err | |
| 152 } | |
| 153 } | |
| 154 | |
| 155 return nil | |
| 156 } | |
| 157 | |
| 158 type queryOp int | 17 type queryOp int |
| 159 | 18 |
| 160 const ( | 19 const ( |
| 161 qInvalid queryOp = iota | 20 qInvalid queryOp = iota |
| 162 qEqual | 21 qEqual |
| 163 qLessThan | 22 qLessThan |
| 164 qLessEq | 23 qLessEq |
| 165 qGreaterEq | 24 qGreaterEq |
| 166 qGreaterThan | 25 qGreaterThan |
| 167 ) | 26 ) |
| 168 | 27 |
| 169 func (o queryOp) isEQOp() bool { | 28 func (o queryOp) isEQOp() bool { |
| 170 return o == qEqual | 29 return o == qEqual |
| 171 } | 30 } |
| 172 | 31 |
| 173 func (o queryOp) isINEQOp() bool { | 32 func (o queryOp) isINEQOp() bool { |
| 174 return o >= qLessThan && o <= qGreaterThan | 33 return o >= qLessThan && o <= qGreaterThan |
| 175 } | 34 } |
| 176 | 35 |
| 177 var queryOpMap = map[string]queryOp{ | 36 var queryOpMap = map[string]queryOp{ |
| 178 "=": qEqual, | 37 "=": qEqual, |
| 179 "<": qLessThan, | 38 "<": qLessThan, |
| 180 "<=": qLessEq, | 39 "<=": qLessEq, |
| 181 ">=": qGreaterEq, | 40 ">=": qGreaterEq, |
| 182 ">": qGreaterThan, | 41 ">": qGreaterThan, |
| 183 } | 42 } |
| 184 | 43 |
| 185 type queryFilter struct { | 44 type queryFilter struct { |
| 186 » field string | 45 » prop string |
| 187 op queryOp | 46 op queryOp |
| 188 value interface{} | 47 value interface{} |
| 189 } | 48 } |
| 190 | 49 |
| 191 func parseFilter(f string, v interface{}) (ret queryFilter, err error) { | 50 func parseFilter(f string, v interface{}) (ret queryFilter, err error) { |
| 192 toks := strings.SplitN(strings.TrimSpace(f), " ", 2) | 51 toks := strings.SplitN(strings.TrimSpace(f), " ", 2) |
| 193 if len(toks) != 2 { | 52 if len(toks) != 2 { |
| 194 err = errors.New("datastore: invalid filter: " + f) | 53 err = errors.New("datastore: invalid filter: " + f) |
| 195 } else { | 54 } else { |
| 196 op := queryOpMap[toks[1]] | 55 op := queryOpMap[toks[1]] |
| 197 if op == qInvalid { | 56 if op == qInvalid { |
| 198 			err = fmt.Errorf("datastore: invalid operator %q in filter %q", toks[1], f) | 57 			err = fmt.Errorf("datastore: invalid operator %q in filter %q", toks[1], f) |
| 199 } else { | 58 } else { |
| 200 » » » ret.field = toks[0] | 59 » » » ret.prop = toks[0] |
| 201 ret.op = op | 60 ret.op = op |
| 202 ret.value = v | 61 ret.value = v |
| 203 } | 62 } |
| 204 } | 63 } |
| 205 return | 64 return |
| 206 } | 65 } |
| 207 | 66 |
| 208 type queryOrder struct { | |
| 209 field string | |
| 210 direction qDirection | |
| 211 } | |
| 212 | |
| 213 type queryCursor string | 67 type queryCursor string |
| 214 | 68 |
| 215 func (q queryCursor) String() string { return string(q) } | 69 func (q queryCursor) String() string { return string(q) } |
| 216 func (q queryCursor) Valid() bool { return q != "" } | 70 func (q queryCursor) Valid() bool { return q != "" } |
| 217 | 71 |
| 218 type queryImpl struct { | 72 type queryImpl struct { |
| 219 ns string | 73 ns string |
| 220 | 74 |
| 221 kind string | 75 kind string |
| 222 ancestor ds.Key | 76 ancestor ds.Key |
| 223 filter []queryFilter | 77 filter []queryFilter |
| 224 » order []queryOrder | 78 » order []ds.IndexColumn |
| 225 project []string | 79 project []string |
| 226 | 80 |
| 227 distinct bool | 81 distinct bool |
| 228 eventualConsistency bool | 82 eventualConsistency bool |
| 229 keysOnly bool | 83 keysOnly bool |
| 230 limit int32 | 84 limit int32 |
| 231 offset int32 | 85 offset int32 |
| 232 | 86 |
| 233 start queryCursor | 87 start queryCursor |
| 234 end queryCursor | 88 end queryCursor |
| (...skipping 10 matching lines...) |
| 245 bs := newMemStore() | 99 bs := newMemStore() |
| 246 | 100 |
| 247 eqProperties := bs.MakePrivateCollection(nil) | 101 eqProperties := bs.MakePrivateCollection(nil) |
| 248 | 102 |
| 249 ineqProperties := bs.MakePrivateCollection(nil) | 103 ineqProperties := bs.MakePrivateCollection(nil) |
| 250 | 104 |
| 251 for _, f := range ret.filter { | 105 for _, f := range ret.filter { |
| 252 		// if we supported the IN operator, we would check to see if there were | 106 		// if we supported the IN operator, we would check to see if there were |
| 253 		// multiple value operands here, but the go SDK doesn't support this. | 107 		// multiple value operands here, but the go SDK doesn't support this. |
| 254 if f.op.isEQOp() { | 108 if f.op.isEQOp() { |
| 255 » » » eqProperties.Set([]byte(f.field), []byte{}) | 109 » » » eqProperties.Set([]byte(f.prop), []byte{}) |
| 256 } else if f.op.isINEQOp() { | 110 } else if f.op.isINEQOp() { |
| 257 » » » ineqProperties.Set([]byte(f.field), []byte{}) | 111 » » » ineqProperties.Set([]byte(f.prop), []byte{}) |
| 258 } | 112 } |
| 259 } | 113 } |
| 260 | 114 |
| 261 ineqProperties.VisitItemsAscend(nil, false, func(i *gkvlite.Item) bool { | 115 ineqProperties.VisitItemsAscend(nil, false, func(i *gkvlite.Item) bool { |
| 262 eqProperties.Delete(i.Key) | 116 eqProperties.Delete(i.Key) |
| 263 return true | 117 return true |
| 264 }) | 118 }) |
| 265 | 119 |
| 266 removeSet := bs.MakePrivateCollection(nil) | 120 removeSet := bs.MakePrivateCollection(nil) |
| 267 eqProperties.VisitItemsAscend(nil, false, func(i *gkvlite.Item) bool { | 121 eqProperties.VisitItemsAscend(nil, false, func(i *gkvlite.Item) bool { |
| 268 removeSet.Set(i.Key, []byte{}) | 122 removeSet.Set(i.Key, []byte{}) |
| 269 return true | 123 return true |
| 270 }) | 124 }) |
| 271 | 125 |
| 272 » newOrders := []queryOrder{} | 126 » newOrders := []ds.IndexColumn{} |
| 273 for _, o := range ret.order { | 127 for _, o := range ret.order { |
| 274 » » if removeSet.Get([]byte(o.field)) == nil { | 128 » » if removeSet.Get([]byte(o.Property)) == nil { |
| 275 » » » removeSet.Set([]byte(o.field), []byte{}) | 129 » » » removeSet.Set([]byte(o.Property), []byte{}) |
| 276 newOrders = append(newOrders, o) | 130 newOrders = append(newOrders, o) |
| 277 } | 131 } |
| 278 } | 132 } |
| 279 ret.order = newOrders | 133 ret.order = newOrders |
| 280 | 134 |
| 281 // need to fix ret.filters if we ever support the EXISTS operator and/or | 135 // need to fix ret.filters if we ever support the EXISTS operator and/or |
| 282 // projections. | 136 // projections. |
| 283 // | 137 // |
| 284 // newFilters = [] | 138 // newFilters = [] |
| 285 // for f in ret.filters: | 139 // for f in ret.filters: |
| 286 // if f.op != qExists: | 140 // if f.op != qExists: |
| 287 // newFilters = append(newFilters, f) | 141 // newFilters = append(newFilters, f) |
| 288 » // if !removeSet.Has(f.field): | 142 » // if !removeSet.Has(f.prop): |
| 289 » // removeSet.InsertNoReplace(f.field) | 143 » // removeSet.InsertNoReplace(f.prop) |
| 290 // newFilters = append(newFilters, f) | 144 // newFilters = append(newFilters, f) |
| 291 // | 145 // |
| 292 	// so ret.filters == newFilters becuase none of ret.filters has op == qExists | 146 	// so ret.filters == newFilters becuase none of ret.filters has op == qExists |
| 293 // | 147 // |
| 294 // then: | 148 // then: |
| 295 // | 149 // |
| 296 // for prop in ret.project: | 150 // for prop in ret.project: |
| 297 // if !removeSet.Has(prop): | 151 // if !removeSet.Has(prop): |
| 298 // removeSet.InsertNoReplace(prop) | 152 // removeSet.InsertNoReplace(prop) |
| 299 // ... make new EXISTS filters, add them to newFilters ... | 153 // ... make new EXISTS filters, add them to newFilters ... |
| 300 // ret.filters = newFilters | 154 // ret.filters = newFilters |
| 301 // | 155 // |
| 302 // However, since we don't support projection queries, this is moot. | 156 // However, since we don't support projection queries, this is moot. |
| 303 | 157 |
| 304 if eqProperties.Get([]byte("__key__")) != nil { | 158 if eqProperties.Get([]byte("__key__")) != nil { |
| 305 » » ret.order = []queryOrder{} | 159 » » ret.order = []ds.IndexColumn{} |
| 306 } | 160 } |
| 307 | 161 |
| 308 » newOrders = []queryOrder{} | 162 » newOrders = []ds.IndexColumn{} |
| 309 for _, o := range ret.order { | 163 for _, o := range ret.order { |
| 310 » » if o.field == "__key__" { | 164 » » if o.Property == "__key__" { |
| 311 newOrders = append(newOrders, o) | 165 newOrders = append(newOrders, o) |
| 312 break | 166 break |
| 313 } | 167 } |
| 314 newOrders = append(newOrders, o) | 168 newOrders = append(newOrders, o) |
| 315 } | 169 } |
| 316 ret.order = newOrders | 170 ret.order = newOrders |
| 317 | 171 |
| 318 return | 172 return |
| 319 } | 173 } |
| 320 | 174 |
| (...skipping 37 matching lines...) |
| 358 // if ret.ancestor.appid() != current appid | 212 // if ret.ancestor.appid() != current appid |
| 359 // "query app is x but ancestor app is x" | 213 // "query app is x but ancestor app is x" |
| 360 // if ret.ancestor.namespace() != current namespace | 214 // if ret.ancestor.namespace() != current namespace |
| 361 // "query namespace is x but ancestor namespace is x" | 215 // "query namespace is x but ancestor namespace is x" |
| 362 | 216 |
| 363 // if not all(g in orders for g in group_by) | 217 // if not all(g in orders for g in group_by) |
| 364 	//   "items in the group by clause must be specified first in the ordering" | 218 	//   "items in the group by clause must be specified first in the ordering" |
| 365 | 219 |
| 366 ineqPropName := "" | 220 ineqPropName := "" |
| 367 for _, f := range ret.filter { | 221 for _, f := range ret.filter { |
| 368 » » if f.field == "__key__" { | 222 » » if f.prop == "__key__" { |
| 369 k, ok := f.value.(ds.Key) | 223 k, ok := f.value.(ds.Key) |
| 370 if !ok { | 224 if !ok { |
| 371 ret.err = errors.New( | 225 ret.err = errors.New( |
| 372 					"gae/memory: __key__ filter value must be a Key") | 226 					"gae/memory: __key__ filter value must be a Key") |
| 373 return | 227 return |
| 374 } | 228 } |
| 375 if !ds.KeyValid(k, false, globalAppID, q.ns) { | 229 if !ds.KeyValid(k, false, globalAppID, q.ns) { |
| 376 				// See the comment in queryImpl.Ancestor; basically this check | 230 				// See the comment in queryImpl.Ancestor; basically this check |
| 377 				// never happens in the real env because the SDK silently swallows | 231 				// never happens in the real env because the SDK silently swallows |
| 378 // this condition :/ | 232 // this condition :/ |
| 379 ret.err = ds.ErrInvalidKey | 233 ret.err = ds.ErrInvalidKey |
| 380 return | 234 return |
| 381 } | 235 } |
| 382 if k.Namespace() != ns { | 236 if k.Namespace() != ns { |
| 383 				ret.err = fmt.Errorf("bad namespace: %q (expected %q)", k.Namespace(), ns) | 237 				ret.err = fmt.Errorf("bad namespace: %q (expected %q)", k.Namespace(), ns) |
| 384 return | 238 return |
| 385 } | 239 } |
| 386 // __key__ filter app is X but query app is X | 240 // __key__ filter app is X but query app is X |
| 387 			// __key__ filter namespace is X but query namespace is X | 241 			// __key__ filter namespace is X but query namespace is X |
| 388 } | 242 } |
| 389 » » // if f.op == qEqual and f.field in ret.project_fields | 243 » » // if f.op == qEqual and f.prop in ret.project_fields |
| 390 		//   "cannot use projection on a proprety with an equality filter" | 244 		//   "cannot use projection on a proprety with an equality filter" |
| 391 | 245 |
| 392 if f.op.isINEQOp() { | 246 if f.op.isINEQOp() { |
| 393 if ineqPropName == "" { | 247 if ineqPropName == "" { |
| 394 » » » » ineqPropName = f.field | 248 » » » » ineqPropName = f.prop |
| 395 » » » } else if f.field != ineqPropName { | 249 » » » } else if f.prop != ineqPropName { |
| 396 ret.err = fmt.Errorf( | 250 ret.err = fmt.Errorf( |
| 397 					"gae/memory: Only one inequality filter per query is supported. "+ | 251 					"gae/memory: Only one inequality filter per query is supported. "+ |
| 398 » » » » » » "Encountered both %s and %s", ineqPropName, f.field) | 252 » » » » » » "Encountered both %s and %s", ineqPropName, f.prop) |
| 399 return | 253 return |
| 400 } | 254 } |
| 401 } | 255 } |
| 402 } | 256 } |
| 403 | 257 |
| 404 // if ineqPropName != "" && len(group_by) > 0 && len(orders) ==0 | 258 // if ineqPropName != "" && len(group_by) > 0 && len(orders) ==0 |
| 405 // "Inequality filter on X must also be a group by property "+ | 259 // "Inequality filter on X must also be a group by property "+ |
| 406 // "when group by properties are set." | 260 // "when group by properties are set." |
| 407 | 261 |
| 408 if ineqPropName != "" && len(ret.order) != 0 { | 262 if ineqPropName != "" && len(ret.order) != 0 { |
| 409 » » if ret.order[0].field != ineqPropName { | 263 » » if ret.order[0].Property != ineqPropName { |
| 410 ret.err = fmt.Errorf( | 264 ret.err = fmt.Errorf( |
| 411 				"gae/memory: The first sort property must be the same as the property "+ | 265 				"gae/memory: The first sort property must be the same as the property "+ |
| 412 					"to which the inequality filter is applied. In your query "+ | 266 					"to which the inequality filter is applied. In your query "+ |
| 413 					"the first sort property is %s but the inequality filter "+ | 267 					"the first sort property is %s but the inequality filter "+ |
| 414 » » » » » "is on %s", ret.order[0].field, ineqPropName) | 268 » » » » » "is on %s", ret.order[0].Property, ineqPropName) |
| 415 return | 269 return |
| 416 } | 270 } |
| 417 } | 271 } |
| 418 | 272 |
| 419 if ret.kind == "" { | 273 if ret.kind == "" { |
| 420 for _, f := range ret.filter { | 274 for _, f := range ret.filter { |
| 421 » » » if f.field != "__key__" { | 275 » » » if f.prop != "__key__" { |
| 422 ret.err = errors.New( | 276 ret.err = errors.New( |
| 423 					"gae/memory: kind is required for non-__key__ filters") | 277 					"gae/memory: kind is required for non-__key__ filters") |
| 424 return | 278 return |
| 425 } | 279 } |
| 426 } | 280 } |
| 427 for _, o := range ret.order { | 281 for _, o := range ret.order { |
| 428 » » » if o.field != "__key__" || o.direction != qASC { | 282 » » » if o.Property != "__key__" || o.Direction != ds.ASCENDING { |
| 429 ret.err = errors.New( | 283 ret.err = errors.New( |
| 430 					"gae/memory: kind is required for all orders except __key__ ascending") | 284 					"gae/memory: kind is required for all orders except __key__ ascending") |
| 431 return | 285 return |
| 432 } | 286 } |
| 433 } | 287 } |
| 434 } | 288 } |
| 435 return | 289 return |
| 436 } | 290 } |
| 437 | 291 |
| 438 func (q *queryImpl) calculateIndex() *qIndex { | 292 func (q *queryImpl) calculateIndex() *ds.IndexDefinition { |
| 439 	// as a nod to simplicity in this code, we'll require that a single index | 293 	// as a nod to simplicity in this code, we'll require that a single index |
| 440 // is able to service the entire query. E.g. no zigzag merge joins or | 294 // is able to service the entire query. E.g. no zigzag merge joins or |
| 441 // multiqueries. This will mean that the user will need to rely on | 295 // multiqueries. This will mean that the user will need to rely on |
| 442 	// dev_appserver to tell them what indicies they need for real, and for thier | 296 	// dev_appserver to tell them what indicies they need for real, and for thier |
| 443 // tests they'll need to specify the missing composite indices manually. | 297 // tests they'll need to specify the missing composite indices manually. |
| 444 // | 298 // |
| 445 	// This COULD lead to an exploding indicies problem, but we can fix that when | 299 	// This COULD lead to an exploding indicies problem, but we can fix that when |
| 446 // we get to it. | 300 // we get to it. |
| 447 | 301 |
| 448 //sortOrders := []qSortBy{} | 302 //sortOrders := []qSortBy{} |
| 449 | 303 |
| 450 return nil | 304 return nil |
| 451 } | 305 } |
| 452 | 306 |
| 453 func (q *queryImpl) clone() *queryImpl { | 307 func (q *queryImpl) clone() *queryImpl { |
| 454 ret := *q | 308 ret := *q |
| 455 ret.filter = append([]queryFilter(nil), q.filter...) | 309 ret.filter = append([]queryFilter(nil), q.filter...) |
| 456 » ret.order = append([]queryOrder(nil), q.order...) | 310 » ret.order = append([]ds.IndexColumn(nil), q.order...) |
| 457 ret.project = append([]string(nil), q.project...) | 311 ret.project = append([]string(nil), q.project...) |
| 458 return &ret | 312 return &ret |
| 459 } | 313 } |
| 460 | 314 |
| 461 func (q *queryImpl) Ancestor(k ds.Key) ds.Query { | 315 func (q *queryImpl) Ancestor(k ds.Key) ds.Query { |
| 462 q = q.clone() | 316 q = q.clone() |
| 463 q.ancestor = k | 317 q.ancestor = k |
| 464 if k == nil { | 318 if k == nil { |
| 465 // SDK has an explicit nil-check | 319 // SDK has an explicit nil-check |
| 466 q.err = errors.New("datastore: nil query ancestor") | 320 q.err = errors.New("datastore: nil query ancestor") |
| (...skipping 20 matching lines...) |
| 487 q = q.clone() | 341 q = q.clone() |
| 488 f, err := parseFilter(fStr, val) | 342 f, err := parseFilter(fStr, val) |
| 489 if err != nil { | 343 if err != nil { |
| 490 q.err = err | 344 q.err = err |
| 491 return q | 345 return q |
| 492 } | 346 } |
| 493 q.filter = append(q.filter, f) | 347 q.filter = append(q.filter, f) |
| 494 return q | 348 return q |
| 495 } | 349 } |
| 496 | 350 |
| 497 func (q *queryImpl) Order(field string) ds.Query { | 351 func (q *queryImpl) Order(prop string) ds.Query { |
| 498 q = q.clone() | 352 q = q.clone() |
| 499 » field = strings.TrimSpace(field) | 353 » prop = strings.TrimSpace(prop) |
| 500 » o := queryOrder{field, qASC} | 354 » o := ds.IndexColumn{Property: prop} |
| 501 » if strings.HasPrefix(field, "-") { | 355 » if strings.HasPrefix(prop, "-") { |
| 502 » » o.direction = qDEC | 356 » » o.Direction = ds.DESCENDING |
| 503 » » o.field = strings.TrimSpace(field[1:]) | 357 » » o.Property = strings.TrimSpace(prop[1:]) |
| 504 » } else if strings.HasPrefix(field, "+") { | 358 » } else if strings.HasPrefix(prop, "+") { |
| 505 » » q.err = fmt.Errorf("datastore: invalid order: %q", field) | 359 » » q.err = fmt.Errorf("datastore: invalid order: %q", prop) |
| 506 return q | 360 return q |
| 507 } | 361 } |
| 508 » if len(o.field) == 0 { | 362 » if len(o.Property) == 0 { |
| 509 q.err = errors.New("datastore: empty order") | 363 q.err = errors.New("datastore: empty order") |
| 510 return q | 364 return q |
| 511 } | 365 } |
| 512 q.order = append(q.order, o) | 366 q.order = append(q.order, o) |
| 513 return q | 367 return q |
| 514 } | 368 } |
| 515 | 369 |
| 516 func (q *queryImpl) Project(fieldName ...string) ds.Query { | 370 func (q *queryImpl) Project(fieldName ...string) ds.Query { |
| 517 q = q.clone() | 371 q = q.clone() |
| 518 q.project = append(q.project, fieldName...) | 372 q.project = append(q.project, fieldName...) |
| (...skipping 50 matching lines...) |
| 569 } | 423 } |
| 570 q.end = curs | 424 q.end = curs |
| 571 return q | 425 return q |
| 572 } | 426 } |
| 573 | 427 |
| 574 func (q *queryImpl) EventualConsistency() ds.Query { | 428 func (q *queryImpl) EventualConsistency() ds.Query { |
| 575 q = q.clone() | 429 q = q.clone() |
| 576 q.eventualConsistency = true | 430 q.eventualConsistency = true |
| 577 return q | 431 return q |
| 578 } | 432 } |
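The diff above swaps the package's private qSortBy/queryOrder types for ds.IndexColumn throughout. As a minimal, self-contained sketch of the "-"/"+" prefix parsing that the new queryImpl.Order performs on the NEW side (using local stand-in types; the real ds.IndexColumn, ds.ASCENDING, and ds.DESCENDING live in github.com/luci/gae/service/datastore and are only assumed here from the lines shown):

package main

import (
	"errors"
	"fmt"
	"strings"
)

// Local stand-ins mirroring only the fields this CL uses from the
// datastore package; the real API may differ.
type IndexDirection bool

const (
	ASCENDING  IndexDirection = false
	DESCENDING IndexDirection = true
)

type IndexColumn struct {
	Property  string
	Direction IndexDirection
}

// parseOrder mirrors the prefix handling in queryImpl.Order above: a
// leading "-" selects DESCENDING, a leading "+" is rejected, and an
// empty property name is an error.
func parseOrder(prop string) (IndexColumn, error) {
	prop = strings.TrimSpace(prop)
	col := IndexColumn{Property: prop}
	switch {
	case strings.HasPrefix(prop, "-"):
		col.Direction = DESCENDING
		col.Property = strings.TrimSpace(prop[1:])
	case strings.HasPrefix(prop, "+"):
		return IndexColumn{}, fmt.Errorf("datastore: invalid order: %q", prop)
	}
	if col.Property == "" {
		return IndexColumn{}, errors.New("datastore: empty order")
	}
	return col, nil
}

func main() {
	// Exercise the same cases the method handles: plain ascending,
	// "-" descending, rejected "+" prefix, and an empty property.
	for _, s := range []string{"Name", "-Age", "+Bad", "  "} {
		col, err := parseOrder(s)
		fmt.Printf("%q -> %+v, err=%v\n", s, col, err)
	}
}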