OLD | NEW |
---|---|
(Empty) | |
1 // Copyright 2015 The Chromium Authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 package memory | |
6 | |
import (
	"encoding/hex"
	"flag"
	"fmt"
	"os"
	"path/filepath"
	"runtime"
	"strings"
	"sync"
	"sync/atomic"

	"github.com/luci/gkvlite"
)
18 | |
// logMemCollectionFolder enables gkvlite call tracing: set it to a directory
// to have each traced store write a "<N>.trace" file there, or to "-" to dump
// all traces to stdout.
//
// NOTE(review): this flag is registered by every binary that links this
// package; the tradeoff was discussed and accepted during review.
var logMemCollectionFolder = flag.String(
	"luci.gae.gkvlite_trace_folder", "",
	"Set to a folder path to enable debugging traces to be dumped there. Set to '-' to dump to stdout.")

var (
	// logMemCollectionOnce guards the one-time (re)creation of the trace folder.
	logMemCollectionOnce sync.Once
	// logMemCounter hands out a unique number per traced store (atomic add).
	logMemCounter uint32
	// logMemNameKey — presumably used as a context key by callers; the value
	// documents itself. TODO(review): confirm against call sites.
	logMemNameKey = "holds a string indicating the GKVLiteDebuggingTraceName"
	// stdoutLock serializes trace output when dumping to stdout ("-" mode).
	stdoutLock sync.Mutex
)
26 | |
27 func wrapTracingMemStore(store memStore) memStore { | |
28 var writer traceWriter | |
29 logNum := atomic.AddUint32(&logMemCounter, 1) - 1 | |
30 collName := fmt.Sprintf("coll%d", logNum) | |
31 | |
32 if *logMemCollectionFolder == "-" { | |
33 writer = func(format string, a ...interface{}) { | |
34 stdoutLock.Lock() | |
35 defer stdoutLock.Unlock() | |
36 fmt.Printf(format+"\n", a...) | |
37 } | |
38 } else { | |
39 logMemCollectionOnce.Do(func() { | |
40 fmt.Fprintln(os.Stderr, "REMOVING LOG COLLECTION FOLDER: ", *logMemCollectionFolder) | |
dnj
2016/04/22 06:09:44
The possibility of me doing something stupid here
iannucci
2016/04/22 06:56:24
Good point, changed to use a tempdir at the design
| |
41 if err := os.RemoveAll(*logMemCollectionFolder); !(err = = os.ErrNotExist || err == nil) { | |
42 panic(err) | |
43 } | |
44 if err := os.MkdirAll(*logMemCollectionFolder, 0755); er r != nil { | |
dnj
2016/04/22 06:09:44
I would say the user should ensure that the direct
iannucci
2016/04/22 06:56:25
Acknowledged.
| |
45 panic(err) | |
46 } | |
47 }) | |
48 | |
49 lck := sync.Mutex{} | |
50 fname := fmt.Sprintf("%s/%d.trace", *logMemCollectionFolder, log Num) | |
dnj
2016/04/22 06:09:44
Use filepath.Join: https://golang.org/pkg/path/fil
iannucci
2016/04/22 06:56:25
Done.
| |
51 fil, err := os.Create(fname) | |
52 if err != nil { | |
53 panic(err) | |
54 } | |
55 writer = func(format string, a ...interface{}) { | |
56 lck.Lock() | |
57 defer lck.Unlock() | |
58 fmt.Fprintf(fil, format+"\n", a...) | |
59 } | |
60 runtime.SetFinalizer(&writer, func(_ *traceWriter) { fil.Close() }) | |
61 } | |
62 writer("%s := newMemStore()", collName) | |
63 return &tracingMemStoreImpl{store, writer, collName, 0, false} | |
64 } | |
65 | |
66 type traceWriter func(format string, a ...interface{}) | |
67 | |
68 type tracingMemStoreImpl struct { | |
69 i memStore | |
70 w traceWriter | |
71 | |
72 collName string | |
73 // for the mutable store, this is a counter that increments for every | |
74 // Snapshot, and for snapshots, this is the number of the snapshot. | |
75 snapNum uint32 | |
76 isSnap bool | |
77 } | |
78 | |
79 var _ memStore = (*tracingMemStoreImpl)(nil) | |
80 | |
81 func (t *tracingMemStoreImpl) ImATestingSnapshot() {} | |
82 | |
83 func (t *tracingMemStoreImpl) colWriter(action, name string) traceWriter { | |
84 ident := t.ident() | |
85 hexname := hex.EncodeToString([]byte(name)) | |
86 writer := func(format string, a ...interface{}) { | |
87 if format[0] == '/' { // comment | |
dnj
2016/04/22 06:09:44
Might as well just do a strings.HasPrefix("//") to
iannucci
2016/04/22 06:56:25
Done.
| |
88 t.w(format, a...) | |
89 } else { | |
90 t.w(fmt.Sprintf("%s_%s%s", ident, hexname, format), a... ) | |
91 } | |
92 } | |
93 writer(" := %s.%s(%q)", ident, action, name) | |
94 return writer | |
95 } | |
96 | |
97 func (t *tracingMemStoreImpl) ident() string { | |
98 if t.isSnap { | |
99 return fmt.Sprintf("%s_snap%d", t.collName, t.snapNum) | |
100 } | |
101 return t.collName | |
102 } | |
103 | |
104 func (t *tracingMemStoreImpl) GetCollection(name string) memCollection { | |
105 coll := t.i.GetCollection(name) | |
106 if coll == nil { | |
107 t.w("// %s.GetCollection(%q) -> nil", t.ident(), name) | |
108 return nil | |
109 } | |
110 writer := t.colWriter("GetCollection", name) | |
111 return &tracingMemCollectionImpl{coll, writer, 0} | |
112 } | |
113 | |
114 func (t *tracingMemStoreImpl) GetCollectionNames() []string { | |
115 t.w("%s.GetCollectionNames()", t.ident()) | |
116 return t.i.GetCollectionNames() | |
117 } | |
118 | |
119 func (t *tracingMemStoreImpl) GetOrCreateCollection(name string) memCollection { | |
120 writer := t.colWriter("GetOrCreateCollection", name) | |
121 return &tracingMemCollectionImpl{t.i.GetOrCreateCollection(name), writer , 0} | |
122 } | |
123 | |
124 func (t *tracingMemStoreImpl) Snapshot() memStore { | |
125 snap := t.i.Snapshot() | |
126 if snap == t.i { | |
127 t.w("// %s.Snapshot() -> self", t.ident()) | |
128 return t | |
129 } | |
130 ret := &tracingMemStoreImpl{snap, t.w, t.collName, t.snapNum, true} | |
131 t.w("%s := %s.Snapshot()", ret.ident(), t.ident()) | |
132 t.snapNum++ | |
dnj
2016/04/22 06:09:44
Non-atomic? Is Snapshot not goroutine-safe? I supp
iannucci
2016/04/22 06:56:24
not safe, snapshot already needs a lock.
| |
133 return ret | |
134 } | |
135 | |
136 func (t *tracingMemStoreImpl) IsReadOnly() bool { | |
137 return t.i.IsReadOnly() | |
138 } | |
139 | |
140 type tracingMemCollectionImpl struct { | |
141 i memCollection | |
142 w traceWriter | |
143 visitNumber uint32 | |
144 } | |
145 | |
146 var _ memCollection = (*tracingMemCollectionImpl)(nil) | |
147 | |
148 func (t *tracingMemCollectionImpl) Name() string { | |
149 return t.i.Name() | |
150 } | |
151 | |
152 func (t *tracingMemCollectionImpl) Delete(k []byte) bool { | |
153 t.w(".Delete(%#v)", k) | |
154 return t.i.Delete(k) | |
155 } | |
156 | |
157 func (t *tracingMemCollectionImpl) Get(k []byte) []byte { | |
158 t.w(".Get(%#v)", k) | |
159 return t.i.Get(k) | |
160 } | |
161 | |
162 func (t *tracingMemCollectionImpl) GetTotals() (numItems, numBytes uint64) { | |
163 t.w(".GetTotals()") | |
164 return t.i.GetTotals() | |
165 } | |
166 | |
167 func (t *tracingMemCollectionImpl) MinItem(withValue bool) *gkvlite.Item { | |
168 t.w(".MinItem(%t)", withValue) | |
169 return t.i.MinItem(withValue) | |
170 } | |
171 | |
172 func (t *tracingMemCollectionImpl) Set(k, v []byte) { | |
173 t.w(".Set(%#v, %#v)", k, v) | |
174 t.i.Set(k, v) | |
175 } | |
176 | |
177 func (t *tracingMemCollectionImpl) VisitItemsAscend(target []byte, withValue boo l, visitor gkvlite.ItemVisitor) { | |
178 vnum := t.visitNumber | |
dnj
2016/04/22 06:09:44
Same, non-atomic check? If not, just use an "int"
iannucci
2016/04/22 06:56:24
changed to uint
| |
179 t.visitNumber++ | |
180 | |
181 t.w(".VisitItemsAscend(%#v, %t, func(i *gkvlite.Item) bool{ return true }) // BEGIN VisitItemsAscend(%d)", target, withValue, vnum) | |
182 defer t.w("// END VisitItemsAscend(%d)", vnum) | |
183 t.i.VisitItemsAscend(target, withValue, visitor) | |
184 } | |
185 | |
186 func (t *tracingMemCollectionImpl) IsReadOnly() bool { | |
187 return t.i.IsReadOnly() | |
188 } | |
OLD | NEW |