// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "chrome/browser/net/load_timing_observer.h"

#include "base/time.h"
#include "chrome/browser/net/chrome_net_log.h"
#include "content/public/browser/browser_thread.h"
#include "content/public/common/resource_response.h"
#include "net/base/load_flags.h"
#include "net/url_request/url_request.h"
#include "net/url_request/url_request_netlog_params.h"

using base::Time;
using base::TimeTicks;
using content::BrowserThread;
using webkit_glue::ResourceLoaderBridge;
using webkit_glue::ResourceLoadTimingInfo;

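// Cap on the size of each of the per-source record maps below. If a map ever
// grows past this, it is cleared as a safety valve against unbounded memory
// growth.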
const size_t kMaxNumEntries = 1000;

namespace {

const int64 kSyncPeriodMicroseconds = 1000 * 1000 * 10;

// We know that this conversion is not solid and suffers from world clock
// changes, but given that we sync the clock every 10 seconds, it should be
// good enough for the load timing info.
static Time TimeTicksToTime(const TimeTicks& time_ticks) {
  static int64 tick_to_time_offset;
  static int64 last_sync_ticks = 0;
  if (time_ticks.ToInternalValue() - last_sync_ticks >
      kSyncPeriodMicroseconds) {
    int64 cur_time = (Time::Now() - Time()).InMicroseconds();
    int64 cur_time_ticks = (TimeTicks::Now() - TimeTicks()).InMicroseconds();
    // If we add this number to a time tick value, it gives the timestamp.
    tick_to_time_offset = cur_time - cur_time_ticks;
    last_sync_ticks = time_ticks.ToInternalValue();
  }
  return Time::FromInternalValue(time_ticks.ToInternalValue() +
                                 tick_to_time_offset);
}

static int32 TimeTicksToOffset(
    const TimeTicks& time_ticks,
    LoadTimingObserver::URLRequestRecord* record) {
  return static_cast<int32>(
      (time_ticks - record->base_ticks).InMillisecondsRoundedUp());
}

}  // namespace

LoadTimingObserver::URLRequestRecord::URLRequestRecord()
    : connect_job_id(net::NetLog::Source::kInvalidId),
      socket_log_id(net::NetLog::Source::kInvalidId),
      socket_reused(false) {
}

LoadTimingObserver::HTTPStreamJobRecord::HTTPStreamJobRecord()
    : socket_log_id(net::NetLog::Source::kInvalidId),
      socket_reused(false) {
}

LoadTimingObserver::LoadTimingObserver()
    : last_connect_job_id_(net::NetLog::Source::kInvalidId) {
}

LoadTimingObserver::~LoadTimingObserver() {
}

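// Registers this observer with |net_log| at the LOG_BASIC log level so that
// OnAddEntry() starts receiving events.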
void LoadTimingObserver::StartObserving(net::NetLog* net_log) {
  net_log->AddThreadSafeObserver(this, net::NetLog::LOG_BASIC);
}

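// Returns the record being tracked for the URL request with |source_id|, or
// NULL if there is none.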
LoadTimingObserver::URLRequestRecord*
LoadTimingObserver::GetURLRequestRecord(uint32 source_id) {
  DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO));

  URLRequestToRecordMap::iterator it = url_request_to_record_.find(source_id);
  if (it != url_request_to_record_.end())
    return &it->second;
  return NULL;
}

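// Dispatches each entry to the handler for its source type; entries from all
// other source types are ignored.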
void LoadTimingObserver::OnAddEntry(const net::NetLog::Entry& entry) {
  // The events that the Observer is interested in only occur on the IO thread.
  if (!BrowserThread::CurrentlyOn(BrowserThread::IO))
    return;
  if (entry.source().type == net::NetLog::SOURCE_URL_REQUEST)
    OnAddURLRequestEntry(entry);
  else if (entry.source().type == net::NetLog::SOURCE_HTTP_STREAM_JOB)
    OnAddHTTPStreamJobEntry(entry);
  else if (entry.source().type == net::NetLog::SOURCE_CONNECT_JOB)
    OnAddConnectJobEntry(entry);
  else if (entry.source().type == net::NetLog::SOURCE_SOCKET)
    OnAddSocketEntry(entry);
}

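// Copies the timing data collected for |request| into |response|. This is a
// no-op unless the request was issued with LOAD_ENABLE_LOAD_TIMING and a
// record is being tracked for it.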
// static
void LoadTimingObserver::PopulateTimingInfo(
    net::URLRequest* request,
    content::ResourceResponse* response) {
  DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO));
  if (!(request->load_flags() & net::LOAD_ENABLE_LOAD_TIMING))
    return;

  ChromeNetLog* chrome_net_log = static_cast<ChromeNetLog*>(
      request->net_log().net_log());
  if (chrome_net_log == NULL)
    return;

  uint32 source_id = request->net_log().source().id;
  LoadTimingObserver* observer = chrome_net_log->load_timing_observer();
  LoadTimingObserver::URLRequestRecord* record =
      observer->GetURLRequestRecord(source_id);
  if (record) {
    response->head.connection_id = record->socket_log_id;
    response->head.connection_reused = record->socket_reused;
#if !defined(OS_IOS)
    response->head.load_timing = record->timing;
#endif
  }
}

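// Builds up the ResourceLoadTimingInfo for a URL request: the record is
// created when the request's job starts, subsequent event times are stored as
// millisecond offsets from the record's base_ticks, and the record is dropped
// when the request dies.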
void LoadTimingObserver::OnAddURLRequestEntry(const net::NetLog::Entry& entry) {
  DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO));

  bool is_begin = entry.phase() == net::NetLog::PHASE_BEGIN;
  bool is_end = entry.phase() == net::NetLog::PHASE_END;

  if (entry.type() == net::NetLog::TYPE_URL_REQUEST_START_JOB) {
    if (is_begin) {
      // Only record timing for entries with the corresponding flag.
      int load_flags;
      scoped_ptr<Value> event_params(entry.ParametersToValue());
      if (!net::StartEventLoadFlagsFromEventParams(event_params.get(),
                                                   &load_flags)) {
        NOTREACHED();
        return;
      }

      if (!(load_flags & net::LOAD_ENABLE_LOAD_TIMING))
        return;

      // Prevents us from passively growing the memory unbounded in case
      // something went wrong. Should not happen.
      if (url_request_to_record_.size() > kMaxNumEntries) {
        LOG(WARNING) << "The load timing observer url request count has grown "
                        "larger than expected, resetting";
        url_request_to_record_.clear();
      }

      URLRequestRecord& record = url_request_to_record_[entry.source().id];
      base::TimeTicks now = GetCurrentTime();
      record.base_ticks = now;
#if !defined(OS_IOS)
      record.timing = ResourceLoadTimingInfo();
      record.timing.base_ticks = now;
      record.timing.base_time = TimeTicksToTime(now);
#endif
    }
    return;
  } else if (entry.type() == net::NetLog::TYPE_REQUEST_ALIVE) {
    // Clean up records based on the TYPE_REQUEST_ALIVE entry.
    if (is_end)
      url_request_to_record_.erase(entry.source().id);
    return;
  }

  URLRequestRecord* record = GetURLRequestRecord(entry.source().id);
  if (!record)
    return;

#if !defined(OS_IOS)
  ResourceLoadTimingInfo& timing = record->timing;

  switch (entry.type()) {
    case net::NetLog::TYPE_PROXY_SERVICE:
      if (is_begin)
        timing.proxy_start = TimeTicksToOffset(GetCurrentTime(), record);
      else if (is_end)
        timing.proxy_end = TimeTicksToOffset(GetCurrentTime(), record);
      break;
    case net::NetLog::TYPE_HTTP_STREAM_REQUEST_BOUND_TO_JOB: {
      net::NetLog::Source http_stream_job_source;
      scoped_ptr<Value> event_params(entry.ParametersToValue());
      if (!net::NetLog::Source::FromEventParameters(
              event_params.get(),
              &http_stream_job_source)) {
        NOTREACHED();
        return;
      }
      DCHECK_EQ(net::NetLog::SOURCE_HTTP_STREAM_JOB,
                http_stream_job_source.type);

      HTTPStreamJobToRecordMap::iterator it =
          http_stream_job_to_record_.find(http_stream_job_source.id);
      if (it == http_stream_job_to_record_.end())
        return;
      if (!it->second.connect_start.is_null()) {
        timing.connect_start = TimeTicksToOffset(it->second.connect_start,
                                                 record);
      }
      if (!it->second.connect_end.is_null())
        timing.connect_end = TimeTicksToOffset(it->second.connect_end, record);
      if (!it->second.dns_start.is_null())
        timing.dns_start = TimeTicksToOffset(it->second.dns_start, record);
      if (!it->second.dns_end.is_null())
        timing.dns_end = TimeTicksToOffset(it->second.dns_end, record);
      if (!it->second.ssl_start.is_null())
        timing.ssl_start = TimeTicksToOffset(it->second.ssl_start, record);
      if (!it->second.ssl_end.is_null())
        timing.ssl_end = TimeTicksToOffset(it->second.ssl_end, record);
      record->socket_reused = it->second.socket_reused;
      record->socket_log_id = it->second.socket_log_id;
      break;
    }
    case net::NetLog::TYPE_HTTP_TRANSACTION_SEND_REQUEST:
      if (is_begin)
        timing.send_start = TimeTicksToOffset(GetCurrentTime(), record);
      else if (is_end)
        timing.send_end = TimeTicksToOffset(GetCurrentTime(), record);
      break;
    case net::NetLog::TYPE_HTTP_TRANSACTION_READ_HEADERS:
      if (is_begin) {
        timing.receive_headers_start =
            TimeTicksToOffset(GetCurrentTime(), record);
      } else if (is_end) {
        timing.receive_headers_end =
            TimeTicksToOffset(GetCurrentTime(), record);
      }
      break;
    default:
      break;
  }
#endif  // !defined(OS_IOS)
}

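// Tracks connect, DNS and SSL times and socket reuse for an HTTP stream job,
// so that they can be folded into the owning URL request's record once the
// request is bound to the job.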
void LoadTimingObserver::OnAddHTTPStreamJobEntry(
    const net::NetLog::Entry& entry) {
  DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO));

  bool is_begin = entry.phase() == net::NetLog::PHASE_BEGIN;
  bool is_end = entry.phase() == net::NetLog::PHASE_END;

  if (entry.type() == net::NetLog::TYPE_HTTP_STREAM_JOB) {
    if (is_begin) {
      // Prevents us from passively growing the memory unbounded in
      // case something went wrong. Should not happen.
      if (http_stream_job_to_record_.size() > kMaxNumEntries) {
        LOG(WARNING) << "The load timing observer http stream job count "
                        "has grown larger than expected, resetting";
        http_stream_job_to_record_.clear();
      }

      http_stream_job_to_record_.insert(
          std::make_pair(entry.source().id, HTTPStreamJobRecord()));
    } else if (is_end) {
      http_stream_job_to_record_.erase(entry.source().id);
    }
    return;
  }

  HTTPStreamJobToRecordMap::iterator it =
      http_stream_job_to_record_.find(entry.source().id);
  if (it == http_stream_job_to_record_.end())
    return;

  switch (entry.type()) {
    case net::NetLog::TYPE_SOCKET_POOL:
      if (is_begin)
        it->second.connect_start = GetCurrentTime();
      else if (is_end)
        it->second.connect_end = GetCurrentTime();
      break;
    case net::NetLog::TYPE_SOCKET_POOL_BOUND_TO_CONNECT_JOB: {
      net::NetLog::Source connect_job_source;
      scoped_ptr<Value> event_params(entry.ParametersToValue());
      if (!net::NetLog::Source::FromEventParameters(event_params.get(),
                                                    &connect_job_source)) {
        NOTREACHED();
        return;
      }
      DCHECK_EQ(net::NetLog::SOURCE_CONNECT_JOB, connect_job_source.type);

      if (last_connect_job_id_ == connect_job_source.id &&
          !last_connect_job_record_.dns_start.is_null()) {
        it->second.dns_start = last_connect_job_record_.dns_start;
        it->second.dns_end = last_connect_job_record_.dns_end;
      }
      break;
    }
    case net::NetLog::TYPE_SOCKET_POOL_REUSED_AN_EXISTING_SOCKET:
      it->second.socket_reused = true;
      break;
    case net::NetLog::TYPE_SOCKET_POOL_BOUND_TO_SOCKET: {
      net::NetLog::Source socket_source;
      scoped_ptr<Value> event_params(entry.ParametersToValue());
      if (!net::NetLog::Source::FromEventParameters(event_params.get(),
                                                    &socket_source)) {
        NOTREACHED();
        return;
      }

      it->second.socket_log_id = socket_source.id;
      if (!it->second.socket_reused) {
        SocketToRecordMap::iterator socket_it =
            socket_to_record_.find(it->second.socket_log_id);
        if (socket_it != socket_to_record_.end() &&
            !socket_it->second.ssl_start.is_null()) {
          it->second.ssl_start = socket_it->second.ssl_start;
          it->second.ssl_end = socket_it->second.ssl_end;
        }
      }
      break;
    }
    default:
      break;
  }
}

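// Records DNS resolution times for a connect job. The most recently completed
// job is remembered in |last_connect_job_id_| and |last_connect_job_record_|
// so that its times can be copied to the stream job that gets bound to it.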
void LoadTimingObserver::OnAddConnectJobEntry(const net::NetLog::Entry& entry) {
  DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO));

  bool is_begin = entry.phase() == net::NetLog::PHASE_BEGIN;
  bool is_end = entry.phase() == net::NetLog::PHASE_END;

  // Manage record lifetime based on the SOCKET_POOL_CONNECT_JOB entry.
  if (entry.type() == net::NetLog::TYPE_SOCKET_POOL_CONNECT_JOB) {
    if (is_begin) {
      // Prevents us from passively growing the memory unbounded in case
      // something went wrong. Should not happen.
      if (connect_job_to_record_.size() > kMaxNumEntries) {
        LOG(WARNING) << "The load timing observer connect job count has grown "
                        "larger than expected, resetting";
        connect_job_to_record_.clear();
      }

      connect_job_to_record_.insert(
          std::make_pair(entry.source().id, ConnectJobRecord()));
    } else if (is_end) {
      ConnectJobToRecordMap::iterator it =
          connect_job_to_record_.find(entry.source().id);
      if (it != connect_job_to_record_.end()) {
        last_connect_job_id_ = it->first;
        last_connect_job_record_ = it->second;
        connect_job_to_record_.erase(it);
      }
    }
  } else if (entry.type() == net::NetLog::TYPE_HOST_RESOLVER_IMPL) {
    ConnectJobToRecordMap::iterator it =
        connect_job_to_record_.find(entry.source().id);
    if (it != connect_job_to_record_.end()) {
      if (is_begin)
        it->second.dns_start = GetCurrentTime();
      else if (is_end)
        it->second.dns_end = GetCurrentTime();
    }
  }
}

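// Records SSL handshake times per socket so that they can be attributed to
// the HTTP stream job that ends up bound to the socket.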
void LoadTimingObserver::OnAddSocketEntry(const net::NetLog::Entry& entry) {
  DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO));

  bool is_begin = entry.phase() == net::NetLog::PHASE_BEGIN;
  bool is_end = entry.phase() == net::NetLog::PHASE_END;

  // Manage record lifetime based on the SOCKET_ALIVE entry.
  if (entry.type() == net::NetLog::TYPE_SOCKET_ALIVE) {
    if (is_begin) {
      // Prevents us from passively growing the memory unbounded in case
      // something went wrong. Should not happen.
      if (socket_to_record_.size() > kMaxNumEntries) {
        LOG(WARNING) << "The load timing observer socket count has grown "
                        "larger than expected, resetting";
        socket_to_record_.clear();
      }

      socket_to_record_.insert(
          std::make_pair(entry.source().id, SocketRecord()));
    } else if (is_end) {
      socket_to_record_.erase(entry.source().id);
    }
    return;
  }

  SocketToRecordMap::iterator it = socket_to_record_.find(entry.source().id);
  if (it == socket_to_record_.end())
    return;

  if (entry.type() == net::NetLog::TYPE_SSL_CONNECT) {
    if (is_begin)
      it->second.ssl_start = GetCurrentTime();
    else if (is_end)
      it->second.ssl_end = GetCurrentTime();
  }
}

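// Wraps TimeTicks::Now() in a member function, presumably so that tests can
// substitute a fake clock.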
base::TimeTicks LoadTimingObserver::GetCurrentTime() const {
  return base::TimeTicks::Now();
}