Chromium Code Reviews

Unified diff: net/http/http_stream_factory_impl_request.cc

Issue 14813024: Introduce RequestWebSocketStream into HttpStreamFactory (Closed)
Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 7 years, 6 months ago
 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "net/http/http_stream_factory_impl_request.h"
 
 #include "base/callback.h"
 #include "base/logging.h"
 #include "base/stl_util.h"
 #include "net/http/http_stream_factory_impl_job.h"
 #include "net/spdy/spdy_http_stream.h"
 #include "net/spdy/spdy_session.h"
 
 namespace net {
 
-HttpStreamFactoryImpl::Request::Request(const GURL& url,
-                                        HttpStreamFactoryImpl* factory,
-                                        HttpStreamRequest::Delegate* delegate,
-                                        const BoundNetLog& net_log)
+HttpStreamFactoryImpl::Request::Request(
+    const GURL& url,
+    HttpStreamFactoryImpl* factory,
+    HttpStreamRequest::Delegate* delegate,
+    WebSocketStreamBase::Factory* websocket_stream_factory,
+    const BoundNetLog& net_log)
     : url_(url),
       factory_(factory),
+      websocket_stream_factory_(websocket_stream_factory),
       delegate_(delegate),
       net_log_(net_log),
       completed_(false),
       was_npn_negotiated_(false),
       protocol_negotiated_(kProtoUnknown),
       using_spdy_(false) {
   DCHECK(factory_);
   DCHECK(delegate_);
 
   net_log_.BeginEvent(NetLog::TYPE_HTTP_STREAM_REQUEST);
(...skipping 60 matching lines...)
   job_net_log.AddEvent(
       NetLog::TYPE_HTTP_STREAM_JOB_BOUND_TO_REQUEST,
       net_log_.source().ToEventParametersCallback());
 }
 
 void HttpStreamFactoryImpl::Request::OnStreamReady(
     Job* job,
     const SSLConfig& used_ssl_config,
     const ProxyInfo& used_proxy_info,
     HttpStreamBase* stream) {
+  DCHECK(!factory_->for_websockets_);
   DCHECK(stream);
   DCHECK(completed_);
 
-  // |job| should only be NULL if we're being serviced by a late bound
-  // SpdySession or HttpPipelinedConnection (one that was not created by a job
-  // in our |jobs_| set).
-  if (!job) {
-    DCHECK(!bound_job_.get());
-    DCHECK(!jobs_.empty());
-    // NOTE(willchan): We do *NOT* call OrphanJobs() here. The reason is because
-    // we *WANT* to cancel the unnecessary Jobs from other requests if another
-    // Job completes first.
-    // TODO(mbelshe): Revisit this when we implement ip connection pooling of
-    // SpdySessions. Do we want to orphan the jobs for a different hostname so
-    // they complete? Or do we want to prevent connecting a new SpdySession if
-    // we've already got one available for a different hostname where the ip
-    // address matches up?
-  } else if (!bound_job_.get()) {
-    // We may have other jobs in |jobs_|. For example, if we start multiple jobs
-    // for Alternate-Protocol.
-    OrphanJobsExcept(job);
-  } else {
-    DCHECK(jobs_.empty());
-  }
+  OnJobSucceeded(job);
   delegate_->OnStreamReady(used_ssl_config, used_proxy_info, stream);
 }
 
+void HttpStreamFactoryImpl::Request::OnWebSocketStreamReady(
+    Job* job,
+    const SSLConfig& used_ssl_config,
+    const ProxyInfo& used_proxy_info,
+    WebSocketStreamBase* stream) {
+  DCHECK(factory_->for_websockets_);
+  DCHECK(stream);
+  DCHECK(completed_);
+
+  OnJobSucceeded(job);
+  delegate_->OnWebSocketStreamReady(used_ssl_config, used_proxy_info, stream);
+}
+
 void HttpStreamFactoryImpl::Request::OnStreamFailed(
     Job* job,
     int status,
     const SSLConfig& used_ssl_config) {
   DCHECK_NE(OK, status);
   // |job| should only be NULL if we're being canceled by a late bound
   // HttpPipelinedConnection (one that was not created by a job in our |jobs_|
   // set).
   if (!job) {
     DCHECK(!bound_job_.get());
(...skipping 134 matching lines...)
         request_vector.erase(it);
         break;
       }
     }
     if (request_vector.empty())
       http_pipelining_request_map.erase(*http_pipelining_key_);
     http_pipelining_key_.reset();
   }
 }
 
-void HttpStreamFactoryImpl::Request::OnSpdySessionReady(
+void HttpStreamFactoryImpl::Request::OnNewSpdySessionReady(
     Job* job,
     scoped_refptr<SpdySession> spdy_session,
     bool direct) {
   DCHECK(job);
   DCHECK(job->using_spdy());
 
   // The first case is the usual case.
   if (!bound_job_.get()) {
     OrphanJobsExcept(job);
   } else {  // This is the case for HTTPS proxy tunneling.
     DCHECK_EQ(bound_job_.get(), job);
     DCHECK(jobs_.empty());
   }
 
   // Cache these values in case the job gets deleted.
   const SSLConfig used_ssl_config = job->server_ssl_config();
   const ProxyInfo used_proxy_info = job->proxy_info();
   const bool was_npn_negotiated = job->was_npn_negotiated();
   const NextProto protocol_negotiated =
       job->protocol_negotiated();
   const bool using_spdy = job->using_spdy();
   const BoundNetLog net_log = job->net_log();
 
   Complete(was_npn_negotiated, protocol_negotiated, using_spdy, net_log);
 
   // Cache this so we can still use it if the request is deleted.
   HttpStreamFactoryImpl* factory = factory_;
-
-  bool use_relative_url = direct || url().SchemeIs("https");
-  delegate_->OnStreamReady(
-      job->server_ssl_config(),
-      job->proxy_info(),
-      new SpdyHttpStream(spdy_session.get(), use_relative_url));
+  if (factory->for_websockets_) {
+    DCHECK(websocket_stream_factory_);
+    bool use_relative_url = direct || url().SchemeIs("wss");
+    delegate_->OnWebSocketStreamReady(
+        job->server_ssl_config(),
+        job->proxy_info(),
+        websocket_stream_factory_->CreateSpdyStream(
+            spdy_session, use_relative_url));
+  } else {
+    bool use_relative_url = direct || url().SchemeIs("https");
+    delegate_->OnStreamReady(
+        job->server_ssl_config(),
+        job->proxy_info(),
+        new SpdyHttpStream(spdy_session, use_relative_url));
+  }
   // |this| may be deleted after this point.
-  factory->OnSpdySessionReady(
+  factory->OnNewSpdySessionReady(
       spdy_session, direct, used_ssl_config, used_proxy_info,
       was_npn_negotiated, protocol_negotiated, using_spdy, net_log);
 }
 
 void HttpStreamFactoryImpl::Request::OrphanJobsExcept(Job* job) {
   DCHECK(job);
   DCHECK(!bound_job_.get());
   DCHECK(ContainsKey(jobs_, job));
   bound_job_.reset(job);
   jobs_.erase(job);
   factory_->request_map_.erase(job);
 
   OrphanJobs();
 }
 
 void HttpStreamFactoryImpl::Request::OrphanJobs() {
   RemoveRequestFromSpdySessionRequestMap();
   RemoveRequestFromHttpPipeliningRequestMap();
 
   std::set<Job*> tmp;
   tmp.swap(jobs_);
 
   for (std::set<Job*>::iterator it = tmp.begin(); it != tmp.end(); ++it)
     factory_->OrphanJob(*it, this);
 }
 
+void HttpStreamFactoryImpl::Request::OnJobSucceeded(Job* job) {
+  // |job| should only be NULL if we're being serviced by a late bound
+  // SpdySession or HttpPipelinedConnection (one that was not created by a job
+  // in our |jobs_| set).
+  if (!job) {
+    DCHECK(!bound_job_.get());
+    DCHECK(!jobs_.empty());
+    // NOTE(willchan): We do *NOT* call OrphanJobs() here. The reason is because
+    // we *WANT* to cancel the unnecessary Jobs from other requests if another
+    // Job completes first.
+    // TODO(mbelshe): Revisit this when we implement ip connection pooling of
+    // SpdySessions. Do we want to orphan the jobs for a different hostname so
+    // they complete? Or do we want to prevent connecting a new SpdySession if
+    // we've already got one available for a different hostname where the ip
+    // address matches up?
+  } else if (!bound_job_.get()) {
+    // We may have other jobs in |jobs_|. For example, if we start multiple jobs
+    // for Alternate-Protocol.
+    OrphanJobsExcept(job);
+  } else {
+    DCHECK(jobs_.empty());
+  }
+}
+
 }  // namespace net
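For readers following the patch, a rough sketch of the shape this change implies (not part of the CL, and not the real net/ headers): Request now carries a WebSocketStreamBase::Factory*, and when HttpStreamFactoryImpl is in WebSocket mode (for_websockets_) a ready SpdySession is turned into a WebSocket stream via CreateSpdyStream() and handed to the delegate through OnWebSocketStreamReady() rather than OnStreamReady(). The stand-in types and signatures below are assumptions inferred only from the calls visible in this diff.

// Sketch only: simplified stand-ins inferred from the calls this patch makes.
// The real declarations live elsewhere in net/; any detail beyond the names
// that appear in the diff is an assumption.

class SpdySession;  // stands in for net::SpdySession

class WebSocketStreamBase {
 public:
  virtual ~WebSocketStreamBase() {}

  // The factory that Request's constructor now accepts and stores as
  // |websocket_stream_factory_|. OnNewSpdySessionReady() calls
  // CreateSpdyStream(spdy_session, use_relative_url) on it when
  // factory_->for_websockets_ is set, so the interface presumably exposes
  // something close to:
  class Factory {
   public:
    virtual ~Factory() {}
    virtual WebSocketStreamBase* CreateSpdyStream(SpdySession* spdy_session,
                                                  bool use_relative_url) = 0;
  };
};

On the delegate side, the diff likewise implies a matching HttpStreamRequest::Delegate::OnWebSocketStreamReady(used_ssl_config, used_proxy_info, stream) callback that mirrors OnStreamReady() on the HTTP path.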
