OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "content/browser/worker_host/worker_service_impl.h" | 5 #include "content/browser/worker_host/worker_service_impl.h" |
6 | 6 |
7 #include <string> | 7 #include <string> |
8 | 8 |
9 #include "base/command_line.h" | 9 #include "base/command_line.h" |
10 #include "base/logging.h" | 10 #include "base/logging.h" |
(...skipping 78 matching lines...)
89 // the queued workers, or a renderer has shut down, in which case it doesn't | 89 // the queued workers, or a renderer has shut down, in which case it doesn't |
90 // affect anything. We call this function in both scenarios because then we | 90 // affect anything. We call this function in both scenarios because then we |
91 // don't have to keep track which filters are from worker processes. | 91 // don't have to keep track which filters are from worker processes. |
92 TryStartingQueuedWorker(); | 92 TryStartingQueuedWorker(); |
93 } | 93 } |
94 | 94 |
95 void WorkerServiceImpl::CreateWorker( | 95 void WorkerServiceImpl::CreateWorker( |
96 const ViewHostMsg_CreateWorker_Params& params, | 96 const ViewHostMsg_CreateWorker_Params& params, |
97 int route_id, | 97 int route_id, |
98 WorkerMessageFilter* filter, | 98 WorkerMessageFilter* filter, |
99 ResourceContext* resource_context) { | 99 const std::string& partition_id, |
| 100 ResourceContext* resource_context, |
| 101 ChromeAppCacheService* appcache_service, |
| 102 fileapi::FileSystemContext* filesystem_context, |
| 103 webkit_database::DatabaseTracker* database_tracker, |
| 104 IndexedDBContextImpl* indexed_db_context) { |
| 105 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); |
100 // Generate a unique route id for the browser-worker communication that's | 106 // Generate a unique route id for the browser-worker communication that's |
101 // unique among all worker processes. That way when the worker process sends | 107 // unique among all worker processes. That way when the worker process sends |
102 // a wrapped IPC message through us, we know which WorkerProcessHost to give | 108 // a wrapped IPC message through us, we know which WorkerProcessHost to give |
103 // it to. | 109 // it to. |
104 WorkerProcessHost::WorkerInstance instance( | 110 WorkerProcessHost::WorkerInstance instance( |
105 params.url, | 111 params.url, |
106 params.name, | 112 params.name, |
107 next_worker_route_id(), | 113 next_worker_route_id(), |
108 0, | 114 0, |
109 params.script_resource_appcache_id, | 115 params.script_resource_appcache_id, |
110 resource_context); | 116 partition_id, |
| 117 resource_context, |
| 118 appcache_service, |
| 119 filesystem_context, |
| 120 database_tracker, |
| 121 indexed_db_context); |
111 instance.AddFilter(filter, route_id); | 122 instance.AddFilter(filter, route_id); |
112 instance.worker_document_set()->Add( | 123 instance.worker_document_set()->Add( |
113 filter, params.document_id, filter->render_process_id(), | 124 filter, params.document_id, filter->render_process_id(), |
114 params.render_view_route_id); | 125 params.render_view_route_id); |
115 | 126 |
116 CreateWorkerFromInstance(instance); | 127 CreateWorkerFromInstance(instance); |
117 } | 128 } |
118 | 129 |
119 void WorkerServiceImpl::LookupSharedWorker( | 130 void WorkerServiceImpl::LookupSharedWorker( |
120 const ViewHostMsg_CreateWorker_Params& params, | 131 const ViewHostMsg_CreateWorker_Params& params, |
121 int route_id, | 132 int route_id, |
122 WorkerMessageFilter* filter, | 133 WorkerMessageFilter* filter, |
| 134 const std::string& partition_id, |
123 ResourceContext* resource_context, | 135 ResourceContext* resource_context, |
| 136 ChromeAppCacheService* appcache_service, |
| 137 fileapi::FileSystemContext* filesystem_context, |
| 138 webkit_database::DatabaseTracker* database_tracker, |
| 139 IndexedDBContextImpl* indexed_db_context, |
124 bool* exists, | 140 bool* exists, |
125 bool* url_mismatch) { | 141 bool* url_mismatch) { |
126 *exists = true; | 142 *exists = true; |
127 WorkerProcessHost::WorkerInstance* instance = FindSharedWorkerInstance( | 143 WorkerProcessHost::WorkerInstance* instance = FindSharedWorkerInstance( |
128 params.url, params.name, resource_context); | 144 params.url, params.name, partition_id, resource_context); |
129 | 145 |
130 if (!instance) { | 146 if (!instance) { |
131 // If no worker instance currently exists, we need to create a pending | 147 // If no worker instance currently exists, we need to create a pending |
132 // instance - this is to make sure that any subsequent lookups passing a | 148 // instance - this is to make sure that any subsequent lookups passing a |
133 // mismatched URL get the appropriate url_mismatch error at lookup time. | 149 // mismatched URL get the appropriate url_mismatch error at lookup time. |
134 // Having named shared workers was a Really Bad Idea due to details like | 150 // Having named shared workers was a Really Bad Idea due to details like |
135 // this. | 151 // this. |
136 instance = CreatePendingInstance(params.url, params.name, resource_context); | 152 instance = CreatePendingInstance(params.url, params.name, partition_id, |
| 153 resource_context, appcache_service, |
| 154 filesystem_context, database_tracker, |
| 155 indexed_db_context); |
137 *exists = false; | 156 *exists = false; |
138 } | 157 } |
139 | 158 |
140 // Make sure the passed-in instance matches the URL - if not, return an | 159 // Make sure the passed-in instance matches the URL - if not, return an |
141 // error. | 160 // error. |
142 if (params.url != instance->url()) { | 161 if (params.url != instance->url()) { |
143 *url_mismatch = true; | 162 *url_mismatch = true; |
144 *exists = false; | 163 *exists = false; |
145 } else { | 164 } else { |
146 *url_mismatch = false; | 165 *url_mismatch = false; |
(...skipping 77 matching lines...)
224 queued_workers_.push_back(instance); | 243 queued_workers_.push_back(instance); |
225 return true; | 244 return true; |
226 } | 245 } |
227 } | 246 } |
228 | 247 |
229 // Check to see if this shared worker is already running (two pages may have | 248 // Check to see if this shared worker is already running (two pages may have |
230 // tried to start up the worker simultaneously). | 249 // tried to start up the worker simultaneously). |
231 // See if a worker with this name already exists. | 250 // See if a worker with this name already exists. |
232 WorkerProcessHost::WorkerInstance* existing_instance = | 251 WorkerProcessHost::WorkerInstance* existing_instance = |
233 FindSharedWorkerInstance( | 252 FindSharedWorkerInstance( |
234 instance.url(), instance.name(), instance.resource_context()); | 253 instance.url(), instance.name(), instance.partition_id(), |
| 254 instance.resource_context()); |
235 WorkerProcessHost::WorkerInstance::FilterInfo filter_info = | 255 WorkerProcessHost::WorkerInstance::FilterInfo filter_info = |
236 instance.GetFilter(); | 256 instance.GetFilter(); |
237 // If this worker is already running, no need to create a new copy. Just | 257 // If this worker is already running, no need to create a new copy. Just |
238 // inform the caller that the worker has been created. | 258 // inform the caller that the worker has been created. |
239 if (existing_instance) { | 259 if (existing_instance) { |
240 // Walk the worker's filter list to see if this client is listed. If not, | 260 // Walk the worker's filter list to see if this client is listed. If not, |
241 // then it means that the worker started by the client already exited so | 261 // then it means that the worker started by the client already exited so |
242 // we should not attach to this new one (http://crbug.com/29243). | 262 // we should not attach to this new one (http://crbug.com/29243). |
243 if (!existing_instance->HasFilter(filter_info.first, filter_info.second)) | 263 if (!existing_instance->HasFilter(filter_info.first, filter_info.second)) |
244 return false; | 264 return false; |
245 filter_info.first->Send(new ViewMsg_WorkerCreated(filter_info.second)); | 265 filter_info.first->Send(new ViewMsg_WorkerCreated(filter_info.second)); |
246 return true; | 266 return true; |
247 } | 267 } |
248 | 268 |
249 // Look to see if there's a pending instance. | 269 // Look to see if there's a pending instance. |
250 WorkerProcessHost::WorkerInstance* pending = FindPendingInstance( | 270 WorkerProcessHost::WorkerInstance* pending = FindPendingInstance( |
251 instance.url(), instance.name(), instance.resource_context()); | 271 instance.url(), instance.name(), instance.partition_id(), |
| 272 instance.resource_context()); |
252 // If there's no instance *and* no pending instance (or there is a pending | 273 // If there's no instance *and* no pending instance (or there is a pending |
253 // instance but it does not contain our filter info), then it means the | 274 // instance but it does not contain our filter info), then it means the |
254 // worker started up and exited already. Log a warning because this should | 275 // worker started up and exited already. Log a warning because this should |
255 // be a very rare occurrence and is probably a bug, but it *can* happen so | 276 // be a very rare occurrence and is probably a bug, but it *can* happen so |
256 // handle it gracefully. | 277 // handle it gracefully. |
257 if (!pending || | 278 if (!pending || |
258 !pending->HasFilter(filter_info.first, filter_info.second)) { | 279 !pending->HasFilter(filter_info.first, filter_info.second)) { |
259 DLOG(WARNING) << "Pending worker already exited"; | 280 DLOG(WARNING) << "Pending worker already exited"; |
260 return false; | 281 return false; |
261 } | 282 } |
262 | 283 |
263 // Assign the accumulated document set and filter list for this pending | 284 // Assign the accumulated document set and filter list for this pending |
264 // worker to the new instance. | 285 // worker to the new instance. |
265 DCHECK(!pending->worker_document_set()->IsEmpty()); | 286 DCHECK(!pending->worker_document_set()->IsEmpty()); |
266 instance.ShareDocumentSet(*pending); | 287 instance.ShareDocumentSet(*pending); |
267 for (WorkerProcessHost::WorkerInstance::FilterList::const_iterator i = | 288 for (WorkerProcessHost::WorkerInstance::FilterList::const_iterator i = |
268 pending->filters().begin(); | 289 pending->filters().begin(); |
269 i != pending->filters().end(); ++i) { | 290 i != pending->filters().end(); ++i) { |
270 instance.AddFilter(i->first, i->second); | 291 instance.AddFilter(i->first, i->second); |
271 } | 292 } |
272 RemovePendingInstances( | 293 RemovePendingInstances(instance.url(), instance.name(), |
273 instance.url(), instance.name(), instance.resource_context()); | 294 instance.partition_id(), instance.resource_context()); |
274 | 295 |
275 // Remove any queued instances of this worker and copy over the filter to | 296 // Remove any queued instances of this worker and copy over the filter to |
276 // this instance. | 297 // this instance. |
277 for (WorkerProcessHost::Instances::iterator iter = queued_workers_.begin(); | 298 for (WorkerProcessHost::Instances::iterator iter = queued_workers_.begin(); |
278 iter != queued_workers_.end();) { | 299 iter != queued_workers_.end();) { |
279 if (iter->Matches(instance.url(), instance.name(), | 300 if (iter->Matches(instance.url(), instance.name(), |
280 instance.resource_context())) { | 301 instance.partition_id(), instance.resource_context())) { |
281 DCHECK(iter->NumFilters() == 1); | 302 DCHECK(iter->NumFilters() == 1); |
282 WorkerProcessHost::WorkerInstance::FilterInfo filter_info = | 303 WorkerProcessHost::WorkerInstance::FilterInfo filter_info = |
283 iter->GetFilter(); | 304 iter->GetFilter(); |
284 instance.AddFilter(filter_info.first, filter_info.second); | 305 instance.AddFilter(filter_info.first, filter_info.second); |
285 iter = queued_workers_.erase(iter); | 306 iter = queued_workers_.erase(iter); |
286 } else { | 307 } else { |
287 ++iter; | 308 ++iter; |
288 } | 309 } |
289 } | 310 } |
290 | 311 |
291 if (!worker) { | 312 if (!worker) { |
292 WorkerMessageFilter* first_filter = instance.filters().begin()->first; | 313 WorkerMessageFilter* first_filter = instance.filters().begin()->first; |
293 worker = new WorkerProcessHost(instance.resource_context()); | 314 worker = new WorkerProcessHost(instance.partition_id(), |
| 315 instance.resource_context(), |
| 316 instance.appcache_service(), |
| 317 instance.filesystem_context(), |
| 318 instance.database_tracker(), |
| 319 instance.indexed_db_context()); |
294 // TODO(atwilson): This won't work if the message is from a worker process. | 320 // TODO(atwilson): This won't work if the message is from a worker process. |
295 // We don't support that yet though (this message is only sent from | 321 // We don't support that yet though (this message is only sent from |
296 // renderers) but when we do, we'll need to add code to pass in the current | 322 // renderers) but when we do, we'll need to add code to pass in the current |
297 // worker's document set for nested workers. | 323 // worker's document set for nested workers. |
298 if (!worker->Init(first_filter->render_process_id())) { | 324 if (!worker->Init(first_filter->render_process_id())) { |
299 delete worker; | 325 delete worker; |
300 return false; | 326 return false; |
301 } | 327 } |
302 } | 328 } |
303 | 329 |
(...skipping 206 matching lines...)
510 int worker_route_id) { | 536 int worker_route_id) { |
511 WorkerDevToolsManager::GetInstance()->WorkerDestroyed( | 537 WorkerDevToolsManager::GetInstance()->WorkerDestroyed( |
512 process, worker_route_id); | 538 process, worker_route_id); |
513 FOR_EACH_OBSERVER(WorkerServiceObserver, observers_, | 539 FOR_EACH_OBSERVER(WorkerServiceObserver, observers_, |
514 WorkerDestroyed(process->GetData().id, worker_route_id)); | 540 WorkerDestroyed(process->GetData().id, worker_route_id)); |
515 } | 541 } |
516 | 542 |
517 WorkerProcessHost::WorkerInstance* WorkerServiceImpl::FindSharedWorkerInstance( | 543 WorkerProcessHost::WorkerInstance* WorkerServiceImpl::FindSharedWorkerInstance( |
518 const GURL& url, | 544 const GURL& url, |
519 const string16& name, | 545 const string16& name, |
| 546 const std::string& partition_id, |
520 ResourceContext* resource_context) { | 547 ResourceContext* resource_context) { |
521 for (WorkerProcessHostIterator iter; !iter.Done(); ++iter) { | 548 for (WorkerProcessHostIterator iter; !iter.Done(); ++iter) { |
522 for (WorkerProcessHost::Instances::iterator instance_iter = | 549 for (WorkerProcessHost::Instances::iterator instance_iter = |
523 iter->mutable_instances().begin(); | 550 iter->mutable_instances().begin(); |
524 instance_iter != iter->mutable_instances().end(); | 551 instance_iter != iter->mutable_instances().end(); |
525 ++instance_iter) { | 552 ++instance_iter) { |
526 if (instance_iter->Matches(url, name, resource_context)) | 553 if (instance_iter->Matches(url, name, partition_id, resource_context)) |
527 return &(*instance_iter); | 554 return &(*instance_iter); |
528 } | 555 } |
529 } | 556 } |
530 return NULL; | 557 return NULL; |
531 } | 558 } |
532 | 559 |
533 WorkerProcessHost::WorkerInstance* WorkerServiceImpl::FindPendingInstance( | 560 WorkerProcessHost::WorkerInstance* WorkerServiceImpl::FindPendingInstance( |
534 const GURL& url, | 561 const GURL& url, |
535 const string16& name, | 562 const string16& name, |
| 563 const std::string& partition_id, |
536 ResourceContext* resource_context) { | 564 ResourceContext* resource_context) { |
537 // Walk the pending instances looking for a matching pending worker. | 565 // Walk the pending instances looking for a matching pending worker. |
538 for (WorkerProcessHost::Instances::iterator iter = | 566 for (WorkerProcessHost::Instances::iterator iter = |
539 pending_shared_workers_.begin(); | 567 pending_shared_workers_.begin(); |
540 iter != pending_shared_workers_.end(); | 568 iter != pending_shared_workers_.end(); |
541 ++iter) { | 569 ++iter) { |
542 if (iter->Matches(url, name, resource_context)) { | 570 if (iter->Matches(url, name, partition_id, resource_context)) |
543 return &(*iter); | 571 return &(*iter); |
544 } | |
545 } | 572 } |
546 return NULL; | 573 return NULL; |
547 } | 574 } |
548 | 575 |
549 | 576 |
550 void WorkerServiceImpl::RemovePendingInstances( | 577 void WorkerServiceImpl::RemovePendingInstances( |
551 const GURL& url, | 578 const GURL& url, |
552 const string16& name, | 579 const string16& name, |
| 580 const std::string& partition_id, |
553 ResourceContext* resource_context) { | 581 ResourceContext* resource_context) { |
554 // Walk the pending instances looking for a matching pending worker. | 582 // Walk the pending instances looking for a matching pending worker. |
555 for (WorkerProcessHost::Instances::iterator iter = | 583 for (WorkerProcessHost::Instances::iterator iter = |
556 pending_shared_workers_.begin(); | 584 pending_shared_workers_.begin(); |
557 iter != pending_shared_workers_.end(); ) { | 585 iter != pending_shared_workers_.end(); ) { |
558 if (iter->Matches(url, name, resource_context)) { | 586 if (iter->Matches(url, name, partition_id, resource_context)) { |
559 iter = pending_shared_workers_.erase(iter); | 587 iter = pending_shared_workers_.erase(iter); |
560 } else { | 588 } else { |
561 ++iter; | 589 ++iter; |
562 } | 590 } |
563 } | 591 } |
564 } | 592 } |
565 | 593 |
566 WorkerProcessHost::WorkerInstance* WorkerServiceImpl::CreatePendingInstance( | 594 WorkerProcessHost::WorkerInstance* WorkerServiceImpl::CreatePendingInstance( |
567 const GURL& url, | 595 const GURL& url, |
568 const string16& name, | 596 const string16& name, |
569 ResourceContext* resource_context) { | 597 const std::string& partition_id, |
| 598 ResourceContext* resource_context, |
| 599 ChromeAppCacheService* appcache_service, |
| 600 fileapi::FileSystemContext* filesystem_context, |
| 601 webkit_database::DatabaseTracker* database_tracker, |
| 602 IndexedDBContextImpl* indexed_db_context) { |
570 // Look for an existing pending shared worker. | 603 // Look for an existing pending shared worker. |
571 WorkerProcessHost::WorkerInstance* instance = | 604 WorkerProcessHost::WorkerInstance* instance = |
572 FindPendingInstance(url, name, resource_context); | 605 FindPendingInstance(url, name, partition_id, resource_context); |
573 if (instance) | 606 if (instance) |
574 return instance; | 607 return instance; |
575 | 608 |
576 // No existing pending worker - create a new one. | 609 // No existing pending worker - create a new one. |
577 WorkerProcessHost::WorkerInstance pending(url, true, name, resource_context); | 610 WorkerProcessHost::WorkerInstance pending( |
| 611 url, true, name, partition_id, resource_context, appcache_service, |
| 612 filesystem_context, database_tracker, indexed_db_context); |
578 pending_shared_workers_.push_back(pending); | 613 pending_shared_workers_.push_back(pending); |
579 return &pending_shared_workers_.back(); | 614 return &pending_shared_workers_.back(); |
580 } | 615 } |
581 | 616 |
582 } // namespace content | 617 } // namespace content |