Index: content/browser/worker_host/worker_service_impl.cc
===================================================================
--- content/browser/worker_host/worker_service_impl.cc (revision 154786)
+++ content/browser/worker_host/worker_service_impl.cc (working copy)
@@ -11,6 +11,7 @@
#include "base/sys_info.h"
#include "base/threading/thread.h"
#include "content/browser/debugger/worker_devtools_manager.h"
+#include "content/browser/storage_partition_impl.h"
#include "content/browser/worker_host/worker_message_filter.h"
#include "content/browser/worker_host/worker_process_host.h"
#include "content/common/view_messages.h"
@@ -96,7 +97,8 @@
const ViewHostMsg_CreateWorker_Params& params,
int route_id,
WorkerMessageFilter* filter,
- ResourceContext* resource_context) {
+ ResourceContext* resource_context,
+ StoragePartitionImpl* storage_partition) {
// Generate a unique route id for the browser-worker communication that's
// unique among all worker processes. That way when the worker process sends
// a wrapped IPC message through us, we know which WorkerProcessHost to give
@@ -107,7 +109,8 @@
next_worker_route_id(),
0,
params.script_resource_appcache_id,
- resource_context);
+ resource_context,
+ storage_partition);
instance.AddFilter(filter, route_id);
instance.worker_document_set()->Add(
filter, params.document_id, filter->render_process_id(),
@@ -121,11 +124,12 @@
int route_id,
WorkerMessageFilter* filter,
ResourceContext* resource_context,
+ StoragePartitionImpl* storage_partition,
bool* exists,
bool* url_mismatch) {
*exists = true;
WorkerProcessHost::WorkerInstance* instance = FindSharedWorkerInstance(
- params.url, params.name, resource_context);
+ params.url, params.name, resource_context, storage_partition);
if (!instance) {
// If no worker instance currently exists, we need to create a pending
@@ -133,7 +137,8 @@
// mismatched URL get the appropriate url_mismatch error at lookup time.
// Having named shared workers was a Really Bad Idea due to details like
// this.
- instance = CreatePendingInstance(params.url, params.name, resource_context);
+ instance = CreatePendingInstance(params.url, params.name, resource_context,
+ storage_partition);
*exists = false;
}
@@ -231,7 +236,8 @@
// See if a worker with this name already exists.
WorkerProcessHost::WorkerInstance* existing_instance =
FindSharedWorkerInstance(
- instance.url(), instance.name(), instance.resource_context());
+ instance.url(), instance.name(), instance.resource_context(),
+ instance.storage_partition());
WorkerProcessHost::WorkerInstance::FilterInfo filter_info =
instance.GetFilter();
// If this worker is already running, no need to create a new copy. Just
@@ -248,7 +254,8 @@
// Look to see if there's a pending instance.
WorkerProcessHost::WorkerInstance* pending = FindPendingInstance(
- instance.url(), instance.name(), instance.resource_context());
+ instance.url(), instance.name(), instance.resource_context(),
+ instance.storage_partition());
// If there's no instance *and* no pending instance (or there is a pending
// instance but it does not contain our filter info), then it means the
// worker started up and exited already. Log a warning because this should
@@ -270,14 +277,16 @@
instance.AddFilter(i->first, i->second);
}
RemovePendingInstances(
- instance.url(), instance.name(), instance.resource_context());
+ instance.url(), instance.name(), instance.resource_context(),
+ instance.storage_partition());
// Remove any queued instances of this worker and copy over the filter to
// this instance.
for (WorkerProcessHost::Instances::iterator iter = queued_workers_.begin();
iter != queued_workers_.end();) {
if (iter->Matches(instance.url(), instance.name(),
- instance.resource_context())) {
+ instance.resource_context(),
+ instance.storage_partition())) {
DCHECK(iter->NumFilters() == 1);
WorkerProcessHost::WorkerInstance::FilterInfo filter_info =
iter->GetFilter();
@@ -290,7 +299,8 @@
if (!worker) {
WorkerMessageFilter* first_filter = instance.filters().begin()->first;
- worker = new WorkerProcessHost(instance.resource_context());
+ worker = new WorkerProcessHost(instance.resource_context(),
+ instance.storage_partition());
// TODO(atwilson): This won't work if the message is from a worker process.
// We don't support that yet though (this message is only sent from
// renderers) but when we do, we'll need to add code to pass in the current
@@ -517,13 +527,15 @@
WorkerProcessHost::WorkerInstance* WorkerServiceImpl::FindSharedWorkerInstance(
const GURL& url,
const string16& name,
- ResourceContext* resource_context) {
+ ResourceContext* resource_context,
+ StoragePartitionImpl* storage_partition) {
for (WorkerProcessHostIterator iter; !iter.Done(); ++iter) {
for (WorkerProcessHost::Instances::iterator instance_iter =
iter->mutable_instances().begin();
instance_iter != iter->mutable_instances().end();
++instance_iter) {
- if (instance_iter->Matches(url, name, resource_context))
+ if (instance_iter->Matches(url, name, resource_context,
+ storage_partition))
return &(*instance_iter);
}
}
@@ -533,13 +545,14 @@
WorkerProcessHost::WorkerInstance* WorkerServiceImpl::FindPendingInstance(
const GURL& url,
const string16& name,
- ResourceContext* resource_context) {
+ ResourceContext* resource_context,
+ StoragePartitionImpl* storage_partition) {
// Walk the pending instances looking for a matching pending worker.
for (WorkerProcessHost::Instances::iterator iter =
pending_shared_workers_.begin();
iter != pending_shared_workers_.end();
++iter) {
- if (iter->Matches(url, name, resource_context)) {
+ if (iter->Matches(url, name, resource_context, storage_partition)) {
return &(*iter);
}
}
@@ -550,12 +563,13 @@
void WorkerServiceImpl::RemovePendingInstances(
const GURL& url,
const string16& name,
- ResourceContext* resource_context) {
+ ResourceContext* resource_context,
+ StoragePartitionImpl* storage_partition) {
// Walk the pending instances looking for a matching pending worker.
for (WorkerProcessHost::Instances::iterator iter =
pending_shared_workers_.begin();
iter != pending_shared_workers_.end(); ) {
- if (iter->Matches(url, name, resource_context)) {
+ if (iter->Matches(url, name, resource_context, storage_partition)) {
iter = pending_shared_workers_.erase(iter);
} else {
++iter;
@@ -566,15 +580,17 @@
WorkerProcessHost::WorkerInstance* WorkerServiceImpl::CreatePendingInstance(
const GURL& url,
const string16& name,
- ResourceContext* resource_context) {
+ ResourceContext* resource_context,
+ StoragePartitionImpl* storage_partition) {
// Look for an existing pending shared worker.
WorkerProcessHost::WorkerInstance* instance =
- FindPendingInstance(url, name, resource_context);
+ FindPendingInstance(url, name, resource_context, storage_partition);
if (instance)
return instance;
// No existing pending worker - create a new one.
- WorkerProcessHost::WorkerInstance pending(url, true, name, resource_context);
+ WorkerProcessHost::WorkerInstance pending(
+ url, true, name, resource_context, storage_partition);
pending_shared_workers_.push_back(pending);
return &pending_shared_workers_.back();
}