Index: src/core/SkTaskGroup.cpp
diff --git a/src/core/SkTaskGroup.cpp b/src/core/SkTaskGroup.cpp
index b3c23b649beaece214b2531a13569dc790aefd46..1b542a63f51aed4e5548d335ae4d34ca614642e1 100644
--- a/src/core/SkTaskGroup.cpp
+++ b/src/core/SkTaskGroup.cpp
@@ -96,7 +96,8 @@ private:
         SkAtomic<int32_t>* pending; // then decrement pending afterwards.
     };
 
-    explicit ThreadPool(int threads) {
+    explicit ThreadPool(int threads, std::function<void(void)> cleanupFn)
+        : fCleanupFn(cleanupFn) {
         if (threads == -1) {
             threads = sk_num_cores();
         }
@@ -161,6 +162,9 @@ private:
                 pool->fWork.pop_back();
             }
             if (!work.fn) {
+                if (pool->fCleanupFn) {
+                    pool->fCleanupFn();
+                }
                 return; // Poison pill. Time... to die.
             }
             work.fn();
@@ -183,6 +187,8 @@ private:
 
     // These are only changed in a single-threaded context.
     SkTDArray<SkThread*> fThreads;
+
+    std::function<void(void)> fCleanupFn;
     static ThreadPool* gGlobal;
 
     friend struct SkTaskGroup::Enabler;
@@ -191,14 +197,20 @@ ThreadPool* ThreadPool::gGlobal = nullptr;
 
 } // namespace
 
-SkTaskGroup::Enabler::Enabler(int threads) {
+SkTaskGroup::Enabler::Enabler(int threads, std::function<void(void)> cleanupFn)
+    : fCleanupFn(cleanupFn) {
     SkASSERT(ThreadPool::gGlobal == nullptr);
     if (threads != 0) {
-        ThreadPool::gGlobal = new ThreadPool(threads);
+        ThreadPool::gGlobal = new ThreadPool(threads, cleanupFn);
     }
 }
 
-SkTaskGroup::Enabler::~Enabler() { delete ThreadPool::gGlobal; }
+SkTaskGroup::Enabler::~Enabler() {
+    if (!ThreadPool::gGlobal && fCleanupFn) {
+        fCleanupFn();
+    }
+    delete ThreadPool::gGlobal;
+}
 
 SkTaskGroup::SkTaskGroup() : fPending(0) {}
 
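
For context, here is a minimal usage sketch of the new two-argument Enabler; it is not part of the patch. The cleanup_tls() helper and the lambda are hypothetical placeholders, and it assumes SkTaskGroup::add(std::function<void(void)>) and SkTaskGroup::wait() as they exist at this revision.

    #include "SkTaskGroup.h"

    // Hypothetical hook: release per-thread scratch state before a worker exits.
    static void cleanup_tls() { /* e.g. free thread-local caches */ }

    void run_with_pool() {
        // threads == -1 sizes the pool to sk_num_cores(); threads == 0 creates
        // no pool, and ~Enabler then runs the callback once on this thread.
        SkTaskGroup::Enabler enabled(-1, cleanup_tls);

        SkTaskGroup tg;
        tg.add([] { /* parallel work */ });
        tg.wait();
    }   // ~Enabler tears the pool down; each worker runs cleanup_tls() when it
        // receives its poison pill, per the worker-loop change above.

The destructor change keeps the two configurations symmetric: with a pool, the callback runs once per worker thread; without one (threads == 0), it runs once on the thread executing ~Enabler.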