Chromium Code Reviews

Unified Diff: base/message_pump_win.cc

Issue 16020005: Fixed the racy code around the message-only window in base::MessagePumpForUI on Windows. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: - Created 7 years, 6 months ago
Index: base/message_pump_win.cc
diff --git a/base/message_pump_win.cc b/base/message_pump_win.cc
index fb962caaa2f45e2f83792f10dc3fe7bd147e5d23..28421714cc678993dbcb0f356c598332e17d01b5 100644
--- a/base/message_pump_win.cc
+++ b/base/message_pump_win.cc
@@ -9,8 +9,6 @@
#include "base/debug/trace_event.h"
#include "base/message_loop.h"
#include "base/metrics/histogram.h"
-#include "base/process_util.h"
-#include "base/win/wrapped_window_proc.h"
namespace {
@@ -21,19 +19,34 @@ enum MessageLoopProblems {
MESSAGE_LOOP_PROBLEM_MAX,
};
+// Possible states of the message pump:
+// - kPumpIdle: the thread is sleeping, waiting for work to be posted.
+// - kPumpHaveWork: a window message or completion packet has been posted.
+// - kPumpDisabled: the pump is disabled, no more work can be posted.
+static const LONG kPumpIdle = 0;
+static const LONG kPumpHaveWork = 1;
+static const LONG kPumpDisabled = 2;
+
+// Used to wake up the message loop thread in case it is waiting for another
+// thread to exit MessagePumpForUI::ScheduleWork().
+VOID CALLBACK DummyApc(ULONG_PTR) {}
+
} // namespace
namespace base {
-static const wchar_t kWndClass[] = L"Chrome_MessagePumpWindow";
-
// Message sent to get an additional time slice for pumping (processing) another
// task (a series of such messages creates a continuous task pump).
static const int kMsgHaveWork = WM_USER + 1;
+// Used by MessagePumpForUI to wake up the thread and check any pending timers.
+static const int kTimerId = 1;
+
//-----------------------------------------------------------------------------
// MessagePumpWin public:
+MessagePumpWin::MessagePumpWin() : pump_state_(kPumpIdle), state_(NULL) {}
+
void MessagePumpWin::AddObserver(MessagePumpObserver* observer) {
observers_.AddObserver(observer);
}
@@ -96,25 +109,31 @@ int MessagePumpWin::GetCurrentDelay() const {
// MessagePumpForUI public:
MessagePumpForUI::MessagePumpForUI()
- : instance_(NULL),
- message_filter_(new MessageFilter) {
- InitMessageWnd();
+ : message_filter_(new MessageFilter),
+ window_(new win::MessageWindow()) {
+ CHECK(DuplicateHandle(GetCurrentProcess(),
+ GetCurrentThread(),
+ GetCurrentProcess(),
+ thread_.Receive(),
+ THREAD_SET_CONTEXT,
+ FALSE, // do not inherit this handle
+ 0));
+ CHECK(window_->Create(this));
}
MessagePumpForUI::~MessagePumpForUI() {
- DestroyWindow(message_hwnd_);
- UnregisterClass(kWndClass, instance_);
}
void MessagePumpForUI::ScheduleWork() {
- if (InterlockedExchange(&have_work_, 1))
- return; // Someone else continued the pumping.
+ if (InterlockedCompareExchange(&pump_state_, kPumpHaveWork,
+ kPumpIdle) != kPumpIdle) {
+ // Either someone else continued the pumping or the pump is disabled.
+ return;
+ }
// Make sure the MessagePump does some work for us.
- BOOL ret = PostMessage(message_hwnd_, kMsgHaveWork,
- reinterpret_cast<WPARAM>(this), 0);
- if (ret)
- return; // There was room in the Window Message queue.
+ if (PostMessage(window_->hwnd(), kMsgHaveWork, 0, 0))
+ return;
// We have failed to insert a have-work message, so there is a chance that we
// will starve tasks/timers while sitting in a nested message loop. Nested
@@ -124,12 +143,27 @@ void MessagePumpForUI::ScheduleWork() {
// common (queue is full, of about 2000 messages), so we'll do a near-graceful
// recovery. Nested loops are pretty transient (we think), so this will
// probably be recoverable.
- InterlockedExchange(&have_work_, 0); // Clarify that we didn't really insert.
UMA_HISTOGRAM_ENUMERATION("Chrome.MessageLoopProblem", MESSAGE_POST_ERROR,
MESSAGE_LOOP_PROBLEM_MAX);
+
+ // Clarify that we didn't really insert.
+ InterlockedExchange(&pump_state_, kPumpIdle);
darin (slow to review) 2013/06/10 19:28:45 nit: Shouldn't this use InterlockedCompareExchange
alexeypa (please no reviews) 2013/06/10 20:16:12 No, at this point |pump_state_| is guaranteed to b
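For reference, a minimal standalone sketch (not part of this patch) contrasting the two interlocked calls discussed above: InterlockedExchange() stores unconditionally and returns the previous value, while InterlockedCompareExchange() stores only when the previous value matches the comparand. The unconditional reset here appears sufficient because, on this error path, only the thread that won the earlier kPumpIdle to kPumpHaveWork transition can reset the state.

// Standalone sketch, not the Chromium code.
#include <windows.h>
#include <stdio.h>

int main() {
  volatile LONG state = 1;  // e.g. kPumpHaveWork

  // Unconditional store; returns the previous value.
  LONG old_value = InterlockedExchange(&state, 0);
  printf("InterlockedExchange: old=%ld new=%ld\n", old_value, state);

  // Conditional store; only succeeds if the previous value equals 1.
  state = 1;
  LONG prev = InterlockedCompareExchange(&state, 0, 1);
  printf("InterlockedCompareExchange: prev=%ld new=%ld\n", prev, state);
  return 0;
}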
+
+ // Try to wake up the message loop thread by posting an APC. This might not
darin (slow to review) 2013/06/10 19:28:45 I'm not sure I fully understand the conditions tha
alexeypa (please no reviews) 2013/06/10 20:16:12 I don't have any data on hand but two reasons that
+ // work while a nested loop is still running (see the comment above), but it
+ // will unblock WillDestroyCurrentMessageLoop() if it is waiting for
+ // ScheduleWork() to complete.
+ //
+ // According to the UMA metrics, posting an I/O completion packet has a very
+ // low error rate. Queuing an APC hits roughly the same path in the kernel,
+ // so the error rate should be low as well. Given that we do it only when
+ // PostMessage() fails, it should be safe to CHECK() here.
+ CHECK(QueueUserAPC(&DummyApc, thread_, 0));
}
void MessagePumpForUI::ScheduleDelayedWork(const TimeTicks& delayed_work_time) {
+ DCHECK(window_->CalledOnValidThread());
+
//
// We would *like* to provide high resolution timers. Windows timers using
// SetTimer() have a 10ms granularity. We have to use WM_TIMER as a wakeup
@@ -159,8 +193,7 @@ void MessagePumpForUI::ScheduleDelayedWork(const TimeTicks& delayed_work_time) {
// Create a WM_TIMER event that will wake us up to check for any pending
// timers (in case we are running within a nested, external sub-pump).
- BOOL ret = SetTimer(message_hwnd_, reinterpret_cast<UINT_PTR>(this),
- delay_msec, NULL);
+ BOOL ret = SetTimer(window_->hwnd(), kTimerId, delay_msec, NULL);
if (ret)
return;
// If we can't set timers, we are in big trouble... but cross our fingers for
@@ -170,6 +203,31 @@ void MessagePumpForUI::ScheduleDelayedWork(const TimeTicks& delayed_work_time) {
MESSAGE_LOOP_PROBLEM_MAX);
}
+void MessagePumpForUI::WillDestroyCurrentMessageLoop() {
+ DCHECK(window_->CalledOnValidThread());
+
+ // Disable the message pump. If |pump_state_ == kPumpHaveWork| then
+ // ScheduleWork() might still be running on a different thread. Wait until
+ // |kMsgHaveWork| is received or |pump_state_| is reset back to |kPumpIdle|.
+ while (InterlockedCompareExchange(&pump_state_, kPumpDisabled,
+ kPumpIdle) == kPumpHaveWork) {
+ MSG msg;
+ if (PeekMessage(&msg, NULL, 0, 0, PM_REMOVE)) {
+ if (msg.message == kMsgHaveWork && msg.hwnd == window_->hwnd()) {
+ // Now that we received |kMsgHaveWork| the pump can be safely disabled.
+ InterlockedExchange(&pump_state_, kPumpDisabled);
+ break;
+ }
+ }
+
+ // Wait until |kMsgHaveWork| is posted or an APC is received.
+ WaitForWork();
+ }
+
+ // At this point the pump is disabled and other threads have exited
+ // ScheduleWork().
+ window_.reset();
+}
+
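For illustration, here is a minimal standalone sketch (not part of this patch; the thread function name and the pending_work flag are placeholders standing in for PostMessage() and WaitForWork()) of the idle/have-work/disabled handshake implemented above: the scheduling thread may only post work after winning the kPumpIdle to kPumpHaveWork transition, and the shutdown path refuses to disable the pump until that pending work has been observed.

// Standalone sketch, not the Chromium code.
#include <windows.h>
#include <stdio.h>

static const LONG kPumpIdle = 0;
static const LONG kPumpHaveWork = 1;
static const LONG kPumpDisabled = 2;

static volatile LONG pump_state = kPumpIdle;
static volatile LONG pending_work = 0;

DWORD WINAPI Scheduler(LPVOID) {
  // Only the thread that wins the kPumpIdle -> kPumpHaveWork transition may
  // post work; a disabled pump rejects further work.
  if (InterlockedCompareExchange(&pump_state, kPumpHaveWork, kPumpIdle) ==
      kPumpIdle) {
    InterlockedExchange(&pending_work, 1);  // Stands in for PostMessage().
  }
  return 0;
}

int main() {
  HANDLE thread = CreateThread(NULL, 0, Scheduler, NULL, 0, NULL);

  // Shutdown path: keep trying to move kPumpIdle -> kPumpDisabled; if the
  // scheduler won the race, drain its pending work first, then disable.
  while (InterlockedCompareExchange(&pump_state, kPumpDisabled, kPumpIdle) ==
         kPumpHaveWork) {
    if (InterlockedExchange(&pending_work, 0)) {
      InterlockedExchange(&pump_state, kPumpDisabled);
      break;
    }
    Sleep(1);  // Stands in for WaitForWork().
  }

  WaitForSingleObject(thread, INFINITE);
  printf("pump disabled, state=%ld\n", pump_state);
  CloseHandle(thread);
  return 0;
}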
void MessagePumpForUI::PumpOutPendingPaintMessages() {
// If we are being called outside of the context of Run, then don't try to do
// any work.
@@ -197,21 +255,28 @@ void MessagePumpForUI::PumpOutPendingPaintMessages() {
//-----------------------------------------------------------------------------
// MessagePumpForUI private:
-// static
-LRESULT CALLBACK MessagePumpForUI::WndProcThunk(
- HWND hwnd, UINT message, WPARAM wparam, LPARAM lparam) {
+bool MessagePumpForUI::HandleMessage(HWND hwnd,
+ UINT message,
+ WPARAM wparam,
+ LPARAM lparam,
+ LRESULT* result) {
switch (message) {
case kMsgHaveWork:
- reinterpret_cast<MessagePumpForUI*>(wparam)->HandleWorkMessage();
+ HandleWorkMessage();
break;
+
case WM_TIMER:
- reinterpret_cast<MessagePumpForUI*>(wparam)->HandleTimerMessage();
+ HandleTimerMessage();
break;
}
- return DefWindowProc(hwnd, message, wparam, lparam);
+
+ // Do default processing for all messages.
+ return false;
}
void MessagePumpForUI::DoRunLoop() {
+ DCHECK(window_->CalledOnValidThread());
+
// IF this was just a simple PeekMessage() loop (servicing all possible work
// queues), then Windows would try to achieve the following order according
// to MSDN documentation about PeekMessage with no filter):
@@ -249,7 +314,7 @@ void MessagePumpForUI::DoRunLoop() {
// don't want to disturb that timer if it is already in flight. However,
// if we did do all remaining delayed work, then lets kill the WM_TIMER.
if (more_work_is_plausible && delayed_work_time_.is_null())
- KillTimer(message_hwnd_, reinterpret_cast<UINT_PTR>(this));
+ KillTimer(window_->hwnd(), kTimerId);
if (state_->should_quit)
break;
@@ -267,20 +332,6 @@ void MessagePumpForUI::DoRunLoop() {
}
}
-void MessagePumpForUI::InitMessageWnd() {
- WNDCLASSEX wc = {0};
- wc.cbSize = sizeof(wc);
- wc.lpfnWndProc = base::win::WrappedWindowProc<WndProcThunk>;
- wc.hInstance = base::GetModuleFromAddress(wc.lpfnWndProc);
- wc.lpszClassName = kWndClass;
- instance_ = wc.hInstance;
- RegisterClassEx(&wc);
-
- message_hwnd_ =
- CreateWindow(kWndClass, 0, 0, 0, 0, 0, 0, HWND_MESSAGE, 0, instance_, 0);
- DCHECK(message_hwnd_);
-}
-
void MessagePumpForUI::WaitForWork() {
// Wait until a message is available, up to the time needed by the timer
// manager to fire the next set of timers.
@@ -290,7 +341,7 @@ void MessagePumpForUI::WaitForWork() {
DWORD result;
result = MsgWaitForMultipleObjectsEx(0, NULL, delay, QS_ALLINPUT,
- MWMO_INPUTAVAILABLE);
+ MWMO_ALERTABLE | MWMO_INPUTAVAILABLE);
darin (slow to review) 2013/06/10 19:28:45 I'm a little worried that making this call alertab
alexeypa (please no reviews) 2013/06/10 20:16:12 It means that it will be possible to post APCs to
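To illustrate the effect of MWMO_ALERTABLE under discussion, here is a minimal standalone sketch (not part of this patch; the Waiter thread name is a placeholder): a queued user APC ends an alertable MsgWaitForMultipleObjectsEx() wait with WAIT_IO_COMPLETION, which is the case WaitForWork() handles below.

// Standalone sketch, not the Chromium code.
#include <windows.h>
#include <stdio.h>

static VOID CALLBACK DummyApc(ULONG_PTR) {}

DWORD WINAPI Waiter(LPVOID) {
  // With MWMO_ALERTABLE the wait is ended by APC delivery and returns
  // WAIT_IO_COMPLETION instead of a handle or input-available result.
  DWORD result = MsgWaitForMultipleObjectsEx(
      0, NULL, INFINITE, QS_ALLINPUT, MWMO_ALERTABLE | MWMO_INPUTAVAILABLE);
  printf("wait ended: %lu (WAIT_IO_COMPLETION is %lu)\n",
         result, (DWORD)WAIT_IO_COMPLETION);
  return 0;
}

int main() {
  HANDLE thread = CreateThread(NULL, 0, Waiter, NULL, 0, NULL);
  Sleep(100);                           // Let the waiter enter the wait.
  QueueUserAPC(&DummyApc, thread, 0);   // Wakes the alertable wait.
  WaitForSingleObject(thread, INFINITE);
  CloseHandle(thread);
  return 0;
}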
if (WAIT_OBJECT_0 == result) {
// A WM_* message is available.
@@ -311,6 +362,11 @@ void MessagePumpForUI::WaitForWork() {
WaitMessage();
}
return;
+ } else if (WAIT_IO_COMPLETION == result) {
+ // The wait was ended by one or more APCs. It could be because
+ // MessagePumpForUI::ScheduleWork() is trying to wake up the message loop
+ // thread.
+ return;
}
DCHECK_NE(WAIT_FAILED, result) << GetLastError();
@@ -322,7 +378,7 @@ void MessagePumpForUI::HandleWorkMessage() {
// sort.
if (!state_) {
// Since we handled a kMsgHaveWork message, we must still update this flag.
- InterlockedExchange(&have_work_, 0);
+ InterlockedExchange(&pump_state_, kPumpIdle);
return;
}
@@ -338,7 +394,7 @@ void MessagePumpForUI::HandleWorkMessage() {
}
void MessagePumpForUI::HandleTimerMessage() {
- KillTimer(message_hwnd_, reinterpret_cast<UINT_PTR>(this));
+ KillTimer(window_->hwnd(), kTimerId);
// If we are being called outside of the context of Run, then don't do
// anything. This could correspond to a MessageBox call or something of
@@ -382,7 +438,7 @@ bool MessagePumpForUI::ProcessMessageHelper(const MSG& msg) {
}
// While running our main message pump, we discard kMsgHaveWork messages.
- if (msg.message == kMsgHaveWork && msg.hwnd == message_hwnd_)
+ if (msg.message == kMsgHaveWork && msg.hwnd == window_->hwnd())
return ProcessPumpReplacementMessage();
if (CallMsgFilter(const_cast<MSG*>(&msg), kMessageFilterCode))
@@ -410,7 +466,7 @@ bool MessagePumpForUI::ProcessPumpReplacementMessage() {
// goal is to make the kMsgHaveWork as non-intrusive as possible, even though
// a continuous stream of such messages are posted. This method carefully
// peeks a message while there is no chance for a kMsgHaveWork to be pending,
- // then resets the have_work_ flag (allowing a replacement kMsgHaveWork to
+ // then resets the pump_state_ flag (allowing a replacement kMsgHaveWork to
// possibly be posted), and finally dispatches that peeked replacement. Note
// that the re-post of kMsgHaveWork may be asynchronous to this thread!!
@@ -429,11 +485,11 @@ bool MessagePumpForUI::ProcessPumpReplacementMessage() {
}
DCHECK(!have_message || kMsgHaveWork != msg.message ||
- msg.hwnd != message_hwnd_);
+ msg.hwnd != window_->hwnd());
// Since we discarded a kMsgHaveWork message, we must update the flag.
- int old_have_work = InterlockedExchange(&have_work_, 0);
- DCHECK(old_have_work);
+ int old_pump_state = InterlockedExchange(&pump_state_, kPumpIdle);
+ DCHECK(old_pump_state);
// We don't need a special time slice if we didn't have_message to process.
if (!have_message)
@@ -461,7 +517,7 @@ MessagePumpForIO::MessagePumpForIO() {
}
void MessagePumpForIO::ScheduleWork() {
- if (InterlockedExchange(&have_work_, 1))
+ if (InterlockedExchange(&pump_state_, kPumpHaveWork))
return; // Someone else continued the pumping.
// Make sure the MessagePump does some work for us.
@@ -472,7 +528,7 @@ void MessagePumpForIO::ScheduleWork() {
return; // Post worked perfectly.
// See comment in MessagePumpForUI::ScheduleWork() for this error recovery.
- InterlockedExchange(&have_work_, 0); // Clarify that we didn't succeed.
+ InterlockedExchange(&pump_state_, kPumpIdle);
UMA_HISTOGRAM_ENUMERATION("Chrome.MessageLoopProblem", COMPLETION_POST_ERROR,
MESSAGE_LOOP_PROBLEM_MAX);
}
@@ -617,7 +673,7 @@ bool MessagePumpForIO::ProcessInternalIOItem(const IOItem& item) {
this == reinterpret_cast<MessagePumpForIO*>(item.handler)) {
// This is our internal completion.
DCHECK(!item.bytes_transfered);
- InterlockedExchange(&have_work_, 0);
+ InterlockedExchange(&pump_state_, kPumpIdle);
return true;
}
return false;