Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(305)

Side by Side Diff: base/message_pump_win.cc

Issue 10392163: Handle rare conditions, like message queue exhaustion (Closed) Base URL: svn://chrome-svn/chrome/trunk/src/
Patch Set: Created 8 years, 7 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « no previous file | no next file » | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/message_pump_win.h" 5 #include "base/message_pump_win.h"
6 6
7 #include <math.h> 7 #include <math.h>
8 8
9 #include "base/message_loop.h" 9 #include "base/message_loop.h"
10 #include "base/metrics/histogram.h" 10 #include "base/metrics/histogram.h"
11 #include "base/win/wrapped_window_proc.h" 11 #include "base/win/wrapped_window_proc.h"
12 12
namespace {

// Bucket values for the "Chrome.MessageLoopProblem" UMA histogram, recorded
// when a message pump fails to post its wake-up request.  Append new values
// before MESSAGE_LOOP_PROBLEM_MAX and never reorder: the numeric values are
// persisted in uploaded histograms.
enum MessageLoopProblems {
  MESSAGE_QUEUE_FULL,      // PostMessage() failed (UI pump queue exhausted).
  COMPLETION_POST_ERROR,   // PostQueuedCompletionStatus() failed (IO pump).
  MESSAGE_LOOP_PROBLEM_MAX,  // Bucket-count sentinel; keep last.
};

}  // namespace
22
13 namespace base { 23 namespace base {
14 24
15 static const wchar_t kWndClass[] = L"Chrome_MessagePumpWindow"; 25 static const wchar_t kWndClass[] = L"Chrome_MessagePumpWindow";
16 26
17 // Message sent to get an additional time slice for pumping (processing) another 27 // Message sent to get an additional time slice for pumping (processing) another
18 // task (a series of such messages creates a continuous task pump). 28 // task (a series of such messages creates a continuous task pump).
19 static const int kMsgHaveWork = WM_USER + 1; 29 static const int kMsgHaveWork = WM_USER + 1;
20 30
21 //----------------------------------------------------------------------------- 31 //-----------------------------------------------------------------------------
22 // MessagePumpWin public: 32 // MessagePumpWin public:
(...skipping 66 matching lines...) Expand 10 before | Expand all | Expand 10 after
89 MessagePumpForUI::~MessagePumpForUI() { 99 MessagePumpForUI::~MessagePumpForUI() {
90 DestroyWindow(message_hwnd_); 100 DestroyWindow(message_hwnd_);
91 UnregisterClass(kWndClass, GetModuleHandle(NULL)); 101 UnregisterClass(kWndClass, GetModuleHandle(NULL));
92 } 102 }
93 103
94 void MessagePumpForUI::ScheduleWork() { 104 void MessagePumpForUI::ScheduleWork() {
95 if (InterlockedExchange(&have_work_, 1)) 105 if (InterlockedExchange(&have_work_, 1))
96 return; // Someone else continued the pumping. 106 return; // Someone else continued the pumping.
97 107
98 // Make sure the MessagePump does some work for us. 108 // Make sure the MessagePump does some work for us.
99 PostMessage(message_hwnd_, kMsgHaveWork, reinterpret_cast<WPARAM>(this), 0); 109 BOOL ret = PostMessage(message_hwnd_, kMsgHaveWork,
110 reinterpret_cast<WPARAM>(this), 0);
111 if (ret)
112 return; // There was room in the Window Message queue.
113
114 // We have failed to insert a have-work message, so there is a chance that we
115 // will starve tasks/timers while sitting in a nested message loop. Nested
116 // loops only look at Windows Message queues, and don't look at *our* task
117 // queues, etc. Without the have-work message, we might not get a time slice
118 // in such a nested message loop. :-(
119 // We could abort here, but the fear is that this failure mode is plausibly
120 // common (queue is full, of about 2000 messages), so we'll do a near-graceful
121 // recovery. We'll put the have_work_ flag back in the state to indicate that
122 // there is no have-work message, and the next time someone posts a task to
123 // our Chromium task queue (or other processing that calls for
124 // ScheduleWork()), we will get back-on-track and be able to avoid starvation
125 // in nested loops. Nested loops are pretty transient (we think), so this
126 // will probably be workable.
rvargas (doing something else) 2012/05/19 00:19:11 I cannot say don't comment the code... but the oth
jar (doing other things) 2012/05/19 01:01:20 I shortened the more redundant section.. On 2012/
127 InterlockedExchange(&have_work_, 0); // Clarify that we didn't really insert.
128 UMA_HISTOGRAM_ENUMERATION("Chrome.MessageLoopProblem", MESSAGE_QUEUE_FULL,
129 MESSAGE_LOOP_PROBLEM_MAX);
100 } 130 }
101 131
102 void MessagePumpForUI::ScheduleDelayedWork(const TimeTicks& delayed_work_time) { 132 void MessagePumpForUI::ScheduleDelayedWork(const TimeTicks& delayed_work_time) {
103 // 133 //
104 // We would *like* to provide high resolution timers. Windows timers using 134 // We would *like* to provide high resolution timers. Windows timers using
105 // SetTimer() have a 10ms granularity. We have to use WM_TIMER as a wakeup 135 // SetTimer() have a 10ms granularity. We have to use WM_TIMER as a wakeup
106 // mechanism because the application can enter modal windows loops where it 136 // mechanism because the application can enter modal windows loops where it
107 // is not running our MessageLoop; the only way to have our timers fire in 137 // is not running our MessageLoop; the only way to have our timers fire in
108 // these cases is to post messages there. 138 // these cases is to post messages there.
109 // 139 //
(...skipping 12 matching lines...) Expand all
122 // 152 //
123 delayed_work_time_ = delayed_work_time; 153 delayed_work_time_ = delayed_work_time;
124 154
125 int delay_msec = GetCurrentDelay(); 155 int delay_msec = GetCurrentDelay();
126 DCHECK_GE(delay_msec, 0); 156 DCHECK_GE(delay_msec, 0);
127 if (delay_msec < USER_TIMER_MINIMUM) 157 if (delay_msec < USER_TIMER_MINIMUM)
128 delay_msec = USER_TIMER_MINIMUM; 158 delay_msec = USER_TIMER_MINIMUM;
129 159
130 // Create a WM_TIMER event that will wake us up to check for any pending 160 // Create a WM_TIMER event that will wake us up to check for any pending
131 // timers (in case we are running within a nested, external sub-pump). 161 // timers (in case we are running within a nested, external sub-pump).
132 SetTimer(message_hwnd_, reinterpret_cast<UINT_PTR>(this), delay_msec, NULL); 162 BOOL ret = SetTimer(message_hwnd_, reinterpret_cast<UINT_PTR>(this),
163 delay_msec, NULL);
164 CHECK(ret); // If we can't set timers, we are in big trouble.
rvargas (doing something else) 2012/05/19 00:19:11 I'd say do a GetLastError before crashing... and b
jar (doing other things) 2012/05/19 01:01:20 I switched to histogramming the presence of this e
133 } 165 }
134 166
135 void MessagePumpForUI::PumpOutPendingPaintMessages() { 167 void MessagePumpForUI::PumpOutPendingPaintMessages() {
136 // If we are being called outside of the context of Run, then don't try to do 168 // If we are being called outside of the context of Run, then don't try to do
137 // any work. 169 // any work.
138 if (!state_) 170 if (!state_)
139 return; 171 return;
140 172
141 // Create a mini-message-pump to force immediate processing of only Windows 173 // Create a mini-message-pump to force immediate processing of only Windows
142 // WM_PAINT messages. Don't provide an infinite loop, but do enough peeking 174 // WM_PAINT messages. Don't provide an infinite loop, but do enough peeking
(...skipping 270 matching lines...) Expand 10 before | Expand all | Expand 10 after
413 } 445 }
414 446
415 void MessagePumpForIO::ScheduleWork() { 447 void MessagePumpForIO::ScheduleWork() {
416 if (InterlockedExchange(&have_work_, 1)) 448 if (InterlockedExchange(&have_work_, 1))
417 return; // Someone else continued the pumping. 449 return; // Someone else continued the pumping.
418 450
419 // Make sure the MessagePump does some work for us. 451 // Make sure the MessagePump does some work for us.
420 BOOL ret = PostQueuedCompletionStatus(port_, 0, 452 BOOL ret = PostQueuedCompletionStatus(port_, 0,
421 reinterpret_cast<ULONG_PTR>(this), 453 reinterpret_cast<ULONG_PTR>(this),
422 reinterpret_cast<OVERLAPPED*>(this)); 454 reinterpret_cast<OVERLAPPED*>(this));
423 DCHECK(ret); 455 if (ret)
456 return; // Post worked perfectly.
457
458 // See comment in MessagePumpForUI::ScheduleWork() for this error recovery.
459 InterlockedExchange(&have_work_, 0); // Clarify that we didn't succeed.
460 UMA_HISTOGRAM_ENUMERATION("Chrome.MessageLoopProblem", COMPLETION_POST_ERROR,
461 MESSAGE_LOOP_PROBLEM_MAX);
424 } 462 }
425 463
426 void MessagePumpForIO::ScheduleDelayedWork(const TimeTicks& delayed_work_time) { 464 void MessagePumpForIO::ScheduleDelayedWork(const TimeTicks& delayed_work_time) {
427 // We know that we can't be blocked right now since this method can only be 465 // We know that we can't be blocked right now since this method can only be
428 // called on the same thread as Run, so we only need to update our record of 466 // called on the same thread as Run, so we only need to update our record of
429 // how long to sleep when we do sleep. 467 // how long to sleep when we do sleep.
430 delayed_work_time_ = delayed_work_time; 468 delayed_work_time_ = delayed_work_time;
431 } 469 }
432 470
433 void MessagePumpForIO::RegisterIOHandler(HANDLE file_handle, 471 void MessagePumpForIO::RegisterIOHandler(HANDLE file_handle,
(...skipping 140 matching lines...) Expand 10 before | Expand all | Expand 10 after
574 612
575 void MessagePumpForIO::WillProcessIOEvent() { 613 void MessagePumpForIO::WillProcessIOEvent() {
576 FOR_EACH_OBSERVER(IOObserver, io_observers_, WillProcessIOEvent()); 614 FOR_EACH_OBSERVER(IOObserver, io_observers_, WillProcessIOEvent());
577 } 615 }
578 616
579 void MessagePumpForIO::DidProcessIOEvent() { 617 void MessagePumpForIO::DidProcessIOEvent() {
580 FOR_EACH_OBSERVER(IOObserver, io_observers_, DidProcessIOEvent()); 618 FOR_EACH_OBSERVER(IOObserver, io_observers_, DidProcessIOEvent());
581 } 619 }
582 620
583 } // namespace base 621 } // namespace base
OLDNEW
« no previous file with comments | « no previous file | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698