// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// For WinDDK ATL compatibility, these ATL headers must come first.
#include "build/build_config.h"
#if defined(OS_WIN)
#include <atlbase.h>  // NOLINT
#include <atlwin.h>  // NOLINT
#endif

#include "chrome/browser/ui/views/autocomplete/autocomplete_result_view.h"

#include <algorithm>  // NOLINT

#include "base/i18n/bidi_line_iterator.h"
#include "chrome/browser/autocomplete/autocomplete_popup_model.h"
#include "chrome/browser/ui/views/autocomplete/autocomplete_result_view_model.h"
#include "chrome/browser/ui/views/location_bar/location_bar_view.h"
#include "grit/generated_resources.h"
#include "grit/theme_resources.h"
#include "ui/base/l10n/l10n_util.h"
#include "ui/base/native_theme/native_theme.h"
#include "ui/base/resource/resource_bundle.h"
#include "ui/base/text/text_elider.h"
#include "ui/gfx/canvas.h"
#include "ui/gfx/color_utils.h"

namespace {

const char16 kEllipsis[] = { 0x2026, 0x0 };

// The minimum distance between the top and bottom of the {icon|text} and the
// top or bottom of the row.
const int kMinimumIconVerticalPadding = 2;
const int kMinimumTextVerticalPadding = 3;

}  // namespace

////////////////////////////////////////////////////////////////////////////////
// AutocompleteResultView, public:

// Precalculated data used to draw the portion of a match classification that
// fits entirely within one run.
struct AutocompleteResultView::ClassificationData {
  string16 text;
  const gfx::Font* font;
  SkColor color;
  gfx::Size pixel_size;
};

// Precalculated data used to draw a complete visual run within the match.
// This will include all or part of at least one, and possibly several,
// classifications.
struct AutocompleteResultView::RunData {
  size_t run_start;  // Offset within the match text where this run begins.
  int visual_order;  // Where this run occurs in visual order.  The earliest
                     // run drawn is run 0.
  bool is_rtl;
  int pixel_width;
  Classifications classifications;  // Classification pieces within this run,
                                    // in logical order.
};

// This class is a utility class for calculations affected by whether the
// result view is horizontally mirrored.  The drawing functions can be written
// as if all drawing occurs left-to-right, and then use this class to get the
// actual coordinates to begin drawing onscreen.
class AutocompleteResultView::MirroringContext {
 public:
  MirroringContext() : center_(0), right_(0) {}

  // Tells the mirroring context to use the provided range as the physical
  // bounds of the drawing region.  When coordinate mirroring is needed, the
  // mirror point will be the center of this range.
  void Initialize(int x, int width) {
    center_ = x + width / 2;
    right_ = x + width;
  }

  // Given a logical range within the drawing region, returns the coordinate of
  // the possibly-mirrored "left" side.  (This functions exactly like
  // View::MirroredLeftPointForRect().)
  int mirrored_left_coord(int left, int right) const {
    return base::i18n::IsRTL() ? (center_ + (center_ - right)) : left;
  }

  // Given a logical coordinate within the drawing region, returns the
  // remaining width available.
  int remaining_width(int x) const {
    return right_ - x;
  }

 private:
  int center_;
  int right_;

  DISALLOW_COPY_AND_ASSIGN(MirroringContext);
};

AutocompleteResultView::AutocompleteResultView(
    AutocompleteResultViewModel* model,
    int model_index,
    const gfx::Font& font,
    const gfx::Font& bold_font)
    : edge_item_padding_(LocationBarView::GetItemPadding()),
      item_padding_(LocationBarView::GetItemPadding()),
      minimum_text_vertical_padding_(kMinimumTextVerticalPadding),
      model_(model),
      model_index_(model_index),
      normal_font_(font),
      bold_font_(bold_font),
      ellipsis_width_(font.GetStringWidth(string16(kEllipsis))),
      mirroring_context_(new MirroringContext()),
      keyword_icon_(new views::ImageView()),
      ALLOW_THIS_IN_INITIALIZER_LIST(
          animation_(new ui::SlideAnimation(this))) {
  CHECK_GE(model_index, 0);
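  // Cache the width of the default icon image the first time any result view
  // is constructed; layout below positions text using this shared width.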
  if (default_icon_size_ == 0) {
    default_icon_size_ =
        ui::ResourceBundle::GetSharedInstance().GetImageSkiaNamed(
            AutocompleteMatch::TypeToIcon(
                AutocompleteMatch::URL_WHAT_YOU_TYPED))->width();
  }
  keyword_icon_->set_owned_by_client();
  keyword_icon_->EnableCanvasFlippingForRTLUI(true);
  keyword_icon_->SetImage(GetKeywordIcon());
  keyword_icon_->SizeToPreferredSize();
}

AutocompleteResultView::~AutocompleteResultView() {
}

// static
SkColor AutocompleteResultView::GetColor(ResultViewState state,
                                         ColorKind kind) {
  static bool initialized = false;
  static SkColor colors[NUM_STATES][NUM_KINDS];
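  // The color table is computed once on first use and cached; it is not
  // refreshed if the system theme later changes.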
  if (!initialized) {
#if defined(OS_WIN)
    colors[NORMAL][BACKGROUND] = color_utils::GetSysSkColor(COLOR_WINDOW);
    colors[SELECTED][BACKGROUND] = color_utils::GetSysSkColor(COLOR_HIGHLIGHT);
    colors[NORMAL][TEXT] = color_utils::GetSysSkColor(COLOR_WINDOWTEXT);
    colors[SELECTED][TEXT] = color_utils::GetSysSkColor(COLOR_HIGHLIGHTTEXT);
#elif defined(USE_AURA)
    const ui::NativeTheme* theme = ui::NativeTheme::instance();
    colors[SELECTED][BACKGROUND] = theme->GetSystemColor(
        ui::NativeTheme::kColorId_TextfieldSelectionBackgroundFocused);
    colors[NORMAL][BACKGROUND] = theme->GetSystemColor(
        ui::NativeTheme::kColorId_TextfieldDefaultBackground);
    colors[NORMAL][URL] = SkColorSetARGB(0xff, 0x00, 0x99, 0x33);
    colors[SELECTED][URL] = SkColorSetARGB(0xff, 0x00, 0x66, 0x22);
    colors[HOVERED][URL] = SkColorSetARGB(0xff, 0x00, 0x66, 0x22);
#else
    // TODO(beng): source from theme provider.
    colors[NORMAL][BACKGROUND] = SK_ColorWHITE;
    colors[SELECTED][BACKGROUND] = SK_ColorBLUE;
    colors[NORMAL][TEXT] = SK_ColorBLACK;
    colors[SELECTED][TEXT] = SK_ColorWHITE;
#endif
    colors[HOVERED][BACKGROUND] =
        color_utils::AlphaBlend(colors[SELECTED][BACKGROUND],
                                colors[NORMAL][BACKGROUND], 64);
    colors[HOVERED][TEXT] = colors[NORMAL][TEXT];
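    // Derive the remaining per-state colors by alpha-blending against each
    // state's background color.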
    for (int i = 0; i < NUM_STATES; ++i) {
#if defined(USE_AURA)
      colors[i][TEXT] =
          color_utils::AlphaBlend(SK_ColorBLACK, colors[i][BACKGROUND], 0xdd);
      colors[i][DIMMED_TEXT] =
          color_utils::AlphaBlend(SK_ColorBLACK, colors[i][BACKGROUND], 0xbb);
#else
      colors[i][DIMMED_TEXT] =
          color_utils::AlphaBlend(colors[i][TEXT], colors[i][BACKGROUND], 128);
      colors[i][URL] = color_utils::GetReadableColor(SkColorSetRGB(0, 128, 0),
                                                     colors[i][BACKGROUND]);
#endif

      // TODO(joi): Programmatically draw the dropdown border using
      // this color as well.  (Right now it's drawn as black with 25%
      // alpha.)
      colors[i][DIVIDER] =
          color_utils::AlphaBlend(colors[i][TEXT], colors[i][BACKGROUND], 0x34);
    }
    initialized = true;
  }

  return colors[state][kind];
}

void AutocompleteResultView::SetMatch(const AutocompleteMatch& match) {
  match_ = match;
  animation_->Reset();

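  // Attach the keyword hint icon as a child view only when this match has an
  // associated keyword; detach it otherwise.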
  if (match.associated_keyword.get()) {
    keyword_icon_->SetImage(GetKeywordIcon());

    if (!keyword_icon_->parent())
      AddChildView(keyword_icon_.get());
  } else if (keyword_icon_->parent()) {
    RemoveChildView(keyword_icon_.get());
  }

  Layout();
}

void AutocompleteResultView::ShowKeyword(bool show_keyword) {
  if (show_keyword)
    animation_->Show();
  else
    animation_->Hide();
}

void AutocompleteResultView::Invalidate() {
  keyword_icon_->SetImage(GetKeywordIcon());
  SchedulePaint();
}

gfx::Size AutocompleteResultView::GetPreferredSize() {
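  // The row must be tall enough for whichever is taller, the icon or the
  // text, each with its minimum vertical padding on both sides.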
  return gfx::Size(0, std::max(
      default_icon_size_ + (kMinimumIconVerticalPadding * 2),
      GetTextHeight() + (minimum_text_vertical_padding_ * 2)));
}

////////////////////////////////////////////////////////////////////////////////
// AutocompleteResultView, protected:

AutocompleteResultView::ResultViewState
    AutocompleteResultView::GetState() const {
  if (model_->IsSelectedIndex(model_index_))
    return SELECTED;
  return model_->IsHoveredIndex(model_index_) ? HOVERED : NORMAL;
}

void AutocompleteResultView::PaintMatch(gfx::Canvas* canvas,
                                        const AutocompleteMatch& match,
                                        int x) {
  x = DrawString(canvas, match.contents, match.contents_class, false, x,
                 text_bounds_.y());

  // Paint the description.
  // TODO(pkasting): Because we paint in multiple separate pieces, we can wind
  // up with no space even for an ellipsis for one or both of these pieces.
  // Instead, we should paint the entire match as a single long string.  This
  // would also let us use a more properly-localizable string than we get with
  // just the IDS_AUTOCOMPLETE_MATCH_DESCRIPTION_SEPARATOR.
  if (!match.description.empty()) {
    string16 separator =
        l10n_util::GetStringUTF16(IDS_AUTOCOMPLETE_MATCH_DESCRIPTION_SEPARATOR);
    ACMatchClassifications classifications;
    classifications.push_back(
        ACMatchClassification(0, ACMatchClassification::NONE));
    x = DrawString(canvas, separator, classifications, true, x,
                   text_bounds_.y());

    DrawString(canvas, match.description, match.description_class, true, x,
               text_bounds_.y());
  }
}

int AutocompleteResultView::GetTextHeight() const {
  return std::max(normal_font_.GetHeight(), bold_font_.GetHeight());
}

// static
bool AutocompleteResultView::SortRunsLogically(const RunData& lhs,
                                               const RunData& rhs) {
  return lhs.run_start < rhs.run_start;
}

// static
bool AutocompleteResultView::SortRunsVisually(const RunData& lhs,
                                              const RunData& rhs) {
  return lhs.visual_order < rhs.visual_order;
}

// static
int AutocompleteResultView::default_icon_size_ = 0;

const SkBitmap* AutocompleteResultView::GetIcon() const {
  const SkBitmap* bitmap = model_->GetIconIfExtensionMatch(model_index_);
  if (bitmap)
    return bitmap;

  int icon = match_.starred ?
      IDR_OMNIBOX_STAR : AutocompleteMatch::TypeToIcon(match_.type);
  if (GetState() == SELECTED) {
    switch (icon) {
      case IDR_OMNIBOX_EXTENSION_APP:
        icon = IDR_OMNIBOX_EXTENSION_APP_SELECTED;
        break;
      case IDR_OMNIBOX_HTTP:
        icon = IDR_OMNIBOX_HTTP_SELECTED;
        break;
      case IDR_OMNIBOX_SEARCH:
        icon = IDR_OMNIBOX_SEARCH_SELECTED;
        break;
      case IDR_OMNIBOX_STAR:
        icon = IDR_OMNIBOX_STAR_SELECTED;
        break;
      default:
        NOTREACHED();
        break;
    }
  }
  return ui::ResourceBundle::GetSharedInstance().GetBitmapNamed(icon);
}

const gfx::ImageSkia* AutocompleteResultView::GetKeywordIcon() const {
  // NOTE: If we ever begin returning icons of varying size, then callers need
  // to ensure that |keyword_icon_| is resized each time its image is reset.
  return ui::ResourceBundle::GetSharedInstance().GetImageSkiaNamed(
      (GetState() == SELECTED) ? IDR_OMNIBOX_TTS_SELECTED : IDR_OMNIBOX_TTS);
}

int AutocompleteResultView::DrawString(
    gfx::Canvas* canvas,
    const string16& text,
    const ACMatchClassifications& classifications,
    bool force_dim,
    int x,
    int y) {
  if (text.empty())
    return x;

  // Check whether or not this text is a URL.  URLs are always displayed LTR
  // regardless of locale.
  bool is_url = true;
  for (ACMatchClassifications::const_iterator i(classifications.begin());
       i != classifications.end(); ++i) {
    if (!(i->style & ACMatchClassification::URL)) {
      is_url = false;
      break;
    }
  }

  // Split the text into visual runs.  We do this first so that we don't need
  // to worry about whether our eliding might change the visual display in
  // unintended ways, e.g. by removing directional markings or by adding an
  // ellipsis that's not enclosed in appropriate markings.
  base::i18n::BiDiLineIterator bidi_line;
  if (!bidi_line.Open(text, base::i18n::IsRTL(), is_url))
    return x;
  const int num_runs = bidi_line.CountRuns();
  Runs runs;
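  // Build one RunData entry per visual run, recording the classification
  // pieces that fall (wholly or partly) inside that run.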
  for (int run = 0; run < num_runs; ++run) {
    int run_start_int = 0, run_length_int = 0;
    // The index we pass to GetVisualRun corresponds to the position of the
    // run in the displayed text.  For example, the string "Google in HEBREW"
    // (where HEBREW is text in the Hebrew language) has two runs: "Google in "
    // which is an LTR run, and "HEBREW" which is an RTL run.  In an LTR
    // context, the run "Google in " has the index 0 (since it is the leftmost
    // run displayed).  In an RTL context, the same run has the index 1 because
    // it is the rightmost run.  This is why the order in which we traverse the
    // runs is different depending on the locale direction.
    const UBiDiDirection run_direction = bidi_line.GetVisualRun(
        (base::i18n::IsRTL() && !is_url) ? (num_runs - run - 1) : run,
        &run_start_int, &run_length_int);
    DCHECK_GT(run_length_int, 0);
    runs.push_back(RunData());
    RunData* current_run = &runs.back();
    current_run->run_start = run_start_int;
    const size_t run_end = current_run->run_start + run_length_int;
    current_run->visual_order = run;
    current_run->is_rtl = !is_url && (run_direction == UBIDI_RTL);

    // Compute classifications for this run.
    for (size_t i = 0; i < classifications.size(); ++i) {
      const size_t text_start =
          std::max(classifications[i].offset, current_run->run_start);
      if (text_start >= run_end)
        break;  // We're past the last classification in the run.

      const size_t text_end = (i < (classifications.size() - 1)) ?
          std::min(classifications[i + 1].offset, run_end) : run_end;
      if (text_end <= current_run->run_start)
        continue;  // We haven't reached the first classification in the run.

      current_run->classifications.push_back(ClassificationData());
      ClassificationData* current_data =
          &current_run->classifications.back();
      current_data->text = text.substr(text_start, text_end - text_start);

      // Calculate style-related data.
      const int style = classifications[i].style;
      const bool use_bold_font = !!(style & ACMatchClassification::MATCH);
      current_data->font = &(use_bold_font ? bold_font_ : normal_font_);
      const ResultViewState state = GetState();
      if (style & ACMatchClassification::URL)
        current_data->color = GetColor(state, URL);
      else if (style & ACMatchClassification::DIM)
        current_data->color = GetColor(state, DIMMED_TEXT);
      else
        current_data->color = GetColor(state, force_dim ? DIMMED_TEXT : TEXT);
      int width = 0;
      int height = 0;
      gfx::Canvas::SizeStringInt(current_data->text, *current_data->font,
                                 &width, &height, gfx::Canvas::NO_ELLIPSIS);
      current_data->pixel_size = gfx::Size(width, height);
      current_run->pixel_width += width;
    }
    DCHECK(!current_run->classifications.empty());
  }
  DCHECK(!runs.empty());

  // Sort into logical order so we can elide logically.
  std::sort(runs.begin(), runs.end(), &SortRunsLogically);

  // Now determine what to elide, if anything.  Several subtle points:
  //   * Because we have the run data, we can get edge cases correct, like
  //     whether to place an ellipsis before or after the end of a run when the
  //     text needs to be elided at the run boundary.
  //   * The "or one before it" comments below refer to cases where an earlier
  //     classification fits completely, but leaves too little space for an
  //     ellipsis that turns out to be needed later.  These cases are commented
  //     more completely in Elide().
  int remaining_width = mirroring_context_->remaining_width(x);
  for (Runs::iterator i(runs.begin()); i != runs.end(); ++i) {
    if (i->pixel_width > remaining_width) {
      // This run or one before it needs to be elided.
      for (Classifications::iterator j(i->classifications.begin());
           j != i->classifications.end(); ++j) {
        if (j->pixel_size.width() > remaining_width) {
          // This classification or one before it needs to be elided.  Erase
          // all further classifications and runs so Elide() can simply
          // reverse-iterate over everything to find the specific
          // classification to elide.
          i->classifications.erase(++j, i->classifications.end());
          runs.erase(++i, runs.end());
          Elide(&runs, remaining_width);
          break;
        }
        remaining_width -= j->pixel_size.width();
      }
      break;
    }
    remaining_width -= i->pixel_width;
  }

  // Sort back into visual order so we can display the runs correctly.
  std::sort(runs.begin(), runs.end(), &SortRunsVisually);

  // Draw the runs.
  for (Runs::iterator i(runs.begin()); i != runs.end(); ++i) {
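    // Classification pieces are stored in logical order; when this run's
    // direction differs from the overall UI direction, draw them reversed.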
    const bool reverse_visible_order = (i->is_rtl != base::i18n::IsRTL());
    int flags = gfx::Canvas::NO_ELLIPSIS;  // We've already elided.
    if (reverse_visible_order) {
      std::reverse(i->classifications.begin(), i->classifications.end());
      if (i->is_rtl)
        flags |= gfx::Canvas::FORCE_RTL_DIRECTIONALITY;
    }
    for (Classifications::const_iterator j(i->classifications.begin());
         j != i->classifications.end(); ++j) {
      const int left = mirroring_context_->mirrored_left_coord(
          x, x + j->pixel_size.width());
      // By passing the same y-coordinate for each run, we vertically align the
      // tops of successive runs.  This isn't actually what we want; we want to
      // align the baselines, but Canvas doesn't currently expose text
      // measurement APIs sufficient to make that happen.  The problem here is
      // font substitution: if no fonts are substituted, then all runs have the
      // same font (in bold or normal styles), and thus the same height and
      // same baseline.  If fonts are substituted within a run, the characters
      // are baseline-aligned within the run, but using the same top coordinate
      // as for other runs is only correct if the overall ascent for this run
      // is the same as for other runs -- that is, if the tallest ascent of all
      // fonts in the run is equal to the ascent of the normal font.  If this
      // condition doesn't hold, the baseline for this run will be drawn too
      // high or too low, depending on whether the run's tallest ascent is
      // shorter or higher than the normal font's ascent, respectively.
      //
      // TODO(asvitkine): Fix this by replacing the SizeStringInt() calls
      // elsewhere in this file with calls that can calculate actual baselines
      // even in the face of font fallback.  Tracked as: http://crbug.com/128027
      canvas->DrawStringInt(j->text, *j->font, j->color, left, y,
                            j->pixel_size.width(), j->pixel_size.height(),
                            flags);
      x += j->pixel_size.width();
    }
  }

  return x;
}

void AutocompleteResultView::Elide(Runs* runs, int remaining_width) const {
  // The complexity of this function is due to edge cases like the following:
  // We have 100 px of available space, an initial classification that takes
  // 86 px, and a font that has a 15 px wide ellipsis character.  Now if the
  // first classification is followed by several very narrow classifications
  // (e.g. 3 px wide each), we don't know whether we need to elide or not at
  // the time we see the first classification -- it depends on how many
  // subsequent classifications follow, and some of those may be in the next
  // run (or several runs!).  This is why instead we let our caller move
  // forward until we know we definitely need to elide, and then in this
  // function we move backward again until we find a string that we can
  // successfully do the eliding on.
  bool first_classification = true;
  for (Runs::reverse_iterator i(runs->rbegin()); i != runs->rend(); ++i) {
    for (Classifications::reverse_iterator j(i->classifications.rbegin());
         j != i->classifications.rend(); ++j) {
      if (!first_classification) {
        // For all but the first classification we consider, we need to append
        // an ellipsis, since there isn't enough room to draw it after this
        // classification.
        j->text += kEllipsis;

        // We also add this classification's width (sans ellipsis) back to the
        // available width since we want to consider the available space we'll
        // have when we draw this classification.
        remaining_width += j->pixel_size.width();
      }
      first_classification = false;

      // Can we fit at least an ellipsis?
      string16 elided_text =
          ui::ElideText(j->text, *j->font, remaining_width, ui::ELIDE_AT_END);
      Classifications::reverse_iterator prior_classification(j);
      ++prior_classification;
      const bool on_first_classification =
          (prior_classification == i->classifications.rend());
      if (elided_text.empty() && (remaining_width >= ellipsis_width_) &&
          on_first_classification) {
        // Edge case: This classification is bold, we can't fit a bold ellipsis
        // but we can fit a normal one, and this is the first classification in
        // the run.  We should display a lone normal ellipsis, because
        // appending one to the end of the previous run might put it in the
        // wrong visual location (if the previous run is reversed from the
        // normal visual order).
        // NOTE: If this isn't the first classification in the run, we don't
        // need to bother with this; see note below.
        elided_text = kEllipsis;
      }
      if (!elided_text.empty()) {
        // Success.  Elide this classification and stop.
        j->text = elided_text;

        // If we could only fit an ellipsis, then only make it bold if there
        // was an immediate prior classification in this run that was also
        // bold, or it will look orphaned.
        if ((elided_text.length() == 1) &&
            (on_first_classification ||
             (prior_classification->font == &normal_font_)))
          j->font = &normal_font_;

        int width = 0;
        int height = 0;
        gfx::Canvas::SizeStringInt(elided_text, *j->font, &width, &height,
                                   gfx::Canvas::NO_ELLIPSIS);
        j->pixel_size = gfx::Size(width, height);

        // Erase any other classifications that come after the elided one.
        i->classifications.erase(j.base(), i->classifications.end());
        runs->erase(i.base(), runs->end());
        return;
      }

      // We couldn't fit an ellipsis.  Move back one classification,
      // append an ellipsis, and try again.
      // NOTE: In the edge case that a bold ellipsis doesn't fit but a
      // normal one would, and we reach here, then there is a previous
      // classification in this run, and so either:
      //   * It's normal, and will be able to draw successfully with the
      //     ellipsis we'll append to it, or
      //   * It is also bold, in which case we don't want to fall back
      //     to a normal ellipsis anyway (see comment above).
    }
  }

  // We couldn't draw anything.
  runs->clear();
}

void AutocompleteResultView::Layout() {
  const SkBitmap* icon = GetIcon();

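  // Vertically center the icon; icons that are not the default size are inset
  // by LocationBarView::kIconInternalPadding.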
  icon_bounds_.SetRect(edge_item_padding_ +
      ((icon->width() == default_icon_size_) ?
          0 : LocationBarView::kIconInternalPadding),
      (height() - icon->height()) / 2, icon->width(), icon->height());

  int text_x = edge_item_padding_ + default_icon_size_ + item_padding_;
  int text_height = GetTextHeight();
  int text_width;

  if (match_.associated_keyword.get()) {
    const int kw_collapsed_size =
        keyword_icon_->width() + edge_item_padding_;
    const int max_kw_x = width() - kw_collapsed_size;
    const int kw_x =
        animation_->CurrentValueBetween(max_kw_x, edge_item_padding_);
    const int kw_text_x = kw_x + keyword_icon_->width() + item_padding_;

    text_width = kw_x - text_x - item_padding_;
    keyword_text_bounds_.SetRect(kw_text_x, 0,
        std::max(width() - kw_text_x - edge_item_padding_, 0), text_height);
    keyword_icon_->SetPosition(gfx::Point(kw_x,
        (height() - keyword_icon_->height()) / 2));
  } else {
    text_width = width() - text_x - edge_item_padding_;
  }

  text_bounds_.SetRect(text_x, std::max(0, (height() - text_height) / 2),
      std::max(text_width, 0), text_height);
}

void AutocompleteResultView::OnBoundsChanged(
    const gfx::Rect& previous_bounds) {
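  // Scale the keyword slide animation's duration with the view's width, so
  // wider views animate over proportionally more time.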
  animation_->SetSlideDuration(width() / 4);
}

void AutocompleteResultView::OnPaint(gfx::Canvas* canvas) {
  const ResultViewState state = GetState();
  if (state != NORMAL)
    canvas->DrawColor(GetColor(state, BACKGROUND));

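  // Paint the result icon and primary text unless the sliding keyword icon
  // has moved left far enough to cover them.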
  if (!match_.associated_keyword.get() ||
      keyword_icon_->x() > icon_bounds_.right()) {
    // Paint the icon.
    canvas->DrawImageInt(*GetIcon(), GetMirroredXForRect(icon_bounds_),
                         icon_bounds_.y());

    // Paint the text.
    int x = GetMirroredXForRect(text_bounds_);
    mirroring_context_->Initialize(x, text_bounds_.width());
    PaintMatch(canvas, match_, x);
  }

  if (match_.associated_keyword.get()) {
    // Paint the keyword text.
    int x = GetMirroredXForRect(keyword_text_bounds_);
    mirroring_context_->Initialize(x, keyword_text_bounds_.width());
    PaintMatch(canvas, *match_.associated_keyword.get(), x);
  }
}

void AutocompleteResultView::AnimationProgressed(
    const ui::Animation* animation) {
  Layout();
  SchedulePaint();
}