OLD | NEW |
---|---|
1 | 1 |
2 /* | 2 /* |
3 * Copyright 2011 Google Inc. | 3 * Copyright 2011 Google Inc. |
4 * | 4 * |
5 * Use of this source code is governed by a BSD-style license that can be | 5 * Use of this source code is governed by a BSD-style license that can be |
6 * found in the LICENSE file. | 6 * found in the LICENSE file. |
7 */ | 7 */ |
8 #ifndef SkPictureFlat_DEFINED | 8 #ifndef SkPictureFlat_DEFINED |
9 #define SkPictureFlat_DEFINED | 9 #define SkPictureFlat_DEFINED |
10 | 10 |
(...skipping 133 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
144 // | 144 // |
145 // The following templated classes provide an efficient way to store and compare | 145 // The following templated classes provide an efficient way to store and compare |
146 // objects that have been flattened (i.e. serialized in an ordered binary | 146 // objects that have been flattened (i.e. serialized in an ordered binary |
147 // format). | 147 // format). |
148 // | 148 // |
149 // SkFlatData: is a simple indexable container for the flattened data | 149 // SkFlatData: is a simple indexable container for the flattened data |
150 // which is agnostic to the type of data it is indexing. It is | 150 // which is agnostic to the type of data it is indexing. It is |
151 // also responsible for flattening/unflattening objects but | 151 // also responsible for flattening/unflattening objects but |
152 // details of that operation are hidden in the provided procs | 152 // details of that operation are hidden in the provided procs |
153 // SkFlatDictionary: is an abstract templated dictionary that maintains a | 153 // SkFlatDictionary: is an abstract templated dictionary that maintains a |
154 // searchable set of SkFlataData objects of type T. | 154 // searchable set of SkFlatData objects of type T. |
155 // SkFlatController: is an interface provided to SkFlatDictionary which handles | 155 // SkFlatController: is an interface provided to SkFlatDictionary which handles |
156 // allocation and unallocation in some cases. It also holds | 156 // allocation (and unallocation in some cases). It also holds |
157 // ref count recorders and the like. | 157 // ref count recorders and the like. |
158 // | 158 // |
159 // NOTE: any class that wishes to be used in conjunction with SkFlatDictionary | 159 // NOTE: any class that wishes to be used in conjunction with SkFlatDictionary |
160 // must subclass the dictionary and provide the necessary flattening procs. | 160 // must subclass the dictionary and provide the necessary flattening procs. |
161 // The end of this header contains dictionary subclasses for some common classes | 161 // The end of this header contains dictionary subclasses for some common classes |
162 // like SkBitmap, SkMatrix, SkPaint, and SkRegion. SkFlatController must also | 162 // like SkBitmap, SkMatrix, SkPaint, and SkRegion. SkFlatController must also |
163 // be implemented, or SkChunkFlatController can be used to use an | 163 // be implemented, or SkChunkFlatController can be used to use an |
164 // SkChunkAllocator and never do replacements. | 164 // SkChunkAllocator and never do replacements. |
165 // | 165 // |
166 // | 166 // |
167 /////////////////////////////////////////////////////////////////////////////// | 167 /////////////////////////////////////////////////////////////////////////////// |
168 | 168 |
169 class SkFlatData; | 169 class SkFlatData; |
170 | 170 |
171 class SkFlatController : public SkRefCnt { | 171 class SkFlatController : public SkRefCnt { |
172 public: | 172 public: |
173 SK_DECLARE_INST_COUNT(SkFlatController) | 173 SK_DECLARE_INST_COUNT(SkFlatController) |
174 | 174 |
175 SkFlatController(); | 175 SkFlatController(); |
176 virtual ~SkFlatController(); | 176 virtual ~SkFlatController(); |
177 /** | 177 /** |
178 * Provide a new block of memory for the SkFlatDictionary to use. | 178 * Return a new block of memory for the SkFlatDictionary to use. |
179 * This memory is owned by the controller and has the same lifetime unless you | |
180 * call unalloc(), in which case it may be freed early. | |
179 */ | 181 */ |
180 virtual void* allocThrow(size_t bytes) = 0; | 182 virtual void* allocThrow(size_t bytes) = 0; |
181 | 183 |
182 /** | 184 /** |
183 * Unallocate a previously allocated block, returned by allocThrow. | 185 * Hint that this block, which was allocated with allocThrow, is no longer needed. |
184 * Implementation should at least perform an unallocation if passed the last | 186 * The implementation may choose to free this memory any time between now and destruction. |
185 * pointer returned by allocThrow. If findAndReplace() is intended to be | |
186 * used, unalloc should also be able to unallocate the SkFlatData that is | |
187 * provided. | |
188 */ | 187 */ |
189 virtual void unalloc(void* ptr) = 0; | 188 virtual void unalloc(void* ptr) = 0; |
190 | 189 |
191 /** | 190 /** |
192 * Used during creation and unflattening of SkFlatData objects. If the | 191 * Used during creation and unflattening of SkFlatData objects. If the |
193 * objects being flattened contain bitmaps they are stored in this heap | 192 * objects being flattened contain bitmaps they are stored in this heap |
194 * and the flattenable stores the index to the bitmap on the heap. | 193 * and the flattenable stores the index to the bitmap on the heap. |
195 * This should be set by the protected setBitmapHeap. | 194 * This should be set by the protected setBitmapHeap. |
196 */ | 195 */ |
197 SkBitmapHeap* getBitmapHeap() { return fBitmapHeap; } | 196 SkBitmapHeap* getBitmapHeap() { return fBitmapHeap; } |
(...skipping 195 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
393 } | 392 } |
394 | 393 |
395 enum { | 394 enum { |
396 kInCache_Sentinel = 0, | 395 kInCache_Sentinel = 0, |
397 kCandidate_Sentinel = ~0U, | 396 kCandidate_Sentinel = ~0U, |
398 }; | 397 }; |
399 void setSentinel(uint32_t value) { | 398 void setSentinel(uint32_t value) { |
400 SkASSERT(SkIsAlign4(fFlatSize)); | 399 SkASSERT(SkIsAlign4(fFlatSize)); |
401 this->data32()[fFlatSize >> 2] = value; | 400 this->data32()[fFlatSize >> 2] = value; |
402 } | 401 } |
402 | |
403 // This does not modify the payload flat data, in case it's already been wri tten. | |
404 void stampHeaderAndSentinel(int index, int32_t size); | |
405 template <class T> friend class SkFlatDictionary; // For stampHeaderAndSent inel(). | |
403 }; | 406 }; |
404 | 407 |
405 template <class T> | 408 template <class T> |
406 class SkFlatDictionary { | 409 class SkFlatDictionary { |
410 static const size_t kWriteBufferGrowthBytes = 1024; | |
411 | |
407 public: | 412 public: |
408 SkFlatDictionary(SkFlatController* controller) | 413 SkFlatDictionary(SkFlatController* controller, size_t scratchSizeGuess = 0) |
409 : fController(controller) { | 414 : fFlattenProc(NULL) |
410 fFlattenProc = NULL; | 415 , fUnflattenProc(NULL) |
411 fUnflattenProc = NULL; | 416 , fController(SkRef(controller)) |
412 SkASSERT(controller); | 417 , fScratchSize(scratchSizeGuess) |
413 fController->ref(); | 418 , fScratch(AllocScratch(fScratchSize)) |
414 // set to 1 since returning a zero from find() indicates failure | 419 , fWriteBuffer(kWriteBufferGrowthBytes) |
415 fNextIndex = 1; | 420 , fWriteBufferReady(false) |
421 , fNextIndex(1) { // set to 1 since returning a zero from find() indicates failure | |
416 sk_bzero(fHash, sizeof(fHash)); | 422 sk_bzero(fHash, sizeof(fHash)); |
417 // index 0 is always empty since it is used as a signal that find failed | 423 // index 0 is always empty since it is used as a signal that find failed |
418 fIndexedData.push(NULL); | 424 fIndexedData.push(NULL); |
419 } | 425 } |
420 | 426 |
421 virtual ~SkFlatDictionary() { | 427 ~SkFlatDictionary() { |
422 fController->unref(); | 428 sk_free(fScratch); |
423 } | 429 } |
424 | 430 |
425 int count() const { | 431 int count() const { |
426 SkASSERT(fIndexedData.count() == fSortedData.count()+1); | 432 SkASSERT(fIndexedData.count() == fSortedData.count()+1); |
427 return fSortedData.count(); | 433 return fSortedData.count(); |
428 } | 434 } |
429 | 435 |
430 const SkFlatData* operator[](int index) const { | 436 const SkFlatData* operator[](int index) const { |
431 SkASSERT(index >= 0 && index < fSortedData.count()); | 437 SkASSERT(index >= 0 && index < fSortedData.count()); |
432 return fSortedData[index]; | 438 return fSortedData[index]; |
(...skipping 92 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
525 SkASSERT(fIndexedData.count() == fSortedData.count()+1); | 531 SkASSERT(fIndexedData.count() == fSortedData.count()+1); |
526 const SkFlatData* element = fIndexedData[index]; | 532 const SkFlatData* element = fIndexedData[index]; |
527 SkASSERT(index == element->index()); | 533 SkASSERT(index == element->index()); |
528 | 534 |
529 T* dst = new T; | 535 T* dst = new T; |
530 this->unflatten(dst, element); | 536 this->unflatten(dst, element); |
531 return dst; | 537 return dst; |
532 } | 538 } |
533 | 539 |
534 const SkFlatData* findAndReturnFlat(const T& element) { | 540 const SkFlatData* findAndReturnFlat(const T& element) { |
535 SkFlatData* flat = SkFlatData::Create(fController, &element, fNextIndex, fFlattenProc); | 541 // Only valid until the next call to resetScratch(). |
542 const SkFlatData& scratch = this->resetScratch(element, fNextIndex); | |
536 | 543 |
537 int hashIndex = ChecksumToHashIndex(flat->checksum()); | 544 // See if we have it in the hash? |
545 const int hashIndex = ChecksumToHashIndex(scratch.checksum()); | |
538 const SkFlatData* candidate = fHash[hashIndex]; | 546 const SkFlatData* candidate = fHash[hashIndex]; |
539 if (candidate && !SkFlatData::Compare(*flat, *candidate)) { | 547 if (candidate != NULL && SkFlatData::Compare(scratch, *candidate) == 0) { |
540 fController->unalloc(flat); | |
541 return candidate; | 548 return candidate; |
542 } | 549 } |
543 | 550 |
544 int index = SkTSearch<const SkFlatData, | 551 // See if we have it at all? |
545 SkFlatData::Less>((const SkFlatData**) fSortedData .begin(), | 552 const int index = SkTSearch<const SkFlatData, SkFlatData::Less>(fSortedD ata.begin(), |
546 fSortedData.count(), flat, sizeo f(flat)); | 553 fSortedD ata.count(), |
554 &scratch , | |
555 sizeof(& scratch)); | |
547 if (index >= 0) { | 556 if (index >= 0) { |
548 fController->unalloc(flat); | 557 // Found. Update hash before we return. |
549 fHash[hashIndex] = fSortedData[index]; | 558 fHash[hashIndex] = fSortedData[index]; |
550 return fSortedData[index]; | 559 return fSortedData[index]; |
551 } | 560 } |
552 | 561 |
553 index = ~index; | 562 // We don't have it. Add it. |
554 *fSortedData.insert(index) = flat; | 563 SkFlatData* detached = this->detachScratch(); |
555 *fIndexedData.insert(flat->index()) = flat; | 564 // detached will live beyond the next call to resetScratch(), but is owned by fController. |
565 *fSortedData.insert(~index) = detached; // SkTSearch returned bit-not of where to insert. |
566 *fIndexedData.insert(detached->index()) = detached; | |
567 fHash[hashIndex] = detached; | |
568 | |
569 SkASSERT(detached->index() == fNextIndex); | |
556 SkASSERT(fSortedData.count() == fNextIndex); | 570 SkASSERT(fSortedData.count() == fNextIndex); |
571 SkASSERT(fIndexedData.count() == fNextIndex+1); | |
557 fNextIndex++; | 572 fNextIndex++; |
558 flat->setSentinelInCache(); | 573 |
559 fHash[hashIndex] = flat; | 574 return detached; |
560 SkASSERT(fIndexedData.count() == fSortedData.count()+1); | |
561 return flat; | |
562 } | 575 } |
563 | 576 |
564 protected: | 577 protected: |
565 void (*fFlattenProc)(SkOrderedWriteBuffer&, const void*); | 578 void (*fFlattenProc)(SkOrderedWriteBuffer&, const void*); |
566 void (*fUnflattenProc)(SkOrderedReadBuffer&, void*); | 579 void (*fUnflattenProc)(SkOrderedReadBuffer&, void*); |
567 | 580 |
568 private: | 581 private: |
582 // Layout: [ SkFlatData header, 20 bytes ] [ data ..., 4-byte aligned ] [ sentinel, 4 bytes] |
583 static size_t SizeWithPadding(size_t flatDataSize) { | |
584 SkASSERT(SkIsAlign4(flatDataSize)); | |
585 return sizeof(SkFlatData) + flatDataSize + sizeof(uint32_t); | |
586 } | |
587 | |
588 // Allocate a new scratch SkFlatData. Must be sk_freed. | |
589 static SkFlatData* AllocScratch(size_t scratchSize) { | |
590 return (SkFlatData*) sk_malloc_throw(SizeWithPadding(scratchSize)); | |
591 } | |
592 | |
593 // We have to delay fWriteBuffer's initialization until its first use; fController might not |
594 // be fully set up by the time we get it in the constructor. | |
595 void lazyWriteBufferInit() { | |
596 if (fWriteBufferReady) { | |
597 return; | |
598 } | |
599 // Without a bitmap heap, we'll flatten bitmaps into paints. That's never what you want. |
600 SkASSERT(fController->getBitmapHeap() != NULL); | |
601 fWriteBuffer.setBitmapHeap(fController->getBitmapHeap()); | |
602 fWriteBuffer.setTypefaceRecorder(fController->getTypefaceSet()); | |
603 fWriteBuffer.setNamedFactoryRecorder(fController->getNamedFactorySet()); | |
604 fWriteBuffer.setFlags(fController->getWriteBufferFlags()); | |
605 fWriteBufferReady = true; | |
606 } | |
607 | |
608 // This reference is valid only until the next call to resetScratch() or detachScratch(). |
609 const SkFlatData& resetScratch(const T& element, int index) { | |
610 this->lazyWriteBufferInit(); | |
611 | |
612 // Flatten element into fWriteBuffer (using fScratch as storage). | |
613 fWriteBuffer.reset(fScratch->data(), fScratchSize); | |
614 fFlattenProc(fWriteBuffer, &element); | |
615 const size_t bytesWritten = fWriteBuffer.bytesWritten(); | |
616 | |
617 // If all the flattened bytes fit into fScratch, we can skip a call to writeToMemory. |
618 if (!fWriteBuffer.wroteOnlyToStorage()) { | |
619 SkASSERT(bytesWritten > fScratchSize); | |
620 // It didn't all fit. Copy into a larger replacement SkFlatData. | |
621 // We can't just realloc because it might move the pointer and confuse writeToMemory. |
622 SkFlatData* larger = AllocScratch(bytesWritten); | |
623 fWriteBuffer.writeToMemory(larger->data()); | |
624 | |
625 // Carry on with this larger scratch to minimize the likelihood of future resizing. |
626 sk_free(fScratch); | |
627 fScratchSize = bytesWritten; | |
628 fScratch = larger; | |
629 } | |
630 | |
631 // The data is in fScratch now, but we need to stamp its header and trailing sentinel. |
632 fScratch->stampHeaderAndSentinel(index, bytesWritten); | |
633 return *fScratch; | |
634 } | |
635 | |
636 // This result is owned by fController and lives as long as it does (unless unalloc'd). | |
637 SkFlatData* detachScratch() { | |
638 // Allocate a new SkFlatData exactly big enough to hold our current scratch. |
639 // We use the controller for this allocation to extend the allocation's lifetime and allow | |
640 // the controller to do whatever memory management it wants. | |
641 const size_t paddedSize = SizeWithPadding(fScratch->flatSize()); | |
642 SkFlatData* detached = (SkFlatData*)fController->allocThrow(paddedSize); | |
643 | |
644 // Copy scratch into the new SkFlatData, setting the sentinel for cache storage. | |
645 memcpy(detached, fScratch, paddedSize); | |
646 detached->setSentinelInCache(); | |
647 | |
648 // We can now reuse fScratch, and detached will live until fController dies. |
649 return detached; | |
650 } | |
651 | |
569 void unflatten(T* dst, const SkFlatData* element) const { | 652 void unflatten(T* dst, const SkFlatData* element) const { |
570 element->unflatten(dst, fUnflattenProc, | 653 element->unflatten(dst, fUnflattenProc, |
571 fController->getBitmapHeap(), | 654 fController->getBitmapHeap(), |
572 fController->getTypefacePlayback()); | 655 fController->getTypefacePlayback()); |
573 } | 656 } |
574 | 657 |
575 void unflattenIntoArray(T* array) const { | 658 void unflattenIntoArray(T* array) const { |
576 const int count = fSortedData.count(); | 659 const int count = fSortedData.count(); |
577 SkASSERT(fIndexedData.count() == fSortedData.count()+1); | 660 SkASSERT(fIndexedData.count() == fSortedData.count()+1); |
578 const SkFlatData* const* iter = fSortedData.begin(); | 661 const SkFlatData* const* iter = fSortedData.begin(); |
579 for (int i = 0; i < count; ++i) { | 662 for (int i = 0; i < count; ++i) { |
580 const SkFlatData* element = iter[i]; | 663 const SkFlatData* element = iter[i]; |
581 int index = element->index() - 1; | 664 int index = element->index() - 1; |
582 SkASSERT((unsigned)index < (unsigned)count); | 665 SkASSERT((unsigned)index < (unsigned)count); |
583 unflatten(&array[index], element); | 666 unflatten(&array[index], element); |
584 } | 667 } |
585 } | 668 } |
586 | 669 |
587 SkFlatController * const fController; | 670 SkAutoTUnref<SkFlatController> fController; |
588 int fNextIndex; | 671 size_t fScratchSize; // How many bytes fScratch has allocated for data itself. |
672 SkFlatData* fScratch; // Owned, must be freed with sk_free. | |
673 SkOrderedWriteBuffer fWriteBuffer; | |
674 bool fWriteBufferReady; | |
589 | 675 |
590 // SkFlatDictionary has two copies of the data one indexed by the | 676 // SkFlatDictionary has two copies of the data one indexed by the |
591 // SkFlatData's index and the other sorted. The sorted data is used | 677 // SkFlatData's index and the other sorted. The sorted data is used |
592 // for finding and uniquification while the indexed copy is used | 678 // for finding and uniquification while the indexed copy is used |
593 // for standard array-style lookups based on the SkFlatData's index | 679 // for standard array-style lookups based on the SkFlatData's index |
594 // (as in 'unflatten'). | 680 // (as in 'unflatten'). |
681 int fNextIndex; | |
595 SkTDArray<const SkFlatData*> fIndexedData; | 682 SkTDArray<const SkFlatData*> fIndexedData; |
596 // fSortedData is sorted by checksum/size/data. | 683 // fSortedData is sorted by checksum/size/data. |
597 SkTDArray<const SkFlatData*> fSortedData; | 684 SkTDArray<const SkFlatData*> fSortedData; |
598 | 685 |
599 enum { | 686 enum { |
600 // Determined by trying diff values on picture-recording benchmarks | 687 // Determined by trying diff values on picture-recording benchmarks |
601 // (e.g. PictureRecordBench.cpp), choosing the smallest value that | 688 // (e.g. PictureRecordBench.cpp), choosing the smallest value that |
602 // showed a big improvement. Even better would be to benchmark diff | 689 // showed a big improvement. Even better would be to benchmark diff |
603 // values on recording representative web-pages or other "real" content. | 690 // values on recording representative web-pages or other "real" content. |
604 HASH_BITS = 7, | 691 HASH_BITS = 7, |
(...skipping 33 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
638 | 725 |
639 class SkChunkFlatController : public SkFlatController { | 726 class SkChunkFlatController : public SkFlatController { |
640 public: | 727 public: |
641 SkChunkFlatController(size_t minSize) | 728 SkChunkFlatController(size_t minSize) |
642 : fHeap(minSize) | 729 : fHeap(minSize) |
643 , fTypefaceSet(SkNEW(SkRefCntSet)) { | 730 , fTypefaceSet(SkNEW(SkRefCntSet)) { |
644 this->setTypefaceSet(fTypefaceSet); | 731 this->setTypefaceSet(fTypefaceSet); |
645 this->setTypefacePlayback(&fTypefacePlayback); | 732 this->setTypefacePlayback(&fTypefacePlayback); |
646 } | 733 } |
647 | 734 |
648 ~SkChunkFlatController() { | |
649 fTypefaceSet->unref(); | |
650 } | |
651 | |
652 virtual void* allocThrow(size_t bytes) SK_OVERRIDE { | 735 virtual void* allocThrow(size_t bytes) SK_OVERRIDE { |
653 return fHeap.allocThrow(bytes); | 736 fLastAllocated = fHeap.allocThrow(bytes); |
737 return fLastAllocated; | |
654 } | 738 } |
655 | 739 |
656 virtual void unalloc(void* ptr) SK_OVERRIDE { | 740 virtual void unalloc(void* ptr) SK_OVERRIDE { |
657 (void) fHeap.unalloc(ptr); | 741 // fHeap can only free a pointer if it was the last one allocated. Othe rwise, we'll just |
742 // have to wait until fHeap is destroyed. | |
743 if (ptr == fLastAllocated) (void)fHeap.unalloc(ptr); | |
658 } | 744 } |
659 | 745 |
660 void setupPlaybacks() const { | 746 void setupPlaybacks() const { |
661 fTypefacePlayback.reset(fTypefaceSet); | 747 fTypefacePlayback.reset(fTypefaceSet.get()); |
662 } | 748 } |
663 | 749 |
664 void setBitmapStorage(SkBitmapHeap* heap) { | 750 void setBitmapStorage(SkBitmapHeap* heap) { |
665 this->setBitmapHeap(heap); | 751 this->setBitmapHeap(heap); |
666 } | 752 } |
667 | 753 |
668 private: | 754 private: |
669 SkChunkAlloc fHeap; | 755 SkChunkAlloc fHeap; |
670 SkRefCntSet* fTypefaceSet; | 756 SkAutoTUnref<SkRefCntSet> fTypefaceSet; |
757 void* fLastAllocated; | |
scroggo
2013/07/30 21:21:12
This should never be a problem in practice, but sh
| |
671 mutable SkTypefacePlayback fTypefacePlayback; | 758 mutable SkTypefacePlayback fTypefacePlayback; |
672 }; | 759 }; |
673 | 760 |
674 class SkBitmapDictionary : public SkFlatDictionary<SkBitmap> { | |
675 public: | |
676 SkBitmapDictionary(SkFlatController* controller) | |
677 : SkFlatDictionary<SkBitmap>(controller) { | |
678 fFlattenProc = &SkFlattenObjectProc<SkBitmap>; | |
679 fUnflattenProc = &SkUnflattenObjectProc<SkBitmap>; | |
680 } | |
681 }; | |
682 | |
683 class SkMatrixDictionary : public SkFlatDictionary<SkMatrix> { | 761 class SkMatrixDictionary : public SkFlatDictionary<SkMatrix> { |
684 public: | 762 public: |
763 // All matrices fit in 36 bytes. | |
685 SkMatrixDictionary(SkFlatController* controller) | 764 SkMatrixDictionary(SkFlatController* controller) |
686 : SkFlatDictionary<SkMatrix>(controller) { | 765 : SkFlatDictionary<SkMatrix>(controller, 36) { |
687 fFlattenProc = &flattenMatrix; | 766 fFlattenProc = &flattenMatrix; |
688 fUnflattenProc = &unflattenMatrix; | 767 fUnflattenProc = &unflattenMatrix; |
689 } | 768 } |
690 | 769 |
691 static void flattenMatrix(SkOrderedWriteBuffer& buffer, const void* obj) { | 770 static void flattenMatrix(SkOrderedWriteBuffer& buffer, const void* obj) { |
692 buffer.getWriter32()->writeMatrix(*(SkMatrix*)obj); | 771 buffer.getWriter32()->writeMatrix(*(SkMatrix*)obj); |
693 } | 772 } |
694 | 773 |
695 static void unflattenMatrix(SkOrderedReadBuffer& buffer, void* obj) { | 774 static void unflattenMatrix(SkOrderedReadBuffer& buffer, void* obj) { |
696 buffer.getReader32()->readMatrix((SkMatrix*)obj); | 775 buffer.getReader32()->readMatrix((SkMatrix*)obj); |
697 } | 776 } |
698 }; | 777 }; |
699 | 778 |
700 class SkPaintDictionary : public SkFlatDictionary<SkPaint> { | 779 class SkPaintDictionary : public SkFlatDictionary<SkPaint> { |
701 public: | 780 public: |
781 // The largest paint across ~60 .skps was 500 bytes. | |
702 SkPaintDictionary(SkFlatController* controller) | 782 SkPaintDictionary(SkFlatController* controller) |
703 : SkFlatDictionary<SkPaint>(controller) { | 783 : SkFlatDictionary<SkPaint>(controller, 512) { |
704 fFlattenProc = &SkFlattenObjectProc<SkPaint>; | 784 fFlattenProc = &SkFlattenObjectProc<SkPaint>; |
705 fUnflattenProc = &SkUnflattenObjectProc<SkPaint>; | 785 fUnflattenProc = &SkUnflattenObjectProc<SkPaint>; |
706 } | 786 } |
707 }; | 787 }; |
708 | 788 |
709 class SkRegionDictionary : public SkFlatDictionary<SkRegion> { | 789 class SkRegionDictionary : public SkFlatDictionary<SkRegion> { |
710 public: | 790 public: |
711 SkRegionDictionary(SkFlatController* controller) | 791 SkRegionDictionary(SkFlatController* controller) |
712 : SkFlatDictionary<SkRegion>(controller) { | 792 : SkFlatDictionary<SkRegion>(controller) { |
713 fFlattenProc = &flattenRegion; | 793 fFlattenProc = &flattenRegion; |
714 fUnflattenProc = &unflattenRegion; | 794 fUnflattenProc = &unflattenRegion; |
715 } | 795 } |
716 | 796 |
717 static void flattenRegion(SkOrderedWriteBuffer& buffer, const void* obj) { | 797 static void flattenRegion(SkOrderedWriteBuffer& buffer, const void* obj) { |
718 buffer.getWriter32()->writeRegion(*(SkRegion*)obj); | 798 buffer.getWriter32()->writeRegion(*(SkRegion*)obj); |
719 } | 799 } |
720 | 800 |
721 static void unflattenRegion(SkOrderedReadBuffer& buffer, void* obj) { | 801 static void unflattenRegion(SkOrderedReadBuffer& buffer, void* obj) { |
722 buffer.getReader32()->readRegion((SkRegion*)obj); | 802 buffer.getReader32()->readRegion((SkRegion*)obj); |
723 } | 803 } |
724 }; | 804 }; |
725 | 805 |
726 #endif | 806 #endif |
OLD | NEW |