OLD | NEW |
(Empty) | |
| 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. |
| 4 |
| 5 #include "base/bind.h" |
| 6 #include "base/callback_helpers.h" |
| 7 #include "base/message_loop/message_loop.h" |
| 8 #include "media/blink/multibuffer_reader.h" |
| 9 #include "net/base/net_errors.h" |
| 10 |
| 11 namespace media { |
| 12 |
// Constructs a reader for bytes [start, end) of |multibuffer|.
// |end| == -1 means "size unknown"; a very large (but still representable)
// byte position is substituted so range math stays valid. |progress_callback|
// (may be null) is later invoked with the available [begin, end) byte range
// as data arrives (see NotifyAvailableRange()).
MultiBufferReader::MultiBufferReader(
    MultiBuffer* multibuffer,
    int64_t start,
    int64_t end,
    const base::Callback<void(int64_t, int64_t)>& progress_callback)
    : multibuffer_(multibuffer),
      // If end is -1, we use a very large (but still supported) value instead.
      end_(end == -1LL ? (1LL << (multibuffer->block_size_shift() + 30)) : end),
      preload_high_(0),
      preload_low_(0),
      max_buffer_forward_(0),
      max_buffer_backward_(0),
      pos_(start),
      preload_pos_(-1),  // -1 = "not positioned yet"; fixed up lazily in
                         // UpdateInternalState().
      loading_(true),
      current_wait_size_(0),
      progress_callback_(progress_callback),
      weak_factory_(this) {
  DCHECK_GE(start, 0);
  DCHECK_GE(end_, 0);
}
| 34 |
MultiBufferReader::~MultiBufferReader() {
  // Deregister as a reader (a no-op if we were never added).
  multibuffer_->RemoveReader(preload_pos_, this);
  // Return the cache-size budget this reader contributed; mirrors the
  // IncrementMaxSize() calls in SetMaxBuffer().
  multibuffer_->IncrementMaxSize(
      -block_ceil(max_buffer_forward_ + max_buffer_backward_));
  // Unpin the block range we keep pinned around pos_ (see Seek()).
  multibuffer_->PinRange(block(pos_ - max_buffer_backward_),
                         block_ceil(pos_ + max_buffer_forward_), -1);
  // Let the cache tear down writers that only we were keeping alive.
  multibuffer_->CleanupWriters(preload_pos_);
}
| 43 |
| 44 void MultiBufferReader::Seek(int64_t pos) { |
| 45 DCHECK_GE(pos, 0); |
| 46 if (pos == pos_) |
| 47 return; |
| 48 // Use a rangemap to compute the diff in pinning. |
| 49 IntervalMap<MultiBuffer::BlockId, int32_t> tmp; |
| 50 tmp.IncrementInterval(block(pos_ - max_buffer_backward_), |
| 51 block_ceil(pos_ + max_buffer_forward_), -1); |
| 52 tmp.IncrementInterval(block(pos - max_buffer_backward_), |
| 53 block_ceil(pos + max_buffer_forward_), 1); |
| 54 |
| 55 multibuffer_->PinRanges(tmp); |
| 56 |
| 57 multibuffer_->RemoveReader(preload_pos_, this); |
| 58 MultiBufferBlockId old_preload_pos = preload_pos_; |
| 59 preload_pos_ = block(pos); |
| 60 pos_ = pos; |
| 61 UpdateInternalState(); |
| 62 multibuffer_->CleanupWriters(old_preload_pos); |
| 63 } |
| 64 |
| 65 void MultiBufferReader::SetMaxBuffer(int64_t backward, int64_t forward) { |
| 66 // Safe, because we know this doesn't actually prune the cache right away. |
| 67 multibuffer_->IncrementMaxSize( |
| 68 -block_ceil(max_buffer_forward_ + max_buffer_backward_)); |
| 69 // Use a rangemap to compute the diff in pinning. |
| 70 IntervalMap<MultiBuffer::BlockId, int32_t> tmp; |
| 71 tmp.IncrementInterval(block(pos_ - max_buffer_backward_), |
| 72 block_ceil(pos_ + max_buffer_forward_), -1); |
| 73 max_buffer_backward_ = backward; |
| 74 max_buffer_forward_ = forward; |
| 75 tmp.IncrementInterval(block(pos_ - max_buffer_backward_), |
| 76 block_ceil(pos_ + max_buffer_forward_), 1); |
| 77 multibuffer_->PinRanges(tmp); |
| 78 |
| 79 multibuffer_->IncrementMaxSize( |
| 80 block_ceil(max_buffer_forward_ + max_buffer_backward_)); |
| 81 } |
| 82 |
| 83 int64_t MultiBufferReader::Available() const { |
| 84 int64_t unavailable_byte_pos = |
| 85 static_cast<int64_t>(multibuffer_->FindNextUnavailable(block(pos_))) |
| 86 << multibuffer_->block_size_shift(); |
| 87 return std::max<int64_t>(0, unavailable_byte_pos - pos_); |
| 88 } |
| 89 |
| 90 int64_t MultiBufferReader::TryRead(uint8_t* data, int64_t len) { |
| 91 DCHECK_GT(len, 0); |
| 92 current_wait_size_ = 0; |
| 93 cb_.Reset(); |
| 94 DCHECK_LE(pos_ + len, end_); |
| 95 const MultiBuffer::DataMap& data_map = multibuffer_->map(); |
| 96 MultiBuffer::DataMap::const_iterator i = data_map.find(block(pos_)); |
| 97 int64_t p = pos_; |
| 98 int64_t bytes_read = 0; |
| 99 while (bytes_read < len) { |
| 100 if (i == data_map.end()) |
| 101 break; |
| 102 if (i->first != block(p)) |
| 103 break; |
| 104 if (i->second->end_of_stream()) |
| 105 break; |
| 106 size_t offset = p & ((1LL << multibuffer_->block_size_shift()) - 1); |
| 107 size_t tocopy = |
| 108 std::min<size_t>(len - bytes_read, i->second->data_size() - offset); |
| 109 memcpy(data, i->second->data() + offset, tocopy); |
| 110 data += tocopy; |
| 111 bytes_read += tocopy; |
| 112 p += tocopy; |
| 113 ++i; |
| 114 } |
| 115 Seek(p); |
| 116 return bytes_read; |
| 117 } |
| 118 |
| 119 int MultiBufferReader::Wait(int64_t len, const base::Closure& cb) { |
| 120 DCHECK_LE(pos_ + len, end_); |
| 121 DCHECK_NE(Available(), -1); |
| 122 DCHECK_LE(len, max_buffer_forward_); |
| 123 current_wait_size_ = len; |
| 124 |
| 125 cb_.Reset(); |
| 126 UpdateInternalState(); |
| 127 |
| 128 if (Available() >= current_wait_size_) { |
| 129 return net::OK; |
| 130 } else { |
| 131 cb_ = cb; |
| 132 return net::ERR_IO_PENDING; |
| 133 } |
| 134 } |
| 135 |
| 136 void MultiBufferReader::SetPreload(int64_t preload_high, int64_t preload_low) { |
| 137 DCHECK_GE(preload_high, preload_low); |
| 138 multibuffer_->RemoveReader(preload_pos_, this); |
| 139 preload_pos_ = block(pos_); |
| 140 preload_high_ = preload_high; |
| 141 preload_low_ = preload_low; |
| 142 UpdateInternalState(); |
| 143 } |
| 144 |
// Returns true while this reader is registered to receive more data, i.e.
// UpdateInternalState() decided that further preloading is wanted.
bool MultiBufferReader::IsLoading() const {
  return loading_;
}
| 148 |
| 149 void MultiBufferReader::CheckWait() { |
| 150 if (!cb_.is_null() && |
| 151 (Available() >= current_wait_size_ || Available() == -1)) { |
| 152 // We redirect the call through a weak pointer to ourselves to guarantee |
| 153 // there are no callbacks from us after we've been destroyed. |
| 154 base::MessageLoop::current()->PostTask( |
| 155 FROM_HERE, |
| 156 base::Bind(&MultiBufferReader::Call, weak_factory_.GetWeakPtr(), |
| 157 base::ResetAndReturn(&cb_))); |
| 158 } |
| 159 } |
| 160 |
// Runs |cb|. Exists only as a weak-pointer-bound trampoline: callbacks
// posted through it (see CheckWait() / NotifyAvailableRange()) are dropped
// automatically if this reader has been destroyed in the meantime.
void MultiBufferReader::Call(const base::Closure& cb) const {
  cb.Run();
}
| 164 |
// Called when the range of available blocks changes. Tightens |end_| when
// the end-of-stream block becomes visible, refreshes internal state (which
// may fire a pending Wait() callback), and forwards the available byte
// range to |progress_callback_|, if set.
void MultiBufferReader::NotifyAvailableRange(
    const Interval<MultiBufferBlockId>& range) {
  // Update end_ if we can.
  if (range.end > range.begin) {
    auto i = multibuffer_->map().find(range.end - 1);
    DCHECK(i != multibuffer_->map().end());
    if (i->second->end_of_stream()) {
      // This is an upper limit because the last-to-one block is allowed
      // to be smaller than the rest of the blocks.
      int64_t size_upper_limit = static_cast<int64_t>(range.end)
                                 << multibuffer_->block_size_shift();
      end_ = std::min(end_, size_upper_limit);
    }
  }
  UpdateInternalState();
  if (!progress_callback_.is_null()) {
    // We redirect the call through a weak pointer to ourselves to guarantee
    // there are no callbacks from us after we've been destroyed.
    // The two arguments convert the block range to a byte range.
    base::MessageLoop::current()->PostTask(
        FROM_HERE,
        base::Bind(&MultiBufferReader::Call, weak_factory_.GetWeakPtr(),
                   base::Bind(progress_callback_,
                              static_cast<int64_t>(range.begin)
                                  << multibuffer_->block_size_shift(),
                              static_cast<int64_t>(range.end)
                                  << multibuffer_->block_size_shift())));
    // We may be destroyed, do not touch |this|.
  }
}
| 194 |
// Recomputes the preload/loading state after pos_, end_, the preload limits
// or data availability changed: re-registers this reader at the first
// unavailable block (if more data is wanted) and fires any satisfied Wait()
// callback via CheckWait().
void MultiBufferReader::UpdateInternalState() {
  // Hysteresis: while loading we preload up to preload_high_; once we stop,
  // we do not resume until buffered data drops below preload_low_.
  int64_t effective_preload = loading_ ? preload_high_ : preload_low_;

  loading_ = false;
  if (preload_pos_ == -1) {
    // Lazily initialized (-1 is set in the constructor).
    preload_pos_ = block(pos_);
    DCHECK_GE(preload_pos_, 0);
  }
  // First block past what we want loaded; a pending Wait() can extend the
  // preload window beyond the plain preload limit.
  MultiBuffer::BlockId max_preload = block_ceil(
      std::min(end_, pos_ + std::max(effective_preload, current_wait_size_)));

  // Note that we might not have been added to the multibuffer,
  // removing ourselves is a no-op in that case.
  multibuffer_->RemoveReader(preload_pos_, this);

  // We explicitly allow preloading to go beyond the pinned region in the cache.
  // It only happens when we want to preload something into the disk cache.
  // Thus it is possible to have blocks between our current reading position
  // and preload_pos_ be unavailable. When we get a Seek() call (possibly
  // through TryRead()) we reset the preload_pos_ to the current reading
  // position, and preload_pos_ will become the first unavailable block after
  // our current reading position again.
  preload_pos_ = multibuffer_->FindNextUnavailable(preload_pos_);
  DCHECK_GE(preload_pos_, 0);

  DVLOG(3) << "UpdateInternalState"
           << " pp = " << preload_pos_
           << " block_ceil(end_) = " << block_ceil(end_) << " end_ = " << end_
           << " max_preload " << max_preload;

  if (preload_pos_ < block_ceil(end_)) {
    if (preload_pos_ < max_preload) {
      // More data is wanted; register to be fed starting at preload_pos_.
      loading_ = true;
      multibuffer_->AddReader(preload_pos_, this);
    } else if (multibuffer_->Contains(preload_pos_ - 1)) {
      // Done preloading but not at end-of-stream: re-register on the last
      // available block — presumably to keep receiving availability
      // notifications without requesting more data. TODO(review): confirm.
      --preload_pos_;
      multibuffer_->AddReader(preload_pos_, this);
    }
  }
  CheckWait();
}
| 236 |
| 237 } // namespace media |
OLD | NEW |