Chromium Code Reviews

Side by Side Diff: content/common/gpu/media/vt_video_encode_accelerator_mac.cc

Issue 1636083003: H264 HW encode using VideoToolbox (Closed)
Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: mcasas@ nit. Created 4 years, 9 months ago
OLD | NEW
1 // Copyright 2014 The Chromium Authors. All rights reserved. 1 // Copyright 2016 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "media/cast/sender/h264_vt_encoder.h" 5 #include "content/common/gpu/media/vt_video_encode_accelerator_mac.h"
6 6
7 #include <stddef.h> 7 #include "base/thread_task_runner_handle.h"
8 8 #include "media/base/mac/coremedia_glue.h"
9 #include <string>
10 #include <vector>
11
12 #include "base/big_endian.h"
13 #include "base/bind.h"
14 #include "base/bind_helpers.h"
15 #include "base/location.h"
16 #include "base/logging.h"
17 #include "base/macros.h"
18 #include "base/power_monitor/power_monitor.h"
19 #include "base/synchronization/lock.h"
20 #include "build/build_config.h"
21 #include "media/base/mac/corevideo_glue.h" 9 #include "media/base/mac/corevideo_glue.h"
22 #include "media/base/mac/video_frame_mac.h" 10 #include "media/base/mac/video_frame_mac.h"
23 #include "media/cast/common/rtp_time.h" 11
24 #include "media/cast/constants.h" 12 namespace content {
25 #include "media/cast/sender/video_frame_factory.h"
26
27 namespace media {
28 namespace cast {
29 13
30 namespace { 14 namespace {
31 15
32 // Container for the associated data of a video frame being processed. 16 // TODO(emircan): Check if we can find the actual system capabilities via
33 struct InProgressFrameEncode { 17 // creating VTCompressionSessions with varying requirements.
34 const RtpTimeTicks rtp_timestamp; 18 // See crbug.com/584784.
19 const size_t kBitsPerByte = 8;
20 const size_t kDefaultResolutionWidth = 640;
21 const size_t kDefaultResolutionHeight = 480;
22 const size_t kMaxFrameRateNumerator = 30;
23 const size_t kMaxFrameRateDenominator = 1;
24 const size_t kMaxResolutionWidth = 4096;
25 const size_t kMaxResolutionHeight = 2160;
26 const size_t kNumInputBuffers = 3;
27
28 } // namespace
29
30 struct VTVideoEncodeAccelerator::InProgressFrameEncode {
31 InProgressFrameEncode(base::TimeDelta rtp_timestamp,
32 base::TimeTicks ref_time)
33 : timestamp(rtp_timestamp), reference_time(ref_time) {}
34 const base::TimeDelta timestamp;
35 const base::TimeTicks reference_time; 35 const base::TimeTicks reference_time;
36 const VideoEncoder::FrameEncodedCallback frame_encoded_callback; 36
37 37 private:
38 InProgressFrameEncode(RtpTimeTicks rtp, 38 DISALLOW_IMPLICIT_CONSTRUCTORS(InProgressFrameEncode);
39 base::TimeTicks r_time,
40 VideoEncoder::FrameEncodedCallback callback)
41 : rtp_timestamp(rtp),
42 reference_time(r_time),
43 frame_encoded_callback(callback) {}
44 }; 39 };
45 40
46 base::ScopedCFTypeRef<CFDictionaryRef> 41 struct VTVideoEncodeAccelerator::EncodeOutput {
47 DictionaryWithKeysAndValues(CFTypeRef* keys, CFTypeRef* values, size_t size) { 42 EncodeOutput(VTEncodeInfoFlags info_flags, CMSampleBufferRef sbuf)
48 return base::ScopedCFTypeRef<CFDictionaryRef>(CFDictionaryCreate( 43 : info(info_flags), sample_buffer(sbuf, base::scoped_policy::RETAIN) {}
49 kCFAllocatorDefault, keys, values, size, &kCFTypeDictionaryKeyCallBacks, 44 const VTEncodeInfoFlags info;
50 &kCFTypeDictionaryValueCallBacks)); 45 const base::ScopedCFTypeRef<CMSampleBufferRef> sample_buffer;
51 } 46
52 47 private:
53 base::ScopedCFTypeRef<CFDictionaryRef> DictionaryWithKeyValue(CFTypeRef key, 48 DISALLOW_IMPLICIT_CONSTRUCTORS(EncodeOutput);
54 CFTypeRef value) { 49 };
55 CFTypeRef keys[1] = {key}; 50
56 CFTypeRef values[1] = {value}; 51 struct VTVideoEncodeAccelerator::BitstreamBufferRef {
57 return DictionaryWithKeysAndValues(keys, values, 1); 52 BitstreamBufferRef(int32_t id,
58 } 53 scoped_ptr<base::SharedMemory> shm,
59 54 size_t size)
60 base::ScopedCFTypeRef<CFArrayRef> ArrayWithIntegers(const int* v, size_t size) { 55 : id(id), shm(std::move(shm)), size(size) {}
61 std::vector<CFNumberRef> numbers; 56 const int32_t id;
62 numbers.reserve(size); 57 const scoped_ptr<base::SharedMemory> shm;
63 for (const int* end = v + size; v < end; ++v) 58 const size_t size;
64 numbers.push_back(CFNumberCreate(nullptr, kCFNumberSInt32Type, v)); 59
65 base::ScopedCFTypeRef<CFArrayRef> array(CFArrayCreate( 60 private:
66 kCFAllocatorDefault, reinterpret_cast<const void**>(&numbers[0]), 61 DISALLOW_IMPLICIT_CONSTRUCTORS(BitstreamBufferRef);
67 numbers.size(), &kCFTypeArrayCallBacks)); 62 };
68 for (auto& number : numbers) { 63
69 CFRelease(number); 64 VTVideoEncodeAccelerator::VTVideoEncodeAccelerator()
70 } 65 : client_task_runner_(base::ThreadTaskRunnerHandle::Get()),
71 return array; 66 encoder_thread_("VTEncoderThread"),
72 } 67 encoder_task_weak_factory_(this) {
73 68 encoder_weak_ptr_ = encoder_task_weak_factory_.GetWeakPtr();
74 template <typename NalSizeType> 69 }
75 void CopyNalsToAnnexB(char* avcc_buffer, 70
76 const size_t avcc_size, 71 VTVideoEncodeAccelerator::~VTVideoEncodeAccelerator() {
77 std::string* annexb_buffer) { 72 DVLOG(3) << __FUNCTION__;
78 static_assert(sizeof(NalSizeType) == 1 || sizeof(NalSizeType) == 2 || 73 DCHECK(thread_checker_.CalledOnValidThread());
79 sizeof(NalSizeType) == 4, 74
80 "NAL size type has unsupported size"); 75 Destroy();
81 static const char startcode_3[3] = {0, 0, 1}; 76 DCHECK(!encoder_thread_.IsRunning());
82 DCHECK(avcc_buffer); 77 DCHECK(!encoder_task_weak_factory_.HasWeakPtrs());
83 DCHECK(annexb_buffer); 78 }
84 size_t bytes_left = avcc_size; 79
85 while (bytes_left > 0) { 80 media::VideoEncodeAccelerator::SupportedProfiles
86 DCHECK_GT(bytes_left, sizeof(NalSizeType)); 81 VTVideoEncodeAccelerator::GetSupportedProfiles() {
87 NalSizeType nal_size; 82 DVLOG(3) << __FUNCTION__;
88 base::ReadBigEndian(avcc_buffer, &nal_size); 83 DCHECK(thread_checker_.CalledOnValidThread());
89 bytes_left -= sizeof(NalSizeType); 84
90 avcc_buffer += sizeof(NalSizeType); 85 SupportedProfiles profiles;
91 86 // Check if HW encoder is supported initially.
92 DCHECK_GE(bytes_left, nal_size); 87 videotoolbox_glue_ = VideoToolboxGlue::Get();
93 annexb_buffer->append(startcode_3, sizeof(startcode_3)); 88 if (!videotoolbox_glue_) {
94 annexb_buffer->append(avcc_buffer, nal_size); 89 DLOG(ERROR) << "Failed creating VideoToolbox glue.";
95 bytes_left -= nal_size; 90 return profiles;
96 avcc_buffer += nal_size; 91 }
97 } 92 const bool rv = CreateCompressionSession(
98 } 93 media::video_toolbox::DictionaryWithKeysAndValues(nullptr, nullptr, 0),
99 94 gfx::Size(kDefaultResolutionWidth, kDefaultResolutionHeight), true);
100 // Copy a H.264 frame stored in a CM sample buffer to an Annex B buffer. Copies 95 DestroyCompressionSession();
101 // parameter sets for keyframes before the frame data as well. 96 if (!rv) {
102 void CopySampleBufferToAnnexBBuffer(CoreMediaGlue::CMSampleBufferRef sbuf, 97 VLOG(1) << "Failed creating compression session with hardware support.";
103 std::string* annexb_buffer, 98 return profiles;
104 bool keyframe) { 99 }
105 // Perform two pass, one to figure out the total output size, and another to 100
106 // copy the data after having performed a single output allocation. Note that 101 SupportedProfile profile;
107 // we'll allocate a bit more because we'll count 4 bytes instead of 3 for 102 profile.profile = media::H264PROFILE_BASELINE;
108 // video NALs. 103 profile.max_framerate_numerator = kMaxFrameRateNumerator;
109 104 profile.max_framerate_denominator = kMaxFrameRateDenominator;
110 OSStatus status; 105 profile.max_resolution = gfx::Size(kMaxResolutionWidth, kMaxResolutionHeight);
111 106 profiles.push_back(profile);
112 // Get the sample buffer's block buffer and format description. 107 return profiles;
113 auto bb = CoreMediaGlue::CMSampleBufferGetDataBuffer(sbuf); 108 }
114 DCHECK(bb); 109
115 auto fdesc = CoreMediaGlue::CMSampleBufferGetFormatDescription(sbuf); 110 bool VTVideoEncodeAccelerator::Initialize(
116 DCHECK(fdesc); 111 media::VideoPixelFormat format,
117 112 const gfx::Size& input_visible_size,
118 size_t bb_size = CoreMediaGlue::CMBlockBufferGetDataLength(bb); 113 media::VideoCodecProfile output_profile,
119 size_t total_bytes = bb_size; 114 uint32_t initial_bitrate,
120 115 Client* client) {
121 size_t pset_count; 116 DVLOG(3) << __FUNCTION__
122 int nal_size_field_bytes; 117 << ": input_format=" << media::VideoPixelFormatToString(format)
123 status = CoreMediaGlue::CMVideoFormatDescriptionGetH264ParameterSetAtIndex( 118 << ", input_visible_size=" << input_visible_size.ToString()
124 fdesc, 0, nullptr, nullptr, &pset_count, &nal_size_field_bytes); 119 << ", output_profile=" << output_profile
125 if (status == 120 << ", initial_bitrate=" << initial_bitrate;
126 CoreMediaGlue::kCMFormatDescriptionBridgeError_InvalidParameter) { 121 DCHECK(thread_checker_.CalledOnValidThread());
127 DLOG(WARNING) << " assuming 2 parameter sets and 4 bytes NAL length header"; 122 DCHECK(client);
128 pset_count = 2; 123
129 nal_size_field_bytes = 4; 124 if (media::PIXEL_FORMAT_I420 != format) {
130 } else if (status != noErr) { 125 DLOG(ERROR) << "Input format not supported= "
131 DLOG(ERROR) 126 << media::VideoPixelFormatToString(format);
132 << " CMVideoFormatDescriptionGetH264ParameterSetAtIndex failed: " 127 return false;
133 << status; 128 }
134 return; 129 if (media::H264PROFILE_BASELINE != output_profile) {
135 } 130 DLOG(ERROR) << "Output profile not supported= "
136 131 << output_profile;
137 if (keyframe) { 132 return false;
138 const uint8_t* pset; 133 }
139 size_t pset_size; 134
140 for (size_t pset_i = 0; pset_i < pset_count; ++pset_i) { 135 videotoolbox_glue_ = VideoToolboxGlue::Get();
141 status = 136 if (!videotoolbox_glue_) {
142 CoreMediaGlue::CMVideoFormatDescriptionGetH264ParameterSetAtIndex( 137 DLOG(ERROR) << "Failed creating VideoToolbox glue.";
143 fdesc, pset_i, &pset, &pset_size, nullptr, nullptr); 138 return false;
144 if (status != noErr) { 139 }
145 DLOG(ERROR) 140
146 << " CMVideoFormatDescriptionGetH264ParameterSetAtIndex failed: " 141 client_ptr_factory_.reset(new base::WeakPtrFactory<Client>(client));
147 << status; 142 client_ = client_ptr_factory_->GetWeakPtr();
148 return; 143 input_visible_size_ = input_visible_size;
149 } 144 frame_rate_ = kMaxFrameRateNumerator / kMaxFrameRateDenominator;
150 total_bytes += pset_size + nal_size_field_bytes; 145 target_bitrate_ = initial_bitrate;
151 } 146 bitstream_buffer_size_ = input_visible_size.GetArea();
152 } 147
153 148 if (!ResetCompressionSession()) {
154 annexb_buffer->reserve(total_bytes); 149 DLOG(ERROR) << "Failed creating compression session.";
155 150 return false;
156 // Copy all parameter sets before keyframes. 151 }
157 if (keyframe) { 152
158 const uint8_t* pset; 153 if (!encoder_thread_.Start()) {
159 size_t pset_size; 154 DLOG(ERROR) << "Failed spawning encoder thread.";
160 for (size_t pset_i = 0; pset_i < pset_count; ++pset_i) { 155 return false;
161 status = 156 }
162 CoreMediaGlue::CMVideoFormatDescriptionGetH264ParameterSetAtIndex( 157
163 fdesc, pset_i, &pset, &pset_size, nullptr, nullptr); 158 client_task_runner_->PostTask(
164 if (status != noErr) { 159 FROM_HERE,
165 DLOG(ERROR) 160 base::Bind(&Client::RequireBitstreamBuffers, client_, kNumInputBuffers,
166 << " CMVideoFormatDescriptionGetH264ParameterSetAtIndex failed: " 161 input_visible_size_, bitstream_buffer_size_));
167 << status; 162 return true;
168 return; 163 }
169 } 164
170 static const char startcode_4[4] = {0, 0, 0, 1}; 165 void VTVideoEncodeAccelerator::Encode(
171 annexb_buffer->append(startcode_4, sizeof(startcode_4)); 166 const scoped_refptr<media::VideoFrame>& frame,
172 annexb_buffer->append(reinterpret_cast<const char*>(pset), pset_size); 167 bool force_keyframe) {
173 } 168 DVLOG(3) << __FUNCTION__;
174 } 169 DCHECK(thread_checker_.CalledOnValidThread());
175 170
176 // Block buffers can be composed of non-contiguous chunks. For the sake of 171 encoder_thread_.message_loop()->PostTask(
177 // keeping this code simple, flatten non-contiguous block buffers. 172 FROM_HERE,
178 base::ScopedCFTypeRef<CoreMediaGlue::CMBlockBufferRef> contiguous_bb( 173 base::Bind(&VTVideoEncodeAccelerator::EncodeTask,
179 bb, base::scoped_policy::RETAIN); 174 base::Unretained(this),
180 if (!CoreMediaGlue::CMBlockBufferIsRangeContiguous(bb, 0, 0)) { 175 frame,
181 contiguous_bb.reset(); 176 force_keyframe));
182 status = CoreMediaGlue::CMBlockBufferCreateContiguous( 177 }
183 kCFAllocatorDefault, bb, kCFAllocatorDefault, nullptr, 0, 0, 0, 178
184 contiguous_bb.InitializeInto()); 179 void VTVideoEncodeAccelerator::UseOutputBitstreamBuffer(
185 if (status != noErr) { 180 const media::BitstreamBuffer& buffer) {
186 DLOG(ERROR) << " CMBlockBufferCreateContiguous failed: " << status; 181 DVLOG(3) << __FUNCTION__ << ": buffer size=" << buffer.size();
187 return; 182 DCHECK(thread_checker_.CalledOnValidThread());
188 } 183
189 } 184 if (buffer.size() < bitstream_buffer_size_) {
190 185 DLOG(ERROR) << "Output BitstreamBuffer isn't big enough: " << buffer.size()
191 // Copy all the NAL units. In the process convert them from AVCC format 186 << " vs. " << bitstream_buffer_size_;
192 // (length header) to AnnexB format (start code). 187 client_->NotifyError(kInvalidArgumentError);
193 char* bb_data; 188 return;
194 status = CoreMediaGlue::CMBlockBufferGetDataPointer(contiguous_bb, 0, nullptr, 189 }
195 nullptr, &bb_data); 190
191 scoped_ptr<base::SharedMemory> shm(
192 new base::SharedMemory(buffer.handle(), false));
193 if (!shm->Map(buffer.size())) {
194 DLOG(ERROR) << "Failed mapping shared memory.";
195 client_->NotifyError(kPlatformFailureError);
196 return;
197 }
198
199 scoped_ptr<BitstreamBufferRef> buffer_ref(
200 new BitstreamBufferRef(buffer.id(), std::move(shm), buffer.size()));
201
202 encoder_thread_.message_loop()->PostTask(
203 FROM_HERE,
204 base::Bind(&VTVideoEncodeAccelerator::UseOutputBitstreamBufferTask,
205 base::Unretained(this),
206 base::Passed(&buffer_ref)));
207 }
208
209 void VTVideoEncodeAccelerator::RequestEncodingParametersChange(
210 uint32_t bitrate,
211 uint32_t framerate) {
212 DVLOG(3) << __FUNCTION__ << ": bitrate=" << bitrate
213 << ": framerate=" << framerate;
214 DCHECK(thread_checker_.CalledOnValidThread());
215
216 frame_rate_ = framerate > 1 ? framerate : 1;
217 target_bitrate_ = bitrate > 1 ? bitrate : 1;
218
219 if (!compression_session_) {
220 client_->NotifyError(kPlatformFailureError);
221 return;
222 }
223
224 media::video_toolbox::SessionPropertySetter session_property_setter(
225 compression_session_, videotoolbox_glue_);
226 // TODO(emircan): See crbug.com/425352.
227 bool rv = session_property_setter.Set(
228 videotoolbox_glue_->kVTCompressionPropertyKey_AverageBitRate(),
229 target_bitrate_);
230 rv &= session_property_setter.Set(
231 videotoolbox_glue_->kVTCompressionPropertyKey_ExpectedFrameRate(),
232 frame_rate_);
233 rv &= session_property_setter.Set(
234 videotoolbox_glue_->kVTCompressionPropertyKey_DataRateLimits(),
235 media::video_toolbox::ArrayWithIntegerAndFloat(
236 target_bitrate_ / kBitsPerByte, 1.0f));
237 DLOG_IF(ERROR, !rv) << "Couldn't change session encoding parameters.";
238 }
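
Context for the DataRateLimits arithmetic above: that VideoToolbox property takes a CFArray of alternating values, a data size in bytes followed by a window duration in seconds, which is why the patch pairs target_bitrate_ / kBitsPerByte with 1.0f (a bits-to-bytes conversion over a one-second window). Below is a standalone sketch of building such an array with plain CoreFoundation; CreateDataRateLimits is an illustrative name, and the glue's ArrayWithIntegerAndFloat presumably produces something equivalent.

#include <CoreFoundation/CoreFoundation.h>
#include <cstdint>

// Builds the two-element [bytes, seconds] array expected by
// kVTCompressionPropertyKey_DataRateLimits: here, a hard cap of
// bitrate_bps / 8 bytes over a 1-second window.
CFArrayRef CreateDataRateLimits(int32_t bitrate_bps) {
  int32_t bytes_per_window = bitrate_bps / 8;
  float window_seconds = 1.0f;
  CFNumberRef bytes_ref = CFNumberCreate(
      kCFAllocatorDefault, kCFNumberSInt32Type, &bytes_per_window);
  CFNumberRef seconds_ref = CFNumberCreate(
      kCFAllocatorDefault, kCFNumberFloat32Type, &window_seconds);
  const void* values[] = {bytes_ref, seconds_ref};
  CFArrayRef limits =
      CFArrayCreate(kCFAllocatorDefault, values, 2, &kCFTypeArrayCallBacks);
  CFRelease(bytes_ref);
  CFRelease(seconds_ref);
  // Caller owns |limits|; hand it to VTSessionSetProperty and CFRelease it.
  return limits;
}
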
239
240 void VTVideoEncodeAccelerator::Destroy() {
241 DVLOG(3) << __FUNCTION__;
242 DCHECK(thread_checker_.CalledOnValidThread());
243
244 // Cancel all callbacks.
245 client_ptr_factory_.reset();
246
247 if (encoder_thread_.IsRunning()) {
248 encoder_thread_.message_loop()->PostTask(
249 FROM_HERE,
250 base::Bind(&VTVideoEncodeAccelerator::DestroyTask,
251 base::Unretained(this)));
252 encoder_thread_.Stop();
253 } else {
254 DestroyTask();
255 }
256 }
257
258 void VTVideoEncodeAccelerator::EncodeTask(
259 const scoped_refptr<media::VideoFrame>& frame,
260 bool force_keyframe) {
261 DCHECK(encoder_thread_.task_runner()->BelongsToCurrentThread());
262 DCHECK(compression_session_);
263 DCHECK(frame);
264
265 // TODO(emircan): See if we can eliminate a copy here by using
266 // CVPixelBufferPool for the allocation of incoming VideoFrames.
267 base::ScopedCFTypeRef<CVPixelBufferRef> pixel_buffer =
268 media::WrapVideoFrameInCVPixelBuffer(*frame);
269 base::ScopedCFTypeRef<CFDictionaryRef> frame_props =
270 media::video_toolbox::DictionaryWithKeyValue(
271 videotoolbox_glue_->kVTEncodeFrameOptionKey_ForceKeyFrame(),
272 force_keyframe ? kCFBooleanTrue : kCFBooleanFalse);
273
274 base::TimeTicks ref_time;
275 if (!frame->metadata()->GetTimeTicks(
276 media::VideoFrameMetadata::REFERENCE_TIME, &ref_time)) {
277 ref_time = base::TimeTicks::Now();
278 }
279 auto timestamp_cm = CoreMediaGlue::CMTimeMake(
280 frame->timestamp().InMicroseconds(), USEC_PER_SEC);
281 // Wrap information we'll need after the frame is encoded in a heap object.
282 // We'll get the pointer back from the VideoToolbox completion callback.
283 scoped_ptr<InProgressFrameEncode> request(new InProgressFrameEncode(
284 frame->timestamp(), ref_time));
285
286 OSStatus status = videotoolbox_glue_->VTCompressionSessionEncodeFrame(
287 compression_session_, pixel_buffer, timestamp_cm,
288 CoreMediaGlue::CMTime{0, 0, 0, 0}, frame_props,
289 reinterpret_cast<void*>(request.get()), nullptr);
196 if (status != noErr) { 290 if (status != noErr) {
197 DLOG(ERROR) << " CMBlockBufferGetDataPointer failed: " << status; 291 DLOG(ERROR) << " VTCompressionSessionEncodeFrame failed: " << status;
198 return; 292 NotifyError(kPlatformFailureError);
199 }
200
201 if (nal_size_field_bytes == 1) {
202 CopyNalsToAnnexB<uint8_t>(bb_data, bb_size, annexb_buffer);
203 } else if (nal_size_field_bytes == 2) {
204 CopyNalsToAnnexB<uint16_t>(bb_data, bb_size, annexb_buffer);
205 } else if (nal_size_field_bytes == 4) {
206 CopyNalsToAnnexB<uint32_t>(bb_data, bb_size, annexb_buffer);
207 } else { 293 } else {
208 NOTREACHED(); 294 // We can pass the ownership to the encode callback if successful.
209 } 295 CHECK(request.release());
Pawel Osciak 2016/03/11 00:43:59 Unless I'm missing something, we passed a raw poin…
emircan 2016/03/11 01:53:20 I am updating the comment. If encode is successful…
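
The exchange above is about who owns the per-frame context handed to VTCompressionSessionEncodeFrame through its opaque source-frame pointer. Below is a minimal, self-contained sketch of that handoff idiom with made-up names (ToyEncoder, FrameContext, SubmitFrame are illustrative, not from the patch): the caller keeps ownership until submission succeeds, then releases the smart pointer without deleting, and the completion callback re-wraps the raw pointer so the context is destroyed exactly once.

#include <cstdint>
#include <memory>
#include <utility>
#include <vector>

struct FrameContext {
  int64_t timestamp_us;
};

using CompletionCallback = void (*)(void* opaque, bool ok);

// Toy stand-in for the compression session: it queues the opaque per-frame
// pointer and later hands it back to the completion callback, the way
// VTCompressionSessionEncodeFrame hands sourceFrameRefCon to the output
// callback on an internal thread.
struct ToyEncoder {
  std::vector<std::pair<void*, CompletionCallback>> pending;

  bool SubmitFrame(void* opaque, CompletionCallback cb) {
    pending.push_back({opaque, cb});
    return true;  // Submission accepted; the callback side now owns |opaque|.
  }

  void Drain() {  // Simulates the asynchronous completion callbacks.
    for (auto& p : pending)
      p.second(p.first, /*ok=*/true);
    pending.clear();
  }
};

void OnFrameDone(void* opaque, bool /*ok*/) {
  // Re-wrap the raw pointer so the context is destroyed exactly once, here.
  std::unique_ptr<FrameContext> ctx(static_cast<FrameContext*>(opaque));
}

void Encode(ToyEncoder* encoder, int64_t timestamp_us) {
  auto ctx = std::make_unique<FrameContext>();
  ctx->timestamp_us = timestamp_us;
  // Pass a raw pointer, but keep ownership until the submission succeeds.
  if (encoder->SubmitFrame(ctx.get(), &OnFrameDone)) {
    // Success: ownership has logically moved to the callback, so release the
    // smart pointer without deleting (the patch writes CHECK(request.release())).
    ctx.release();
  }
  // On failure, |ctx| would still own the context and free it on scope exit.
}

int main() {
  ToyEncoder encoder;
  Encode(&encoder, 1234);
  encoder.Drain();
  return 0;
}

This mirrors EncodeTask above: when VTCompressionSessionEncodeFrame fails, the scoped_ptr still owns the request and frees it after NotifyError; on success, CHECK(request.release()) hands the request to CompressionCallback, which re-wraps it.
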
210 } 296 }
211 297 }
212 } // namespace 298
213 299 void VTVideoEncodeAccelerator::UseOutputBitstreamBufferTask(
214 class H264VideoToolboxEncoder::VideoFrameFactoryImpl 300 scoped_ptr<BitstreamBufferRef> buffer_ref) {
215 : public base::RefCountedThreadSafe<VideoFrameFactoryImpl>, 301 DCHECK(encoder_thread_.task_runner()->BelongsToCurrentThread());
216 public VideoFrameFactory { 302
217 public: 303 // If there is already EncodeOutput waiting, copy its output first.
218 // Type that proxies the VideoFrameFactory interface to this class. 304 if (!encoder_output_queue_.empty()) {
219 class Proxy; 305 scoped_ptr<VTVideoEncodeAccelerator::EncodeOutput> encode_output =
220 306 std::move(encoder_output_queue_.front());
221 VideoFrameFactoryImpl(const base::WeakPtr<H264VideoToolboxEncoder>& encoder, 307 encoder_output_queue_.pop_front();
222 const scoped_refptr<CastEnvironment>& cast_environment) 308 ReturnBitstreamBuffer(std::move(encode_output), std::move(buffer_ref));
223 : encoder_(encoder), cast_environment_(cast_environment) {} 309 return;
224 310 }
225 scoped_refptr<VideoFrame> MaybeCreateFrame( 311
226 const gfx::Size& frame_size, 312 bitstream_buffer_queue_.push_back(std::move(buffer_ref));
227 base::TimeDelta timestamp) final { 313 }
228 if (frame_size.IsEmpty()) { 314
229 DVLOG(1) << "Rejecting empty video frame."; 315 void VTVideoEncodeAccelerator::DestroyTask() {
230 return nullptr; 316 DCHECK(!encoder_thread_.IsRunning() ||
231 } 317 encoder_thread_.task_runner()->BelongsToCurrentThread());
232 318
233 base::AutoLock auto_lock(lock_); 319 // Cancel all encoder thread callbacks.
234 320 encoder_task_weak_factory_.InvalidateWeakPtrs();
235 // If the pool size does not match, speculatively reset the encoder to use 321
236 // the new size and return null. Cache the new frame size right away and 322 // This call blocks until all pending frames are flushed out.
237 // toss away the pixel buffer pool to avoid spurious tasks until the encoder 323 DestroyCompressionSession();
238 // is done resetting. 324 }
239 if (frame_size != pool_frame_size_) { 325
240 DVLOG(1) << "MaybeCreateFrame: Detected frame size change."; 326 void VTVideoEncodeAccelerator::NotifyError(
241 cast_environment_->PostTask( 327 media::VideoEncodeAccelerator::Error error) {
242 CastEnvironment::MAIN, FROM_HERE, 328 DCHECK(encoder_thread_.task_runner()->BelongsToCurrentThread());
243 base::Bind(&H264VideoToolboxEncoder::UpdateFrameSize, encoder_, 329 client_task_runner_->PostTask(
244 frame_size)); 330 FROM_HERE, base::Bind(&Client::NotifyError, client_, error));
245 pool_frame_size_ = frame_size; 331 }
246 pool_.reset();
247 return nullptr;
248 }
249
250 if (!pool_) {
251 DVLOG(1) << "MaybeCreateFrame: No pixel buffer pool.";
252 return nullptr;
253 }
254
255 // Allocate a pixel buffer from the pool and return a wrapper VideoFrame.
256 base::ScopedCFTypeRef<CVPixelBufferRef> buffer;
257 auto status = CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pool_,
258 buffer.InitializeInto());
259 if (status != kCVReturnSuccess) {
260 DLOG(ERROR) << "CVPixelBufferPoolCreatePixelBuffer failed: " << status;
261 return nullptr;
262 }
263
264 DCHECK(buffer);
265 return VideoFrame::WrapCVPixelBuffer(buffer, timestamp);
266 }
267
268 void Update(const base::ScopedCFTypeRef<CVPixelBufferPoolRef>& pool,
269 const gfx::Size& frame_size) {
270 base::AutoLock auto_lock(lock_);
271 pool_ = pool;
272 pool_frame_size_ = frame_size;
273 }
274
275 private:
276 friend class base::RefCountedThreadSafe<VideoFrameFactoryImpl>;
277 ~VideoFrameFactoryImpl() final {}
278
279 base::Lock lock_;
280 base::ScopedCFTypeRef<CVPixelBufferPoolRef> pool_;
281 gfx::Size pool_frame_size_;
282
283 // Weak back reference to the encoder and the cast environment so we can
284 // message the encoder when the frame size changes.
285 const base::WeakPtr<H264VideoToolboxEncoder> encoder_;
286 const scoped_refptr<CastEnvironment> cast_environment_;
287
288 DISALLOW_COPY_AND_ASSIGN(VideoFrameFactoryImpl);
289 };
290
291 class H264VideoToolboxEncoder::VideoFrameFactoryImpl::Proxy
292 : public VideoFrameFactory {
293 public:
294 explicit Proxy(
295 const scoped_refptr<VideoFrameFactoryImpl>& video_frame_factory)
296 : video_frame_factory_(video_frame_factory) {
297 DCHECK(video_frame_factory_);
298 }
299
300 scoped_refptr<VideoFrame> MaybeCreateFrame(
301 const gfx::Size& frame_size,
302 base::TimeDelta timestamp) final {
303 return video_frame_factory_->MaybeCreateFrame(frame_size, timestamp);
304 }
305
306 private:
307 ~Proxy() final {}
308
309 const scoped_refptr<VideoFrameFactoryImpl> video_frame_factory_;
310
311 DISALLOW_COPY_AND_ASSIGN(Proxy);
312 };
313 332
314 // static 333 // static
315 bool H264VideoToolboxEncoder::IsSupported( 334 void VTVideoEncodeAccelerator::CompressionCallback(void* encoder_opaque,
316 const VideoSenderConfig& video_config) { 335 void* request_opaque,
317 return video_config.codec == CODEC_VIDEO_H264 && VideoToolboxGlue::Get(); 336 OSStatus status,
318 } 337 VTEncodeInfoFlags info,
319 338 CMSampleBufferRef sbuf) {
320 H264VideoToolboxEncoder::H264VideoToolboxEncoder( 339 // This function may be called asynchronously, on a different thread from the
321 const scoped_refptr<CastEnvironment>& cast_environment, 340 // one that calls VTCompressionSessionEncodeFrame.
322 const VideoSenderConfig& video_config, 341 DVLOG(3) << __FUNCTION__;
323 const StatusChangeCallback& status_change_cb) 342
324 : cast_environment_(cast_environment), 343 auto encoder = reinterpret_cast<VTVideoEncodeAccelerator*>(encoder_opaque);
325 videotoolbox_glue_(VideoToolboxGlue::Get()), 344 DCHECK(encoder);
326 video_config_(video_config), 345
327 status_change_cb_(status_change_cb), 346 // Release InProgressFrameEncode, since we don't have support to return
328 last_frame_id_(kFirstFrameId - 1), 347 // timestamps at this point.
329 encode_next_frame_as_keyframe_(false), 348 scoped_ptr<InProgressFrameEncode> request(
330 power_suspended_(false), 349 reinterpret_cast<InProgressFrameEncode*>(request_opaque));
331 weak_factory_(this) { 350 request.reset();
332 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); 351
333 DCHECK(!status_change_cb_.is_null()); 352 // EncodeOutput holds onto CMSampleBufferRef when posting task between
334 353 // threads.
335 OperationalStatus operational_status = 354 scoped_ptr<EncodeOutput> encode_output(new EncodeOutput(info, sbuf));
336 H264VideoToolboxEncoder::IsSupported(video_config) 355
337 ? STATUS_INITIALIZED 356 // This method is NOT called on |encoder_thread_|, so we still need to
338 : STATUS_UNSUPPORTED_CODEC; 357 // post a task back to it to do work.
339 cast_environment_->PostTask( 358 encoder->encoder_thread_.task_runner()->PostTask(
340 CastEnvironment::MAIN, FROM_HERE, 359 FROM_HERE, base::Bind(&VTVideoEncodeAccelerator::CompressionCallbackTask,
341 base::Bind(status_change_cb_, operational_status)); 360 encoder->encoder_weak_ptr_, status,
342 361 base::Passed(&encode_output)));
343 if (operational_status == STATUS_INITIALIZED) { 362 }
344 // Create the shared video frame factory. It persists for the combined 363
345 // lifetime of the encoder and all video frame factory proxies created by 364 void VTVideoEncodeAccelerator::CompressionCallbackTask(
346 // |CreateVideoFrameFactory| that reference it. 365 OSStatus status,
347 video_frame_factory_ = 366 scoped_ptr<EncodeOutput> encode_output) {
348 scoped_refptr<VideoFrameFactoryImpl>(new VideoFrameFactoryImpl( 367 DCHECK(encoder_thread_.task_runner()->BelongsToCurrentThread());
349 weak_factory_.GetWeakPtr(), cast_environment_)); 368
350 369 if (status != noErr) {
351 // Register for power state changes. 370 DLOG(ERROR) << " encode failed: " << status;
352 auto power_monitor = base::PowerMonitor::Get(); 371 NotifyError(kPlatformFailureError);
353 if (power_monitor) { 372 return;
354 power_monitor->AddObserver(this); 373 }
355 VLOG(1) << "Registered for power state changes."; 374
356 } else { 375 // If there isn't any BitstreamBuffer to copy into, add it to a queue for
357 DLOG(WARNING) << "No power monitor. Process suspension will invalidate " 376 // later use.
358 "the encoder."; 377 if (bitstream_buffer_queue_.empty()) {
359 } 378 encoder_output_queue_.push_back(std::move(encode_output));
360 } 379 return;
361 } 380 }
362 381
363 H264VideoToolboxEncoder::~H264VideoToolboxEncoder() { 382 scoped_ptr<VTVideoEncodeAccelerator::BitstreamBufferRef> buffer_ref =
383 std::move(bitstream_buffer_queue_.front());
384 bitstream_buffer_queue_.pop_front();
385 ReturnBitstreamBuffer(std::move(encode_output), std::move(buffer_ref));
386 }
387
388 void VTVideoEncodeAccelerator::ReturnBitstreamBuffer(
389 scoped_ptr<EncodeOutput> encode_output,
390 scoped_ptr<VTVideoEncodeAccelerator::BitstreamBufferRef> buffer_ref) {
391 DVLOG(3) << __FUNCTION__;
392 DCHECK(encoder_thread_.task_runner()->BelongsToCurrentThread());
393
394 if (encode_output->info & VideoToolboxGlue::kVTEncodeInfo_FrameDropped) {
395 DVLOG(2) << " frame dropped";
396 client_task_runner_->PostTask(
397 FROM_HERE, base::Bind(&Client::BitstreamBufferReady, client_,
398 buffer_ref->id, 0, false));
399 return;
400 }
401
402 auto sample_attachments = static_cast<CFDictionaryRef>(CFArrayGetValueAtIndex(
403 CoreMediaGlue::CMSampleBufferGetSampleAttachmentsArray(
404 encode_output->sample_buffer.get(), true),
405 0));
406 const bool keyframe =
407 !CFDictionaryContainsKey(sample_attachments,
408 CoreMediaGlue::kCMSampleAttachmentKey_NotSync());
409
410 size_t used_buffer_size = 0;
411 const bool copy_rv = media::video_toolbox::CopySampleBufferToAnnexBBuffer(
412 encode_output->sample_buffer.get(), keyframe, buffer_ref->size,
413 reinterpret_cast<char*>(buffer_ref->shm->memory()), &used_buffer_size);
414 if (!copy_rv) {
415 DLOG(ERROR) << "Cannot copy output from SampleBuffer to AnnexBBuffer.";
416 used_buffer_size = 0;
417 }
418
419 client_task_runner_->PostTask(
420 FROM_HERE, base::Bind(&Client::BitstreamBufferReady, client_,
421 buffer_ref->id, used_buffer_size, keyframe));
422 }
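
A note on the keyframe test above: VideoToolbox marks non-sync samples with the kCMSampleAttachmentKey_NotSync attachment, so a sample whose attachment dictionary lacks that key is a sync sample, i.e. a keyframe. Below is a standalone sketch against the raw CoreMedia API (IsKeyframe is an illustrative name; the patch reaches the same calls through CoreMediaGlue).

#include <CoreMedia/CoreMedia.h>

// Returns true if |sbuf| holds a sync sample (keyframe).
bool IsKeyframe(CMSampleBufferRef sbuf) {
  // With createIfNecessary=true this returns one attachment dictionary per
  // sample in the buffer; the first sample's dictionary is inspected here.
  CFArrayRef attachments =
      CMSampleBufferGetSampleAttachmentsArray(sbuf, /*createIfNecessary=*/true);
  if (!attachments || CFArrayGetCount(attachments) == 0)
    return true;  // No NotSync marking means a sync sample.
  CFDictionaryRef sample_attachments =
      static_cast<CFDictionaryRef>(CFArrayGetValueAtIndex(attachments, 0));
  return !CFDictionaryContainsKey(sample_attachments,
                                  kCMSampleAttachmentKey_NotSync);
}
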
423
424 bool VTVideoEncodeAccelerator::ResetCompressionSession() {
425 DCHECK(thread_checker_.CalledOnValidThread());
426
364 DestroyCompressionSession(); 427 DestroyCompressionSession();
365 428
366 // If video_frame_factory_ is not null, the encoder registered for power state 429 CFTypeRef attributes_keys[] = {
367 // changes in the ctor and it must now unregister. 430 kCVPixelBufferOpenGLCompatibilityKey,
368 if (video_frame_factory_) { 431 kCVPixelBufferIOSurfacePropertiesKey,
369 auto power_monitor = base::PowerMonitor::Get(); 432 kCVPixelBufferPixelFormatTypeKey
370 if (power_monitor) 433 };
371 power_monitor->RemoveObserver(this);
372 }
373 }
374
375 void H264VideoToolboxEncoder::ResetCompressionSession() {
376 DCHECK(thread_checker_.CalledOnValidThread());
377
378 // Ignore reset requests while power suspended.
379 if (power_suspended_)
380 return;
381
382 // Notify that we're resetting the encoder.
383 cast_environment_->PostTask(
384 CastEnvironment::MAIN, FROM_HERE,
385 base::Bind(status_change_cb_, STATUS_CODEC_REINIT_PENDING));
386
387 // Destroy the current session, if any.
388 DestroyCompressionSession();
389
390 // On OS X, allow the hardware encoder. Don't require it, it does not support
391 // all configurations (some of which are used for testing).
392 base::ScopedCFTypeRef<CFDictionaryRef> encoder_spec;
393 #if !defined(OS_IOS)
394 encoder_spec = DictionaryWithKeyValue(
395 videotoolbox_glue_
396 ->kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder(),
397 kCFBooleanTrue);
398 #endif
399
400 // Force 420v so that clients can easily use these buffers as GPU textures.
401 const int format[] = { 434 const int format[] = {
402 CoreVideoGlue::kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange}; 435 CoreVideoGlue::kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange};
403 436 CFTypeRef attributes_values[] = {
404 // Keep these attachment settings in-sync with those in ConfigureSession(). 437 kCFBooleanTrue,
405 CFTypeRef attachments_keys[] = {kCVImageBufferColorPrimariesKey, 438 media::video_toolbox::DictionaryWithKeysAndValues(nullptr, nullptr, 0)
406 kCVImageBufferTransferFunctionKey, 439 .release(),
407 kCVImageBufferYCbCrMatrixKey}; 440 media::video_toolbox::ArrayWithIntegers(format, arraysize(format))
408 CFTypeRef attachments_values[] = {kCVImageBufferColorPrimaries_ITU_R_709_2, 441 .release()};
409 kCVImageBufferTransferFunction_ITU_R_709_2, 442 const base::ScopedCFTypeRef<CFDictionaryRef> attributes =
410 kCVImageBufferYCbCrMatrix_ITU_R_709_2}; 443 media::video_toolbox::DictionaryWithKeysAndValues(
411 CFTypeRef buffer_attributes_keys[] = {kCVPixelBufferPixelFormatTypeKey, 444 attributes_keys, attributes_values, arraysize(attributes_keys));
412 kCVBufferPropagatedAttachmentsKey}; 445 for (auto& v : attributes_values)
413 CFTypeRef buffer_attributes_values[] = {
414 ArrayWithIntegers(format, arraysize(format)).release(),
415 DictionaryWithKeysAndValues(attachments_keys, attachments_values,
416 arraysize(attachments_keys)).release()};
417 const base::ScopedCFTypeRef<CFDictionaryRef> buffer_attributes =
418 DictionaryWithKeysAndValues(buffer_attributes_keys,
419 buffer_attributes_values,
420 arraysize(buffer_attributes_keys));
421 for (auto& v : buffer_attributes_values)
422 CFRelease(v); 446 CFRelease(v);
423 447
448 bool session_rv =
449 CreateCompressionSession(attributes, input_visible_size_, true);
450 if (!session_rv) {
451 // Our experiments showed that VideoToolbox falls to SW for resolutions
452 // below 480x360. For clients that might downgrade to these lower
453 // resolutions, we need to create a session.
454 DestroyCompressionSession();
455 session_rv =
456 CreateCompressionSession(attributes, input_visible_size_, false);
457 if (!session_rv) {
458 DestroyCompressionSession();
459 return false;
460 }
461 }
462
463 const bool configure_rv = ConfigureCompressionSession();
464 if (configure_rv)
465 RequestEncodingParametersChange(target_bitrate_, frame_rate_);
466 return configure_rv;
467 }
468
469 bool VTVideoEncodeAccelerator::CreateCompressionSession(
470 base::ScopedCFTypeRef<CFDictionaryRef> attributes,
471 const gfx::Size& input_size,
472 bool require_hw_encoding) {
473 DCHECK(thread_checker_.CalledOnValidThread());
474
475 std::vector<CFTypeRef> encoder_keys;
476 std::vector<CFTypeRef> encoder_values;
477 encoder_keys.push_back(videotoolbox_glue_
478 ->kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder());
479 encoder_values.push_back(kCFBooleanTrue);
480
481 if (require_hw_encoding) {
482 encoder_keys.push_back(
483 videotoolbox_glue_
484 ->kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder());
485 encoder_values.push_back(kCFBooleanTrue);
486 }
487 base::ScopedCFTypeRef<CFDictionaryRef> encoder_spec =
488 media::video_toolbox::DictionaryWithKeysAndValues(
489 encoder_keys.data(), encoder_values.data(), encoder_keys.size());
490
424 // Create the compression session. 491 // Create the compression session.
425
426 // Note that the encoder object is given to the compression session as the 492 // Note that the encoder object is given to the compression session as the
427 // callback context using a raw pointer. The C API does not allow us to use a 493 // callback context using a raw pointer. The C API does not allow us to use a
428 // smart pointer, nor is this encoder ref counted. However, this is still 494 // smart pointer, nor is this encoder ref counted. However, this is still
429 // safe, because we 1) we own the compression session and 2) we tear it down 495 // safe, because we 1) we own the compression session and 2) we tear it down
430 // safely. When destructing the encoder, the compression session is flushed 496 // safely. When destructing the encoder, the compression session is flushed
431 // and invalidated. Internally, VideoToolbox will join all of its threads 497 // and invalidated. Internally, VideoToolbox will join all of its threads
432 // before returning to the client. Therefore, when control returns to us, we 498 // before returning to the client. Therefore, when control returns to us, we
433 // are guaranteed that the output callback will not execute again. 499 // are guaranteed that the output callback will not execute again.
434 OSStatus status = videotoolbox_glue_->VTCompressionSessionCreate( 500 OSStatus status = videotoolbox_glue_->VTCompressionSessionCreate(
435 kCFAllocatorDefault, frame_size_.width(), frame_size_.height(), 501 kCFAllocatorDefault,
436 CoreMediaGlue::kCMVideoCodecType_H264, encoder_spec, buffer_attributes, 502 input_size.width(),
503 input_size.height(),
504 CoreMediaGlue::kCMVideoCodecType_H264,
505 encoder_spec,
506 attributes,
437 nullptr /* compressedDataAllocator */, 507 nullptr /* compressedDataAllocator */,
438 &H264VideoToolboxEncoder::CompressionCallback, 508 &VTVideoEncodeAccelerator::CompressionCallback,
439 reinterpret_cast<void*>(this), compression_session_.InitializeInto()); 509 reinterpret_cast<void*>(this),
510 compression_session_.InitializeInto());
440 if (status != noErr) { 511 if (status != noErr) {
441 DLOG(ERROR) << " VTCompressionSessionCreate failed: " << status; 512 DLOG(ERROR) << " VTCompressionSessionCreate failed: " << status;
442 // Notify that reinitialization has failed. 513 return false;
443 cast_environment_->PostTask( 514 }
444 CastEnvironment::MAIN, FROM_HERE, 515 DVLOG(3) << " VTCompressionSession created with HW encode: "
445 base::Bind(status_change_cb_, STATUS_CODEC_INIT_FAILED)); 516 << require_hw_encoding << ", input size=" << input_size.ToString();
446 return; 517 return true;
447 } 518 }
448 519
449 // Configure the session (apply session properties based on the current state 520 bool VTVideoEncodeAccelerator::ConfigureCompressionSession() {
450 // of the encoder, experimental tuning and requirements). 521 DCHECK(thread_checker_.CalledOnValidThread());
451 ConfigureCompressionSession(); 522 DCHECK(compression_session_);
452 523
453 // Update the video frame factory. 524 media::video_toolbox::SessionPropertySetter session_property_setter(
454 base::ScopedCFTypeRef<CVPixelBufferPoolRef> pool( 525 compression_session_, videotoolbox_glue_);
455 videotoolbox_glue_->VTCompressionSessionGetPixelBufferPool( 526 bool rv = true;
456 compression_session_), 527 rv &= session_property_setter.Set(
457 base::scoped_policy::RETAIN);
458 video_frame_factory_->Update(pool, frame_size_);
459
460 // Notify that reinitialization is done.
461 cast_environment_->PostTask(
462 CastEnvironment::MAIN, FROM_HERE,
463 base::Bind(status_change_cb_, STATUS_INITIALIZED));
464 }
465
466 void H264VideoToolboxEncoder::ConfigureCompressionSession() {
467 SetSessionProperty(
468 videotoolbox_glue_->kVTCompressionPropertyKey_ProfileLevel(), 528 videotoolbox_glue_->kVTCompressionPropertyKey_ProfileLevel(),
469 videotoolbox_glue_->kVTProfileLevel_H264_Main_AutoLevel()); 529 videotoolbox_glue_->kVTProfileLevel_H264_Baseline_AutoLevel());
470 SetSessionProperty(videotoolbox_glue_->kVTCompressionPropertyKey_RealTime(), 530 rv &= session_property_setter.Set(
471 true); 531 videotoolbox_glue_->kVTCompressionPropertyKey_RealTime(), true);
472 SetSessionProperty( 532 rv &= session_property_setter.Set(
473 videotoolbox_glue_->kVTCompressionPropertyKey_AllowFrameReordering(), 533 videotoolbox_glue_->kVTCompressionPropertyKey_AllowFrameReordering(),
474 false); 534 false);
475 SetSessionProperty( 535 DLOG_IF(ERROR, !rv) << " Setting session property failed.";
476 videotoolbox_glue_->kVTCompressionPropertyKey_MaxKeyFrameInterval(), 240); 536 return rv;
477 SetSessionProperty( 537 }
478 videotoolbox_glue_ 538
479 ->kVTCompressionPropertyKey_MaxKeyFrameIntervalDuration(), 539 void VTVideoEncodeAccelerator::DestroyCompressionSession() {
480 240); 540 // This method may be called on |encoder thread| or GPU child thread.
481 // TODO(jfroy): implement better bitrate control 541
482 // https://crbug.com/425352
483 SetSessionProperty(
484 videotoolbox_glue_->kVTCompressionPropertyKey_AverageBitRate(),
485 (video_config_.min_bitrate + video_config_.max_bitrate) / 2);
486 SetSessionProperty(
487 videotoolbox_glue_->kVTCompressionPropertyKey_ExpectedFrameRate(),
488 video_config_.max_frame_rate);
489 // Keep these attachment settings in-sync with those in Initialize().
490 SetSessionProperty(
491 videotoolbox_glue_->kVTCompressionPropertyKey_ColorPrimaries(),
492 kCVImageBufferColorPrimaries_ITU_R_709_2);
493 SetSessionProperty(
494 videotoolbox_glue_->kVTCompressionPropertyKey_TransferFunction(),
495 kCVImageBufferTransferFunction_ITU_R_709_2);
496 SetSessionProperty(
497 videotoolbox_glue_->kVTCompressionPropertyKey_YCbCrMatrix(),
498 kCVImageBufferYCbCrMatrix_ITU_R_709_2);
499 if (video_config_.max_number_of_video_buffers_used > 0) {
500 SetSessionProperty(
501 videotoolbox_glue_->kVTCompressionPropertyKey_MaxFrameDelayCount(),
502 video_config_.max_number_of_video_buffers_used);
503 }
504 }
505
506 void H264VideoToolboxEncoder::DestroyCompressionSession() {
507 DCHECK(thread_checker_.CalledOnValidThread());
508
509 // If the compression session exists, invalidate it. This blocks until all
510 // pending output callbacks have returned and any internal threads have
511 // joined, ensuring no output callback ever sees a dangling encoder pointer.
512 //
513 // Before destroying the compression session, the video frame factory's pool
514 // is updated to null so that no thread will produce new video frames via the
515 // factory until a new compression session is created. The current frame size
516 // is passed to prevent the video frame factory from posting |UpdateFrameSize|
517 // tasks. Indeed, |DestroyCompressionSession| is either called from
518 // |ResetCompressionSession|, in which case a new pool and frame size will be
519 // set, or from callsites that require that there be no compression session
520 // (ex: the dtor).
521 if (compression_session_) { 542 if (compression_session_) {
522 video_frame_factory_->Update(
523 base::ScopedCFTypeRef<CVPixelBufferPoolRef>(nullptr), frame_size_);
524 videotoolbox_glue_->VTCompressionSessionInvalidate(compression_session_); 543 videotoolbox_glue_->VTCompressionSessionInvalidate(compression_session_);
525 compression_session_.reset(); 544 compression_session_.reset();
526 } 545 }
527 } 546 }
528 547
529 bool H264VideoToolboxEncoder::EncodeVideoFrame( 548 } // namespace content
530 const scoped_refptr<media::VideoFrame>& video_frame,
531 const base::TimeTicks& reference_time,
532 const FrameEncodedCallback& frame_encoded_callback) {
533 DCHECK(thread_checker_.CalledOnValidThread());
534 DCHECK(!frame_encoded_callback.is_null());
535
536 // Reject empty video frames.
537 const gfx::Size frame_size = video_frame->visible_rect().size();
538 if (frame_size.IsEmpty()) {
539 DVLOG(1) << "Rejecting empty video frame.";
540 return false;
541 }
542
543 // Handle frame size changes. This will reset the compression session.
544 if (frame_size != frame_size_) {
545 DVLOG(1) << "EncodeVideoFrame: Detected frame size change.";
546 UpdateFrameSize(frame_size);
547 }
548
549 // Need a compression session to continue.
550 if (!compression_session_) {
551 DLOG(ERROR) << "No compression session.";
552 return false;
553 }
554
555 // Wrap the VideoFrame in a CVPixelBuffer. In all cases, no data will be
556 // copied. If the VideoFrame was created by this encoder's video frame
557 // factory, then the returned CVPixelBuffer will have been obtained from the
558 // compression session's pixel buffer pool. This will eliminate a copy of the
559 // frame into memory visible by the hardware encoder. The VideoFrame's
560 // lifetime is extended for the lifetime of the returned CVPixelBuffer.
561 auto pixel_buffer = media::WrapVideoFrameInCVPixelBuffer(*video_frame);
562 if (!pixel_buffer) {
563 DLOG(ERROR) << "WrapVideoFrameInCVPixelBuffer failed.";
564 return false;
565 }
566
567 // Convert the frame timestamp to CMTime.
568 auto timestamp_cm = CoreMediaGlue::CMTimeMake(
569 (reference_time - base::TimeTicks()).InMicroseconds(), USEC_PER_SEC);
570
571 // Wrap information we'll need after the frame is encoded in a heap object.
572 // We'll get the pointer back from the VideoToolbox completion callback.
573 scoped_ptr<InProgressFrameEncode> request(new InProgressFrameEncode(
574 RtpTimeTicks::FromTimeDelta(video_frame->timestamp(), kVideoFrequency),
575 reference_time, frame_encoded_callback));
576
577 // Build a suitable frame properties dictionary for keyframes.
578 base::ScopedCFTypeRef<CFDictionaryRef> frame_props;
579 if (encode_next_frame_as_keyframe_) {
580 frame_props = DictionaryWithKeyValue(
581 videotoolbox_glue_->kVTEncodeFrameOptionKey_ForceKeyFrame(),
582 kCFBooleanTrue);
583 encode_next_frame_as_keyframe_ = false;
584 }
585
586 // Submit the frame to the compression session. The function returns as soon
587 // as the frame has been enqueued.
588 OSStatus status = videotoolbox_glue_->VTCompressionSessionEncodeFrame(
589 compression_session_, pixel_buffer, timestamp_cm,
590 CoreMediaGlue::CMTime{0, 0, 0, 0}, frame_props,
591 reinterpret_cast<void*>(request.release()), nullptr);
592 if (status != noErr) {
593 DLOG(ERROR) << " VTCompressionSessionEncodeFrame failed: " << status;
594 return false;
595 }
596
597 return true;
598 }
599
600 void H264VideoToolboxEncoder::UpdateFrameSize(const gfx::Size& size_needed) {
601 DCHECK(thread_checker_.CalledOnValidThread());
602
603 // Our video frame factory posts a task to update the frame size when its
604 // cache of the frame size differs from what the client requested. To avoid
605 // spurious encoder resets, check again here.
606 if (size_needed == frame_size_) {
607 DCHECK(compression_session_);
608 return;
609 }
610
611 VLOG(1) << "Resetting compression session (for frame size change from "
612 << frame_size_.ToString() << " to " << size_needed.ToString() << ").";
613
614 // If there is an existing session, finish every pending frame.
615 if (compression_session_) {
616 EmitFrames();
617 }
618
619 // Store the new frame size.
620 frame_size_ = size_needed;
621
622 // Reset the compression session.
623 ResetCompressionSession();
624 }
625
626 void H264VideoToolboxEncoder::SetBitRate(int /*new_bit_rate*/) {
627 DCHECK(thread_checker_.CalledOnValidThread());
628 // VideoToolbox does not seem to support bitrate reconfiguration.
629 }
630
631 void H264VideoToolboxEncoder::GenerateKeyFrame() {
632 DCHECK(thread_checker_.CalledOnValidThread());
633 encode_next_frame_as_keyframe_ = true;
634 }
635
636 scoped_ptr<VideoFrameFactory>
637 H264VideoToolboxEncoder::CreateVideoFrameFactory() {
638 DCHECK(thread_checker_.CalledOnValidThread());
639 return scoped_ptr<VideoFrameFactory>(
640 new VideoFrameFactoryImpl::Proxy(video_frame_factory_));
641 }
642
643 void H264VideoToolboxEncoder::EmitFrames() {
644 DCHECK(thread_checker_.CalledOnValidThread());
645 if (!compression_session_)
646 return;
647
648 OSStatus status = videotoolbox_glue_->VTCompressionSessionCompleteFrames(
649 compression_session_, CoreMediaGlue::CMTime{0, 0, 0, 0});
650 if (status != noErr) {
651 DLOG(ERROR) << " VTCompressionSessionCompleteFrames failed: " << status;
652 }
653 }
654
655 void H264VideoToolboxEncoder::OnSuspend() {
656 VLOG(1)
657 << "OnSuspend: Emitting all frames and destroying compression session.";
658 EmitFrames();
659 DestroyCompressionSession();
660 power_suspended_ = true;
661 }
662
663 void H264VideoToolboxEncoder::OnResume() {
664 power_suspended_ = false;
665
666 // Reset the compression session only if the frame size is not zero (which
667 // will obviously fail). It is possible for the frame size to be zero if no
668 // frame was submitted for encoding or requested from the video frame factory
669 // before suspension.
670 if (!frame_size_.IsEmpty()) {
671 VLOG(1) << "OnResume: Resetting compression session.";
672 ResetCompressionSession();
673 }
674 }
675
676 bool H264VideoToolboxEncoder::SetSessionProperty(CFStringRef key,
677 int32_t value) {
678 base::ScopedCFTypeRef<CFNumberRef> cfvalue(
679 CFNumberCreate(nullptr, kCFNumberSInt32Type, &value));
680 return videotoolbox_glue_->VTSessionSetProperty(compression_session_, key,
681 cfvalue) == noErr;
682 }
683
684 bool H264VideoToolboxEncoder::SetSessionProperty(CFStringRef key, bool value) {
685 CFBooleanRef cfvalue = (value) ? kCFBooleanTrue : kCFBooleanFalse;
686 return videotoolbox_glue_->VTSessionSetProperty(compression_session_, key,
687 cfvalue) == noErr;
688 }
689
690 bool H264VideoToolboxEncoder::SetSessionProperty(CFStringRef key,
691 CFStringRef value) {
692 return videotoolbox_glue_->VTSessionSetProperty(compression_session_, key,
693 value) == noErr;
694 }
695
696 void H264VideoToolboxEncoder::CompressionCallback(void* encoder_opaque,
697 void* request_opaque,
698 OSStatus status,
699 VTEncodeInfoFlags info,
700 CMSampleBufferRef sbuf) {
701 auto encoder = reinterpret_cast<H264VideoToolboxEncoder*>(encoder_opaque);
702 const scoped_ptr<InProgressFrameEncode> request(
703 reinterpret_cast<InProgressFrameEncode*>(request_opaque));
704 bool keyframe = false;
705 bool has_frame_data = false;
706
707 if (status != noErr) {
708 DLOG(ERROR) << " encoding failed: " << status;
709 encoder->cast_environment_->PostTask(
710 CastEnvironment::MAIN, FROM_HERE,
711 base::Bind(encoder->status_change_cb_, STATUS_CODEC_RUNTIME_ERROR));
712 } else if ((info & VideoToolboxGlue::kVTEncodeInfo_FrameDropped)) {
713 DVLOG(2) << " frame dropped";
714 } else {
715 auto sample_attachments =
716 static_cast<CFDictionaryRef>(CFArrayGetValueAtIndex(
717 CoreMediaGlue::CMSampleBufferGetSampleAttachmentsArray(sbuf, true),
718 0));
719
720 // If the NotSync key is not present, it implies Sync, which indicates a
721 // keyframe (at least I think, VT documentation is, erm, sparse). Could
722 // alternatively use kCMSampleAttachmentKey_DependsOnOthers == false.
723 keyframe = !CFDictionaryContainsKey(
724 sample_attachments,
725 CoreMediaGlue::kCMSampleAttachmentKey_NotSync());
726 has_frame_data = true;
727 }
728
729 // Increment the encoder-scoped frame id and assign the new value to this
730 // frame. VideoToolbox calls the output callback serially, so this is safe.
731 const uint32_t frame_id = ++encoder->last_frame_id_;
732
733 scoped_ptr<SenderEncodedFrame> encoded_frame(new SenderEncodedFrame());
734 encoded_frame->frame_id = frame_id;
735 encoded_frame->reference_time = request->reference_time;
736 encoded_frame->rtp_timestamp = request->rtp_timestamp;
737 if (keyframe) {
738 encoded_frame->dependency = EncodedFrame::KEY;
739 encoded_frame->referenced_frame_id = frame_id;
740 } else {
741 encoded_frame->dependency = EncodedFrame::DEPENDENT;
742 // H.264 supports complex frame reference schemes (multiple reference
743 // frames, slice references, backward and forward references, etc). Cast
744 // doesn't support the concept of forward-referencing frame dependencies or
745 // multiple frame dependencies; so pretend that all frames are only
746 // decodable after their immediately preceding frame is decoded. This will
747 // ensure a Cast receiver only attempts to decode the frames sequentially
748 // and in order. Furthermore, the encoder is configured to never use forward
749 // references (see |kVTCompressionPropertyKey_AllowFrameReordering|). There
750 // is no way to prevent multiple reference frames.
751 encoded_frame->referenced_frame_id = frame_id - 1;
752 }
753
754 if (has_frame_data)
755 CopySampleBufferToAnnexBBuffer(sbuf, &encoded_frame->data, keyframe);
756
757 // TODO(miu): Compute and populate the |deadline_utilization| and
758 // |lossy_utilization| performance metrics in |encoded_frame|.
759
760 encoded_frame->encode_completion_time =
761 encoder->cast_environment_->Clock()->NowTicks();
762 encoder->cast_environment_->PostTask(
763 CastEnvironment::MAIN, FROM_HERE,
764 base::Bind(request->frame_encoded_callback,
765 base::Passed(&encoded_frame)));
766 }
767
768 } // namespace cast
769 } // namespace media