OLD | NEW |
| (Empty) |
1 /* | |
2 * libjingle | |
3 * Copyright 2004--2011, Google Inc. | |
4 * | |
5 * Redistribution and use in source and binary forms, with or without | |
6 * modification, are permitted provided that the following conditions are met: | |
7 * | |
8 * 1. Redistributions of source code must retain the above copyright notice, | |
9 * this list of conditions and the following disclaimer. | |
10 * 2. Redistributions in binary form must reproduce the above copyright notice, | |
11 * this list of conditions and the following disclaimer in the documentation | |
12 * and/or other materials provided with the distribution. | |
13 * 3. The name of the author may not be used to endorse or promote products | |
14 * derived from this software without specific prior written permission. | |
15 * | |
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED | |
17 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF | |
18 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO | |
19 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |
20 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, | |
21 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; | |
22 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, | |
23 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR | |
24 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF | |
25 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
26 */ | |
27 | |
28 #ifdef HAVE_CONFIG_H | |
29 #include <config.h> | |
30 #endif | |
31 | |
32 #ifdef HAVE_WEBRTC_VIDEO | |
33 | |
34 #include "talk/session/phone/webrtcvideoengine.h" | |
35 | |
36 #include "talk/base/basictypes.h" | |
37 #include "talk/base/common.h" | |
38 #include "talk/base/buffer.h" | |
39 #include "talk/base/byteorder.h" | |
40 #include "talk/base/logging.h" | |
41 #include "talk/base/stringutils.h" | |
42 #include "talk/session/phone/videorenderer.h" | |
43 #include "talk/session/phone/webrtcpassthroughrender.h" | |
44 #include "talk/session/phone/webrtcvoiceengine.h" | |
45 #include "talk/session/phone/webrtcvideocapturer.h" | |
46 #include "talk/session/phone/webrtcvideoframe.h" | |
47 #include "talk/session/phone/webrtcvie.h" | |
48 #include "talk/session/phone/webrtcvoe.h" | |
49 | |
// TODO Change video protection calls when WebRTC API has changed.
#define WEBRTC_VIDEO_AVPF_NACK_ONLY

namespace cricket {

// Minimum severity forwarded from the WebRTC trace callback by default.
static const int kDefaultLogSeverity = talk_base::LS_WARNING;

// Default encoder bitrate bounds, in kbps (applied in
// ConvertFromCricketVideoCodec).
static const int kMinVideoBitrate = 100;
static const int kStartVideoBitrate = 300;
static const int kMaxVideoBitrate = 2000;

// Maximum video RTP packet size, in bytes -- presumably kept under typical
// path MTU to leave headroom for IP/UDP and tunneling overhead.
static const int kVideoMtu = 1200;

// Socket buffer size used for video RTP, in bytes.
static const int kVideoRtpBufferSize = 65536;

// Payload names as they appear in SDP.
static const char kVp8PayloadName[] = "VP8";
static const char kRedPayloadName[] = "red";
static const char kFecPayloadName[] = "ulpfec";

static const int kDefaultNumberOfTemporalLayers = 3;
70 | |
71 static void LogMultiline(talk_base::LoggingSeverity sev, char* text) { | |
72 const char* delim = "\r\n"; | |
73 for (char* tok = strtok(text, delim); tok; tok = strtok(NULL, delim)) { | |
74 LOG_V(sev) << tok; | |
75 } | |
76 } | |
77 | |
// Adapts a cricket::VideoRenderer to the webrtc::ExternalRenderer interface
// so decoded frames delivered by the ViE render module can be forwarded to
// it. All state is guarded by |crit_| since the render callbacks and the
// accessors may run on different threads.
class WebRtcRenderAdapter : public webrtc::ExternalRenderer {
 public:
  explicit WebRtcRenderAdapter(VideoRenderer* renderer)
      : renderer_(renderer), width_(0), height_(0) {
  }
  virtual ~WebRtcRenderAdapter() {
  }

  // Swaps the target renderer; NULL disables forwarding without
  // unregistering this adapter.
  void SetRenderer(VideoRenderer* renderer) {
    talk_base::CritScope cs(&crit_);
    renderer_ = renderer;
  }
  // Implementation of webrtc::ExternalRenderer.
  virtual int FrameSizeChange(unsigned int width, unsigned int height,
                              unsigned int /*number_of_streams*/) {
    talk_base::CritScope cs(&crit_);
    if (renderer_ == NULL) {
      return 0;
    }
    // Remember the size; DeliverFrame() needs it to interpret raw buffers.
    width_ = width;
    height_ = height;
    return renderer_->SetSize(width_, height_, 0) ? 0 : -1;
  }
  virtual int DeliverFrame(unsigned char* buffer, int buffer_size,
                           unsigned int time_stamp) {
    talk_base::CritScope cs(&crit_);
    // Count every delivered frame, even when no renderer is attached.
    frame_rate_tracker_.Update(1);
    if (renderer_ == NULL) {
      return 0;
    }
    // Wrap |buffer| without copying; Attach() does not take ownership.
    WebRtcVideoFrame video_frame;
    video_frame.Attach(buffer, buffer_size, width_, height_,
                       1, 1, 0, time_stamp, 0);

    int ret = renderer_->RenderFrame(&video_frame) ? 0 : -1;
    // Detach before |video_frame| is destroyed so it does not free the
    // caller-owned buffer.
    uint8* buffer_temp;
    size_t buffer_size_temp;
    video_frame.Detach(&buffer_temp, &buffer_size_temp);
    return ret;
  }

  unsigned int width() {
    talk_base::CritScope cs(&crit_);
    return width_;
  }
  unsigned int height() {
    talk_base::CritScope cs(&crit_);
    return height_;
  }
  // Delivered frames per second, as observed by DeliverFrame().
  int framerate() {
    talk_base::CritScope cs(&crit_);
    return frame_rate_tracker_.units_second();
  }

 private:
  talk_base::CriticalSection crit_;
  VideoRenderer* renderer_;
  unsigned int width_;
  unsigned int height_;
  talk_base::RateTracker frame_rate_tracker_;
};
139 | |
140 class WebRtcDecoderObserver : public webrtc::ViEDecoderObserver { | |
141 public: | |
142 WebRtcDecoderObserver(int video_channel) | |
143 : video_channel_(video_channel), | |
144 framerate_(0), | |
145 bitrate_(0), | |
146 firs_requested_(0) { } | |
147 | |
148 // virtual functions from VieDecoderObserver. | |
149 virtual void IncomingCodecChanged(const int videoChannel, | |
150 const webrtc::VideoCodec& videoCodec) { } | |
151 virtual void IncomingRate(const int videoChannel, | |
152 const unsigned int framerate, | |
153 const unsigned int bitrate) { | |
154 ASSERT(video_channel_ == videoChannel); | |
155 framerate_ = framerate; | |
156 bitrate_ = bitrate; | |
157 } | |
158 virtual void RequestNewKeyFrame(const int videoChannel) { | |
159 ASSERT(video_channel_ == videoChannel); | |
160 ++firs_requested_; | |
161 } | |
162 | |
163 int framerate() const { return framerate_; } | |
164 int bitrate() const { return bitrate_; } | |
165 int firs_requested() const { return firs_requested_; } | |
166 | |
167 private: | |
168 int video_channel_; | |
169 int framerate_; | |
170 int bitrate_; | |
171 int firs_requested_; | |
172 }; | |
173 | |
174 class WebRtcEncoderObserver : public webrtc::ViEEncoderObserver { | |
175 public: | |
176 WebRtcEncoderObserver(int video_channel) | |
177 : video_channel_(video_channel), framerate_(0), bitrate_(0) { } | |
178 | |
179 // virtual functions from VieEncoderObserver. | |
180 virtual void OutgoingRate(const int videoChannel, | |
181 const unsigned int framerate, | |
182 const unsigned int bitrate) { | |
183 ASSERT(video_channel_ == videoChannel); | |
184 framerate_ = framerate; | |
185 bitrate_ = bitrate; | |
186 } | |
187 | |
188 int framerate() const { return framerate_; } | |
189 int bitrate() const { return bitrate_; } | |
190 | |
191 private: | |
192 int video_channel_; | |
193 int framerate_; | |
194 int bitrate_; | |
195 }; | |
196 | |
197 class LocalStreamInfo { | |
198 public: | |
199 int width() { | |
200 talk_base::CritScope cs(&crit_); | |
201 return width_; | |
202 } | |
203 int height() { | |
204 talk_base::CritScope cs(&crit_); | |
205 return height_; | |
206 } | |
207 int framerate() { | |
208 talk_base::CritScope cs(&crit_); | |
209 return rate_tracker_.units_second(); | |
210 } | |
211 | |
212 void UpdateFrame(int width, int height) { | |
213 talk_base::CritScope cs(&crit_); | |
214 width_ = width; | |
215 height_ = height; | |
216 rate_tracker_.Update(1); | |
217 } | |
218 | |
219 private: | |
220 talk_base::CriticalSection crit_; | |
221 unsigned int width_; | |
222 unsigned int height_; | |
223 talk_base::RateTracker rate_tracker_; | |
224 }; | |
225 | |
// Codec preference list, ordered by descending preference. RED and ULPFEC
// are compiled out while WEBRTC_VIDEO_AVPF_NACK_ONLY is defined (see the
// TODO at the top of this file).
const WebRtcVideoEngine::VideoCodecPref
    WebRtcVideoEngine::kVideoCodecPrefs[] = {
    {kVp8PayloadName, 100, 0},
#ifndef WEBRTC_VIDEO_AVPF_NACK_ONLY
    {kRedPayloadName, 101, 1},
    {kFecPayloadName, 102, 2},
#endif
};

// The formats are sorted by the descending order of width. We use the order to
// find the next format for CPU and bandwidth adaptation.
const VideoFormatPod WebRtcVideoEngine::kVideoFormats[] = {
  {1280, 800, 30, FOURCC_ANY},
  {1280, 720, 30, FOURCC_ANY},
  {960, 600, 30, FOURCC_ANY},
  {960, 540, 30, FOURCC_ANY},
  {640, 400, 30, FOURCC_ANY},
  {640, 360, 30, FOURCC_ANY},
  {640, 480, 30, FOURCC_ANY},
  {480, 300, 30, FOURCC_ANY},
  {480, 270, 30, FOURCC_ANY},
  {480, 360, 30, FOURCC_ANY},
  {320, 200, 30, FOURCC_ANY},
  {320, 180, 30, FOURCC_ANY},
  {320, 240, 30, FOURCC_ANY},
  {240, 150, 30, FOURCC_ANY},
  {240, 135, 30, FOURCC_ANY},
  {240, 180, 30, FOURCC_ANY},
  {160, 100, 30, FOURCC_ANY},
  {160, 90, 30, FOURCC_ANY},
  {160, 120, 30, FOURCC_ANY},
};

// Default maximum format; used by Construct() to build the initial codec
// list via SetDefaultCodec().
const VideoFormatPod WebRtcVideoEngine::kDefaultVideoFormat =
    {640, 400, 30, FOURCC_ANY};
// Default constructor: the engine creates and owns its own ViE wrapper and
// tracing; no voice engine (A/V sync is unavailable until SetVoiceEngine).
WebRtcVideoEngine::WebRtcVideoEngine() {
  Construct(new ViEWrapper(), new ViETraceWrapper(), NULL);
}

// Injection constructor taking an externally created ViE wrapper --
// presumably for tests; the engine still takes ownership of it.
WebRtcVideoEngine::WebRtcVideoEngine(WebRtcVoiceEngine* voice_engine,
                                     ViEWrapper* vie_wrapper) {
  Construct(vie_wrapper, new ViETraceWrapper(), voice_engine);
}

// Injection constructor taking both the ViE wrapper and the trace wrapper.
WebRtcVideoEngine::WebRtcVideoEngine(WebRtcVoiceEngine* voice_engine,
                                     ViEWrapper* vie_wrapper,
                                     ViETraceWrapper* tracing) {
  Construct(vie_wrapper, tracing, voice_engine);
}
276 | |
// One-time member initialization shared by all constructors. Takes
// ownership of |vie_wrapper| and |tracing|; |voice_engine| is borrowed
// (may be NULL) and is used later for A/V sync.
void WebRtcVideoEngine::Construct(ViEWrapper* vie_wrapper,
                                  ViETraceWrapper* tracing,
                                  WebRtcVoiceEngine* voice_engine) {
  LOG(LS_INFO) << "WebRtcVideoEngine::WebRtcVideoEngine";
  vie_wrapper_.reset(vie_wrapper);
  vie_wrapper_base_initialized_ = false;
  tracing_.reset(tracing);
  voice_engine_ = voice_engine;
  initialized_ = false;
  log_level_ = kDefaultLogSeverity;
  render_module_.reset(new WebRtcPassthroughRender());
  local_renderer_w_ = local_renderer_h_ = 0;
  local_renderer_ = NULL;
  owns_capturer_ = false;
  video_capturer_ = NULL;
  capture_started_ = false;

  // Route WebRTC trace output through Print() at the current log level.
  ApplyLogging();
  if (tracing_->SetTraceCallback(this) != 0) {
    LOG_RTCERR1(SetTraceCallback, this);
  }

  // Set default quality levels for our supported codecs. They may be
  // overridden when CPU performance is known to be low, and can be updated
  // explicitly by calling SetDefaultCodec -- for example from an application
  // preference setting, or by the server in response to our reported
  // system info.
  VideoCodec max_codec(kVideoCodecPrefs[0].payload_type,
                       kVideoCodecPrefs[0].name,
                       kDefaultVideoFormat.width,
                       kDefaultVideoFormat.height,
                       kDefaultVideoFormat.framerate,
                       0);
  if (!SetDefaultCodec(max_codec)) {
    LOG(LS_ERROR) << "Failed to initialize list of supported codec types";
  }
}
313 | |
WebRtcVideoEngine::~WebRtcVideoEngine() {
  // Release the capturer first so no more frames arrive during teardown.
  ClearCapturer();
  LOG(LS_INFO) << "WebRtcVideoEngine::~WebRtcVideoEngine";
  if (initialized_) {
    Terminate();
  }
  // Stop receiving trace callbacks before members are destroyed.
  tracing_->SetTraceCallback(NULL);
}
322 | |
323 bool WebRtcVideoEngine::Init() { | |
324 LOG(LS_INFO) << "WebRtcVideoEngine::Init"; | |
325 bool result = InitVideoEngine(); | |
326 if (result) { | |
327 LOG(LS_INFO) << "VideoEngine Init done"; | |
328 } else { | |
329 LOG(LS_ERROR) << "VideoEngine Init failed, releasing"; | |
330 Terminate(); | |
331 } | |
332 return result; | |
333 } | |
334 | |
// Brings up the underlying WebRTC VideoEngine: base init, voice-engine
// hookup for A/V sync, observer registration, and our render module.
bool WebRtcVideoEngine::InitVideoEngine() {
  LOG(LS_INFO) << "WebRtcVideoEngine::InitVideoEngine";

  // Init WebRTC VideoEngine. Only done once, even if Init() is retried
  // after a failed attempt.
  if (!vie_wrapper_base_initialized_) {
    if (vie_wrapper_->base()->Init() != 0) {
      LOG_RTCERR0(Init);
      return false;
    }
    vie_wrapper_base_initialized_ = true;
  }

  // Log the VideoEngine version info.
  char buffer[1024] = "";
  if (vie_wrapper_->base()->GetVersion(buffer) != 0) {
    LOG_RTCERR0(GetVersion);
    return false;
  }

  LOG(LS_INFO) << "WebRtc VideoEngine Version:";
  LogMultiline(talk_base::LS_INFO, buffer);

  // Hook up to VoiceEngine for sync purposes, if supplied.
  if (!voice_engine_) {
    LOG(LS_WARNING) << "NULL voice engine";
  } else if ((vie_wrapper_->base()->SetVoiceEngine(
      voice_engine_->voe()->engine())) != 0) {
    LOG_RTCERR0(SetVoiceEngine);
    return false;
  }

  // Register for callbacks from the engine.
  if ((vie_wrapper_->base()->RegisterObserver(*this)) != 0) {
    LOG_RTCERR0(RegisterObserver);
    return false;
  }

  // Register our custom render module.
  if (vie_wrapper_->render()->RegisterVideoRenderModule(
      *render_module_.get()) != 0) {
    LOG_RTCERR0(RegisterVideoRenderModule);
    return false;
  }

  initialized_ = true;
  return true;
}
382 | |
// Tears down what InitVideoEngine() set up. Errors are logged but not
// propagated, since shutdown should proceed as far as possible.
void WebRtcVideoEngine::Terminate() {
  LOG(LS_INFO) << "WebRtcVideoEngine::Terminate";
  initialized_ = false;
  // Stop any running capture before unhooking the engine.
  SetCapture(false);

  if (vie_wrapper_->render()->DeRegisterVideoRenderModule(
      *render_module_.get()) != 0) {
    LOG_RTCERR0(DeRegisterVideoRenderModule);
  }

  if (vie_wrapper_->base()->DeregisterObserver() != 0) {
    LOG_RTCERR0(DeregisterObserver);
  }

  // Detach the voice engine used for A/V sync.
  if (vie_wrapper_->base()->SetVoiceEngine(NULL) != 0) {
    LOG_RTCERR0(SetVoiceEngine);
  }
}
401 | |
// Reports both send and receive capability for video.
int WebRtcVideoEngine::GetCapabilities() {
  return VIDEO_RECV | VIDEO_SEND;
}

// No engine-level options are currently supported; any value is accepted.
bool WebRtcVideoEngine::SetOptions(int options) {
  return true;
}

// Only the max codec from |config| is used; other fields are ignored.
bool WebRtcVideoEngine::SetDefaultEncoderConfig(
    const VideoEncoderConfig& config) {
  return SetDefaultCodec(config.max_codec);
}
414 | |
// SetDefaultCodec may be called while the capturer is running. For example, a
// test call is started in a page with QVGA default codec, and then a real call
// is started in another page with VGA default codec. This is the corner case
// and happens only when a session is started. We ignore this case currently.
bool WebRtcVideoEngine::SetDefaultCodec(const VideoCodec& codec) {
  if (!RebuildCodecList(codec)) {
    LOG(LS_WARNING) << "Failed to RebuildCodecList";
    return false;
  }

  // After RebuildCodecList(), video_codecs_[0] is the highest-preference
  // codec at or below |codec|; cache its format for capture negotiation.
  default_codec_format_ = VideoFormat(
      video_codecs_[0].width,
      video_codecs_[0].height,
      VideoFormat::FpsToInterval(video_codecs_[0].framerate),
      FOURCC_ANY);
  return true;
}
432 | |
433 WebRtcVideoMediaChannel* WebRtcVideoEngine::CreateChannel( | |
434 VoiceMediaChannel* voice_channel) { | |
435 WebRtcVideoMediaChannel* channel = | |
436 new WebRtcVideoMediaChannel(this, voice_channel); | |
437 if (!channel->Init()) { | |
438 delete channel; | |
439 channel = NULL; | |
440 } | |
441 return channel; | |
442 } | |
443 | |
// Selects the camera to capture from. A NULL |device| releases the current
// capturer. Returns false if the named device cannot be opened.
bool WebRtcVideoEngine::SetCaptureDevice(const Device* device) {
  if (!device) {
    ClearCapturer();
    LOG(LS_INFO) << "Camera set to NULL";
    return true;
  }
  // No-op if the device hasn't changed.
  if ((video_capturer_ != NULL) && video_capturer_->GetId() == device->id) {
    return true;
  }
  // Create a new capturer for the specified device.
  VideoCapturer* capturer = CreateVideoCapturer(*device);
  if (!capturer) {
    LOG(LS_ERROR) << "Failed to create camera '" << device->name << "', id='"
                  << device->id << "'";
    return false;
  }
  // The engine owns capturers it creates itself.
  const bool owns_capturer = true;
  if (!SetCapturer(capturer, owns_capturer)) {
    return false;
  }
  LOG(LS_INFO) << "Camera set to '" << device->name << "', id='"
               << device->id << "'";
  return true;
}
469 | |
// Installs a capturer wrapping an externally created VideoCaptureModule and
// immediately starts capturing. A NULL |vcm| clears the capturer, but only
// when the current one is not running.
bool WebRtcVideoEngine::SetCaptureModule(webrtc::VideoCaptureModule* vcm) {
  if (!vcm) {
    if ((video_capturer_ != NULL) && video_capturer_->IsRunning()) {
      LOG(LS_WARNING) << "Failed to set camera to NULL when is running.";
      return false;
    } else {
      ClearCapturer();
      LOG(LS_INFO) << "Camera set to NULL";
      return true;
    }
  }
  // Create a new capturer for the specified device.
  WebRtcVideoCapturer* capturer = new WebRtcVideoCapturer;
  if (!capturer->Init(vcm)) {
    LOG(LS_ERROR) << "Failed to create camera from VCM";
    delete capturer;
    return false;
  }
  // The engine owns the wrapper (though not necessarily the VCM itself).
  const bool owns_capturer = true;
  if (!SetCapturer(capturer, owns_capturer)) {
    return false;
  }
  LOG(LS_INFO) << "Camera created with VCM";
  // CR_PENDING also counts as success: capture may start asynchronously.
  CaptureResult ret = SetCapture(true);
  if (ret != cricket::CR_SUCCESS && ret != cricket::CR_PENDING) {
    return false;
  }
  return true;
}
499 | |
// Installs (or, with NULL, removes) an externally owned capturer. The ssrc
// argument is currently ignored. Ordering matters: register before
// starting; stop before unregistering.
bool WebRtcVideoEngine::SetVideoCapturer(VideoCapturer* capturer,
                                         uint32 /*ssrc*/) {
  const bool capture = (capturer != NULL);
  // The caller retains ownership of |capturer|.
  const bool owns_capturer = false;
  CaptureResult res = CR_FAILURE;
  if (capture) {
    // Register the capturer before starting to capture.
    if (!SetCapturer(capturer, owns_capturer)) {
      return false;
    }
    const bool kEnableCapture = true;
    res = SetCapture(kEnableCapture);
  } else {
    // Stop capturing before unregistering the capturer.
    const bool kDisableCapture = false;
    res = SetCapture(kDisableCapture);
    if (!SetCapturer(capturer, owns_capturer)) {
      return false;
    }
  }
  return (res == CR_SUCCESS) || (res == CR_PENDING);
}
522 | |
523 bool WebRtcVideoEngine::SetLocalRenderer(VideoRenderer* renderer) { | |
524 local_renderer_w_ = local_renderer_h_ = 0; | |
525 local_renderer_ = renderer; | |
526 return true; | |
527 } | |
528 | |
529 CaptureResult WebRtcVideoEngine::SetCapture(bool capture) { | |
530 bool old_capture = capture_started_; | |
531 capture_started_ = capture; | |
532 CaptureResult res = UpdateCapturingState(); | |
533 if (res != CR_SUCCESS && res != CR_PENDING) { | |
534 capture_started_ = old_capture; | |
535 } | |
536 return res; | |
537 } | |
538 | |
539 VideoCapturer* WebRtcVideoEngine::CreateVideoCapturer(const Device& device) { | |
540 WebRtcVideoCapturer* capturer = new WebRtcVideoCapturer; | |
541 if (!capturer->Init(device)) { | |
542 delete capturer; | |
543 return NULL; | |
544 } | |
545 return capturer; | |
546 } | |
547 | |
// Reconciles the capturer's actual state with the desired state recorded in
// |capture_started_|: starts the capturer with the best matching format, or
// stops it. Returns CR_SUCCESS or CR_PENDING on success.
CaptureResult WebRtcVideoEngine::UpdateCapturingState() {
  CaptureResult result = CR_SUCCESS;

  bool capture = capture_started_;
  if (!IsCapturing() && capture) {  // Start capturing.
    if (video_capturer_ == NULL) {
      return CR_NO_DEVICE;
    }

    // Ask the capturer for its closest supported match to the default
    // codec format; log the supported formats if there is none.
    VideoFormat capture_format;
    if (!video_capturer_->GetBestCaptureFormat(default_codec_format_,
                                               &capture_format)) {
      LOG(LS_WARNING) << "Unsupported format:"
                      << " width=" << default_codec_format_.width
                      << " height=" << default_codec_format_.height
                      << ". Supported formats are:";
      const std::vector<VideoFormat>* formats =
          video_capturer_->GetSupportedFormats();
      if (formats) {
        for (std::vector<VideoFormat>::const_iterator i = formats->begin();
             i != formats->end(); ++i) {
          const VideoFormat& format = *i;
          LOG(LS_WARNING) << " " << GetFourccName(format.fourcc) << ":"
                          << format.width << "x" << format.height << "x"
                          << format.framerate();
        }
      }
      return CR_FAILURE;
    }

    // Start the video capturer.
    result = video_capturer_->Start(capture_format);
    if (CR_SUCCESS != result && CR_PENDING != result) {
      LOG(LS_ERROR) << "Failed to start the video capturer";
      return result;
    }
  } else if (IsCapturing() && !capture) {  // Stop capturing.
    video_capturer_->Stop();
  }

  return result;
}
590 | |
591 bool WebRtcVideoEngine::IsCapturing() const { | |
592 return (video_capturer_ != NULL) && video_capturer_->IsRunning(); | |
593 } | |
594 | |
// Signal handler invoked for every captured frame. Crops vertically to the
// default codec's aspect ratio, converts to I420, then fans the frame out
// to the local preview renderer and to every sending media channel.
void WebRtcVideoEngine::OnFrameCaptured(VideoCapturer* capturer,
                                        const CapturedFrame* frame) {
  // Force 16:10 for now. We'll be smarter with the capture refactor.
  int cropped_height = frame->width * default_codec_format_.height
      / default_codec_format_.width;
  if (cropped_height > frame->height) {
    // TODO: Once we support horizontal cropping, add cropped_width.
    cropped_height = frame->height;
  }

  // This CapturedFrame* will already be in I420. In the future, when
  // WebRtcVideoFrame has support for independent planes, we can just attach
  // to it and update the pointers when cropping.
  WebRtcVideoFrame i420_frame;
  if (!i420_frame.Init(frame, frame->width, cropped_height)) {
    LOG(LS_ERROR) << "Couldn't convert to I420! "
                  << frame->width << " x " << cropped_height;
    return;
  }

  // Send I420 frame to the local renderer.
  if (local_renderer_) {
    // Only call SetSize() when the frame dimensions actually change.
    if (local_renderer_w_ != static_cast<int>(i420_frame.GetWidth()) ||
        local_renderer_h_ != static_cast<int>(i420_frame.GetHeight())) {
      local_renderer_->SetSize(local_renderer_w_ = i420_frame.GetWidth(),
                               local_renderer_h_ = i420_frame.GetHeight(), 0);
    }
    local_renderer_->RenderFrame(&i420_frame);
  }

  // Send I420 frame to the registered senders.
  talk_base::CritScope cs(&channels_crit_);
  for (VideoChannels::iterator it = channels_.begin();
       it != channels_.end(); ++it) {
    if ((*it)->sending()) (*it)->SendFrame(0, &i420_frame);
  }
}
632 | |
// Returns the codec list built by the last successful RebuildCodecList().
const std::vector<VideoCodec>& WebRtcVideoEngine::codecs() const {
  return video_codecs_;
}

// Adjusts the minimum severity forwarded from WebRTC traces. |filter| is
// currently unused here.
void WebRtcVideoEngine::SetLogging(int min_sev, const char* filter) {
  log_level_ = min_sev;
  ApplyLogging();
}

// Returns the last error recorded by the ViE wrapper.
int WebRtcVideoEngine::GetLastEngineError() {
  return vie_wrapper_->error();
}
645 | |
646 // Checks to see whether we comprehend and could receive a particular codec | |
647 bool WebRtcVideoEngine::FindCodec(const VideoCodec& in) { | |
648 for (int i = 0; i < ARRAY_SIZE(kVideoFormats); ++i) { | |
649 const VideoFormat fmt(kVideoFormats[i]); | |
650 if ((in.width == 0 && in.height == 0) || | |
651 (fmt.width == in.width && fmt.height == in.height)) { | |
652 for (int j = 0; j < ARRAY_SIZE(kVideoCodecPrefs); ++j) { | |
653 VideoCodec codec(kVideoCodecPrefs[j].payload_type, | |
654 kVideoCodecPrefs[j].name, 0, 0, 0, 0); | |
655 if (codec.Matches(in)) { | |
656 return true; | |
657 } | |
658 } | |
659 } | |
660 } | |
661 return false; | |
662 } | |
663 | |
// Given the requested codec, returns true if we can send that codec type and
// updates out with the best quality we could send for that codec. If current is
// not empty, we constrain out so that its aspect ratio matches current's.
bool WebRtcVideoEngine::CanSendCodec(const VideoCodec& requested,
                                     const VideoCodec& current,
                                     VideoCodec* out) {
  if (!out) {
    return false;
  }

  std::vector<VideoCodec>::const_iterator local_max;
  for (local_max = video_codecs_.begin();
       local_max < video_codecs_.end();
       ++local_max) {
    // First match codecs by payload type
    if (!requested.Matches(local_max->id, local_max->name)) {
      continue;
    }

    out->id = requested.id;
    out->name = requested.name;
    out->preference = requested.preference;
    out->framerate = talk_base::_min(requested.framerate, local_max->framerate);
    out->width = 0;
    out->height = 0;

    if (0 == requested.width && 0 == requested.height) {
      // Special case with resolution 0. The channel should not send frames.
      return true;
    } else if (0 == requested.width || 0 == requested.height) {
      // 0xn and nx0 are invalid resolutions.
      return false;
    }

    // Pick the best quality that is within their and our bounds and has the
    // correct aspect ratio.
    for (int j = 0; j < ARRAY_SIZE(kVideoFormats); ++j) {
      const VideoFormat format(kVideoFormats[j]);

      // Skip any format that is larger than the local or remote maximums, or
      // smaller than the current best match
      if (format.width > requested.width || format.height > requested.height ||
          format.width > local_max->width ||
          (format.width < out->width && format.height < out->height)) {
        continue;
      }

      bool better = false;

      // Check any further constraints on this prospective format
      if (!out->width || !out->height) {
        // If we don't have any matches yet, this is the best so far.
        better = true;
      } else if (current.width && current.height) {
        // current is set so format must match its ratio exactly.
        better =
            (format.width * current.height == format.height * current.width);
      } else {
        // Prefer closer aspect ratios i.e
        // format.aspect - requested.aspect < out.aspect - requested.aspect
        // (compared via cross-multiplication to stay in integer math;
        // NOTE(review): these triple products could overflow int for very
        // large dimensions -- confirm the expected ranges are safe).
        better = abs(format.width * requested.height * out->height -
                     requested.width * format.height * out->height) <
                 abs(out->width * format.height * requested.height -
                     requested.width * format.height * out->height);
      }

      if (better) {
        out->width = format.width;
        out->height = format.height;
      }
    }
    // A non-zero width means some format within bounds was found.
    if (out->width > 0) {
      return true;
    }
  }
  return false;
}
741 | |
// Copies the fields of a webrtc::VideoCodec into a cricket::VideoCodec.
// |out_codec| is an out-parameter passed by non-const reference.
void WebRtcVideoEngine::ConvertToCricketVideoCodec(
    const webrtc::VideoCodec& in_codec, VideoCodec& out_codec) {
  out_codec.id = in_codec.plType;
  out_codec.name = in_codec.plName;
  out_codec.width = in_codec.width;
  out_codec.height = in_codec.height;
  out_codec.framerate = in_codec.maxFramerate;
}
750 | |
// Builds a webrtc::VideoCodec for |in_codec| by finding the engine codec
// template with a matching payload name, then overriding the fields that
// |in_codec| specifies (zero means "keep the engine default").
bool WebRtcVideoEngine::ConvertFromCricketVideoCodec(
    const VideoCodec& in_codec, webrtc::VideoCodec& out_codec) {
  bool found = false;
  int ncodecs = vie_wrapper_->codec()->NumberOfCodecs();
  for (int i = 0; i < ncodecs; ++i) {
    // GetCodec() fills |out_codec| with the engine's template for codec i.
    if (vie_wrapper_->codec()->GetCodec(i, out_codec) == 0 &&
        in_codec.name == out_codec.plName) {
      found = true;
      break;
    }
  }

  if (!found) {
    LOG(LS_ERROR) << "invalid codec type";
    return false;
  }

  if (in_codec.id != 0)
    out_codec.plType = in_codec.id;

  if (in_codec.width != 0)
    out_codec.width = in_codec.width;

  if (in_codec.height != 0)
    out_codec.height = in_codec.height;

  if (in_codec.framerate != 0)
    out_codec.maxFramerate = in_codec.framerate;

  // Init the codec with the default bandwidth options.
  out_codec.minBitrate = kMinVideoBitrate;
  out_codec.startBitrate = kStartVideoBitrate;
  out_codec.maxBitrate = kMaxVideoBitrate;

  return true;
}
787 | |
// Adds |channel| to the list that receives captured frames in
// OnFrameCaptured(). Guarded by |channels_crit_|.
void WebRtcVideoEngine::RegisterChannel(WebRtcVideoMediaChannel *channel) {
  talk_base::CritScope cs(&channels_crit_);
  channels_.push_back(channel);
}

// Removes |channel| via the erase-remove idiom; harmless if not present.
void WebRtcVideoEngine::UnregisterChannel(WebRtcVideoMediaChannel *channel) {
  talk_base::CritScope cs(&channels_crit_);
  channels_.erase(std::remove(channels_.begin(), channels_.end(), channel),
                  channels_.end());
}
798 | |
// Replaces the voice engine used for A/V sync. Must be called before
// Init(), since InitVideoEngine() hooks the two engines together.
bool WebRtcVideoEngine::SetVoiceEngine(WebRtcVoiceEngine* voice_engine) {
  if (initialized_) {
    LOG(LS_WARNING) << "SetVoiceEngine can not be called after Init.";
    return false;
  }
  voice_engine_ = voice_engine;
  return true;
}

// Swaps the pass-through render module for WebRTC's own external renderer.
// Must be called before Init(), which registers the render module.
bool WebRtcVideoEngine::EnableTimedRender() {
  if (initialized_) {
    LOG(LS_WARNING) << "EnableTimedRender can not be called after Init.";
    return false;
  }
  render_module_.reset(webrtc::VideoRender::CreateVideoRender(0, NULL,
      false, webrtc::kRenderExternal));
  return true;
}
817 | |
// Maps our log level to a cumulative WebRTC trace filter. The switch
// intentionally falls through so each level also enables all of the
// more-severe trace categories below it.
void WebRtcVideoEngine::ApplyLogging() {
  int filter = 0;
  switch (log_level_) {
    case talk_base::LS_VERBOSE: filter |= webrtc::kTraceAll;  // fall through
    case talk_base::LS_INFO: filter |= webrtc::kTraceStateInfo;  // fall through
    case talk_base::LS_WARNING: filter |= webrtc::kTraceWarning;  // fall through
    case talk_base::LS_ERROR: filter |=
        webrtc::kTraceError | webrtc::kTraceCritical;
  }
  tracing_->SetTraceFilter(filter);
}
829 | |
// Rebuilds the codec list to be only those that are less intensive
// than the specified codec. Returns false if |in_codec| is not a codec
// we support at all.
bool WebRtcVideoEngine::RebuildCodecList(const VideoCodec& in_codec) {
  if (!FindCodec(in_codec))
    return false;

  video_codecs_.clear();

  // Walk the preference list; once |in_codec|'s name is found, append it
  // and every lower-preference codec, each clamped to |in_codec|'s
  // resolution and frame rate.
  bool found = false;
  for (size_t i = 0; i < ARRAY_SIZE(kVideoCodecPrefs); ++i) {
    const VideoCodecPref& pref(kVideoCodecPrefs[i]);
    if (!found)
      found = (in_codec.name == pref.name);
    if (found) {
      VideoCodec codec(pref.payload_type, pref.name,
                       in_codec.width, in_codec.height, in_codec.framerate,
                       ARRAY_SIZE(kVideoCodecPrefs) - i);
      video_codecs_.push_back(codec);
    }
  }
  // FindCodec() succeeded above, so the name must have been found.
  ASSERT(found);
  return true;
}
853 | |
// Installs |capturer| as the active capture source, releasing any previous
// capturer first. |own_capturer| records whether ClearCapturer() should
// delete it later. A NULL |capturer| simply clears the current one.
bool WebRtcVideoEngine::SetCapturer(VideoCapturer* capturer,
                                    bool own_capturer) {
  if (capturer == NULL) {
    ClearCapturer();
    return true;
  }
  // Hook up signals and install the supplied capturer.
  SignalCaptureResult.repeat(capturer->SignalStartResult);
  capturer->SignalFrameCaptured.connect(this,
      &WebRtcVideoEngine::OnFrameCaptured);
  // Release the old capturer (if owned) before taking the new one.
  ClearCapturer();
  video_capturer_ = capturer;
  owns_capturer_ = own_capturer;
  // Possibly restart the capturer if it is supposed to be running.
  CaptureResult result = UpdateCapturingState();
  if (result != CR_SUCCESS && result != CR_PENDING) {
    LOG(LS_WARNING) << "Camera failed to restart";
    return false;
  }
  return true;
}
875 | |
// Engine callback for high CPU load (registered via RegisterObserver in
// InitVideoEngine()); currently log-only.
void WebRtcVideoEngine::PerformanceAlarm(const unsigned int cpu_load) {
  LOG(LS_INFO) << "WebRtcVideoEngine::PerformanceAlarm";
}
879 | |
// Ignore spammy trace messages, mostly from the stats API when we haven't
// gotten RTCP info yet from the remote side.
bool WebRtcVideoEngine::ShouldIgnoreTrace(const std::string& trace) {
  // NULL-terminated list of message prefixes to suppress; currently empty,
  // so nothing is filtered here.
  static const char* const kTracesToIgnore[] = {
    NULL
  };
  // Prefix-match |trace| against each entry up to the NULL sentinel.
  for (const char* const* p = kTracesToIgnore; *p; ++p) {
    if (trace.find(*p) == 0) {
      return true;
    }
  }
  return false;
}
893 | |
894 int WebRtcVideoEngine::GetNumOfChannels() { | |
895 talk_base::CritScope cs(&channels_crit_); | |
896 return channels_.size(); | |
897 } | |
898 | |
// Sink for WebRTC's internal trace output.  Maps the webrtc trace level to a
// talk_base logging severity, strips the fixed-width boilerplate header that
// webrtc prepends to each trace line, and forwards the message to the
// libjingle log unless an ignore filter (ours or the voice engine's)
// suppresses it.
void WebRtcVideoEngine::Print(const webrtc::TraceLevel level,
                              const char* trace, const int length) {
  talk_base::LoggingSeverity sev = talk_base::LS_VERBOSE;
  if (level == webrtc::kTraceError || level == webrtc::kTraceCritical)
    sev = talk_base::LS_ERROR;
  else if (level == webrtc::kTraceWarning)
    sev = talk_base::LS_WARNING;
  else if (level == webrtc::kTraceStateInfo || level == webrtc::kTraceInfo)
    sev = talk_base::LS_INFO;

  if (sev >= log_level_) {
    // Skip past boilerplate prefix text.  Messages shorter than the
    // 72-byte header cannot be well-formed webrtc traces; log them
    // verbatim for diagnosis.
    if (length < 72) {
      std::string msg(trace, length);
      LOG(LS_ERROR) << "Malformed webrtc log message: ";
      LOG_V(sev) << msg;
    } else {
      // Drop the 71-byte header and one trailing byte (terminator).
      std::string msg(trace + 71, length - 72);
      if (!ShouldIgnoreTrace(msg) &&
          (!voice_engine_ || !voice_engine_->ShouldIgnoreTrace(msg))) {
        LOG_V(sev) << "WebRtc:" << msg;
      }
    }
  }
}
924 | |
// TODO: stubs for now
// Stub: accepts |video_processor| but does not install it anywhere yet;
// always reports success.
bool WebRtcVideoEngine::RegisterProcessor(
    VideoProcessor* video_processor) {
  return true;
}
// Stub: reports success without any processor having been unregistered
// (RegisterProcessor above never installs one).
bool WebRtcVideoEngine::UnregisterProcessor(
    VideoProcessor* video_processor) {
  return true;
}
934 | |
935 void WebRtcVideoEngine::ClearCapturer() { | |
936 if (owns_capturer_) { | |
937 delete video_capturer_; | |
938 } | |
939 video_capturer_ = NULL; | |
940 } | |
941 | |
942 // WebRtcVideoMediaChannel | |
943 | |
// Creates a media channel bound to |engine|, optionally linked to a voice
// channel (used in Init() to connect audio for A/V sync).  All ViE resources
// are allocated later in Init(); until then the channel and capture ids stay
// at -1 (invalid).  Bitrate limits start at the engine-wide defaults.
WebRtcVideoMediaChannel::WebRtcVideoMediaChannel(
    WebRtcVideoEngine* engine, VoiceMediaChannel* channel)
    : engine_(engine),
      voice_channel_(channel),
      vie_channel_(-1),
      vie_capture_(-1),
      external_capture_(NULL),
      sending_(false),
      render_started_(false),
      muted_(false),
      send_min_bitrate_(kMinVideoBitrate),
      send_start_bitrate_(kStartVideoBitrate),
      send_max_bitrate_(kMaxVideoBitrate),
      local_stream_info_(new LocalStreamInfo()) {
  engine->RegisterChannel(this);
}
960 | |
// Allocates and wires up all ViE resources for this channel, in order: the
// ViE channel itself, the optional audio link for A/V sync, the external
// send transport, the MTU, the external capture device, the render adapter,
// decoder/encoder observers, and RTCP/PLI (plus NACK and TMMBR) feedback.
// Returns false on the first fatal error; a failed audio link is logged but
// not fatal.  The destructor cleans up whatever was created.
bool WebRtcVideoMediaChannel::Init() {
  if (engine_->vie()->base()->CreateChannel(vie_channel_) != 0) {
    LOG_RTCERR1(CreateChannel, vie_channel_);
    return false;
  }

  LOG(LS_INFO) << "WebRtcVideoMediaChannel::Init "
               << "vie_channel " << vie_channel_ << " created";

  // Connect the voice channel, if there is one.
  if (voice_channel_) {
    WebRtcVoiceMediaChannel* channel =
        static_cast<WebRtcVoiceMediaChannel*>(voice_channel_);
    if (engine_->vie()->base()->ConnectAudioChannel(
        vie_channel_, channel->voe_channel()) != 0) {
      LOG_RTCERR2(ConnectAudioChannel, vie_channel_, channel->voe_channel());
      LOG(LS_WARNING) << "A/V not synchronized";
      // Not a fatal error.
    }
  }

  // Register external transport.
  if (engine_->vie()->network()->RegisterSendTransport(
      vie_channel_, *this) != 0) {
    LOG_RTCERR1(RegisterSendTransport, vie_channel_);
    return false;
  }

  // Set MTU.
  if (engine_->vie()->network()->SetMTU(vie_channel_, kVideoMtu) != 0) {
    LOG_RTCERR2(SetMTU, vie_channel_, kVideoMtu);
    return false;
  }

  // Register external capture.
  if (engine()->vie()->capture()->AllocateExternalCaptureDevice(
      vie_capture_, external_capture_) != 0) {
    LOG_RTCERR0(AllocateExternalCaptureDevice);
    return false;
  }

  // Connect external capture.
  if (engine()->vie()->capture()->ConnectCaptureDevice(
      vie_capture_, vie_channel_) != 0) {
    LOG_RTCERR2(ConnectCaptureDevice, vie_capture_, vie_channel_);
    return false;
  }

  // Install render adapter.  The renderer target starts as NULL and is set
  // later via SetRenderer().
  remote_renderer_.reset(new WebRtcRenderAdapter(NULL));
  if (engine_->vie()->render()->AddRenderer(vie_channel_,
      webrtc::kVideoI420, remote_renderer_.get()) != 0) {
    LOG_RTCERR3(AddRenderer, vie_channel_, webrtc::kVideoI420,
                remote_renderer_.get());
    remote_renderer_.reset();
    return false;
  }

  // Register decoder observer for incoming framerate and bitrate.
  decoder_observer_.reset(new WebRtcDecoderObserver(vie_channel_));
  if (engine()->vie()->codec()->RegisterDecoderObserver(
      vie_channel_, *decoder_observer_) != 0) {
    LOG_RTCERR1(RegisterDecoderObserver, decoder_observer_.get());
    return false;
  }

  // Register encoder observer for outgoing framerate and bitrate.
  encoder_observer_.reset(new WebRtcEncoderObserver(vie_channel_));
  if (engine()->vie()->codec()->RegisterEncoderObserver(
      vie_channel_, *encoder_observer_) != 0) {
    LOG_RTCERR1(RegisterEncoderObserver, encoder_observer_.get());
    return false;
  }

  // Turn on RTCP and loss feedback reporting.
  if (!EnableRtcp() ||
      !EnablePli()) {
    return false;
  }

#ifdef WEBRTC_VIDEO_AVPF_NACK_ONLY
  // Turn on NACK-only loss handling.
  if (!EnableNack())
    return false;
#endif

  // Turn on TMMBR-based BWE reporting.
  if (!EnableTmmbr()) {
    return false;
  }

  return true;
}
1054 | |
// Tears down the channel in roughly the reverse order of Init(): stop
// sending/rendering, deregister observers, remove the renderer, release the
// capture device, deregister the transport, delete the ViE channel, and
// finally unregister from the engine.  Teardown errors are logged but
// otherwise ignored.  If Init() never ran (vie_channel_ == -1) only the
// engine unregistration happens.
WebRtcVideoMediaChannel::~WebRtcVideoMediaChannel() {
  if (vie_channel_ != -1) {
    // Stop sending.
    SetSend(false);
    if (engine()->vie()->codec()->DeregisterEncoderObserver(
        vie_channel_) != 0) {
      LOG_RTCERR1(DeregisterEncoderObserver, vie_channel_);
    }

    // Stop the renderer.
    SetRender(false);
    if (engine()->vie()->codec()->DeregisterDecoderObserver(
        vie_channel_) != 0) {
      LOG_RTCERR1(DeregisterDecoderObserver, vie_channel_);
    }
    if (remote_renderer_.get() &&
        engine()->vie()->render()->RemoveRenderer(vie_channel_) != 0) {
      LOG_RTCERR1(RemoveRenderer, vie_channel_);
    }

    // Destroy the external capture interface.
    if (vie_capture_ != -1) {
      if (engine()->vie()->capture()->DisconnectCaptureDevice(
          vie_channel_) != 0) {
        LOG_RTCERR1(DisconnectCaptureDevice, vie_channel_);
      }
      if (engine()->vie()->capture()->ReleaseCaptureDevice(
          vie_capture_) != 0) {
        LOG_RTCERR1(ReleaseCaptureDevice, vie_capture_);
      }
    }

    // Deregister external transport.
    if (engine()->vie()->network()->DeregisterSendTransport(
        vie_channel_) != 0) {
      LOG_RTCERR1(DeregisterSendTransport, vie_channel_);
    }

    // Delete the VideoEngine channel.
    if (engine()->vie()->base()->DeleteChannel(vie_channel_) != 0) {
      LOG_RTCERR1(DeleteChannel, vie_channel_);
    }
  }

  // Unregister the channel from the engine.
  engine()->UnregisterChannel(this);
}
1102 | |
1103 bool WebRtcVideoMediaChannel::SetRecvCodecs( | |
1104 const std::vector<VideoCodec>& codecs) { | |
1105 bool ret = true; | |
1106 for (std::vector<VideoCodec>::const_iterator iter = codecs.begin(); | |
1107 iter != codecs.end(); ++iter) { | |
1108 if (engine()->FindCodec(*iter)) { | |
1109 webrtc::VideoCodec wcodec; | |
1110 if (engine()->ConvertFromCricketVideoCodec(*iter, wcodec)) { | |
1111 if (engine()->vie()->codec()->SetReceiveCodec( | |
1112 vie_channel_, wcodec) != 0) { | |
1113 LOG_RTCERR2(SetReceiveCodec, vie_channel_, wcodec.plName); | |
1114 ret = false; | |
1115 } | |
1116 } | |
1117 } else { | |
1118 LOG(LS_INFO) << "Unknown codec " << iter->name; | |
1119 ret = false; | |
1120 } | |
1121 } | |
1122 | |
1123 // make channel ready to receive packets | |
1124 if (ret) { | |
1125 if (engine()->vie()->base()->StartReceive(vie_channel_) != 0) { | |
1126 LOG_RTCERR1(StartReceive, vie_channel_); | |
1127 ret = false; | |
1128 } | |
1129 } | |
1130 return ret; | |
1131 } | |
1132 | |
1133 bool WebRtcVideoMediaChannel::SetSendCodecs( | |
1134 const std::vector<VideoCodec>& codecs) { | |
1135 // Match with local video codec list. | |
1136 std::vector<webrtc::VideoCodec> send_codecs; | |
1137 int red_type = -1, fec_type = -1; | |
1138 VideoCodec checked_codec; | |
1139 VideoCodec current; // defaults to 0x0 | |
1140 if (sending_) { | |
1141 engine()->ConvertToCricketVideoCodec(*send_codec_, current); | |
1142 } | |
1143 for (std::vector<VideoCodec>::const_iterator iter = codecs.begin(); | |
1144 iter != codecs.end(); ++iter) { | |
1145 if (_stricmp(iter->name.c_str(), kRedPayloadName) == 0) { | |
1146 red_type = iter->id; | |
1147 } else if (_stricmp(iter->name.c_str(), kFecPayloadName) == 0) { | |
1148 fec_type = iter->id; | |
1149 } else if (engine()->CanSendCodec(*iter, current, &checked_codec)) { | |
1150 webrtc::VideoCodec wcodec; | |
1151 if (engine()->ConvertFromCricketVideoCodec(checked_codec, wcodec)) { | |
1152 send_codecs.push_back(wcodec); | |
1153 } | |
1154 } else { | |
1155 LOG(LS_WARNING) << "Unknown codec " << iter->name; | |
1156 } | |
1157 } | |
1158 | |
1159 // Fail if we don't have a match. | |
1160 if (send_codecs.empty()) { | |
1161 LOG(LS_WARNING) << "No matching codecs avilable"; | |
1162 return false; | |
1163 } | |
1164 | |
1165 #ifndef WEBRTC_VIDEO_AVPF_NACK_ONLY | |
1166 // Configure FEC if enabled. | |
1167 if (!SetNackFec(red_type, fec_type)) { | |
1168 return false; | |
1169 } | |
1170 #endif | |
1171 | |
1172 // Select the first matched codec. | |
1173 webrtc::VideoCodec& codec(send_codecs[0]); | |
1174 | |
1175 // Set the default number of temporal layers for VP8. | |
1176 if (webrtc::kVideoCodecVP8 == codec.codecType) { | |
1177 codec.codecSpecific.VP8.numberOfTemporalLayers = | |
1178 kDefaultNumberOfTemporalLayers; | |
1179 } | |
1180 | |
1181 if (!SetSendCodec( | |
1182 codec, send_min_bitrate_, send_start_bitrate_, send_max_bitrate_)) { | |
1183 return false; | |
1184 } | |
1185 | |
1186 LOG(LS_INFO) << "Selected video codec " << send_codec_->plName << "/" | |
1187 << send_codec_->width << "x" << send_codec_->height << "x" | |
1188 << static_cast<int>(send_codec_->maxFramerate); | |
1189 if (webrtc::kVideoCodecVP8 == codec.codecType) { | |
1190 LOG(LS_INFO) << "VP8 number of layers: " | |
1191 << static_cast<int>( | |
1192 send_codec_->codecSpecific.VP8.numberOfTemporalLayers); | |
1193 } | |
1194 return true; | |
1195 } | |
1196 | |
1197 bool WebRtcVideoMediaChannel::SetRender(bool render) { | |
1198 if (render == render_started_) { | |
1199 return true; // no action required | |
1200 } | |
1201 | |
1202 bool ret = true; | |
1203 if (render) { | |
1204 if (engine()->vie()->render()->StartRender(vie_channel_) != 0) { | |
1205 LOG_RTCERR1(StartRender, vie_channel_); | |
1206 ret = false; | |
1207 } | |
1208 } else { | |
1209 if (engine()->vie()->render()->StopRender(vie_channel_) != 0) { | |
1210 LOG_RTCERR1(StopRender, vie_channel_); | |
1211 ret = false; | |
1212 } | |
1213 } | |
1214 if (ret) { | |
1215 render_started_ = render; | |
1216 } | |
1217 | |
1218 return ret; | |
1219 } | |
1220 | |
1221 bool WebRtcVideoMediaChannel::SetSend(bool send) { | |
1222 if (send == sending()) { | |
1223 return true; // no action required | |
1224 } | |
1225 | |
1226 if (send) { | |
1227 // We've been asked to start sending. | |
1228 // SetSendCodecs must have been called already. | |
1229 if (!send_codec_.get()) { | |
1230 return false; | |
1231 } | |
1232 | |
1233 if (engine()->vie()->base()->StartSend(vie_channel_) != 0) { | |
1234 LOG_RTCERR1(StartSend, vie_channel_); | |
1235 return false; | |
1236 } | |
1237 } else { | |
1238 // We've been asked to stop sending. | |
1239 if (engine()->vie()->base()->StopSend(vie_channel_) != 0) { | |
1240 LOG_RTCERR1(StopSend, vie_channel_); | |
1241 return false; | |
1242 } | |
1243 } | |
1244 | |
1245 sending_ = send; | |
1246 return true; | |
1247 } | |
1248 | |
// Stub: adding extra receive streams is not implemented; always fails.
bool WebRtcVideoMediaChannel::AddStream(uint32 ssrc, uint32 voice_ssrc) {
  return false;
}
1252 | |
// Stub: removing receive streams is not implemented; always fails.
bool WebRtcVideoMediaChannel::RemoveStream(uint32 ssrc) {
  return false;
}
1256 | |
1257 bool WebRtcVideoMediaChannel::SetRenderer( | |
1258 uint32 ssrc, VideoRenderer* renderer) { | |
1259 if (ssrc != 0) | |
1260 return false; | |
1261 | |
1262 remote_renderer_->SetRenderer(renderer); | |
1263 return true; | |
1264 } | |
1265 | |
// Fills |info| with sender, receiver, and bandwidth-estimation statistics
// gathered from ViE.  Fails only if the basic RTP statistics are
// unavailable; missing sender/receiver/BWE sections are logged (or skipped)
// without failing the call.  Fields with no data source yet are set to -1.
bool WebRtcVideoMediaChannel::GetStats(VideoMediaInfo* info) {
  // Get basic statistics.
  unsigned int bytes_sent, packets_sent, bytes_recv, packets_recv;
  unsigned int ssrc;
  if (engine_->vie()->rtp()->GetRTPStatistics(vie_channel_,
      bytes_sent, packets_sent, bytes_recv, packets_recv) != 0) {
    LOG_RTCERR1(GetRTPStatistics, vie_channel_);
    return false;
  }

  // Get sender statistics and build VideoSenderInfo.
  if (engine_->vie()->rtp()->GetLocalSSRC(vie_channel_, ssrc) == 0) {
    VideoSenderInfo sinfo;
    sinfo.ssrc = ssrc;
    sinfo.codec_name = send_codec_.get() ? send_codec_->plName : "";
    sinfo.bytes_sent = bytes_sent;
    sinfo.packets_sent = packets_sent;
    sinfo.packets_cached = -1;
    sinfo.packets_lost = -1;
    sinfo.fraction_lost = -1;
    sinfo.firs_rcvd = -1;
    sinfo.nacks_rcvd = -1;
    sinfo.rtt_ms = -1;
    sinfo.frame_width = local_stream_info_->width();
    sinfo.frame_height = local_stream_info_->height();
    sinfo.framerate_input = local_stream_info_->framerate();
    sinfo.framerate_sent = encoder_observer_->framerate();
    sinfo.nominal_bitrate = encoder_observer_->bitrate();
    sinfo.preferred_bitrate = kMaxVideoBitrate;

    // Get received RTCP statistics for the sender, if available.
    // It's not a fatal error if we can't, since RTCP may not have arrived yet.
    uint16 r_fraction_lost;
    unsigned int r_cumulative_lost;
    unsigned int r_extended_max;
    unsigned int r_jitter;
    int r_rtt_ms;
    if (engine_->vie()->rtp()->GetReceivedRTCPStatistics(vie_channel_,
        r_fraction_lost, r_cumulative_lost, r_extended_max,
        r_jitter, r_rtt_ms) == 0) {
      // Convert Q8 to float.
      sinfo.packets_lost = r_cumulative_lost;
      sinfo.fraction_lost = static_cast<float>(r_fraction_lost) / (1 << 8);
      sinfo.rtt_ms = r_rtt_ms;
    }
    info->senders.push_back(sinfo);
  } else {
    LOG_RTCERR1(GetLocalSSRC, vie_channel_);
  }

  // Get receiver statistics and build VideoReceiverInfo, if we have data.
  if (engine_->vie()->rtp()->GetRemoteSSRC(vie_channel_, ssrc) == 0) {
    VideoReceiverInfo rinfo;
    rinfo.ssrc = ssrc;
    rinfo.bytes_rcvd = bytes_recv;
    rinfo.packets_rcvd = packets_recv;
    rinfo.packets_lost = -1;
    rinfo.packets_concealed = -1;
    rinfo.fraction_lost = -1;  // from SentRTCP
    rinfo.firs_sent = decoder_observer_->firs_requested();
    rinfo.nacks_sent = -1;
    rinfo.frame_width = remote_renderer_->width();
    rinfo.frame_height = remote_renderer_->height();
    rinfo.framerate_rcvd = decoder_observer_->framerate();
    int fps = remote_renderer_->framerate();
    rinfo.framerate_decoded = fps;
    rinfo.framerate_output = fps;

    // Get sent RTCP statistics.
    uint16 s_fraction_lost;
    unsigned int s_cumulative_lost;
    unsigned int s_extended_max;
    unsigned int s_jitter;
    int s_rtt_ms;
    if (engine_->vie()->rtp()->GetSentRTCPStatistics(vie_channel_,
        s_fraction_lost, s_cumulative_lost, s_extended_max,
        s_jitter, s_rtt_ms) == 0) {
      // Convert Q8 to float.
      rinfo.packets_lost = s_cumulative_lost;
      rinfo.fraction_lost = static_cast<float>(s_fraction_lost) / (1 << 8);
    }
    info->receivers.push_back(rinfo);
  }

  // Build BandwidthEstimationInfo.
  // TODO: Fill in more BWE stats once we have them.
  unsigned int total_bitrate_sent;
  unsigned int video_bitrate_sent;
  unsigned int fec_bitrate_sent;
  unsigned int nack_bitrate_sent;
  if (engine_->vie()->rtp()->GetBandwidthUsage(vie_channel_,
      total_bitrate_sent, video_bitrate_sent,
      fec_bitrate_sent, nack_bitrate_sent) == 0) {
    BandwidthEstimationInfo bwe;
    bwe.actual_enc_bitrate = video_bitrate_sent;
    bwe.transmit_bitrate = total_bitrate_sent;
    bwe.retransmit_bitrate = nack_bitrate_sent;
    info->bw_estimations.push_back(bwe);
  } else {
    LOG_RTCERR1(GetBandwidthUsage, vie_channel_);
  }

  return true;
}
1370 | |
1371 bool WebRtcVideoMediaChannel::SendIntraFrame() { | |
1372 bool ret = true; | |
1373 if (engine()->vie()->codec()->SendKeyFrame(vie_channel_) != 0) { | |
1374 LOG_RTCERR1(SendKeyFrame, vie_channel_); | |
1375 ret = false; | |
1376 } | |
1377 | |
1378 return ret; | |
1379 } | |
1380 | |
// Always fails: explicit key-frame requests are not exposed by this API.
// There is no API exposed to application to request a key frame
// ViE does this internally when there are errors from decoder
bool WebRtcVideoMediaChannel::RequestIntraFrame() {
  return false;
}
1386 | |
1387 void WebRtcVideoMediaChannel::OnPacketReceived(talk_base::Buffer* packet) { | |
1388 engine()->vie()->network()->ReceivedRTPPacket(vie_channel_, | |
1389 packet->data(), | |
1390 packet->length()); | |
1391 } | |
1392 | |
1393 void WebRtcVideoMediaChannel::OnRtcpReceived(talk_base::Buffer* packet) { | |
1394 engine_->vie()->network()->ReceivedRTCPPacket(vie_channel_, | |
1395 packet->data(), | |
1396 packet->length()); | |
1397 } | |
1398 | |
1399 void WebRtcVideoMediaChannel::SetSendSsrc(uint32 id) { | |
1400 if (!sending_) { | |
1401 if (engine()->vie()->rtp()->SetLocalSSRC(vie_channel_, id) != 0) { | |
1402 LOG_RTCERR1(SetLocalSSRC, vie_channel_); | |
1403 } | |
1404 } else { | |
1405 LOG(LS_ERROR) << "Channel already in send state"; | |
1406 } | |
1407 } | |
1408 | |
1409 bool WebRtcVideoMediaChannel::SetRtcpCName(const std::string& cname) { | |
1410 if (engine()->vie()->rtp()->SetRTCPCName(vie_channel_, | |
1411 cname.c_str()) != 0) { | |
1412 LOG_RTCERR2(SetRTCPCName, vie_channel_, cname.c_str()); | |
1413 return false; | |
1414 } | |
1415 return true; | |
1416 } | |
1417 | |
// Marks the channel muted.  The flag is consumed by SendFrame(), which
// substitutes a black copy of each outgoing frame while muted; capture and
// sending continue.
bool WebRtcVideoMediaChannel::Mute(bool on) {
  muted_ = on;
  return true;
}
1422 | |
1423 bool WebRtcVideoMediaChannel::SetSendBandwidth(bool autobw, int bps) { | |
1424 LOG(LS_INFO) << "RtcVideoMediaChanne::SetSendBandwidth"; | |
1425 | |
1426 if (!send_codec_.get()) { | |
1427 LOG(LS_INFO) << "The send codec has not been set up yet."; | |
1428 return true; | |
1429 } | |
1430 | |
1431 int min_bitrate; | |
1432 int start_bitrate; | |
1433 int max_bitrate; | |
1434 if (autobw) { | |
1435 // Use the default values for min bitrate. | |
1436 min_bitrate = kMinVideoBitrate; | |
1437 // Use the default value or the bps for the max | |
1438 max_bitrate = (bps <= 0) ? kMaxVideoBitrate : (bps / 1000); | |
1439 // Maximum start bitrate can be kStartVideoBitrate. | |
1440 start_bitrate = talk_base::_min(kStartVideoBitrate, max_bitrate); | |
1441 } else { | |
1442 // Use the default start or the bps as the target bitrate. | |
1443 int target_bitrate = (bps <= 0) ? kStartVideoBitrate : (bps / 1000); | |
1444 min_bitrate = target_bitrate; | |
1445 start_bitrate = target_bitrate; | |
1446 max_bitrate = target_bitrate; | |
1447 } | |
1448 | |
1449 if (!SetSendCodec(*send_codec_, min_bitrate, start_bitrate, max_bitrate)) { | |
1450 return false; | |
1451 } | |
1452 | |
1453 return true; | |
1454 } | |
1455 | |
// Stub: no channel options are supported yet; accepts anything.
bool WebRtcVideoMediaChannel::SetOptions(int options) {
  return true;
}
1459 | |
1460 void WebRtcVideoMediaChannel::SetInterface(NetworkInterface* iface) { | |
1461 MediaChannel::SetInterface(iface); | |
1462 // Set the RTP recv/send buffer to a bigger size | |
1463 if (network_interface_) { | |
1464 network_interface_->SetOption(NetworkInterface::ST_RTP, | |
1465 talk_base::Socket::OPT_RCVBUF, | |
1466 kVideoRtpBufferSize); | |
1467 network_interface_->SetOption(NetworkInterface::ST_RTP, | |
1468 talk_base::Socket::OPT_SNDBUF, | |
1469 kVideoRtpBufferSize); | |
1470 } | |
1471 } | |
1472 | |
// TODO: Add unittests to test this function.
// Pushes a captured frame into the ViE external capture device.  Only the
// default stream (ssrc 0) is supported, and the channel must be sending.
// If the captured frame is smaller than the configured send codec in both
// dimensions, the send codec is re-applied at the new size.  When muted, a
// black copy of the frame is sent instead.  Returns false if the frame is
// rejected or the channel is not ready.
bool WebRtcVideoMediaChannel::SendFrame(uint32 ssrc, const VideoFrame* frame) {
  if (ssrc != 0 || !sending() || !external_capture_) {
    return false;
  }

  // Update local stream statistics.
  local_stream_info_->UpdateFrame(frame->GetWidth(), frame->GetHeight());

  // If the captured video format is smaller than what we asked for, reset send
  // codec on video engine.
  // NOTE(review): both dimensions must shrink for this to trigger; a frame
  // that is narrower but taller does not reset the codec.
  if (send_codec_.get() != NULL &&
      frame->GetWidth() < send_codec_->width &&
      frame->GetHeight() < send_codec_->height) {
    LOG(LS_INFO) << "Captured video frame size changed to: "
                 << frame->GetWidth() << "x" << frame->GetHeight();
    webrtc::VideoCodec new_codec = *send_codec_;
    new_codec.width = frame->GetWidth();
    new_codec.height = frame->GetHeight();
    if (!SetSendCodec(
        new_codec, send_min_bitrate_, send_start_bitrate_, send_max_bitrate_)) {
      LOG(LS_WARNING) << "Failed to switch to new frame size: "
                      << frame->GetWidth() << "x" << frame->GetHeight();
    }
  }

  // Blacken the frame if video is muted.
  const VideoFrame* frame_out = frame;
  talk_base::scoped_ptr<VideoFrame> black_frame;
  if (muted_) {
    black_frame.reset(frame->Copy());
    black_frame->SetToBlack();
    frame_out = black_frame.get();
  }

  webrtc::ViEVideoFrameI420 frame_i420;
  // TODO: Update the webrtc::ViEVideoFrameI420
  // to use const unsigned char*
  frame_i420.y_plane = const_cast<unsigned char*>(frame_out->GetYPlane());
  frame_i420.u_plane = const_cast<unsigned char*>(frame_out->GetUPlane());
  frame_i420.v_plane = const_cast<unsigned char*>(frame_out->GetVPlane());
  frame_i420.y_pitch = frame_out->GetYPitch();
  frame_i420.u_pitch = frame_out->GetUPitch();
  frame_i420.v_pitch = frame_out->GetVPitch();
  frame_i420.width = frame_out->GetWidth();
  frame_i420.height = frame_out->GetHeight();

  // Convert from nanoseconds to milliseconds.
  WebRtc_Word64 clocks = frame_out->GetTimeStamp() /
      talk_base::kNumNanosecsPerMillisec;

  return (external_capture_->IncomingFrameI420(frame_i420, clocks) == 0);
}
1526 | |
1527 bool WebRtcVideoMediaChannel::EnableRtcp() { | |
1528 if (engine()->vie()->rtp()->SetRTCPStatus( | |
1529 vie_channel_, webrtc::kRtcpCompound_RFC4585) != 0) { | |
1530 LOG_RTCERR2(SetRTCPStatus, vie_channel_, webrtc::kRtcpCompound_RFC4585); | |
1531 return false; | |
1532 } | |
1533 return true; | |
1534 } | |
1535 | |
1536 bool WebRtcVideoMediaChannel::EnablePli() { | |
1537 if (engine_->vie()->rtp()->SetKeyFrameRequestMethod( | |
1538 vie_channel_, webrtc::kViEKeyFrameRequestPliRtcp) != 0) { | |
1539 LOG_RTCERR2(SetRTCPStatus, | |
1540 vie_channel_, webrtc::kViEKeyFrameRequestPliRtcp); | |
1541 return false; | |
1542 } | |
1543 return true; | |
1544 } | |
1545 | |
1546 bool WebRtcVideoMediaChannel::EnableTmmbr() { | |
1547 if (engine_->vie()->rtp()->SetTMMBRStatus(vie_channel_, true) != 0) { | |
1548 LOG_RTCERR1(SetTMMBRStatus, vie_channel_); | |
1549 return false; | |
1550 } | |
1551 return true; | |
1552 } | |
1553 | |
1554 bool WebRtcVideoMediaChannel::EnableNack() { | |
1555 if (engine_->vie()->rtp()->SetNACKStatus(vie_channel_, true) != 0) { | |
1556 LOG_RTCERR1(SetNACKStatus, vie_channel_); | |
1557 return false; | |
1558 } | |
1559 return true; | |
1560 } | |
1561 | |
1562 bool WebRtcVideoMediaChannel::SetNackFec(int red_payload_type, | |
1563 int fec_payload_type) { | |
1564 bool enable = (red_payload_type != -1 && fec_payload_type != -1); | |
1565 if (engine_->vie()->rtp()->SetHybridNACKFECStatus( | |
1566 vie_channel_, enable, red_payload_type, fec_payload_type) != 0) { | |
1567 LOG_RTCERR4(SetHybridNACKFECStatus, | |
1568 vie_channel_, enable, red_payload_type, fec_payload_type); | |
1569 return false; | |
1570 } | |
1571 return true; | |
1572 } | |
1573 | |
1574 bool WebRtcVideoMediaChannel::SetSendCodec(const webrtc::VideoCodec& codec, | |
1575 int min_bitrate, | |
1576 int start_bitrate, | |
1577 int max_bitrate) { | |
1578 // Make a copy of the codec | |
1579 webrtc::VideoCodec target_codec = codec; | |
1580 target_codec.startBitrate = start_bitrate; | |
1581 target_codec.minBitrate = min_bitrate; | |
1582 target_codec.maxBitrate = max_bitrate; | |
1583 | |
1584 if (engine()->vie()->codec()->SetSendCodec(vie_channel_, target_codec) != 0) { | |
1585 LOG_RTCERR2(SetSendCodec, vie_channel_, send_codec_->plName); | |
1586 return false; | |
1587 } | |
1588 | |
1589 // Reset the send_codec_ only if SetSendCodec is success. | |
1590 send_codec_.reset(new webrtc::VideoCodec(target_codec)); | |
1591 send_min_bitrate_ = min_bitrate; | |
1592 send_start_bitrate_ = start_bitrate; | |
1593 send_max_bitrate_ = max_bitrate; | |
1594 | |
1595 return true; | |
1596 } | |
1597 | |
1598 int WebRtcVideoMediaChannel::SendPacket(int channel, const void* data, | |
1599 int len) { | |
1600 if (!network_interface_) { | |
1601 return -1; | |
1602 } | |
1603 talk_base::Buffer packet(data, len, kMaxRtpPacketLen); | |
1604 return network_interface_->SendPacket(&packet) ? len : -1; | |
1605 } | |
1606 | |
1607 int WebRtcVideoMediaChannel::SendRTCPPacket(int channel, | |
1608 const void* data, | |
1609 int len) { | |
1610 if (!network_interface_) { | |
1611 return -1; | |
1612 } | |
1613 talk_base::Buffer packet(data, len, kMaxRtpPacketLen); | |
1614 return network_interface_->SendRtcp(&packet) ? len : -1; | |
1615 } | |
1616 | |
1617 } // namespace cricket | |
1618 | |
1619 #endif // HAVE_WEBRTC_VIDEO | |
OLD | NEW |