Diffstat (limited to 'toxmsi/toxmedia.c')
-rw-r--r--  toxmsi/toxmedia.c  825
1 file changed, 0 insertions, 825 deletions
diff --git a/toxmsi/toxmedia.c b/toxmsi/toxmedia.c
deleted file mode 100644
index 4c9f5261..00000000
--- a/toxmsi/toxmedia.c
+++ /dev/null
@@ -1,825 +0,0 @@
/* AV_codec.c
 *
 * Audio and video codec initialisation, encoding/decoding and playback
 *
 * Copyright (C) 2013 Tox project All Rights Reserved.
 *
 * This file is part of Tox.
 *
 * Tox is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Tox is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Tox. If not, see <http://www.gnu.org/licenses/>.
 *
 */

/*----------------------------------------------------------------------------------*/

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif /* HAVE_CONFIG_H */

#include <stdio.h>
#include <stdlib.h>     /* calloc, free */
#include <string.h>     /* strlen, memcpy */
#include <unistd.h>     /* usleep */
#include <math.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libavdevice/avdevice.h>
#include <libavutil/opt.h>
#include <AL/al.h>
#include <AL/alc.h>
#include <SDL/SDL.h>
#include <SDL/SDL_thread.h>
#include <pthread.h>
#include <opus/opus.h>

#include "toxmsi.h"
#include "toxmsi_message.h"
#include "../toxrtp/toxrtp_message.h"
#include "../toxrtp/tests/test_helper.h"
#include "phone.h"
#include "toxmedia.h"

SDL_Surface *screen;

int display_received_frame(codec_state *cs, AVFrame *r_video_frame)
{
    AVPicture pict;
    SDL_LockYUVOverlay(cs->video_picture.bmp);

    pict.data[0] = cs->video_picture.bmp->pixels[0];
    pict.data[1] = cs->video_picture.bmp->pixels[2];
    pict.data[2] = cs->video_picture.bmp->pixels[1];
    pict.linesize[0] = cs->video_picture.bmp->pitches[0];
    pict.linesize[1] = cs->video_picture.bmp->pitches[2];
    pict.linesize[2] = cs->video_picture.bmp->pitches[1];

    /* Convert the image into YUV format that SDL uses */
    sws_scale(cs->sws_SDL_r_ctx, (uint8_t const * const *)r_video_frame->data, r_video_frame->linesize, 0,
              cs->video_decoder_ctx->height, pict.data, pict.linesize);

    SDL_UnlockYUVOverlay(cs->video_picture.bmp);
    SDL_Rect rect;
    rect.x = 0;
    rect.y = 0;
    rect.w = cs->video_decoder_ctx->width;
    rect.h = cs->video_decoder_ctx->height;
    SDL_DisplayYUVOverlay(cs->video_picture.bmp, &rect);
    return 1;
}

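/* Jitter buffer: incoming RTP messages are queued and re-ordered by sequence
 * number/timestamp before being handed to the decoders. */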
struct jitter_buffer {
    rtp_msg_t **queue;
    uint16_t capacity;
    uint16_t size;
    uint16_t front;
    uint16_t rear;
    uint8_t queue_ready;
    uint16_t current_id;
    uint32_t current_ts;
    uint8_t id_set;
};

struct jitter_buffer *create_queue(int capacity)
{
    struct jitter_buffer *q;
    q = (struct jitter_buffer *)calloc(1, sizeof(struct jitter_buffer));
    q->queue = (rtp_msg_t **)calloc(capacity, sizeof(rtp_msg_t *)); /* array of message pointers */
    int i = 0;

    for (i = 0; i < capacity; ++i) {
        q->queue[i] = NULL;
    }

    q->size = 0;
    q->capacity = capacity;
    q->front = 0;
    q->rear = -1;
    q->queue_ready = 0;
    q->current_id = 0;
    q->current_ts = 0;
    q->id_set = 0;
    return q;
}

/* returns 1 if 'a' has a higher sequence number than 'b' */
uint8_t sequence_number_older(uint16_t sn_a, uint16_t sn_b, uint32_t ts_a, uint32_t ts_b)
{
    /* should be stable enough */
    return (sn_a > sn_b || ts_a > ts_b);
}

/* success is 0 when there is nothing to dequeue, 1 when there's a good packet, 2 when there's a lost packet */
rtp_msg_t *dequeue(struct jitter_buffer *q, int *success)
{
    if (q->size == 0 || q->queue_ready == 0) {
        q->queue_ready = 0;
        *success = 0;
        return NULL;
    }

    int front = q->front;

    if (q->id_set == 0) {
        q->current_id = q->queue[front]->_header->_sequence_number;
        q->current_ts = q->queue[front]->_header->_timestamp;
        q->id_set = 1;
    } else {
        int next_id = q->queue[front]->_header->_sequence_number;
        int next_ts = q->queue[front]->_header->_timestamp;

        /* if this packet is indeed the expected packet */
        if (next_id == (q->current_id + 1) % _MAX_SEQU_NUM) {
            q->current_id = next_id;
            q->current_ts = next_ts;
        } else {
            if (sequence_number_older(next_id, q->current_id, next_ts, q->current_ts)) {
                printf("nextid: %d current: %d\n", next_id, q->current_id);
                q->current_id = (q->current_id + 1) % _MAX_SEQU_NUM;
                *success = 2; /* tell the decoder the packet is lost */
                return NULL;
            } else {
                /* packet too old */
                printf("packet too old\n");
                *success = 0;
                return NULL;
            }
        }
    }

    q->size--;
    q->front++;

    if (q->front == q->capacity)
        q->front = 0;

    *success = 1;
    q->current_id = q->queue[front]->_header->_sequence_number;
    q->current_ts = q->queue[front]->_header->_timestamp;
    return q->queue[front];
}

int empty_queue(struct jitter_buffer *q)
{
    while (q->size > 0) {
        q->size--;
        /* FIXME: */
        /* rtp_free_msg(cs->_rtp_video, q->queue[q->front]); */
        q->front++;

        if (q->front == q->capacity)
            q->front = 0;
    }

    q->id_set = 0;
    q->queue_ready = 0;
    return 0;
}

int queue(struct jitter_buffer *q, rtp_msg_t *pk)
{
    if (q->size == q->capacity) {
        printf("buffer full, emptying buffer...\n");
        empty_queue(q);
        return 0;
    }

    if (q->size > 8)
        q->queue_ready = 1;

    ++q->size;
    ++q->rear;

    if (q->rear == q->capacity)
        q->rear = 0;

    q->queue[q->rear] = pk;

    int a;
    int b;
    int j;
    a = q->rear;

    for (j = 0; j < q->size - 1; ++j) {
        b = a - 1;

        if (b < 0)
            b += q->capacity;

        if (sequence_number_older(q->queue[b]->_header->_sequence_number, q->queue[a]->_header->_sequence_number,
                                  q->queue[b]->_header->_timestamp, q->queue[a]->_header->_timestamp)) {
            rtp_msg_t *temp;
            temp = q->queue[a];
            q->queue[a] = q->queue[b];
            q->queue[b] = temp;
            printf("had to swap\n");
        } else {
            break;
        }

        a -= 1;

        if (a < 0)
            a += q->capacity;
    }

    if (pk)
        return 1;

    return 0;
}

int init_receive_audio(codec_state *cs)
{
    int err = OPUS_OK;
    cs->audio_decoder = opus_decoder_create(48000, 1, &err);
    opus_decoder_init(cs->audio_decoder, 48000, 1);
    printf("init audio decoder successful\n");
    return 1;
}

int init_receive_video(codec_state *cs)
{
    cs->video_decoder = avcodec_find_decoder(VIDEO_CODEC);

    if (!cs->video_decoder) {
        printf("init video_decoder failed\n");
        return 0;
    }

    cs->video_decoder_ctx = avcodec_alloc_context3(cs->video_decoder);

    if (!cs->video_decoder_ctx) {
        printf("init video_decoder_ctx failed\n");
        return 0;
    }

    if (avcodec_open2(cs->video_decoder_ctx, cs->video_decoder, NULL) < 0) {
        printf("opening video decoder failed\n");
        return 0;
    }

    printf("init video decoder successful\n");
    return 1;
}

int init_send_video(codec_state *cs)
{
    cs->video_input_format = av_find_input_format(VIDEO_DRIVER);

    if (avformat_open_input(&cs->video_format_ctx, DEFAULT_WEBCAM, cs->video_input_format, NULL) != 0) {
        printf("opening video_input_format failed\n");
        return 0;
    }

    avformat_find_stream_info(cs->video_format_ctx, NULL);
    av_dump_format(cs->video_format_ctx, 0, DEFAULT_WEBCAM, 0);

    int i;

    for (i = 0; i < cs->video_format_ctx->nb_streams; ++i) {
        if (cs->video_format_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            cs->video_stream = i;
            break;
        }
    }

    cs->webcam_decoder_ctx = cs->video_format_ctx->streams[cs->video_stream]->codec;
    cs->webcam_decoder = avcodec_find_decoder(cs->webcam_decoder_ctx->codec_id);

    if (cs->webcam_decoder == NULL) {
        printf("Unsupported codec\n");
        return 0;
    }

    if (cs->webcam_decoder_ctx == NULL) {
        printf("init webcam_decoder_ctx failed\n");
        return 0;
    }

    if (avcodec_open2(cs->webcam_decoder_ctx, cs->webcam_decoder, NULL) < 0) {
        printf("opening webcam decoder failed\n");
        return 0;
    }

    cs->video_encoder = avcodec_find_encoder(VIDEO_CODEC);

    if (!cs->video_encoder) {
        printf("init video_encoder failed\n");
        return 0;
    }

    cs->video_encoder_ctx = avcodec_alloc_context3(cs->video_encoder);

    if (!cs->video_encoder_ctx) {
        printf("init video_encoder_ctx failed\n");
        return 0;
    }

    cs->video_encoder_ctx->bit_rate = VIDEO_BITRATE;
    cs->video_encoder_ctx->rc_min_rate = cs->video_encoder_ctx->rc_max_rate = cs->video_encoder_ctx->bit_rate;
    av_opt_set_double(cs->video_encoder_ctx->priv_data, "max-intra-rate", 90, 0);
    av_opt_set(cs->video_encoder_ctx->priv_data, "quality", "realtime", 0);

    cs->video_encoder_ctx->thread_count = 4;
    cs->video_encoder_ctx->rc_buffer_aggressivity = 0.95;
    cs->video_encoder_ctx->rc_buffer_size = VIDEO_BITRATE * 6;
    cs->video_encoder_ctx->profile = 3;
    cs->video_encoder_ctx->qmax = 54;
    cs->video_encoder_ctx->qmin = 4;
    AVRational myrational = {1, 25};
    cs->video_encoder_ctx->time_base = myrational;
    cs->video_encoder_ctx->gop_size = 99999;
    cs->video_encoder_ctx->pix_fmt = PIX_FMT_YUV420P;
    cs->video_encoder_ctx->width = cs->webcam_decoder_ctx->width;
    cs->video_encoder_ctx->height = cs->webcam_decoder_ctx->height;

    if (avcodec_open2(cs->video_encoder_ctx, cs->video_encoder, NULL) < 0) {
        printf("opening video encoder failed\n");
        return 0;
    }

    printf("init video encoder successful\n");
    return 1;
}

int init_send_audio(codec_state *cs)
{
    cs->support_send_audio = 0;

    const ALchar *pDeviceList = alcGetString(NULL, ALC_CAPTURE_DEVICE_SPECIFIER);
    int i = 0;
    const ALchar *device_names[20];

    if (pDeviceList) {
        printf("\nAvailable Capture Devices are:\n");

        while (*pDeviceList) {
            device_names[i] = pDeviceList;
            printf("%d) %s\n", i, device_names[i]);
            pDeviceList += strlen(pDeviceList) + 1;
            ++i;
        }
    }

    printf("enter capture device number: \n");
    char dev[2];
    fgets(dev, sizeof(dev), stdin);
    cs->audio_capture_device = alcCaptureOpenDevice(device_names[dev[0] - '0'], AUDIO_SAMPLE_RATE, AL_FORMAT_MONO16,
                               AUDIO_FRAME_SIZE * 4);

    if (alcGetError(cs->audio_capture_device) != AL_NO_ERROR) {
        printf("could not start capture device! %d\n", alcGetError(cs->audio_capture_device));
        return 0;
    }

    int err = OPUS_OK;
    cs->audio_bitrate = AUDIO_BITRATE;
    /* opus_encoder_create() already initialises the encoder; re-initialising it
       afterwards would discard the bitrate/complexity/signal settings below */
    cs->audio_encoder = opus_encoder_create(AUDIO_SAMPLE_RATE, 1, OPUS_APPLICATION_VOIP, &err);
    err = opus_encoder_ctl(cs->audio_encoder, OPUS_SET_BITRATE(cs->audio_bitrate));
    err = opus_encoder_ctl(cs->audio_encoder, OPUS_SET_COMPLEXITY(10));
    err = opus_encoder_ctl(cs->audio_encoder, OPUS_SET_SIGNAL(OPUS_SIGNAL_VOICE));

    int nfo;
    err = opus_encoder_ctl(cs->audio_encoder, OPUS_GET_LOOKAHEAD(&nfo));
    /* printf("Encoder lookahead delay : %d\n", nfo); */
    printf("init audio encoder successful\n");

    return 1;
}

int init_encoder(codec_state *cs)
{
    avdevice_register_all();
    avcodec_register_all();
    av_register_all();

    pthread_mutex_init(&cs->rtp_msg_mutex_lock, NULL);
    pthread_mutex_init(&cs->avcodec_mutex_lock, NULL);

    cs->support_send_video = init_send_video(cs);
    cs->support_send_audio = init_send_audio(cs);

    cs->send_audio = 1;
    cs->send_video = 1;

    return 1;
}

int init_decoder(codec_state *cs)
{
    avdevice_register_all();
    avcodec_register_all();
    av_register_all();

    cs->receive_video = 0;
    cs->receive_audio = 0;

    cs->support_receive_video = init_receive_video(cs);
    cs->support_receive_audio = init_receive_audio(cs);

    cs->receive_audio = 1;
    cs->receive_video = 1;

    return 1;
}

int video_encoder_refresh(codec_state *cs, int bps)
{
    if (cs->video_encoder_ctx)
        avcodec_close(cs->video_encoder_ctx);

    cs->video_encoder = avcodec_find_encoder(VIDEO_CODEC);

    if (!cs->video_encoder) {
        printf("init video_encoder failed\n");
        return -1;
    }

    cs->video_encoder_ctx = avcodec_alloc_context3(cs->video_encoder);

    if (!cs->video_encoder_ctx) {
        printf("init video_encoder_ctx failed\n");
        return -1;
    }

    cs->video_encoder_ctx->bit_rate = bps;
    cs->video_encoder_ctx->rc_min_rate = cs->video_encoder_ctx->rc_max_rate = cs->video_encoder_ctx->bit_rate;
    av_opt_set_double(cs->video_encoder_ctx->priv_data, "max-intra-rate", 90, 0);
    av_opt_set(cs->video_encoder_ctx->priv_data, "quality", "realtime", 0);

    cs->video_encoder_ctx->thread_count = 4;
    cs->video_encoder_ctx->rc_buffer_aggressivity = 0.95;
    cs->video_encoder_ctx->rc_buffer_size = bps * 6;
    cs->video_encoder_ctx->profile = 0;
    cs->video_encoder_ctx->qmax = 54;
    cs->video_encoder_ctx->qmin = 4;
    AVRational myrational = {1, 25};
    cs->video_encoder_ctx->time_base = myrational;
    cs->video_encoder_ctx->gop_size = 99999;
    cs->video_encoder_ctx->pix_fmt = PIX_FMT_YUV420P;
    cs->video_encoder_ctx->width = cs->webcam_decoder_ctx->width;
    cs->video_encoder_ctx->height = cs->webcam_decoder_ctx->height;

    if (avcodec_open2(cs->video_encoder_ctx, cs->video_encoder, NULL) < 0) {
        printf("opening video encoder failed\n");
        return -1;
    }

    return 0;
}

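/* Sender side video loop: read a frame from the webcam, decode it, convert it
 * to YUV420P, encode it and send the encoded packet over RTP. */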
void *encode_video_thread(void *arg)
{
    codec_state *cs = (codec_state *)arg;
    AVPacket pkt1, *packet = &pkt1;
    int p = 0;
    int err;
    int got_packet;
    rtp_msg_t *s_video_msg;
    int video_frame_finished;
    AVFrame *s_video_frame;
    AVFrame *webcam_frame;
    s_video_frame = avcodec_alloc_frame();
    webcam_frame = avcodec_alloc_frame();
    AVPacket enc_video_packet;

    uint8_t *buffer;
    int numBytes;
    /* Determine required buffer size and allocate buffer */
    numBytes = avpicture_get_size(PIX_FMT_YUV420P, cs->webcam_decoder_ctx->width, cs->webcam_decoder_ctx->height);
    buffer = (uint8_t *)av_calloc(numBytes * sizeof(uint8_t), 1);
    avpicture_fill((AVPicture *)s_video_frame, buffer, PIX_FMT_YUV420P, cs->webcam_decoder_ctx->width,
                   cs->webcam_decoder_ctx->height);
    cs->sws_ctx = sws_getContext(cs->webcam_decoder_ctx->width, cs->webcam_decoder_ctx->height,
                                 cs->webcam_decoder_ctx->pix_fmt, cs->webcam_decoder_ctx->width, cs->webcam_decoder_ctx->height, PIX_FMT_YUV420P,
                                 SWS_BILINEAR, NULL, NULL, NULL);

    while (!cs->quit && cs->send_video) {

        if (av_read_frame(cs->video_format_ctx, packet) < 0) {
            printf("error reading frame\n");

            if (cs->video_format_ctx->pb->error != 0)
                break;

            continue;
        }

        if (packet->stream_index == cs->video_stream) {
            if (avcodec_decode_video2(cs->webcam_decoder_ctx, webcam_frame, &video_frame_finished, packet) < 0) {
                printf("couldn't decode\n");
                continue;
            }

            av_free_packet(packet);
            sws_scale(cs->sws_ctx, (uint8_t const * const *)webcam_frame->data, webcam_frame->linesize, 0,
                      cs->webcam_decoder_ctx->height, s_video_frame->data, s_video_frame->linesize);
            /* create a new I-frame every 60 frames */
            ++p;

            if (p == 60) {
                s_video_frame->pict_type = AV_PICTURE_TYPE_BI;
            } else if (p == 61) {
                s_video_frame->pict_type = AV_PICTURE_TYPE_I;
                p = 0;
            } else {
                s_video_frame->pict_type = AV_PICTURE_TYPE_P;
            }

            if (video_frame_finished) {
                /* let the encoder allocate the output packet */
                av_init_packet(&enc_video_packet);
                enc_video_packet.data = NULL;
                enc_video_packet.size = 0;
                err = avcodec_encode_video2(cs->video_encoder_ctx, &enc_video_packet, s_video_frame, &got_packet);

                if (err < 0) {
                    printf("could not encode video frame\n");
                    continue;
                }

                if (!got_packet) {
                    continue;
                }

                pthread_mutex_lock(&cs->rtp_msg_mutex_lock);
                THREADLOCK()

                if (!enc_video_packet.data) fprintf(stderr, "video packet data is NULL\n");

                s_video_msg = rtp_msg_new(cs->_rtp_video, enc_video_packet.data, enc_video_packet.size);

                if (!s_video_msg) {
                    printf("invalid message\n");
                }

                rtp_send_msg(cs->_rtp_video, s_video_msg, cs->_networking);
                THREADUNLOCK()
                pthread_mutex_unlock(&cs->rtp_msg_mutex_lock);
                av_free_packet(&enc_video_packet);
            }
        } else {
            av_free_packet(packet);
        }
    }

    /* clean up codecs */
    pthread_mutex_lock(&cs->avcodec_mutex_lock);
    av_free(buffer);
    av_free(webcam_frame);
    av_free(s_video_frame);
    sws_freeContext(cs->sws_ctx);
    avcodec_close(cs->webcam_decoder_ctx);
    avcodec_close(cs->video_encoder_ctx);
    pthread_mutex_unlock(&cs->avcodec_mutex_lock);
    pthread_exit(NULL);
}

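/* Sender side audio loop: capture samples with OpenAL, encode them with Opus
 * and send the encoded frame over RTP. */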
void *encode_audio_thread(void *arg)
{
    codec_state *cs = (codec_state *)arg;
    rtp_msg_t *s_audio_msg;
    unsigned char encoded_data[4096];
    int encoded_size = 0;
    int16_t frame[4096];
    int frame_size = AUDIO_FRAME_SIZE;
    ALint sample = 0;
    alcCaptureStart(cs->audio_capture_device);

    while (!cs->quit && cs->send_audio) {
        alcGetIntegerv(cs->audio_capture_device, ALC_CAPTURE_SAMPLES, (ALCsizei)sizeof(ALint), &sample);

        if (sample >= frame_size) {
            alcCaptureSamples(cs->audio_capture_device, frame, frame_size);
            encoded_size = opus_encode(cs->audio_encoder, frame, frame_size, encoded_data, 480);

            if (encoded_size <= 0) {
                printf("Could not encode audio packet\n");
            } else {
                pthread_mutex_lock(&cs->rtp_msg_mutex_lock);
                THREADLOCK()
                rtp_set_payload_type(cs->_rtp_audio, 96);
                s_audio_msg = rtp_msg_new(cs->_rtp_audio, encoded_data, encoded_size);
                rtp_send_msg(cs->_rtp_audio, s_audio_msg, cs->_networking);
                pthread_mutex_unlock(&cs->rtp_msg_mutex_lock);
                THREADUNLOCK()
            }
        } else {
            usleep(1000);
        }
    }

    /* clean up codecs */
    pthread_mutex_lock(&cs->avcodec_mutex_lock);
    alcCaptureStop(cs->audio_capture_device);
    alcCaptureCloseDevice(cs->audio_capture_device);

    pthread_mutex_unlock(&cs->avcodec_mutex_lock);
    pthread_exit(NULL);
}

int video_decoder_refresh(codec_state *cs, int width, int height)
{
    printf("need to refresh\n");
    screen = SDL_SetVideoMode(width, height, 0, 0);

    if (cs->video_picture.bmp)
        SDL_FreeYUVOverlay(cs->video_picture.bmp);

    cs->video_picture.bmp = SDL_CreateYUVOverlay(width, height, SDL_YV12_OVERLAY, screen);
    cs->sws_SDL_r_ctx = sws_getContext(width, height, cs->video_decoder_ctx->pix_fmt, width, height, PIX_FMT_YUV420P,
                                       SWS_BILINEAR, NULL, NULL, NULL);
    return 1;
}

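/* Receiver side video loop: read RTP messages, decode them and display the
 * resulting frames with SDL. */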
void *decode_video_thread(void *arg)
{
    codec_state *cs = (codec_state *)arg;
    cs->video_stream = 0;
    rtp_msg_t *r_msg;
    int dec_frame_finished;
    AVFrame *r_video_frame;
    r_video_frame = avcodec_alloc_frame();
    AVPacket dec_video_packet;
    av_new_packet(&dec_video_packet, 65536);
    int width = 0;
    int height = 0;

    while (!cs->quit && cs->receive_video) {
        r_msg = rtp_recv_msg(cs->_rtp_video);

        if (r_msg) {
            memcpy(dec_video_packet.data, r_msg->_data, r_msg->_length);
            dec_video_packet.size = r_msg->_length;
            avcodec_decode_video2(cs->video_decoder_ctx, r_video_frame, &dec_frame_finished, &dec_video_packet);

            if (dec_frame_finished) {
                if (cs->video_decoder_ctx->width != width || cs->video_decoder_ctx->height != height) {
                    width = cs->video_decoder_ctx->width;
                    height = cs->video_decoder_ctx->height;
                    printf("w: %d h: %d\n", width, height);
                    video_decoder_refresh(cs, width, height);
                }

                display_received_frame(cs, r_video_frame);
            } else {
                /* TODO: request the sender to create a new i-frame immediately */
                printf("bad video packet\n");
            }

            rtp_free_msg(cs->_rtp_video, r_msg);
        }

        usleep(1000);
    }

    printf("vend\n");
    /* clean up codecs */
    pthread_mutex_lock(&cs->avcodec_mutex_lock);
    av_free(r_video_frame);
    avcodec_close(cs->video_decoder_ctx);
    pthread_mutex_unlock(&cs->avcodec_mutex_lock);
    pthread_exit(NULL);
}

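/* Receiver side audio loop: pull RTP messages through the jitter buffer,
 * decode them with Opus and queue the PCM onto an OpenAL source for playback. */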
void *decode_audio_thread(void *arg)
{
    codec_state *cs = (codec_state *)arg;
    rtp_msg_t *r_msg;

    int frame_size = AUDIO_FRAME_SIZE;
    int data_size;

    ALCdevice *dev;
    ALCcontext *ctx;
    ALuint source, *buffers;
    dev = alcOpenDevice(NULL);
    ctx = alcCreateContext(dev, NULL);
    alcMakeContextCurrent(ctx);
    int openal_buffers = 5;

    buffers = calloc(sizeof(ALuint) * openal_buffers, 1);
    alGenBuffers(openal_buffers, buffers);
    alGenSources((ALuint)1, &source);
    alSourcei(source, AL_LOOPING, AL_FALSE);

    ALuint buffer;
    ALint val;

    ALenum error;
    uint16_t zeros[frame_size];
    int i;

    for (i = 0; i < frame_size; i++) {
        zeros[i] = 0;
    }

    for (i = 0; i < openal_buffers; ++i) {
        alBufferData(buffers[i], AL_FORMAT_MONO16, zeros, frame_size, 48000);
    }

    alSourceQueueBuffers(source, openal_buffers, buffers);
    alSourcePlay(source);

    if (alGetError() != AL_NO_ERROR) {
        fprintf(stderr, "Error starting audio\n");
        cs->quit = 1;
    }

    struct jitter_buffer *j_buf = NULL;

    j_buf = create_queue(20);

    int success = 0;

    int dec_frame_len;

    opus_int16 PCM[frame_size];

    while (!cs->quit && cs->receive_audio) {
        THREADLOCK()
        r_msg = rtp_recv_msg(cs->_rtp_audio);

        if (r_msg) {
            /* push the packet into the queue */
            queue(j_buf, r_msg);
        }

        /* grab a packet from the queue */
        success = 0;
        alGetSourcei(source, AL_BUFFERS_PROCESSED, &val);

        if (val > 0)
            r_msg = dequeue(j_buf, &success);

        if (success > 0) {
            /* good packet */
            if (success == 1) {
                dec_frame_len = opus_decode(cs->audio_decoder, r_msg->_data, r_msg->_length, PCM, frame_size, 0);
                rtp_free_msg(cs->_rtp_audio, r_msg);
            }

            /* lost packet */
            if (success == 2) {
                printf("lost packet\n");
                dec_frame_len = opus_decode(cs->audio_decoder, NULL, 0, PCM, frame_size, 1);
            }

            if (dec_frame_len > 0) {
                alGetSourcei(source, AL_BUFFERS_PROCESSED, &val);

                if (val <= 0)
                    continue;

                alSourceUnqueueBuffers(source, 1, &buffer);
                data_size = av_samples_get_buffer_size(NULL, 1, dec_frame_len, AV_SAMPLE_FMT_S16, 1);
                alBufferData(buffer, AL_FORMAT_MONO16, PCM, data_size, 48000);
                int error = alGetError();

                if (error != AL_NO_ERROR) {
                    fprintf(stderr, "Error setting buffer %d\n", error);
                    break;
                }

                alSourceQueueBuffers(source, 1, &buffer);

                if (alGetError() != AL_NO_ERROR) {
                    fprintf(stderr, "error: could not buffer audio\n");
                    break;
                }

                alGetSourcei(source, AL_SOURCE_STATE, &val);

                if (val != AL_PLAYING)
                    alSourcePlay(source);
            }
        }

        THREADUNLOCK()
        usleep(1000);
    }

    /* clean up codecs */
    pthread_mutex_lock(&cs->avcodec_mutex_lock);

    /* clean up openal */
    alDeleteSources(1, &source);
    alDeleteBuffers(openal_buffers, buffers);
    alcMakeContextCurrent(NULL);
    alcDestroyContext(ctx);
    alcCloseDevice(dev);
    pthread_mutex_unlock(&cs->avcodec_mutex_lock);
    pthread_exit(NULL);
}