Could not connect to the Bebop drone using the avformat_open_input() function


#1

Hi,

The goal of my project is to show the Bebop camera stream in an OpenCV window.
I found the following code on the internet. The developer told me it should work with the AR.Drone 2.0:

extern "C" {
  #include "libavcodec/avcodec.h"
  #include "libavformat/avformat.h"
  #include "libswscale/swscale.h"
}

include "opencv2/opencv.hpp"
include "testDrone2.h"

/**
* In this program we will not use SDL; OpenCV will display the stream instead.
**/
int main(int argc, char* argv[]) {

  // 3.0. Initialize the video subsystem (must be done before anything else!)
  /*if (SDL_Init(SDL_INIT_VIDEO) < 0) {
    fprintf(stderr, "Unable to init SDL: %s\n", SDL_GetError());
    return -1;
  }*/ // We will not use SDL

  // prepare variables
  // decoding
  const char        *drone_addr = "ftp://192.168.42.1"; // previously: http://192.168.1.1:5555
  AVFormatContext   *pFormatCtx = NULL;
  AVCodecContext    *pCodecCtx;
  AVCodec           *pCodec;
  AVPacket          packet;
  AVFrame           *pFrame;
  int               terminate, frameDecoded;

  // converting
  AVFrame           *pFrame_YUV420P;
  uint8_t           *buffer_YUV420P;
  struct SwsContext *pConvertCtx_YUV420P;

  AVFrame           *pFrame_BGR24;
  uint8_t           *buffer_BGR24;
  struct SwsContext *pConvertCtx_BGR24;

  // displaying
  /*SDL_Window        *pWindow1;
  SDL_Renderer      *pRenderer1;
  SDL_Texture       *bmpTex1;
  uint8_t           *pixels1;
  int               pitch1, size1;

  SDL_Window        *pWindow2;
  SDL_Renderer      *pRenderer2;
  SDL_Texture       *bmpTex2;
  uint8_t           *pixels2;
  int               pitch2, size2;

  SDL_Event         event;*/

  // 1.1 Register all formats and codecs
  av_register_all();// Initialize libavformat and register all the muxers, demuxers and protocols. 
  avcodec_register_all();// Register all the codecs.
  avformat_network_init();// Do global initialization of network components. 

  // 1.2. Open video file
  while(avformat_open_input(&pFormatCtx, drone_addr, NULL, NULL) != 0)
    printf("Could not open the video file\nRetrying...\n");

  // 1.3. Retrieve stream information
  avformat_find_stream_info(pFormatCtx, NULL);
  // Dump information about file to standard output
  av_dump_format(pFormatCtx, 0, drone_addr, 0);

  // 1.4. Get a pointer to the codec context for the video stream
  // and find the decoder for the video stream
  pCodecCtx = pFormatCtx->streams[0]->codec; // assumes stream 0 is the video stream
  pCodec    = avcodec_find_decoder(pCodecCtx->codec_id);

  // 1.5. Open Codec
  avcodec_open2(pCodecCtx, pCodec, NULL); 


  // 2.1.1. Prepare format conversion for displaying with SDL
  // Allocate an AVFrame structure
  pFrame_YUV420P = avcodec_alloc_frame();
  if(pFrame_YUV420P == NULL) {
    fprintf(stderr, "Could not allocate pFrame_YUV420P\n");
    return -1;
  }
  // Determine required buffer size and allocate buffer
  buffer_YUV420P = (uint8_t *)av_malloc(avpicture_get_size(PIX_FMT_YUV420P, 
                                            pCodecCtx->width, pCodecCtx->height));  
  // Assign buffer to image planes
  avpicture_fill((AVPicture *)pFrame_YUV420P, buffer_YUV420P, 
                      PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);
  // format conversion context
  pConvertCtx_YUV420P = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, 
                                       pCodecCtx->width, pCodecCtx->height, PIX_FMT_YUV420P, 
                                       SWS_SPLINE, NULL, NULL, NULL);

  // 2.2.1. Prepare format conversion for OpenCV
  // Allocate an AVFrame structure
  pFrame_BGR24 = avcodec_alloc_frame();
  if(pFrame_BGR24 == NULL) {
    fprintf(stderr, "Could not allocate pFrame_YUV420P\n");
    return -1;
  }
  // Determine required buffer size and allocate buffer
  buffer_BGR24 = (uint8_t *)av_malloc(avpicture_get_size(PIX_FMT_BGR24, 
                                            pCodecCtx->width, pCodecCtx->height));  
  // Assign buffer to image planes
  avpicture_fill((AVPicture *)pFrame_BGR24, buffer_BGR24, 
                      PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height);
  // format conversion context
  pConvertCtx_BGR24 = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, 
                                     pCodecCtx->width, pCodecCtx->height, PIX_FMT_BGR24, 
                                     SWS_SPLINE, NULL, NULL, NULL);

  // 3.1.1 prepare SDL for YUV
  // allocate window, renderer, texture
  /*pWindow1    = SDL_CreateWindow( "YUV", 0, 0, pCodecCtx->width, pCodecCtx->height, SDL_WINDOW_SHOWN);  
  pRenderer1  = SDL_CreateRenderer(pWindow1, -1, SDL_RENDERER_ACCELERATED);
  bmpTex1     = SDL_CreateTexture(pRenderer1, SDL_PIXELFORMAT_YV12, SDL_TEXTUREACCESS_STREAMING, pCodecCtx->width, pCodecCtx->height);
  size1       = pCodecCtx->width * pCodecCtx->height;
  if(pWindow1==NULL || pRenderer1==NULL || bmpTex1==NULL) {
    fprintf(stderr, "Could not open window1\n");
    return -1;
  }*/ // We will not use SDL in this application.

  // 3.2.1 prepare SDL for BGR
  // allocate window, renderer, texture
  /*pWindow2    = SDL_CreateWindow( "BGR", pCodecCtx->width+5, 0, pCodecCtx->width, pCodecCtx->height, SDL_WINDOW_SHOWN);  
  pRenderer2  = SDL_CreateRenderer(pWindow2, -1, SDL_RENDERER_ACCELERATED);
  bmpTex2     = SDL_CreateTexture(pRenderer2, SDL_PIXELFORMAT_BGR24, SDL_TEXTUREACCESS_STREAMING, pCodecCtx->width, pCodecCtx->height);
  size2       = pCodecCtx->width * pCodecCtx->height * 3;
  if(pWindow2==NULL || pRenderer2==NULL || bmpTex2==NULL) {
    fprintf(stderr, "Could not open window2\n");
    return -1;
  }*/ // We will not use SDL in this application.

  // 4.1 Prepare OpenCV matrices. Very important
  cv::Mat   img0(pCodecCtx->height, pCodecCtx->width, CV_8UC3, cv::Scalar(255));
  cv::Mat   img1(pCodecCtx->height, pCodecCtx->width, CV_8UC3, cv::Scalar(255));

  // 1.6. get video frames
  pFrame = avcodec_alloc_frame();
  terminate = 0;

  while(!terminate) {

    // read frame
    if(av_read_frame(pFormatCtx, &packet)<0) {
      fprintf(stderr, "Could not read frame!\n");
      continue;
    }

    // decode the frame
    if(avcodec_decode_video2(pCodecCtx, pFrame, &frameDecoded, &packet) < 0) {
      fprintf(stderr, "Could not decode frame!\n");
      continue;
    }

    if (frameDecoded) {
      // 2.1.2. convert frame to YUV for Displaying
        sws_scale(pConvertCtx_YUV420P, (const uint8_t * const*)pFrame->data, pFrame->linesize, 0,
                  pCodecCtx->height,   pFrame_YUV420P->data, pFrame_YUV420P->linesize);
      // 2.2.2. convert frame to GRAYSCALE [or BGR] for OpenCV
        sws_scale(pConvertCtx_BGR24,   (const uint8_t * const*)pFrame->data, pFrame->linesize, 0,
                  pCodecCtx->height,   pFrame_BGR24->data,   pFrame_BGR24->linesize);

      // 4.2 do something with OpenCV
      // copy the BGR image
      memcpy(img0.data, pFrame_BGR24->data[0], (pCodecCtx->width)*(pCodecCtx->height)*sizeof(uint8_t)*3);
      // sharpen image with Gaussian Blur
      //cv::GaussianBlur(img0, img1, cv::Size(3, 3), cv::BORDER_DEFAULT);
      //cv::addWeighted(img0, 1.5, img1, -0.5, 0, img0);
      // Create an OpenCV window
      cv::namedWindow("Display window", cv::WINDOW_AUTOSIZE);

      // Display the window containing the image
      cv::imshow("Display window", img0);
      cv::waitKey(1); // short delay: waitKey(0) would block on every frame and stall the stream

      // copy back the modified image
      //memcpy(pFrame_BGR24->data[0], img0.data, (pCodecCtx->width)*(pCodecCtx->height)*sizeof(uint8_t)*3);

      // 3.1.2. copy converted YUV to SDL 2.0 texture
      /*SDL_LockTexture(bmpTex1, NULL, (void **)&pixels1, &pitch1);
          memcpy(pixels1,             pFrame_YUV420P->data[0], size1  );
          memcpy(pixels1 + size1,     pFrame_YUV420P->data[2], size1/4);
            memcpy(pixels1 + size1*5/4, pFrame_YUV420P->data[1], size1/4);
      SDL_UnlockTexture(bmpTex1);
      SDL_UpdateTexture(bmpTex1, NULL, pixels1, pitch1);
      // refresh screen
      SDL_RenderClear(pRenderer1);
      SDL_RenderCopy(pRenderer1, bmpTex1, NULL, NULL);
      SDL_RenderPresent(pRenderer1);

      // 3.2.2. copy converted BGR to SDL 2.0 texture
      SDL_LockTexture(bmpTex2, NULL, (void **)&pixels2, &pitch2);
          memcpy(pixels2,             pFrame_BGR24->data[0], size2);
      SDL_UnlockTexture(bmpTex2);
      SDL_UpdateTexture(bmpTex2, NULL, pixels2, pitch2);
      // refresh screen
      SDL_RenderClear(pRenderer2);
      SDL_RenderCopy(pRenderer2, bmpTex2, NULL, NULL);
      SDL_RenderPresent(pRenderer2);*/
    }

    /*SDL_PollEvent(&event);
    switch (event.type) {
      case SDL_KEYDOWN:
        terminate = 1;
        break;
    }*/
  }

  // release
  // *note SDL objects have to be freed before closing avcodec.
  // otherwise it causes segmentation fault for some reason.
  /*SDL_DestroyTexture(bmpTex1);
  SDL_DestroyTexture(bmpTex2);

  SDL_DestroyRenderer(pRenderer1);
  SDL_DestroyRenderer(pRenderer2);

  SDL_DestroyWindow(pWindow1);
  SDL_DestroyWindow(pWindow2);*/ // SDL will not be used in this application

  img0.release();// OpenCV operation.
  img1.release();// OpenCV operation.

  av_free(pFrame_YUV420P);
  av_free(buffer_YUV420P);
  sws_freeContext(pConvertCtx_YUV420P);

  av_free(pFrame_BGR24);
  av_free(buffer_BGR24);
  sws_freeContext(pConvertCtx_BGR24);

  av_free(pFrame);
  avcodec_close(pCodecCtx); // <- all objects allocated after this point must be freed before closing it
  avformat_close_input(&pFormatCtx);

  //SDL_Quit();

  return 0;

}

What do I have to do in order to show the Bebop camera stream in an OpenCV window? Could you help me, please?


#2

Hi,

This code was working with the AR.Drone 2.0 because the video was transferred in a different way (on a separate socket).

With the Bebop, we now use the ARNetwork/ARStream libraries to handle the video streaming. This is a custom transport protocol which is not handled by ffmpeg directly.

You can use the BebopDroneDecodeStream sample (included with the SDK) as a working base: it connects to a Bebop drone, sets up everything needed to get the video, and calls ffmpeg to decode it.
You can customize the Decode_RunDataThread function to display the video stream as you need.
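Just to give an idea of the shape that customization could take, here is a rough, untested sketch (this is not SDK code; displayDecodedFrame and its parameters are placeholder names). It assumes you can get each decoded frame as one contiguous YUV420P (I420) buffer plus its width and height; OpenCV can then do the colorspace conversion itself:

#include <opencv2/opencv.hpp>
#include <cstdint>

// Hypothetical hook: call this from your decode loop with each decoded frame.
// 'i420' is assumed to point to width*height*3/2 contiguous bytes
// (Y plane, then U plane, then V plane).
void displayDecodedFrame(const uint8_t *i420, int width, int height)
{
  // Wrap the raw I420 buffer in a single-channel Mat of height*3/2 rows.
  cv::Mat yuv(height + height / 2, width, CV_8UC1, const_cast<uint8_t *>(i420));

  // Convert to BGR so OpenCV can display it.
  cv::Mat bgr;
  cv::cvtColor(yuv, bgr, cv::COLOR_YUV2BGR_I420);

  cv::imshow("Bebop stream", bgr);
  cv::waitKey(1); // short delay so the window refreshes without blocking the loop
}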

Regards,
Nicolas.


#3

Hi Nicolas,

OK, I see. However, when I run BebopDroneDecodeStream I always get a segmentation fault.
I also have another question: in the Decode_RunDataThread function, which variable holds the decoded frame? Is it avframe?
Afterwards, do you have to convert this YUV frame to RGB in order to display it in an OpenCV window?


#4

Hi,

The sample should run fine… Can you share a gist of the sample output so we can try to find what’s wrong?

Regardless, in BebopDroneDecodeStream.c, line 302, we call the ARCODECS_Manager_Decode() function. This function will return a pointer to the decoded frame in YUV420P format. If you need to use an RGB format, you’ll have to convert each frame (if I remember correctly, you can do it using the swscale API of ffmpeg, but we don’t have a sample for this).
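For illustration only (again, we don’t have an official sample for this), a minimal conversion with swscale could look like the sketch below. It uses the same ffmpeg-era API as the code in post #1; srcFrame, width and height are placeholders for what you get back from the decoder:

extern "C" {
  #include "libavcodec/avcodec.h"
  #include "libswscale/swscale.h"
}

// Convert one decoded YUV420P frame to BGR24.
AVFrame *convertToBGR24(AVFrame *srcFrame, int width, int height)
{
  AVFrame *bgrFrame = avcodec_alloc_frame();
  uint8_t *buffer   = (uint8_t *)av_malloc(avpicture_get_size(PIX_FMT_BGR24, width, height));
  avpicture_fill((AVPicture *)bgrFrame, buffer, PIX_FMT_BGR24, width, height);

  // For clarity the context is created per call; in a real loop, create it once and reuse it.
  struct SwsContext *ctx = sws_getContext(width, height, PIX_FMT_YUV420P,
                                          width, height, PIX_FMT_BGR24,
                                          SWS_BILINEAR, NULL, NULL, NULL);
  sws_scale(ctx, (const uint8_t * const*)srcFrame->data, srcFrame->linesize, 0, height,
            bgrFrame->data, bgrFrame->linesize);
  sws_freeContext(ctx);

  // Caller releases with av_free(bgrFrame->data[0]) and then av_free(bgrFrame).
  return bgrFrame;
}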

Regards,
Nicolas.


#5

This is the output from my Linux terminal:

-- Bebop Drone Decode Video Stream --

Arrow keys to move ;
'z' = up ; 's' = down ; 'q' = yaw left ; 'd' = yaw right;
Spacebar to take off/land ;
'k' = move camera down ; 'i' = move camera up ; 'j' = move camera left ; 'l' = move camera right ;
'm' = EMERGENCY
'esc' to quit

Battery: 100

Flying state : landedSegmentation fault (core dumped)
make: *** [run] Error 139

I do not know what is happening. If this program does not work, I do not think I will be able to integrate OpenCV and ffmpeg into BebopDroneDecodeStream.

Thank you very much for your help


#6

When I delete this code, the segmentation fault does not appear.

// fork the process to launch ffplay
/*if ((child = fork()) == 0)
{
    ARSAL_PRINT(ARSAL_PRINT_ERROR, TAG, "salut");
    // redirect stdout and stderr of mplayer to /dev/null to avoid messing with ncurses
    int stdout_fd = open("/dev/null", O_RDONLY);
    if (stdout_fd == -1)
        return -1;
    dup2(stdout_fd, STDOUT_FILENO);
    close(stdout_fd);
    int stderr_fd = open("/dev/null", O_RDONLY);
    if (stderr_fd == -1)
        return -1;
    dup2(stderr_fd, STDERR_FILENO);
    close(stderr_fd);
    /*execlp("mplayer", "mplayer", "./video_decoded_fifo", "-demuxer", "rawvideo", "-rawvideo", "w=640:h=368:fps=30:format=i420", ">/dev/null", "2>/dev/null", NULL);
    ARSAL_PRINT(ARSAL_PRINT_ERROR, TAG, "Missing mplayer, you will not see the video. Please install mplayer.");
    return -1;
}*/

However, my Linux terminal shows:

LD_LIBRARY_PATH=../../../ARSDKBuildUtils/Targets/Unix/Install/lib ./BebopDroneDecodeStream
[INF] BebopDroneReceiveStream | 18:57:02:337 | main:431 - -- Starting --

As you can see, the "IHM" (the text interface) does not appear.
Do you know why?


#7

Dear,
I found out that OpenCV provides the VideoCapture class, which is a very simple way to decode frames and convert them to BGR format (VideoCapture uses the FFmpeg decoder, so you should install the FFmpeg dependencies along with OpenCV)! I tested it and it worked with the BebopDroneReceiveStream sample.
However, even though VideoCapture can read a live stream (real-time video) from cameras connected to my PC, I don’t know how to read the live stream from the video_fifo FILE the way mplayer did (I NEED help with this problem). The code I used for testing includes a trick: wait for some time (e.g. 10 s) for the stream to be written to the FILE, then use VideoCapture to read the FILE and show it on screen. I also tried a loop that reads video_fifo again to show it on screen, but only the first read works.
The following is my tested code:

// This code is added where the main program sleeps for two minutes: sleep(120);
sleep(10); // wait 10 s for the stream to be written to the FILE video_fifo (the real-time problem)
VideoCapture cap("video_fifo"); // read the video stream from the FILE
Mat frame;
namedWindow("video_out");
bool notFinish = true;
while (notFinish)
{
    notFinish = cap.read(frame); // grab a video frame; false when no more frames load
    if (notFinish)
    {
        imshow("video_out", frame); // show the frame
        waitKey(50);
    }
}
cap.release();
// Then try to continue reading from the file, but it crashed:
VideoCapture cap1("video_fifo"); // read the video stream from the FILE
notFinish = true; // reuse 'frame' from above (redeclaring it would not compile)
while (notFinish)
{
    notFinish = cap1.read(frame); // grab a video frame
    if (notFinish)
    {
        imshow("video_out", frame); // show the frame
        waitKey(50);
    }
}
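I suspect the second read crashes because a named pipe (FIFO) cannot be rewound or reopened once it has been read to EOF. So my guess (untested, just a sketch) is that the right approach is to open the FIFO once and keep reading from the same VideoCapture for the whole stream, something like:

#include <opencv2/opencv.hpp>

int main()
{
    // Open the FIFO once; a named pipe cannot be rewound or reopened mid-stream.
    cv::VideoCapture cap("video_fifo");
    if (!cap.isOpened())
        return -1;

    cv::Mat frame;
    cv::namedWindow("video_out");

    // Keep pulling frames from the same capture for the life of the stream.
    while (cap.read(frame))
    {
        cv::imshow("video_out", frame);
        if (cv::waitKey(50) == 27) // Esc quits
            break;
    }
    return 0;
}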
P.S.: Sorry if my English is not good, since I’m Vietnamese.
Thank you!


#8

Hi darknet,

Your program seems very interesting and I’m going to try it.


#9

Hi Projecttaquaknow,
I’m working on a similar project to yours, and it might be better if we contact each other for further discussion.
I have fixed the last problem, and right now I can read the live stream from the camera using OpenCV’s VideoCapture, although with some delay (the delay is unacceptable for a real-time application). I found one possible solution but haven’t verified it yet!
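The idea I want to try (just a sketch, I haven’t verified it against the Bebop; "video_fifo" is the same hypothetical source as in my post above) is to read frames on a dedicated thread and keep only the newest one, so the display never falls behind the pipe:

#include <opencv2/opencv.hpp>
#include <atomic>
#include <mutex>
#include <thread>

int main()
{
    cv::VideoCapture cap("video_fifo");
    if (!cap.isOpened())
        return -1;

    cv::Mat latest;
    std::mutex m;
    std::atomic<bool> running(true);

    // Reader thread: drain the stream as fast as possible, keeping only the newest frame.
    std::thread reader([&]() {
        cv::Mat f;
        while (running && cap.read(f)) {
            std::lock_guard<std::mutex> lock(m);
            f.copyTo(latest);
        }
        running = false;
    });

    // Display loop: always shows the most recent frame, never a backlog.
    cv::namedWindow("video_out");
    while (running) {
        cv::Mat f;
        {
            std::lock_guard<std::mutex> lock(m);
            latest.copyTo(f);
        }
        if (!f.empty())
            cv::imshow("video_out", f);
        if (cv::waitKey(15) == 27) // Esc quits
            running = false;
    }
    reader.join();
    return 0;
}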
If you are interested, please contact me on Facebook for further discussion: https://www.facebook.com/le.thaison.7
Best regards,
Le Thai Son.


#10

Hi darknet,

I’ve just sent you an invitation on Facebook. My account is Pietro Fini.


#11

Regarding the segmentation fault, error 139 (http://www.unixguide.net/linux/faq/09.06.shtml): could it be due to computer performance?
When I used my laptop I got the same segmentation fault, but when I moved to a desktop (which has a much better CPU and GPU than my laptop), the segmentation fault went away.

That said, I bought my laptop (http://shop.lenovo.com/us/en/laptops/lenovo/y-series/y410p/) only 1.5 years ago, so its performance is decent, if not great.


#12

I found this post: CPU usage - Samples code - linux.
According to that post, the CPU was very busy when running the Bebop decode sample code.