I am multicasting a video stream using RTP (rtph264pay) and UDP (udpsink) from a source node. At the client end the video stream is decoded and displayed as it streams. I have coded this using GStreamer in C/C++. I now want to
1. At source, capture the sequence no., packet size (in bytes) and time stamp of each packet of the video stream being transmitted over the network.
2. At receiver, capture the sequence no. and time stamp of the received packets so as to measure delay and packet loss.
It would be great if someone could suggest a way to implement this in C/C++. I do not want to use the GST debug output, since I have to do some processing with the video packets. Here is the code for the server and the client.
1. At source, capture the sequence no., packet size (in bytes) and time stamp of each packet of the video stream being transmitted over the network.
2. At receiver, capture the sequence no. and time stamp of the received packets so as to measure delay and packet loss.
It would be great if someone could help me suggest a way to implement this in C/C++. I do not want to use gst debug since I have to do some processing with the video packets. Here is the code for Server and client.
Code:
SERVER
#include<gst/gst.h>
/* Bus watch callback for the sender pipeline: stops the main loop when the
 * stream finishes or an error is posted.  Always returns TRUE so the watch
 * stays installed. */
static gboolean
bus_call (GstBus *bus,
GstMessage *msg,
gpointer data)
{
  GMainLoop *main_loop = (GMainLoop *) data;
  GstMessageType type = GST_MESSAGE_TYPE (msg);

  if (type == GST_MESSAGE_EOS) {
    g_print ("End of video stream\n");
    g_main_loop_quit (main_loop);
  } else if (type == GST_MESSAGE_ERROR) {
    GError *err = NULL;
    gchar *dbg_info = NULL;

    gst_message_parse_error (msg, &err, &dbg_info);
    g_free (dbg_info);
    g_printerr ("Error: %s\n", err->message);
    g_error_free (err);
    g_main_loop_quit (main_loop);
  }

  return TRUE;
}
/* "pad-added" handler: qtdemux creates its source pads dynamically, so the
 * demuxer-to-decoder link must be made here, once the pad exists.
 * `data` is the decoder element passed at g_signal_connect() time. */
static void
on_pad_added (GstElement *element,
GstPad *pad,
gpointer data)
{
  GstElement *decoder = (GstElement *) data;
  GstPad *sinkpad;
  GstPadLinkReturn link_ret;

  g_print ("Dynamic pad created, linking demuxer/decoder\n");

  sinkpad = gst_element_get_static_pad (decoder, "sink");
  if (sinkpad == NULL) {
    g_printerr ("Decoder has no static sink pad\n");
    return;
  }

  /* Bug fix: the original ignored the result of gst_pad_link(), so a failed
   * link (e.g. an audio pad from the demuxer, or an already-linked sink)
   * went unnoticed. */
  link_ret = gst_pad_link (pad, sinkpad);
  if (GST_PAD_LINK_FAILED (link_ret))
    g_printerr ("Failed to link demuxer pad to decoder (code %d)\n", link_ret);

  gst_object_unref (sinkpad);
}
/* All sender-side pipeline state, bundled so main() can keep a single
 * variable.  Holds every element of the send pipeline plus the main loop
 * and bus bookkeeping. */
typedef struct Custom_Data{
GMainLoop *loop; /* main loop driving the pipeline */
GstElement *pipeline, *source, *demuxer, *decoder, *aratio, *encoder, *rtp, *usink, *que, *videoconvert, *videoscale_capsfilter, *videoscale; /* NOTE(review): `que` is created but never added/linked in main() */
GstBus *bus; /* pipeline message bus */
guint bus_watch_id; /* id returned by gst_bus_add_watch() */
guint bitrate; /* NOTE(review): never set or read anywhere in this file */
GstCaps *c, *videoscalecaps; /* scale caps for the capsfilter; `c` is unused */
}Custom_Data;
int
main (int argc,
char *argv[])
{
Custom_Data Data;
/* Initialisation */
gst_init (&argc, &argv);
Data.loop = g_main_loop_new (NULL, FALSE);
/* Check input arguments */
if (argc != 2) {
g_printerr ("Usage: %s <cif filename>\n", argv[0]);
return -1;
}
Data.videoscalecaps = gst_caps_from_string("video/x-raw, width=1024, height=768");
Data.pipeline = gst_pipeline_new ("video-send");
Data.source = gst_element_factory_make ("filesrc", "file-source");
g_assert (Data.source);
Data.demuxer = gst_element_factory_make ("qtdemux", "demuxer");
g_assert (Data.demuxer);
Data.que = gst_element_factory_make ("queue", "que");
g_assert (Data.que);
Data.decoder = gst_element_factory_make ("avdec_h264", "decoder");
g_assert (Data.decoder);
Data.videoscale = gst_element_factory_make ("videoscale", "scale");
g_assert (Data.videoscale);
Data.videoscale_capsfilter = gst_element_factory_make ("capsfilter", "videoscale_capsfilter");
g_assert (Data.videoscale_capsfilter);
Data.aratio=gst_element_factory_make ("aspectratiocrop", "aratio");
g_assert (Data.aratio);
Data.videoconvert = gst_element_factory_make ("videoconvert", "video-convert");
g_assert (Data.videoconvert);
Data.encoder = gst_element_factory_make ("x264enc", "encoder");
g_assert (Data.encoder);
Data.rtp = gst_element_factory_make ("rtph264pay", "rtp");
g_assert (Data.rtp);
Data.usink = gst_element_factory_make ("udpsink", "udp_sink");
g_assert (Data.usink);
g_object_set (G_OBJECT (Data.source), "location", argv[1], NULL);
g_object_set (G_OBJECT (Data.source), "do-timestamp", true, NULL);
g_object_set (G_OBJECT (Data.aratio), "aspect-ratio", 4, 3, NULL);
g_object_set (G_OBJECT (Data.encoder), "b-adapt", true, NULL);
g_object_set (G_OBJECT (Data.usink), "host", 224.0.0.0, NULL);
g_object_set (G_OBJECT (Data.usink), "port", 5007, NULL);
g_object_set (G_OBJECT (Data.usink), "auto-multicast", TRUE, NULL);
g_object_set( G_OBJECT ( Data.videoscale_capsfilter ), "caps", Data.videoscalecaps, NULL );
Data.bus = gst_pipeline_get_bus (GST_PIPELINE (Data.pipeline));
Data.bus_watch_id = gst_bus_add_watch (Data.bus, bus_call, Data.loop);
gst_object_unref (Data.bus);
gst_bin_add(GST_BIN (Data.pipeline), Data.source);
gst_bin_add(GST_BIN (Data.pipeline), Data.demuxer);
gst_bin_add(GST_BIN (Data.pipeline), Data.decoder);
gst_bin_add(GST_BIN (Data.pipeline), Data.videoscale);
gst_bin_add(GST_BIN (Data.pipeline), Data.videoscale_capsfilter);
gst_bin_add(GST_BIN (Data.pipeline), Data.aratio);
gst_bin_add(GST_BIN (Data.pipeline), Data.videoconvert);
gst_bin_add(GST_BIN (Data.pipeline), Data.encoder);
gst_bin_add(GST_BIN (Data.pipeline), Data.rtp);
gst_bin_add(GST_BIN (Data.pipeline), Data.usink);
if(!gst_element_link (Data.source, Data.demuxer))
{
g_printerr ("Here is the problem.\n");
}
if(!gst_element_link_many (Data.decoder, Data.videoscale, Data.videoscale_capsfilter, Data.aratio, Data.videoconvert, Data.encoder, Data.rtp, Data.usink, NULL))
{
g_printerr ("Here is the problem too.\n");
}
g_signal_connect (Data.demuxer, "pad-added", G_CALLBACK (on_pad_added), Data.decoder);
g_print ("Now playing: %s\n", argv[1]);
gst_element_set_state (Data.pipeline, GST_STATE_PLAYING);
g_main_loop_run (Data.loop);
gst_element_set_state (Data.pipeline, GST_STATE_NULL);
gst_object_unref (GST_OBJECT (Data.pipeline));
g_source_remove (Data.bus_watch_id);
g_main_loop_unref (Data.loop);
return 0;
}
CLIENT
#include<gst/gst.h>
/* Bus watch callback for the receiver pipeline: quits the main loop on
 * end-of-stream or on an error message.  Returns TRUE to keep the watch
 * installed. */
static gboolean
bus_call (GstBus *bus,
GstMessage *msg,
gpointer data)
{
  GMainLoop *main_loop = (GMainLoop *) data;
  GstMessageType type = GST_MESSAGE_TYPE (msg);

  if (type == GST_MESSAGE_EOS) {
    g_print ("End of stream\n");
    g_main_loop_quit (main_loop);
  } else if (type == GST_MESSAGE_ERROR) {
    GError *err = NULL;
    gchar *dbg_info = NULL;

    gst_message_parse_error (msg, &err, &dbg_info);
    g_free (dbg_info);
    g_printerr ("Error: %s\n", err->message);
    g_error_free (err);
    g_main_loop_quit (main_loop);
  }

  return TRUE;
}
/* "pad-added" handler: links a newly created source pad on `element` to the
 * sink pad of the element passed as `data`.
 * NOTE(review): in this client it is connected to avdec_h264, which has only
 * static pads and never emits "pad-added", so this currently never runs —
 * confirm the intended connection point. */
static void
on_pad_added (GstElement *element,
GstPad *pad,
gpointer data)
{
  GstElement *decoder = (GstElement *) data;
  GstPad *sinkpad;
  GstPadLinkReturn link_ret;

  g_print ("Dynamic pad created, linking demuxer/decoder\n");

  sinkpad = gst_element_get_static_pad (decoder, "sink");
  if (sinkpad == NULL) {
    g_printerr ("Target element has no static sink pad\n");
    return;
  }

  /* Bug fix: the original discarded the link result, hiding any failure. */
  link_ret = gst_pad_link (pad, sinkpad);
  if (GST_PAD_LINK_FAILED (link_ret))
    g_printerr ("Failed to link dynamic pad (code %d)\n", link_ret);

  gst_object_unref (sinkpad);
}
/* All receiver-side pipeline state, bundled for main(). */
typedef struct _CustomData {
GstElement *pipeline, *source, *rtp, *decoder, *sink, *videomixer, *videoconvert, *videoscale, *que, *filter; /* NOTE(review): videomixer is never created; videoconvert/videoscale/filter are created but never added to the pipeline */
GstState state; /* NOTE(review): never read or written in this file */
} CustomData;
int main(int argc, char *argv[]) {
CustomData data;
GMainLoop *loop;
GstPad *pad;
GstCaps* caps, *filtercaps;
GstBus *bus;
GstStateChangeReturn ret;
gst_init (&argc, &argv);
caps = gst_caps_from_string( "application/x-rtp, media = (string) video, clock-rate = (int) 90000, encoding-name =(string) H264, Payload =(int)96");
loop = g_main_loop_new (NULL, FALSE);
if (argc != 1) {
g_printerr ("Usage: %s <udpsrc>\n", argv[0]);
return -1;
}
data.pipeline = gst_pipeline_new ("video-receive");
data.source = gst_element_factory_make ("udpsrc", "udp-source");
data.videoscale = gst_element_factory_make ("videoscale", "video-scale");
data.videoconvert = gst_element_factory_make ("videoconvert", "video-convert");
data.sink = gst_element_factory_make ("xvimagesink", "video-output");
data.rtp = gst_element_factory_make ("rtph264depay", "rtp");
data.decoder = gst_element_factory_make ("avdec_h264", "decoder");
data.filter = gst_element_factory_make ("capsfilter", "filtercaps");
data.que = gst_element_factory_make ("queue", "que");
filtercaps = gst_caps_new_simple ("video/x-raw", "format", G_TYPE_STRING, "I420", "width", G_TYPE_INT, 200, "height", G_TYPE_INT, 200, NULL);
if (!data.pipeline|| !data.source || !data.rtp || !data.decoder || !data.sink)
{
g_printerr ("One element could not be created.\n");
return -1;
}
g_object_set (G_OBJECT (data.filter), "caps", filtercaps, NULL);
bus = gst_element_get_bus (data.pipeline);
gst_bus_add_signal_watch (bus);
gst_object_unref (bus);
g_object_set (G_OBJECT (data.source), "multicast group", 224.0.0.0, NULL);
g_object_set (G_OBJECT (data.source), "port", 5007, NULL);
g_object_set(G_OBJECT (data.source), "caps", caps, NULL);
g_object_set (G_OBJECT (data.source), "do-timestamp", true, NULL);
g_object_set( G_OBJECT (data.sink), "sync", FALSE, NULL);
gst_bin_add_many (GST_BIN (data.pipeline), data.source, data.rtp, data.decoder, data.que, data.sink, NULL);
gst_element_link(data.source, data.decoder);
gst_element_link_many(data.source, data.rtp, data.decoder, data.que, data.sink, NULL);
g_signal_connect(data.decoder, "pad-added", G_CALLBACK (on_pad_added), data.sink);
g_print ("Now playing...\n");//: %s\n", argv[1]);
gst_element_set_state (data.pipeline, GST_STATE_PLAYING);
g_print ("Running...\n");
g_main_loop_run (loop);
g_print ("Deleting pipeline\n");
gst_object_unref (GST_OBJECT (data.pipeline));
g_main_loop_unref (loop);
return 0;
}