Streaming camera images with a live555 RTSP server on Android

The hardware-encoding part of this app is adapted from the sample code in the article below, with live555 and libyuv added on top:
Android硬编解码接口MediaCodec使用完全解析
That article is very well written and worth a read.

How to build live555 and libyuv was covered in my earlier posts, so I won't repeat the build steps here; this post focuses on how to hand the camera images over to the live555 server.

The camera preview frames are encoded into an H264 stream by MediaCodec and pushed into a blocking queue. When the RTSP code in the JNI layer needs data, it calls back into the getFrame method, which takes the next frame off that queue.
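
As a rough illustration of the encoder-to-queue handoff (my own sketch, not the project's code: the encoder is assumed to be an already-configured H264 MediaCodec, and the queue is the ArrayBlockingQueue held by RtspServer, shown further down), the drain side can look like this:

// EncoderDrain.java (sketch, not from the original project)
import android.media.MediaCodec;
import java.nio.ByteBuffer;
import java.util.concurrent.ArrayBlockingQueue;

public class EncoderDrain {
    // Pull encoded H264 buffers out of the codec and hand them to the queue
    // that RtspServer.getFrame() reads from.
    public static void drain(MediaCodec encoder, ArrayBlockingQueue<byte[]> queue) {
        MediaCodec.BufferInfo info = new MediaCodec.BufferInfo();
        int index = encoder.dequeueOutputBuffer(info, 10000); // wait up to 10 ms
        while (index >= 0) {
            ByteBuffer out = encoder.getOutputBuffer(index);
            byte[] frame = new byte[info.size];
            out.position(info.offset);
            out.get(frame, 0, info.size);
            // The codec-config buffer (SPS/PPS) is forwarded too, so live555's
            // H264VideoStreamFramer can pick the parameter sets out of the stream.
            queue.offer(frame); // drop the frame if the queue is full
            encoder.releaseOutputBuffer(index, false);
            index = encoder.dequeueOutputBuffer(info, 0);
        }
    }
}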

First, we need a DeviceSource.cpp. The live555 source tree ships a template, so copy it into the jni root directory and copy DeviceSource.hh into jni/include. To keep it distinct from the original, both are renamed DeviceSource1.

//DeviceSource1.cpp
#include "DeviceSource1.hh"
#include <GroupsockHelper.hh> // for "gettimeofday()"
static int8_t buf[8192*4]; // holds the most recent encoded frame fetched from the Java layer
static int count = 0;      // size in bytes of that frame
DeviceSource1*
DeviceSource1::createNew(UsageEnvironment& env) {
  return new DeviceSource1(env);
}

EventTriggerId DeviceSource1::eventTriggerId = 0;

unsigned DeviceSource1::referenceCount = 0;

DeviceSource1::DeviceSource1(UsageEnvironment& env)
  : FramedSource(env) {
  if (referenceCount == 0) {
    // Any global initialization of the device would be done here:
    //%%% TO BE WRITTEN %%%
  }
  ++referenceCount;

  if (eventTriggerId == 0) {
    eventTriggerId = envir().taskScheduler().createEventTrigger(deliverFrame0);
  }
}

DeviceSource1::~DeviceSource1() {
  --referenceCount;
  if (referenceCount == 0) {
    envir().taskScheduler().deleteEventTrigger(eventTriggerId);
    eventTriggerId = 0;
  }
}

void DeviceSource1::doGetNextFrame() {
  // getNextFrame() blocks until the Java side hands over the next encoded frame.
  count = getNextFrame(buf);
  deliverFrame();
}

void DeviceSource1::deliverFrame0(void* clientData) {
  ((DeviceSource1*)clientData)->deliverFrame();
}

void DeviceSource1::deliverFrame() {
  if (!isCurrentlyAwaitingData()) return; // we're not ready for the data yet

  u_int8_t* newFrameDataStart = (u_int8_t*)buf;             // frame fetched in doGetNextFrame()
  unsigned newFrameSize = static_cast<unsigned int>(count); // its size in bytes

  // Deliver the data here:
  if (newFrameSize > fMaxSize) {
    fFrameSize = fMaxSize;
    fNumTruncatedBytes = newFrameSize - fMaxSize;
  } else {
    fFrameSize = newFrameSize;
  }
  gettimeofday(&fPresentationTime, NULL); // If you have a more accurate time - e.g., from an encoder - then use that instead.
  // If the device is *not* a 'live source' (e.g., it comes instead from a file or buffer), then set "fDurationInMicroseconds" here.
  memmove(fTo, newFrameDataStart, fFrameSize);

  // After delivering the data, inform the reader that it is now available:
  FramedSource::afterGetting(this);
}
//DeviceSource1.hh
#ifndef _DEVICE_SOURCE1_HH
#define _DEVICE_SOURCE1_HH

#include <jni.h>

#ifndef _FRAMED_SOURCE_HH
#include "FramedSource.hh"
#endif

class DeviceSource1: public FramedSource {
public:
  static DeviceSource1* createNew(UsageEnvironment& env);

public:
  static EventTriggerId eventTriggerId;
  // Note that this is defined here to be a static class variable, because this code is intended to illustrate how to
  // encapsulate a *single* device - not a set of devices.
  // You can, however, redefine this to be a non-static member variable.

protected:
  DeviceSource1(UsageEnvironment& env);
  // called only by createNew(), or by subclass constructors
  virtual ~DeviceSource1();

private:
  // redefined virtual functions:
  virtual void doGetNextFrame();
  virtual int getNextFrame(int8_t* buf); // added: pulls the next encoded frame from the Java layer
  //virtual void doStopGettingFrames(); // optional

private:
  static void deliverFrame0(void* clientData);
  void deliverFrame();

private:
  static unsigned referenceCount; // used to count how many instances of this class currently exist
};

#endif

The main change compared with the stock DeviceSource is the added method int getNextFrame(int8_t* buf);, which is what fetches the camera data from the Java layer.

It is called from doGetNextFrame in DeviceSource1.cpp:

void DeviceSource1::doGetNextFrame() {
  count = getNextFrame(buf);
  deliverFrame();
}

The implementation of that method goes straight into rtsp.cpp, where the JNI code lives.
(I haven't written much C++, so the code is rough. It's only meant to get the demo working, so I wrote whatever came to mind first; I should sit down and learn C++ properly later.)

It simply calls com.example.rtsp.RtspServer.getFrame. Note that in JNI you have to delete local references promptly. The jbyteArray below, for example, is a local reference: if it is never deleted, and this function keeps getting called from other JNI code with no point at which the reference could be released, you eventually exceed the 512-entry local reference table limit and the app aborts.

int DeviceSource1::getNextFrame(int8_t* buf) {
    int c = getFrame(buf);
    return c;
}


int getFrame(int8_t* buf) {
    JavaVM *javaVM = g_ctx.javaVM;
    JNIEnv *env;

    // This runs on the thread that called RtspServer.loop(), i.e. a Java thread,
    // so GetEnv succeeds here without needing AttachCurrentThread.
    jint res = javaVM->GetEnv((void **) &env, JNI_VERSION_1_6);
    if (res != JNI_OK) {
        return 0;
    }
    jbyteArray arr = (jbyteArray) env->CallStaticObjectMethod(g_ctx.RtspClz, g_ctx.getFrame);
    int count = env->GetArrayLength(arr);
    if (count > 8192 * 4) count = 8192 * 4; // don't overflow the static buffer in DeviceSource1.cpp
    env->GetByteArrayRegion(arr, 0, count, buf);

    // Delete the local reference right away so the local reference table cannot overflow.
    env->DeleteLocalRef(arr);
    return count;
}

The class and method IDs are set up once in JNI_OnLoad. The jclass returned by FindClass is a local reference, so it cannot be stored in a global variable directly; create a global reference with NewGlobalRef, and remember to release that global reference (DeleteGlobalRef) when it is no longer needed.

extern "C"
JNIEXPORT jint JNICALL JNI_OnLoad(JavaVM* vm, void* reserved) {
    JNIEnv* env;
    memset(&g_ctx, 0, sizeof(g_ctx));

    g_ctx.javaVM = vm;
    if (vm->GetEnv((void**)&env, JNI_VERSION_1_6) != JNI_OK) {
        return JNI_ERR; // JNI version not supported.
    }

    jclass clz = env->FindClass("com/example/rtsp/RtspServer");
    g_ctx.RtspClz = (jclass)env->NewGlobalRef(clz);
    g_ctx.getFrame = env->GetStaticMethodID(g_ctx.RtspClz, "getFrame", "()[B");

    return  JNI_VERSION_1_6;
}

The RtspServer class has a native method, loop, that starts the RTSP service, and getFrame is the method the JNI layer calls to fetch the camera data:

package com.example.rtsp;

import android.util.Log;

import java.util.concurrent.ArrayBlockingQueue;

public class RtspServer {
    static {
        System.loadLibrary("rtsp");
    }

    public static native void loop(String addr);

    public static ArrayBlockingQueue<byte[]> queue;

    public static void setQueue(ArrayBlockingQueue<byte[]> queue) {
        RtspServer.queue = queue;
    }

    public static byte[] getFrame() {
        byte[] take = new byte[1];
        try {
            take = RtspServer.queue.take();
        } catch (Exception e) {
            e.printStackTrace();
        }
        return take;
    }
}
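
Wiring this class up might look roughly like the following (an assumed usage sketch, not the project's MainActivity; the address passed to loop is the receiving client's IP, and since doEventLoop() inside loop never returns, it has to run off the UI thread):

// StartRtsp.java (usage sketch, names assumed)
import java.util.concurrent.ArrayBlockingQueue;

public class StartRtsp {
    public static void start(final String clientIp) {
        ArrayBlockingQueue<byte[]> queue = new ArrayBlockingQueue<>(30);
        RtspServer.setQueue(queue); // the encoder thread offers encoded frames into this queue
        // doEventLoop() never returns, so keep the RTSP loop off the UI thread.
        new Thread(new Runnable() {
            @Override
            public void run() {
                RtspServer.loop(clientIp);
            }
        }).start();
    }
}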

Now for the loop method. It is adapted from testH264VideoStreamer.cpp, with the original ByteStreamFileSource replaced by DeviceSource1. The address passed in is the IP address of the receiving client. Once the server starts, the RTSP URL can be read from the log line LOGI("Play this stream using the URL \"%s\"", url);.

extern "C" JNIEXPORT void JNICALL
Java_com_example_rtsp_RtspServer_loop(JNIEnv *env, jobject obj, jstring addr) {
    // Begin by setting up our usage environment:
    TaskScheduler *scheduler = BasicTaskScheduler::createNew();
    uEnv = BasicUsageEnvironment::createNew(*scheduler);

    // Create 'groupsocks' for RTP and RTCP:
    struct in_addr destinationAddress;
    const char *_addr = env->GetStringUTFChars(addr, NULL);
    destinationAddress.s_addr = our_inet_addr(_addr); /*chooseRandomIPv4SSMAddress(*uEnv);*/
    env->ReleaseStringUTFChars(addr, _addr);
    // Note: in the original testH264VideoStreamer this is a multicast address.
    // Here the RTP/RTCP packets are sent straight to the receiver's unicast IP
    // instead.  For proper on-demand unicast streaming, model the code on the
    // "testOnDemandRTSPServer" test program rather than this one.

    const unsigned short rtpPortNum = 18888;
    const unsigned short rtcpPortNum = rtpPortNum + 1;
    const unsigned char ttl = 255;

    const Port rtpPort(rtpPortNum);
    const Port rtcpPort(rtcpPortNum);

    Groupsock rtpGroupsock(*uEnv, destinationAddress, rtpPort, ttl);
//    rtpGroupsock.multicastSendOnly(); // we're a SSM source
    Groupsock rtcpGroupsock(*uEnv, destinationAddress, rtcpPort, ttl);
//    rtcpGroupsock.multicastSendOnly(); // we're a SSM source

    // Create a 'H264 Video RTP' sink from the RTP 'groupsock':
    OutPacketBuffer::maxSize = 100000;
    videoSink = H264VideoRTPSink::createNew(*uEnv, &rtpGroupsock, 96);

    // Create (and start) a 'RTCP instance' for this RTP sink:
    const unsigned estimatedSessionBandwidth = 500; // in kbps; for RTCP b/w share
    const unsigned maxCNAMElen = 100;
    unsigned char CNAME[maxCNAMElen + 1];
    gethostname((char *) CNAME, maxCNAMElen);
    CNAME[maxCNAMElen] = '\0'; // just in case
    RTCPInstance *rtcp = RTCPInstance::createNew(*uEnv, &rtcpGroupsock, estimatedSessionBandwidth, CNAME, videoSink, NULL /* we're a server */);
    // Note: This starts RTCP running automatically

    RTSPServer *rtspServer = RTSPServer::createNew(*uEnv, 0);
    if (rtspServer == NULL) {
        LOGE("Failed to create RTSP server: %s", uEnv->getResultMsg());
        exit(1);
    }
    ServerMediaSession *sms = ServerMediaSession::createNew(*uEnv, "streamer","streamer","Session streamed by \"testH264VideoStreamer\"");
    sms->addSubsession(PassiveServerMediaSubsession::createNew(*videoSink, rtcp));
    rtspServer->addServerMediaSession(sms);

    char *url = rtspServer->rtspURL(sms);
    LOGI("Play this stream using the URL \"%s\"", url);
    delete[] url;

    // Start the streaming:
    LOGI("Beginning streaming...");
    play();

    uEnv->taskScheduler().doEventLoop(); // does not return
}

void play() {
    DeviceSource1* devSource
            = DeviceSource1::createNew(*uEnv);
    if (devSource == NULL)
    {
        LOGE("Unable to open source");
        exit(1);
    }
    FramedSource* videoES = devSource;
    // Create a framer for the Video Elementary Stream:
    videoSource = H264VideoStreamFramer::createNew(*uEnv, videoES);
    // Finally, start playing:
    LOGV("Beginning to read from file...");
    videoSink->startPlaying(*videoSource, afterPlaying, videoSink);
}

void afterPlaying(void * /*clientData*/) {
    LOGV("...done reading from file");
    videoSink->stopPlaying();
    Medium::close(videoSource);
}

What remains is converting the camera preview data into the input format the H264 MediaCodec expects and pushing the encoded output into the queue that RtspServer.getFrame drains. That code is messy, so it isn't shown here; you can download the sample project to look at it. When testing, remember to grant the camera permission manually, and change RtspServer.loop("aaa", "172.20.2.47") in MainActivity so that the IP is the RTSP client's address (the first parameter is no longer used). The phone and the client must be on the same Wi-Fi network.
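
Since that code isn't shown, here is only a rough sketch of what the preview-to-encoder path can look like. The class name CameraFeeder and the helper nv21ToNv12 are mine, the deprecated Camera API matches the era of this demo, and the real project does the color conversion with libyuv rather than the naive Java loop below:

// CameraFeeder.java (sketch only; the real project converts with libyuv)
import android.hardware.Camera;
import android.media.MediaCodec;
import java.nio.ByteBuffer;

public class CameraFeeder implements Camera.PreviewCallback {
    private final MediaCodec encoder; // an H264 encoder configured for COLOR_FormatYUV420SemiPlanar

    public CameraFeeder(MediaCodec encoder) {
        this.encoder = encoder;
    }

    @Override
    public void onPreviewFrame(byte[] nv21, Camera camera) {
        int index = encoder.dequeueInputBuffer(10000); // wait up to 10 ms
        if (index < 0) return; // encoder busy, drop this preview frame

        ByteBuffer in = encoder.getInputBuffer(index);
        in.clear();
        in.put(nv21ToNv12(nv21)); // color conversion; libyuv does this natively in the demo
        encoder.queueInputBuffer(index, 0, nv21.length, System.nanoTime() / 1000, 0);
        // ...then drain the output buffers into RtspServer's queue (see the drain sketch near the top).
    }

    // Naive NV21 -> NV12 conversion: copy the Y plane, swap the interleaved V/U bytes.
    private static byte[] nv21ToNv12(byte[] nv21) {
        byte[] nv12 = new byte[nv21.length];
        int ySize = nv21.length * 2 / 3;
        System.arraycopy(nv21, 0, nv12, 0, ySize);
        for (int i = ySize; i < nv21.length; i += 2) {
            nv12[i] = nv21[i + 1];   // U
            nv12[i + 1] = nv21[i];   // V
        }
        return nv12;
    }
}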

Sample code download

Reposted from blog.csdn.net/abc_1234d/article/details/80231331