Example of encoding OpenCV images into an H.264 file with Python

Author: ShellCollector    Posted: 2023-01-12 13:22:07

Python part
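
The script below drives a C++ DLL (built from the code in the second half of this post) through ctypes: pre_encode(width, height) sets up an FFmpeg H.264 encoder and returns a handle, and push_rtsp(buffer, length, handle) encodes one BGR frame and appends it to 0_Data.h264. Stripped of the Qt scaffolding, the calling pattern is roughly the following minimal sketch (the zero-filled frame is only a stand-in for the BMP files the full script reads):

import numpy as np
from ctypes import CDLL, c_void_p, c_int, cast

dll = CDLL(r"./hik_client.dll")
dll.pre_encode.restype = c_void_p

width, height = 60, 40
handle = cast(dll.pre_encode(width, height), c_void_p)

n = height * width * 3                 # pixel values in one BGR frame
frame = np.zeros(n, dtype=np.uint8)    # stand-in for cv2.imread("bmp/1.bmp").reshape(-1)
buf = (c_int * n)(*frame.tolist())     # copy the pixels into a C int array
dll.push_rtsp(buf, n, handle)          # encode one frame and append it to 0_Data.h264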


#!/usr/bin/env python
# coding=utf-8
from ctypes import *

from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
import time
import numpy as np
import cv2
import struct

import datetime

from numba import jit
import os
cam_dict={}

class CamInfo:
    def __init__(self, cam_no=0, deviceid="default", cam_name="default"):
        self.cam_no = cam_no
        self.deviceid = deviceid
        self.cam_name = cam_name


@jit
def trans(data, size, height, width):
    # Wrap the raw frame buffer coming from the DLL in a numpy array
    bbb = string_at(data, size)
    nparr = np.frombuffer(bbb, np.uint8)  # np.fromstring is deprecated
    r = nparr.reshape(height, width, 3)
    return r


def str2char_p(str_v):
    pStr = c_char_p()
    pStr.value = str_v
    return pStr

def callb_stream(data, size, cam_no, height, width):
    r = trans(data, size, height, width)
    r = cv2.cvtColor(r, cv2.COLOR_RGB2BGR)
    counter = datetime.datetime.now().strftime('%Y%m%d_%H%M%S_%f')
    # print(1, counter)
    cv2.imshow(str(cam_no), r)
    cv2.waitKey(1)


def callb_camerainfo(cam_no, camera_info, camera_info_size):
    # print(cast(camera_info, c_char_p).value)
    # print(str(cast(camera_info, c_char_p).value))
    bbb = string_at(camera_info, camera_info_size)
    info = str(bbb, encoding="utf-8").split(",")
    cam_dict[cam_no] = CamInfo(cam_no, info[1], info[2])
    print("camerainfo", cam_dict[cam_no].cam_no, cam_dict[cam_no].cam_name, cam_dict[cam_no].deviceid)

class Mythread(QThread):
    # Signal carrying a str and a list
    breakSignal = pyqtSignal(str, list)

    def __init__(self, parent=None):
        super().__init__(parent)
        # super(Mythread, self).__init__()

    def callb_error(self, err_type, cam_no, msg_no, msg_level, msg_txt, msg_txtlen):
        print("myerror", err_type, cam_no, msg_no, msg_level, msg_txt, msg_txtlen)

    def run(self):
        dll = CDLL(r"./hik_client.dll")
        width = 60
        height = 40
        dll.pre_encode.restype = c_void_p
        ret = dll.pre_encode(width, height)
        ret = cast(ret, c_void_p)
        for i in range(20000):
            n = i % 200 + 1
            img = cv2.imread("bmp/" + str(n) + ".bmp")
            length = img.shape[0] * img.shape[1] * img.shape[2]
            # img = np.transpose(img, (1, 0, 2))
            # b, g, r = cv2.split(img)
            # b = b.reshape(-1)
            # g = g.reshape(-1)
            # r = r.reshape(-1)
            # b = np.append(b, g)
            # img = np.append(b, r)
            img = img.reshape(-1)
            INPUT = c_int * length
            # Instantiate a C int array of that length
            buf = INPUT()
            # Copy the flattened pixels into the C array
            for j in range(length):
                buf[j] = img[j]

            # bytes(aaaa, encoding="utf-8")
            a = dll.push_rtsp(buf, length, ret)
            # print("encode_ok", i)
        QCoreApplication.instance().quit()
        # b = string_at(a, 1280*720*3)
        # print(b)
        # nparr = np.fromstring(b, np.uint8)
        # # print(nparr[-10:-1], min(nparr), max(nparr))
        # img_decode = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
        # # if img_decode:
        # cv2.imshow("sadf", img_decode)
        # # cv2.imwrite(str(index)+".jpg", img_decode)
        # cv2.waitKey(0)
        #
        # ErrorCall = CFUNCTYPE(c_void_p, c_int, c_int, c_int, c_int, c_char_p, c_int)
        # error_callback = ErrorCall(self.callb_error)
        # dll.set_callback(error_callback)
        # CamCall = CFUNCTYPE(c_void_p, c_int, c_char_p, c_int)
        # caminfo_CamCall = CamCall(callb_camerainfo)
        # # print(b)
        # if not os.path.exists("video"):
        #     os.makedirs("video")
        # ip = b"127.0.0.1"
        # port = 8888
        # print("start conn")
        # ret = -1
        # while (ret):
        #     print("conn server...")
        #     ret = dll.tcp_init(str2char_p(ip), port)
        #     time.sleep(0.3)
        # if (ret == 0):
        #     type = 1
        #     ret = dll.getcameralist(type, caminfo_CamCall)
        #     if (1):
        #         # deviceId = b"af94a68df0124d1fbf0fc2b07f3b3c3a"
        #         cam_no = 14
        # else:
        #     print("tcp error")
        # for i in range(2000000):
        #     # emit the signal
        #     a = [i, i + 1]
        #     self.breakSignal.emit(str(i), a)
        #     # sleep a little
        #     time.sleep(0.5)

if __name__ == '__main__':

    app = QApplication([])
    dlg = QDialog()
    dlg.resize(400, 300)
    dlg.setWindowTitle("custom button test")
    dlgLayout = QVBoxLayout()
    dlgLayout.setContentsMargins(40, 40, 40, 40)
    btn = QPushButton('test button')
    dlgLayout.addWidget(btn)
    dlgLayout.addStretch(40)
    dlg.setLayout(dlgLayout)
    dlg.show()

    def chuli(a, s):
        # dlg.setWindowTitle(s)
        btn.setText(a + str(s[0] * 10))

    # Create the worker thread
    thread = Mythread()
    # Connect the signal handler
    thread.breakSignal.connect(chuli)
    # Start the thread
    thread.start()
    dlg.exec_()
    app.exit()
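
As an optional sanity check (a minimal sketch, not from the original post), OpenCV's FFmpeg backend can usually open the raw Annex-B stream that the DLL writes, and ffplay 0_Data.h264 works as well:

import cv2

cap = cv2.VideoCapture("0_Data.h264")   # raw H.264 elementary stream written by the DLL
while True:
    ok, frame = cap.read()
    if not ok:
        break
    cv2.imshow("decoded", frame)
    cv2.waitKey(40)
cap.release()
cv2.destroyAllWindows()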

C++ dynamic library part


#include "stdafx.h"
#include "CVdll.h"
#include "SimpleLog.h"
#include <iostream>
#include<fstream>
#include <sys/types.h>
#include "opencv2/opencv.hpp"

#include "Ws2tcpip.h"

#include <winsock2.h>
#include <fcntl.h>
#include <cstring>
#include <cstdio>
#include <signal.h>
#pragma comment(lib,"ws2_32.lib")
#include <queue>
using namespace cv;
extern "C"
{
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
#include "libavdevice/avdevice.h"
#include "libavutil/log.h"

//#include "libavutil/imgutils.h"
};

// Note: three files need to be copied together with this DLL, otherwise reconnecting will fail

char* testchar(int plus1) {

char* str = "hello world111111";
return str;
}
char* testimg(char* data,int length) {

char* str = "hello world111111";
return str;
}

int outbuf_size = 100000;

class Rtmp_tool {
public:
int nWidth = 0;
int nHeight = 0;
AVCodecContext *c;
AVFrame *m_pRGBFrame = new AVFrame[1]; // RGB frame data
AVFrame *m_pYUVFrame = new AVFrame[1]; // YUV frame data
uint8_t *yuv_buff;
uint8_t *outbuf;
SwsContext * scxt;
FILE *f = NULL;

};
void* pre_encode(int width,int height) {

Rtmp_tool *rtmp_tool;
rtmp_tool = new Rtmp_tool();
int nLen;
int fileI;
rtmp_tool->nWidth = width;
rtmp_tool->nHeight = height;

av_register_all();
avcodec_register_all();
//AVFrame *m_pRGBFrame = new AVFrame[1]; // RGB frame data
//AVFrame *m_pYUVFrame = new AVFrame[1]; // YUV frame data
AVCodecContext *c = NULL;
AVCodecContext *in_c = NULL;
AVCodec *pCodecH264; // the encoder

// find the H.264 encoder
pCodecH264 = avcodec_find_encoder(AV_CODEC_ID_H264);

c = avcodec_alloc_context3(pCodecH264);
c->bit_rate = 3000000; // put sample parameters
c->width = width;
c->height = height;

// frames per second  
AVRational rate;
rate.num = 1;
rate.den = 5;
c->time_base = rate;//(AVRational){1,25};
c->gop_size = 10; // emit one intra frame every ten frames  
c->max_b_frames = 1;
c->thread_count = 1;
c->pix_fmt = AV_PIX_FMT_YUV420P;//PIX_FMT_RGB24;

//av_opt_set(c->priv_data, /*"preset"*/"libvpx-1080p.ffpreset", /*"slow"*/NULL, 0);
// open the encoder
if (avcodec_open2(c, pCodecH264, NULL) < 0)
printf("could not open the encoder");

int size = c->width * c->height;

rtmp_tool->yuv_buff = (uint8_t *)malloc((size * 3) / 2); // size for YUV 420  

// buffer for the encoded output

rtmp_tool->outbuf = (uint8_t*)malloc(outbuf_size);
int u_size = 0;

const char * filename = "0_Data.h264";
rtmp_tool->f = fopen(filename, "wb");
if (!rtmp_tool->f)
{
printf("could not open %s\n", filename);
exit(1);
}

// initialize the SwsContext
rtmp_tool->scxt = sws_getContext(c->width, c->height, AV_PIX_FMT_BGR24, c->width, c->height, AV_PIX_FMT_YUV420P, SWS_POINT, NULL, NULL, NULL);

rtmp_tool->c = c;
return rtmp_tool;
}

char* push_rtsp(int* plus1,int len,void* vp) {
Rtmp_tool *rtmp_tool =(Rtmp_tool *) vp;

for (int i = 0; i < len; i++) {
plus1[i] = (uint8_t)plus1[i];
}

AVCodecContext *c = rtmp_tool->c;// (AVCodecContext*)vp;
printf("2 %d %d\n", c->width, c->height);
//---------------
AVPacket avpkt;
AVFrame *m_pRGBFrame = rtmp_tool->m_pRGBFrame;
AVFrame *m_pYUVFrame = rtmp_tool->m_pYUVFrame;
/*unsigned char *pBmpBuf;
pBmpBuf = new unsigned char[len];*/

//memcpy(rgb_buff, (uint8_t*)plus1, nDataLen);
//
avpicture_fill((AVPicture*)m_pRGBFrame, (uint8_t*)plus1, AV_PIX_FMT_RGB24, rtmp_tool->nWidth, rtmp_tool->nHeight);
m_pRGBFrame->linesize[0] = c->width * 3;
m_pRGBFrame->linesize[1] =0;
m_pRGBFrame->linesize[2] =0;
m_pRGBFrame->linesize[3] =0;
m_pRGBFrame->format = AV_PIX_FMT_RGB24;
m_pRGBFrame->width = rtmp_tool->nWidth;
m_pRGBFrame->height = rtmp_tool->nHeight;

uint8_t *p = m_pRGBFrame->data[0];
int y = 0, x = 0;
for (y = 0; y < rtmp_tool->nHeight; y++) {
for (x = 0; x < rtmp_tool->nWidth; x++) {
*p++ = (uint8_t)plus1[(y*rtmp_tool->nWidth + x) * 3];     // channel 0 (B in OpenCV's BGR order)
*p++ = (uint8_t)plus1[(y*rtmp_tool->nWidth + x) * 3 + 1]; // channel 1 (G)
*p++ = (uint8_t)plus1[(y*rtmp_tool->nWidth + x) * 3 + 2]; // channel 2 (R)
}
}
printf("1 %d %d \n", rtmp_tool->nWidth, rtmp_tool->nHeight);
// fill the YUV frame with the YUV buffer
avpicture_fill((AVPicture*)m_pYUVFrame, (uint8_t*)rtmp_tool->yuv_buff, AV_PIX_FMT_YUV420P, rtmp_tool->nWidth, rtmp_tool->nHeight);

// flip the RGB image vertically
//m_pRGBFrame->data[0] += m_pRGBFrame->linesize[0] * (rtmp_tool->nHeight - 1);
//m_pRGBFrame->linesize[0] *= -1;
//m_pRGBFrame->data[1] += m_pRGBFrame->linesize[1] * (rtmp_tool->nHeight / 2 - 1);
//m_pRGBFrame->linesize[1] *= -1;
//m_pRGBFrame->data[2] += m_pRGBFrame->linesize[2] * (rtmp_tool->nHeight / 2 - 1);
//m_pRGBFrame->linesize[2] *= -1;

// convert RGB to YUV
sws_scale(rtmp_tool->scxt, m_pRGBFrame->data, m_pRGBFrame->linesize, 0, c->height, m_pYUVFrame->data, m_pYUVFrame->linesize);

int got_packet_ptr = 0;
av_init_packet(&avpkt);
avpkt.data = rtmp_tool->outbuf;
avpkt.size = outbuf_size;
int u_size = avcodec_encode_video2(c, &avpkt, m_pYUVFrame, &got_packet_ptr);
m_pYUVFrame->pts++;
if (u_size == 0){
int res = fwrite(avpkt.data, 1, avpkt.size, rtmp_tool->f);
if (res == 0) {
printf("000");
}
else {
printf("1253");
}

}
//-------end---------

//Mat mat;
//// load the image
//mat = imread("bgs.jpg", CV_LOAD_IMAGE_COLOR);
//printf("a %d %d", mat.rows, mat.cols);
////if (!mat.empty()) {

//int m, n;
//n = mat.cols * 3;
//m = mat.rows;
//unsigned char *data = (unsigned char*)malloc(sizeof(unsigned char) * m * n);
//int p = 0;
//for (int i = 0; i < m; i++)
//{
//for (int j = 0; j < n; j++)
//{
//data[p] = mat.at<unsigned char>(i, j);
//p++;
//}
//}
//*plus1 = p;
return NULL;
//return (char*)data;
}

struct RecStruct // incoming data packet
{
int size;
int data_type;
int cam_no;
int error_code;
char recvbuf[1500];
};
struct SendStcuct
{
int size;
int data_type;
int cam_no;
char sendbuf[1000];
}data_send;

static ErrorCallBack g_errorcall = 0;
static CamInfoCallBack g_caminfocall = 0;

typedef struct CameraInfo
{
std::ofstream foutV;
int timeInHour = -1;
}caminfo;
std::map<int, CameraInfo*> cameraMap;
//static std::map<int, queue<RecStruct*>> namemap;

static SOCKET g_sockClient;
HANDLE hMutex;
//char* deviceId;
/** Check whether str1 starts with str2.
 * Returns 1 if it does,
 * 0 if it does not,
 * -1 on error.
 * */
int is_begin_with(const char * str1, char *str2)
{
if (str1 == NULL || str2 == NULL)
return -1;
int len1 = strlen(str1);
int len2 = strlen(str2);
if ((len1 < len2) || (len1 == 0 || len2 == 0))
return -1;
char *p = str2;
int i = 0;
while (*p != '\0')
{
if (*p != str1[i])
return 0;
p++;
i++;
}
return 1;
}

char* Strcpy(char * a, const char * b)
{
int i = 0;
while (*b) a[i++] = *b++;
a[i] = 0;
return a;
}

int sendcmd(char* data, int cam_no, int type, int size) {

memset(data_send.sendbuf, 0, 1000);
//data2send.sendbuf = new char[strlen(data)];
//memset(data2send.sendbuf, 0, strlen(data));
data_send.size = size;
data_send.data_type = type;
data_send.cam_no = cam_no;
memcpy(data_send.sendbuf, data, sizeof(char) * (size));
printf("data_send len %d\n", sizeof(data_send));
if (g_sockClient)
send(g_sockClient, (char *)&data_send, sizeof(struct SendStcuct), 0);
return 0;
}
int set_callback(ErrorCallBack terrorcall(int error_type, int cam_no, int err_no, int msg_level, char* msg_txt, int spare)) {
g_errorcall = (ErrorCallBack)terrorcall;
return 0;
}
MYLIBDLL int getcameralist(int type, CamInfoCallBack caminfocall(int cam_no, char* cam_info, int cam_info_size)) {
g_caminfocall = (CamInfoCallBack)caminfocall;
SendStcuct data_send;
memset(&data_send, 0, sizeof(struct SendStcuct));
data_send.size = 20;
data_send.data_type = 1;
data_send.cam_no = 0;
char* data = "getcameralist";
memcpy(data_send.sendbuf, data, sizeof(char) * (20));
printf("data_send len %d\n", sizeof(data_send));
if (g_sockClient)
send(g_sockClient, (char *)&data_send, sizeof(struct SendStcuct), 0);
return 0;
}
DWORD WINAPI RecvThread(LPVOID lpParameter);
DWORD WINAPI RecvThread(LPVOID lpParameter){
SOCKET sockClient = (SOCKET)lpParameter;
while (1) {
RecStruct data_recv;
int ret;
memset(&data_recv, 0, sizeof(struct RecStruct));
ret = recv(sockClient, (char *)&data_recv, sizeof(struct RecStruct), 0); // read one whole packet straight into the struct
if (ret == 0) // the server closed the connection
{
printf("server close");
break;
}
else if (ret == SOCKET_ERROR) // network error
{
int err = WSAGetLastError();
printf("get message %d %d %d \n", ret, SOCKET_ERROR, err);
if (err == WSAECONNRESET || err == WSAECONNABORTED) {
printf("tcp error %d %d \n", err, SOCKET_ERROR);
//int n = namemap.erase(deviceId); // erase returns 1 if an entry was removed, 0 otherwise
}
break;

}
//printf("reve type %d %d", data_recv.data_type, data_recv.size);
switch (data_recv.data_type)
{
case 1: // camera list
{
g_caminfocall(data_recv.cam_no, data_recv.recvbuf, data_recv.size);
}
break;
case 3: // error message
{
if (g_errorcall!=0)
g_errorcall(1, 1, data_recv.error_code,4,NULL,0);
break;
}
case 2:
{
char* recemsg = data_recv.recvbuf;
int is_null = is_begin_with(recemsg, "00000");
if (is_null == 1) {
printf("recv type 2 00000");
continue;
}

break;
}
default:
break;
}

if (ret < 0) {
printf("WSAStartup() failed!\n");
return -1;
}
Sleep(20);

}

return 0;
}

int tcpInit(char* ip, int port)
{
av_log_set_level(AV_LOG_PANIC);
WSADATA wsaData;

if (WSAStartup(MAKEWORD(2, 2), &wsaData) != 0)
{
printf("初始化Winsock失败");
return -1;
}

SOCKADDR_IN addrSrv;
addrSrv.sin_family = AF_INET;
addrSrv.sin_port = htons(port);
SOCKET sockClient = socket(AF_INET, SOCK_STREAM, 0);

int nRecvBuf = 0; // receive buffer size
setsockopt(sockClient, SOL_SOCKET, SO_RCVBUF, (const char*)&nRecvBuf, sizeof(int));
//setsockopt(sockClient, SOL_SOCKET, SO_RCVBUF, (char *)&nZero, sizeof(nZero));
inet_pton(AF_INET, ip, &addrSrv.sin_addr.s_addr);
if (connect(sockClient, (struct sockaddr*)&addrSrv, sizeof(addrSrv)) == -1)
return -2;
//throw "connection failed";
if (SOCKET_ERROR == sockClient) {
printf("Socket() error:%d", WSAGetLastError());
return -3;
}
g_sockClient = sockClient;
HANDLE h_thread = CreateThread(NULL, 0, RecvThread, (LPVOID)sockClient, 0, NULL);
CloseHandle(h_thread);
return 0;
}

class DeviceInfo {

public:
string cam_name;
int cam_no;
SOCKET sockClient;
};

//Callback
int read_buffer(void *opaque, uint8_t *buf, int buf_size) {

DeviceInfo deviceInfo = *((DeviceInfo *)opaque);

int null_count=0;
int display_count = 0;
while (1) {
RecStruct data_recv;
int ret;
memset(&data_recv, 0, sizeof(struct RecStruct));

ret = recv(deviceInfo.sockClient, (char *)&data_recv, sizeof(struct RecStruct), 0); // read one whole packet straight into the struct
if (ret == 0) // the server closed the connection
{
printf("server close");
break;
}
else if (ret == SOCKET_ERROR) // network error
{
printf("get message %d %d \n", ret, SOCKET_ERROR);

int err = WSAGetLastError();
if (g_errorcall != 0)
g_errorcall(1, deviceInfo.cam_no, err,4,"socket err",0);
//if (err == WSAECONNRESET || err == WSAECONNABORTED) {
// printf("server break %s", deviceId);
// //int n = namemap.erase(deviceId); // erase returns 1 if an entry was removed, 0 otherwise
//}
break;

}
if (data_recv.size == 0) {
null_count++;
if (null_count %1000==0) {
if (g_errorcall != 0)
g_errorcall(1, deviceInfo.cam_no, 0, 2, "data_recv 0", 11);
printf("reve len=0 type %d\n", data_recv.data_type);
null_count = 0;
}
Sleep(2);
continue;
}
else if (data_recv.size >1500) {
if (g_errorcall != 0)
g_errorcall(1, deviceInfo.cam_no, 0, 2, "data_recv too long", data_recv.size);
printf("reve data too long %d\n", data_recv.size);
continue;
}
if (data_recv.data_type == 3)
{
if (g_errorcall) {
char err_str[10];
_itoa(data_recv.error_code, err_str, 10); // convert the error code to a string
g_errorcall(1, deviceInfo.cam_no, data_recv.error_code, 4, err_str, 0);
}
}
else if (data_recv.data_type == 2)
{
null_count=0;
display_count++;
char* recemsg = data_recv.recvbuf;
int is_null = is_begin_with(recemsg, "00000");
if (is_null == 1) {
printf("recv 00000");
continue;
}
//printf("cam_no %d", data_recv.cam_no);
//int cam_no = data_recv.cam_no;
buf_size = data_recv.size;
memcpy(buf, data_recv.recvbuf, buf_size);

if (g_errorcall && buf_size>1000 && display_count%20==0) {
g_errorcall(2, deviceInfo.cam_no, 1, 0, "rece data", 1);//err_type, cam_no, column, msg_level, msg_txt, spare
display_count = 0;
}
// save the stream data, rolling the file once per hour
time_t tt = time(NULL); // time() only returns a timestamp
tm* t = localtime(&tt);
auto iter = cameraMap.find(deviceInfo.cam_no);
if (iter != cameraMap.end()){
iter->second->foutV.write(data_recv.recvbuf, data_recv.size);
if (t->tm_min == 0 && (iter->second->timeInHour != t->tm_hour)) {
// roll the output file over once per hour
iter->second->timeInHour = t->tm_hour;
iter->second->foutV.close();
time_t tt = time(NULL); // time() only returns a timestamp
tm* t = localtime(&tt);
char ctmBegin[20];
strftime(ctmBegin, 20, "/%Y%m%d%H%M", t);
char str3[80];
sprintf(str3, "create data:%s%s%s", deviceInfo.cam_name, ctmBegin, ".dat");
SLOG1(str3);
printf("%s", deviceInfo.cam_name + std::string(ctmBegin) + ".dat");
iter->second->foutV.open(deviceInfo.cam_name + std::string(ctmBegin) + ".dat", ios::binary);
}
}
return buf_size;
}
if (ret < 0) {
printf("WSAStartup() failed!\n");
continue;
//return 0;
}
}
return 0;
}

int send_cmd(int cam_no,int size,int datatype,char* cam_name, SOCKET& sockClient) {
SendStcuct data_send;
memset(&data_send, 0, sizeof(struct SendStcuct));
data_send.size = size;
data_send.data_type = datatype;
data_send.cam_no = cam_no;
memcpy(data_send.sendbuf, cam_name, sizeof(char) * (size));
printf("data_send len %d\n", sizeof(data_send));

send(sockClient, (char *)&data_send, sizeof(struct SendStcuct), 0);
return 0;
}

int tcp_recv_conn(char* ip, int port, char* cam_name, int size, int cam_no, FrameFunc tcallback(char* a, int size, int cam_no, int height, int width))
{
WSADATA wsaData;

if (WSAStartup(MAKEWORD(2, 2), &wsaData) != 0)
{
printf("初始化Winsock失败");
return -1;
}

SOCKADDR_IN addrSrv;
addrSrv.sin_family = AF_INET;
addrSrv.sin_port = htons(port);
SOCKET sockClient = socket(AF_INET, SOCK_STREAM, 0);

int nRecvBuf = 0; // receive buffer size
setsockopt(sockClient, SOL_SOCKET, SO_RCVBUF, (const char*)&nRecvBuf, sizeof(int));
setsockopt(sockClient, SOL_SOCKET, SO_SNDBUF, (char *)&nRecvBuf, sizeof(int));
inet_pton(AF_INET, ip, &addrSrv.sin_addr.s_addr);
if (connect(sockClient, (struct sockaddr*)&addrSrv, sizeof(addrSrv)) == -1)
return -2;
//throw "connection failed";
if (SOCKET_ERROR == sockClient) {
printf("Socket() error:%d", WSAGetLastError());
return -3;
}

DeviceInfo deviceInfo;
deviceInfo.cam_no = cam_no;
deviceInfo.sockClient = sockClient;
av_register_all();
unsigned version = avcodec_version();
//printf("FFmpeg version: %d\n", version);

// initialize the stream-file state
time_t tt = time(NULL); // time() only returns a timestamp
tm* t = localtime(&tt);
char ctmBegin[20];
strftime(ctmBegin, 20, "/%Y%m%d%H%M", t);
caminfo cinfoInstance;
deviceInfo.cam_name = cam_name;
//std::string dataName = cam_name;
cinfoInstance.foutV.open(cam_name + std::string(ctmBegin) + ".dat", ios::binary);
// used to check for the one-hour rollover
cinfoInstance.timeInHour = t->tm_hour;
cameraMap[cam_no] = &cinfoInstance;
char str3[20];
sprintf(str3, "camno: %d start", cam_no);
SLOG1(str3);
AVFormatContext *pFormatCtx;
int   i, videoindex;
AVCodecContext *pCodecCtx;
AVCodec  *pCodec;
char filepath[] = "video.264";
//av_register_all();
avformat_network_init();
pFormatCtx = avformat_alloc_context();
//patha = "C:\\Users\\sbd01\\Pictures\\ffmpegtest\\Debug\\video.dat";

//fp_open = fopen(patha.c_str(), "rb+");
unsigned char *aviobuffer = (unsigned char *)av_malloc(1512);

send_cmd(cam_no, size,2,cam_name, sockClient);
AVIOContext *avio = avio_alloc_context(aviobuffer, 1512, 0, &deviceInfo, read_buffer, NULL, NULL);

pFormatCtx->pb = avio;
//if (avformat_open_input(&pFormatCtx, patha.c_str(), NULL, NULL) != 0) {
if (avformat_open_input(&pFormatCtx, NULL, NULL, NULL) != 0) {
printf("Couldn't open input stream %d\n", cam_no);
return -1;
}
printf("camno %d find stream\n", cam_no);
pFormatCtx->probesize = 1000 * 1024;
pFormatCtx->max_analyze_duration = 10 * AV_TIME_BASE;

pCodec = NULL;
while (pCodec == NULL) {
printf("%d start find stream info \n", cam_no);
if (avformat_find_stream_info(pFormatCtx, NULL) < 0) {
printf("Couldn't find stream info %d\n", cam_no);
goto restart_stream;
continue;
}
videoindex = -1;
for (i = 0; i < pFormatCtx->nb_streams; i++)
if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
if (videoindex == -1) {
videoindex = i;
}
//break;
}
if (videoindex == -1) {
printf("%d Didn't find a video stream.\n", cam_no);
goto restart_stream;
}
pCodecCtx = pFormatCtx->streams[videoindex]->codec;
//pCodec = avcodec_find_decoder(AV_CODEC_ID_H264);

pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
if (pCodec == NULL) {
printf("%d Codec not found \n", cam_no);
goto restart_stream;
//return -1;
}
if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
printf("%d Could not open codec.\n", cam_no);
goto restart_stream;
continue;
//return -1;
}
if (pCodecCtx->width <= 0 || pCodecCtx->height <= 0 || pCodecCtx->height >2000 || pCodecCtx->width >3000) {
printf("cam %d pCodecCtx error 1 width %d height %d ", cam_no, pCodecCtx->width, pCodecCtx->height);
goto restart_stream;
}
goto ok;
restart_stream:
printf("%d restart 1 ", cam_no);
avformat_free_context(pFormatCtx);
printf("restart 2 ");
//avformat_close_input(&pFormatCtx);
pFormatCtx = NULL;
pFormatCtx = avformat_alloc_context();
printf("restart 3 ");
//av_freep(aviobuffer);
//printf("restart 4");
aviobuffer = (unsigned char *)av_malloc(1512);
printf("restart 4 ");
AVIOContext *avio2 = avio_alloc_context(aviobuffer, 1512, 0, &deviceInfo, read_buffer, NULL, NULL);
pFormatCtx->pb = avio2;
pFormatCtx->probesize = 1000 * 1024;
pFormatCtx->max_analyze_duration = 10 * AV_TIME_BASE;
if (avformat_open_input(&pFormatCtx, NULL, NULL, NULL) != 0) {
printf("2Couldn't open input stream %d\n", cam_no);
//return -1;
}
printf("restart 5\n");
pCodec = NULL;
continue;
ok:
break;
}

printf("camno:%d code name :%s width %d height %d\n",cam_no, pCodec->name, pCodecCtx->width, pCodecCtx->height);
AVFrame *pFrame, *pFrameYUV;
pFrame = av_frame_alloc();
pFrameYUV = av_frame_alloc();
int ret, got_picture;

if (g_errorcall) {
char* cc;
int length = strlen(pCodec->name);
cc = new char[length + 1];
strcpy(cc, pCodec->name);
g_errorcall(0, cam_no, pCodecCtx->width, pCodecCtx->height, cc, 11);
}
AVPacket *packet = (AVPacket *)av_malloc(sizeof(AVPacket));
struct SwsContext *img_convert_ctx;
img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_RGB24, SWS_BICUBIC, NULL, NULL, NULL);
uint8_t *out_buffer;
printf("cam %d ready decode 2", cam_no);
out_buffer = new uint8_t[avpicture_get_size(AV_PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height)];
avpicture_fill((AVPicture *)pFrameYUV, out_buffer, AV_PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height);

//av_image_fill_arrays(pFrameYUV->data, pFrameYUV->linesize, out_buffer, AV_PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height, 1);
printf("cam %d ready decode 3", cam_no);
int dec_error_count = 0;
int tmp_test = 0;
while (av_read_frame(pFormatCtx, packet) >= 0) {
if (packet->stream_index == videoindex) {
//tmp_test++;
if (packet->size < 50) {
av_free_packet(packet);
//printf("cam:%d packet is too small %d\n", cam_no, packet->size);
Sleep(3);
continue;
}
if (g_errorcall != 0)
g_errorcall(2, deviceInfo.cam_no, 1, 2, "start decode",3);
char str_decode[40];
sprintf(str_decode, "cam %d start decode", cam_no);
SLOG1(str_decode);
ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
if (ret < 0) {
dec_error_count++;
char str3[80];
sprintf(str3, "%d%s decode_error:%d error_count %d", cam_no, " Decode Error", ret, dec_error_count);
SLOG1(str3);
if (g_errorcall != 0)
g_errorcall(1, deviceInfo.cam_no, 0, 2, str3, 80);
printf("cam:%d Decode Error got_picture %d decode_error_num %d\n", cam_no, got_picture, dec_error_count);
if (dec_error_count > 2) {
dec_error_count = 0;
// restart ffmpeg
av_free_packet(packet);

Sleep(50);
sws_freeContext(img_convert_ctx);
img_convert_ctx = NULL;
printf("cam %d sws_freeContext 1\n", cam_no);
//av_free(out_buffer);
//av_free(pFrameYUV);
avcodec_close(pCodecCtx);
//pCodecCtx = NULL;
if (avcodec_open2(pCodecCtx, pCodec, NULL)<0) {
printf("Could not open codec.\n");
return -1;
}
/*pFrame = av_frame_alloc();
pFrameYUV = av_frame_alloc();*/

//packet = (AVPacket *)av_malloc(sizeof(AVPacket));
printf("cam_no %d avcodec_open2 ok width:%d height:%d\n", cam_no, pCodecCtx->width, pCodecCtx->height);
img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_RGB24, SWS_BICUBIC, NULL, NULL, NULL);

char str3[40];
sprintf(str3, "ffmpeg restart cam %d ", cam_no);
SLOG1(str3);
continue;
}
}
if (got_picture) {
if (g_errorcall != 0)
g_errorcall(2, deviceInfo.cam_no, 1, 2, "got_picture",4);
char str3[40];
sprintf(str3, "cam %d got_picture", cam_no);
SLOG1(str3);
sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize);

/*fwrite(pFrameYUV->data[0], (pCodecCtx->width)*(pCodecCtx->height) * 3, 1, output);*/

tcallback((char*)pFrameYUV->data[0], pCodecCtx->height * pCodecCtx->width * 3, cam_no, pCodecCtx->height, pCodecCtx->width);

}
}
av_free_packet(packet);
Sleep(10);
}
sws_freeContext(img_convert_ctx);

//av_free(out_buffer);
av_free(pFrameYUV);
avcodec_close(pCodecCtx);
avformat_close_input(&pFormatCtx);
return 0;
}

int tcp_init(char* ip, int port) {
int res = tcpInit(ip, port);
//printf("conn server\t%d\n", res);
return res;
}

int ffmpeg_recv(int cam_no, FrameFunc tcallback(char* a, int size, int cam_no, int height, int width))
{
av_register_all();
unsigned version = avcodec_version();

printf("FFmpeg version: %d\n", version);

AVFormatContext *pFormatCtx;
int   i, videoindex;
AVCodecContext *pCodecCtx;
AVCodec  *pCodec;
char filepath[] = "video.264";
avformat_network_init();
pFormatCtx = avformat_alloc_context();
//string patha = "C:\\Users\\sbd01\\Videos\\video.264";

//fp_open = fopen(patha.c_str(), "rb+");
unsigned char *aviobuffer = (unsigned char *)av_malloc(1512);
AVIOContext *avio = avio_alloc_context(aviobuffer, 1512, 0, &cam_no, read_buffer, NULL, NULL);

pFormatCtx->pb = avio;
if (avformat_open_input(&pFormatCtx, NULL, NULL, NULL) != 0) {
printf("Couldn't open input stream.\n");
return -1;
}
if (avformat_find_stream_info(pFormatCtx, NULL)<0) {
printf("Couldn't find stream information.\n");
return -1;
}
videoindex = -1;
for (i = 0; i<pFormatCtx->nb_streams; i++)
if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
videoindex = i;
break;
}
if (videoindex == -1) {
printf("Didn't find a video stream.\n");
return -1;
}
pCodecCtx = pFormatCtx->streams[videoindex]->codec;
pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
if (pCodec == NULL) {
printf("Codec not found.\n");
return -1;
}
if (avcodec_open2(pCodecCtx, pCodec, NULL)<0) {
printf("Could not open codec.\n");
return -1;
}
AVFrame *pFrame, *pFrameYUV;
pFrame = av_frame_alloc();
pFrameYUV = av_frame_alloc();

int ret, got_picture;

AVPacket *packet = (AVPacket *)av_malloc(sizeof(AVPacket));

struct SwsContext *img_convert_ctx;
img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_RGB24, SWS_BICUBIC, NULL, NULL, NULL);

uint8_t *out_buffer;

out_buffer = new uint8_t[avpicture_get_size(AV_PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height)];
avpicture_fill((AVPicture *)pFrameYUV, out_buffer, AV_PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height);

while (av_read_frame(pFormatCtx, packet) >= 0) {
if (packet->stream_index == videoindex) {
ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
if (ret < 0) {
printf("Decode Error.\n");
return -1;
}
if (got_picture) {
sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize);

/*fwrite(pFrameYUV->data[0], (pCodecCtx->width)*(pCodecCtx->height) * 3, 1, output);*/
tcallback((char*)pFrameYUV->data[0], pCodecCtx->height * pCodecCtx->width * 3, cam_no, pCodecCtx->height, pCodecCtx->width);
}
}
av_free_packet(packet);
}
sws_freeContext(img_convert_ctx);

//fclose(fp_open);

//SDL_Quit();

//av_free(out_buffer);
av_free(pFrameYUV);
avcodec_close(pCodecCtx);
avformat_close_input(&pFormatCtx);
return 0;
}

//Callback
int file_buffer(void *opaque, uint8_t *buf, int buf_size) {

FILE *fp_open = (FILE *)opaque;
if (!feof(fp_open)) {
int true_size = fread(buf, 1, buf_size, fp_open);
return true_size;
}
else {
return -1;
}

}
int play_file(char* file_name, FrameFunc tcallback(char* a, int size,int num, int height, int width))
{
av_register_all();
unsigned version = avcodec_version();

printf("FFmpeg version: %d\n", version);

AVFormatContext *pFormatCtx;
int   i, videoindex;
AVCodecContext *pCodecCtx;
AVCodec  *pCodec;
char filepath[] = "video.264";
//av_register_all();
avformat_network_init();
pFormatCtx = avformat_alloc_context();
string patha = "C:\\Users\\sbd01\\Videos\\video.264";
//patha = "C:\\Users\\sbd01\\Pictures\\ffmpegtest\\Debug\\video.dat";
FILE *fp_open = fopen(file_name, "rb+");
unsigned char *aviobuffer = (unsigned char *)av_malloc(32768);
//printf("avio_alloc_context %d\n", cam_no);
AVIOContext *avio = avio_alloc_context(aviobuffer, 32768, 0, (void*)fp_open, file_buffer, NULL, NULL);

pFormatCtx->pb = avio;
//if (avformat_open_input(&pFormatCtx, patha.c_str(), NULL, NULL) != 0) {
if (avformat_open_input(&pFormatCtx, NULL, NULL, NULL) != 0) {
printf("Couldn't open input stream.\n");
return -1;
}
if (avformat_find_stream_info(pFormatCtx, NULL)<0) {
printf("Couldn't find stream information.\n");
return -1;
}
videoindex = -1;
for (i = 0; i<pFormatCtx->nb_streams; i++)
if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
videoindex = i;
break;
}
if (videoindex == -1) {
printf("Didn't find a video stream.\n");
return -1;
}
pCodecCtx = pFormatCtx->streams[videoindex]->codec;
pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
if (pCodec == NULL) {
printf("Codec not found.\n");
return -1;
}
if (avcodec_open2(pCodecCtx, pCodec, NULL)<0) {
printf("Could not open codec.\n");
return -1;
}
AVFrame *pFrame, *pFrameYUV;
pFrame = av_frame_alloc();
pFrameYUV = av_frame_alloc();

/*if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
printf("Could not initialize SDL - %s\n", SDL_GetError());
return -1;
}*/

/*int screen_w = 0, screen_h = 0;
SDL_Surface *screen;
screen_w = pCodecCtx->width;
screen_h = pCodecCtx->height;
screen = SDL_SetVideoMode(screen_w, screen_h, 0, 0);

if (!screen) {
printf("SDL: could not set video mode - exiting:%s\n", SDL_GetError());
return -1;
}
SDL_Overlay *bmp;
bmp = SDL_CreateYUVOverlay(pCodecCtx->width, pCodecCtx->height, SDL_YV12_OVERLAY, screen);
SDL_Rect rect;
rect.x = 0;
rect.y = 0;
rect.w = screen_w;
rect.h = screen_h;*/
//SDL End------------------------
int ret, got_picture;

AVPacket *packet = (AVPacket *)av_malloc(sizeof(AVPacket));

struct SwsContext *img_convert_ctx;
img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_RGB24, SWS_BICUBIC, NULL, NULL, NULL);

uint8_t *out_buffer;

out_buffer = new uint8_t[avpicture_get_size(AV_PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height)];
avpicture_fill((AVPicture *)pFrameYUV, out_buffer, AV_PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height);

while (av_read_frame(pFormatCtx, packet) >= 0) {
if (packet->stream_index == videoindex) {
ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
if (ret < 0) {
printf("Decode Error.\n");
return -1;
}
if (got_picture) {
sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize);

/*fwrite(pFrameYUV->data[0], (pCodecCtx->width)*(pCodecCtx->height) * 3, 1, output);*/

tcallback((char*)pFrameYUV->data[0], pCodecCtx->height * pCodecCtx->width * 3, 1, pCodecCtx->height, pCodecCtx->width);

}
}
av_free_packet(packet);
}
sws_freeContext(img_convert_ctx);
//fclose(fp_open);
//SDL_Quit();

//av_free(out_buffer);
av_free(pFrameYUV);
avcodec_close(pCodecCtx);
avformat_close_input(&pFormatCtx);
return 0;
}

Source: https://blog.csdn.net/jacke121/article/details/80330987

Tags: python, opencv, image encoding, h264