VC Programming Tips (IV): Drawing Arbitrary Shapes with the Mouse
I. Creating the Project
Create a new single-document project in Visual C++ 6.0. Apart from choosing the single-document style on the document-type page, accept the default settings in every other wizard step. Name the project CreateLine.
II. How the Mouse Drawing Works
This example draws line segments, rectangles, circles, and freehand curves with the mouse.
1. Line segments: choose the line command from the menu, then left-click repeatedly in the client area to draw connected segments; double-click the left button to finish. A member variable stores the number of left-button clicks: while it is 0, MoveTo sets the start point; once it is greater than 0, LineTo draws segment after segment. The double-click handler sets the click counter to -1 to end drawing, and the mouse-move handler displays the rubber-band line while a shape is being drawn.
2. Circles: choose the circle command, left-click to fix the center, release the button and move the mouse to set the radius, then click again to draw the circle.
3. Rectangles: similar to circles; left-click to fix the top-left corner, release the button and move the mouse to set the size, then click again to draw the rectangle.
4. Freehand curves: choose the curve command, left-click to fix the start point, then move the mouse to draw freely; clicking the left button again ends the current curve.
III. Implementation
1. Add the member variables:
In the project's ClassView, right-click the CCreateLineView class and add the following member variables: a CString m_drawtype that records the current shape type; CPoint members m_Startp and m_Endp that record a segment's start and end points (for a circle, the center and a point on the circumference); and an int m_step that counts left-button clicks. A sketch of the resulting declarations is shown below.
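The tutorial does not show these declarations explicitly, so the following is a minimal sketch; the constructor initialization in particular is an assumption (m_step must start at 0 and m_drawtype empty for the handlers in the sections below to behave as described):

// CreateLineView.h : sketch of the members described above
protected:
    CString m_drawtype;  // current shape type: "line", "circle", "rect" or "curve"
    CPoint  m_Startp;    // start point (for a circle: the center)
    CPoint  m_Endp;      // end point (for a circle: a point on the circumference)
    int     m_step;      // number of left-button clicks for the current shape

// CreateLineView.cpp : assumed initialization in the constructor
CCreateLineView::CCreateLineView()
{
    // CString constructs empty, so m_drawtype needs no explicit assignment;
    // m_step must be 0 so the first click is taken as a start point
    m_step = 0;
}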
2. Add the drawing menu commands:
To handle all of the drawing menu messages in one place, the ON_COMMAND_RANGE and ON_UPDATE_COMMAND_UI_RANGE macros are used to map the group of drawing commands. The MFC documentation describes ON_COMMAND_RANGE as follows:
ON_COMMAND_RANGE( id1, id2, memberFxn )
Parameters:
id1: the command ID at the beginning of a contiguous range of command IDs.
id2: the command ID at the end of a contiguous range of command IDs.
memberFxn: the name of the message-handler function to which the commands are mapped.
Remarks:
Unlike ON_COMMAND, which maps a single command ID to a member function, this macro maps a contiguous range of command IDs to one handler. The range starts at id1 and ends at id2, so the IDs in the group must be consecutive. As long as the menu commands are defined one after another, their IDs normally are consecutive; you can check them through the View -> Resource Symbols menu or in the String Table resource. If the command IDs are not consecutive, you can edit them directly in Resource.h.
Because ClassWizard does not support message-map ranges, you must write this macro yourself. Be sure to place it outside the //{{AFX_MSG_MAP delimiters of the message map.
Based on this, add a "Mouse Draw" menu in the ResourceView menu resource and give it the second-level items "Draw Line", "Draw Circle", "Draw Rectangle", and "Draw Curve". Edit each item's command ID; the curve command's ID, for example, is ID_CURVE.
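For illustration, Resource.h would then contain consecutive definitions similar to the following. The numeric values here are hypothetical (the resource editor assigns the real ones); only the fact that ID_DRAWLINE through ID_CURVE are consecutive matters for the range macros used below.

// Resource.h : illustrative values only; the actual numbers will differ
#define ID_DRAWLINE    32771
#define ID_DRAWCIRCLE  32772
#define ID_DRAWRECT    32773
#define ID_CURVE       32774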
Manually add the following code to the CreateLineView.h and CreateLineView.cpp files:
// CreateLineView.h : interface of the CCreateLineView class
class CCreateLineView : public CView
{
    // ...
// Generated message map functions
protected:
    //{{AFX_MSG(CCreateLineView)
    afx_msg void OnLButtonDown(UINT nFlags, CPoint point);
    afx_msg void OnLButtonDblClk(UINT nFlags, CPoint point);
    afx_msg void OnMouseMove(UINT nFlags, CPoint point);
    //}}AFX_MSG
    // The three handlers above are added with ClassWizard (described in detail below).
    // The next two declarations are added by hand; they are the unified handlers
    // for the drawing menu:
    afx_msg void OnDrawByMouse(UINT m_nID);
    afx_msg void OnUpdateDrawByMouse(CCmdUI *pCmdUI);
    DECLARE_MESSAGE_MAP()
};
// CreateLineView.cpp : implementation of the CCreateLineView class
// the message map
BEGIN_MESSAGE_MAP(CCreateLineView, CView)
    //{{AFX_MSG_MAP(CCreateLineView)
    ON_WM_LBUTTONDOWN()
    ON_WM_LBUTTONDBLCLK()
    ON_WM_MOUSEMOVE()
    //}}AFX_MSG_MAP
    // Standard printing commands
    ON_COMMAND(ID_FILE_PRINT, CView::OnFilePrint)
    // The next two lines are added by hand for unified handling of the drawing
    // menu; they name the first and last commands of the menu range:
    ON_COMMAND_RANGE(ID_DRAWLINE, ID_CURVE, OnDrawByMouse)
    ON_UPDATE_COMMAND_UI_RANGE(ID_DRAWLINE, ID_CURVE, OnUpdateDrawByMouse)
END_MESSAGE_MAP()
// The corresponding handlers, added by hand
void CCreateLineView::OnDrawByMouse(UINT m_nID)
{
    switch (m_nID)
    {
    case ID_DRAWLINE:
        m_drawtype = "line";
        break;
    case ID_DRAWCIRCLE:
        m_drawtype = "circle";
        break;
    case ID_DRAWRECT:
        m_drawtype = "rect";
        break;
    case ID_CURVE:
        m_drawtype = "curve";
        break;
    }
    m_step = 0;  // restart the click counter so a new shape can begin
                 // (in particular after a double-click has set it to -1)
}
void CCreateLineView::OnUpdateDrawByMouse(CCmdUI *pCmdUI)
{
    int nFlag = 0;
    switch (pCmdUI->m_nID)
    {
    case ID_DRAWLINE:
        if (m_drawtype == "line")
            nFlag = 1;
        break;
    case ID_DRAWCIRCLE:
        if (m_drawtype == "circle")
            nFlag = 1;
        break;
    case ID_DRAWRECT:
        if (m_drawtype == "rect")
            nFlag = 1;
        break;
    case ID_CURVE:
        if (m_drawtype == "curve")
            nFlag = 1;
        break;
    }
    pCmdUI->SetCheck(nFlag);  // check-mark the currently selected drawing command
}
3. Add the left-button-down message handler:
In ClassWizard, add a handler for the WM_LBUTTONDOWN message to the CCreateLineView class:

void CCreateLineView::OnLButtonDown(UINT nFlags, CPoint point)
{
    CDC* pDC = GetDC();                  // get the device context
    pDC->SelectStockObject(NULL_BRUSH);  // select the stock hollow brush into the DC
    if (m_drawtype == "line")
    {
        if (m_step == 0)        // the first left click fixes the start point
        {
            m_Startp = m_Endp = point;
            m_step++;
        }
        else if (m_step > 0)    // each later click fixes a segment end point,
        {                       // so connected polylines can be drawn
            m_Endp = point;
            DrawLine(pDC, m_Startp, m_Endp);  // member function defined below
            m_Startp = m_Endp;
        }
    }
    if (m_drawtype == "circle")
    {
        if (m_step == 0)
        {
            m_Startp = m_Endp = point;
            m_step++;
        }
        else if (m_step == 1)   // without the else, both branches would run,
        {                       // because the first if changes m_step
            m_Endp = point;
            m_step = 0;  // 0 lets you draw circle after circle; -1 would allow only
                         // one per command. Double-clicking stops drawing either way.
            DrawCircle(pDC, m_Startp, m_Endp);  // member function defined below
        }
    }
    if (m_drawtype == "rect")
    {
        if (m_step == 0)
        {
            m_Startp = m_Endp = point;
            m_step++;
        }
        else if (m_step == 1)
        {
            m_Endp = point;
            m_step = 0;
            DrawRect(pDC, m_Startp, m_Endp);  // member function defined below
        }
    }
    // the curve itself is drawn in the mouse-move handler
    if (m_drawtype == "curve")
    {
        if (m_step == 0)        // the first left click fixes the curve start point
        {
            m_Startp = m_Endp = point;
            m_step++;
        }
        else if (m_step == 1)   // a further left click ends the current curve
        {
            m_step = 0;
        }
    }
    ReleaseDC(pDC);  // release the DC, which is no longer needed
    CView::OnLButtonDown(nFlags, point);
}
4. Add the left-button double-click message handler:
In ClassWizard, add a handler for the WM_LBUTTONDBLCLK message to the CCreateLineView class:

void CCreateLineView::OnLButtonDblClk(UINT nFlags, CPoint point)
{
    m_step = -1;  // -1 means drawing has ended
    CView::OnLButtonDblClk(nFlags, point);
}
5. Add the member functions:
Add the DrawLine, DrawRect, ComputeRadius, and DrawCircle member functions to the CCreateLineView class:

// CreateLineView.h
protected:
    void DrawLine(CDC *pDC, CPoint startPoint, CPoint endPoint);
    void DrawRect(CDC *pDC, CPoint m_Startp, CPoint m_Endp);
    int  ComputeRadius(CPoint m_Centerp, CPoint m_Arroundp);
    void DrawCircle(CDC *pDC, CPoint m_Centerp, CPoint m_Arroundp);

// CreateLineView.cpp (ComputeRadius needs #include <math.h> for sqrt)
void CCreateLineView::DrawLine(CDC *pDC, CPoint startPoint, CPoint endPoint)
{
    pDC->MoveTo(startPoint);
    pDC->LineTo(endPoint);
}

void CCreateLineView::DrawCircle(CDC *pDC, CPoint m_Centerp, CPoint m_Arroundp)
{
    int radius = ComputeRadius(m_Centerp, m_Arroundp);
    CRect rc(m_Centerp.x - radius, m_Centerp.y - radius,
             m_Centerp.x + radius, m_Centerp.y + radius);
    pDC->Ellipse(rc);  // a circle is an ellipse inscribed in a square
}

// compute the radius
int CCreateLineView::ComputeRadius(CPoint m_Centerp, CPoint m_Arroundp)
{
    int dx = m_Centerp.x - m_Arroundp.x;
    int dy = m_Centerp.y - m_Arroundp.y;
    return (int)sqrt((double)(dx * dx + dy * dy));
}

void CCreateLineView::DrawRect(CDC *pDC, CPoint m_Startp, CPoint m_Endp)
{
    pDC->Rectangle(m_Startp.x, m_Startp.y, m_Endp.x, m_Endp.y);
}
6. The OnMouseMove handler (including the rubber-band effect):
In ClassWizard, add a handler for the WM_MOUSEMOVE message to the CCreateLineView class:

// Reference: the graphics and imaging chapters of 《Visual C++实践与提高》
void CCreateLineView::OnMouseMove(UINT nFlags, CPoint point)
{
    // insert the status-bar code here (described in section 7 below)
    CDC* pDC = GetDC();
    // set the drawing mode, saving the previous mode; R2_NOT draws every
    // pixel in the inverse of the current screen color
    int nDrawmode = pDC->SetROP2(R2_NOT);
    pDC->SelectStockObject(NULL_BRUSH);  // select the stock hollow brush into the DC
    if (m_drawtype == "line")
    {
        if (m_step > 0)
        {
            CPoint prePnt, curPnt;
            prePnt = m_Endp;  // the previous mouse position
            curPnt = point;   // the current mouse position
            // Draw the rubber-band line. The first DrawLine call below erases the
            // previous line: after the first click, m_Startp == m_Endp and m_step
            // becomes 1. On the first mouse move the line from m_Startp to m_Endp
            // is drawn (the two points coincide, so nothing visible appears), then
            // the line from m_Startp to the current point is drawn and m_Endp is
            // set to the current point. On every further move, the line from
            // m_Startp to m_Endp is drawn again, which redraws the second line of
            // the previous move; because pixels are drawn in the inverse of the
            // screen color, redrawing a line erases it. A new line to the current
            // point is then drawn. Erasing the old line and drawing a new one on
            // every move is exactly the rubber-band effect.
            DrawLine(pDC, m_Startp, prePnt);
            DrawLine(pDC, m_Startp, curPnt);
            m_Endp = curPnt;
        }
    }
    if (m_drawtype == "circle")
    {
        if (m_step > 0)
        {
            CPoint prePnt, curPnt;
            prePnt = m_Endp;  // the previous mouse position
            curPnt = point;
            DrawCircle(pDC, m_Startp, prePnt);  // erase the old rubber-band circle
            DrawCircle(pDC, m_Startp, curPnt);  // draw the new one
            m_Endp = curPnt;
        }
    }
    if (m_drawtype == "rect")
    {
        if (m_step > 0)
        {
            CPoint prePnt, curPnt;
            prePnt = m_Endp;  // the previous mouse position
            curPnt = point;
            DrawRect(pDC, m_Startp, prePnt);  // erase the old rubber-band rectangle
            DrawRect(pDC, m_Startp, curPnt);  // draw the new one
            m_Endp = curPnt;
        }
    }
    if (m_drawtype == "curve")
    {
        if (m_step == 1)
        {
            CPoint prePnt, curPnt;
            prePnt = m_Endp;  // the previous mouse position
            curPnt = point;
            // Draw the freehand curve. The way it works: click the curve menu
            // command, click a start point in the window, and simply moving the
            // mouse draws; clicking the left button again ends the current curve,
            // and a new start point begins a new one. Double-clicking the left
            // button ends curve drawing altogether.
            DrawLine(pDC, prePnt, curPnt);
            m_Endp = curPnt;
        }
    }
    pDC->SetROP2(nDrawmode);  // restore the previous drawing mode
    ReleaseDC(pDC);           // release the DC, which is no longer needed
    CView::OnMouseMove(nFlags, point);
}
7. Showing the mouse coordinates in the status bar:
Add one more separator pane to the status bar set up in the frame class:

// MainFrm.cpp : implementation of the CMainFrame class
static UINT indicators[] =
{
    ID_SEPARATOR,  // status line indicator
    ID_SEPARATOR,  // the extra pane added for the mouse coordinates
    ID_INDICATOR_CAPS,
    ID_INDICATOR_NUM,
    ID_INDICATOR_SCRL,
};

Then add the status display code to the view's mouse-move handler:

// CreateLineView.cpp
void CCreateLineView::OnMouseMove(UINT nFlags, CPoint point)
{
    // set the status bar text to show the mouse coordinates
    CStatusBar* pStatus = (CStatusBar*)
        AfxGetApp()->m_pMainWnd->GetDescendantWindow(ID_VIEW_STATUS_BAR);
    // ASSERT(pStatus);
    CString str;
    str.Format("(%8d,%8d)", point.x, point.y);
    if (pStatus)
    {
        // pStatus->SetPaneText(0, str);  // before the extra pane was added, only
        //                                // the first pane could show the text
        pStatus->SetPaneText(1, str);     // with the extra pane added, the
                                          // coordinates go into the second pane
    }
    // ... the drawing code from section 6 goes here ...
    CView::OnMouseMove(nFlags, point);
}
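One practical detail the original text does not cover: the new pane keeps its default width, which may truncate the formatted coordinate string. Assuming the standard AppWizard-generated m_wndStatusBar member, one option is to widen the pane once in CMainFrame::OnCreate after the bar has been created. This is a sketch, and the 120-pixel width is an arbitrary illustrative value.

// MainFrm.cpp : hypothetical addition at the end of CMainFrame::OnCreate
// (pane index 1 is the separator pane added above)
m_wndStatusBar.SetPaneInfo(1, ID_SEPARATOR, SBPS_NORMAL, 120);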