java API Simple Operation Hadoop

2023-03-14  

Reference: a Qt + FFmpeg + QAudioOutput audio player. The original source code was incomplete, so I finished it myself and, along the way, reviewed how FFmpeg decodes audio. As a next step, when there is time, I plan to add a video-playback part. Since there is no MP3 file on my Linux machine, I tested using the audio track of a video file there, and tested the MP3 path on Windows. It is fairly simple; you can refer to the original author's code and adapt it yourself.

#include "widget.h"
#include "ui_widget.h"
#include <QDebug>
#include <QMimeData>
Widget::Widget(QWidget *parent) :
    QWidget(parent),
    ui(new Ui::Widget)
{
    ui->setupUi(this);

    // Accept audio files dragged onto the window (see dragEnterEvent/dropEvent).
    this->setAcceptDrops(true);

    // Worker thread that decodes and plays the audio file.
    thread = new playthread();
    connect(thread, &playthread::duration, this, &Widget::onDuration);
    connect(thread, &playthread::seekOk,   this, &Widget::onSeekOk);
    // The on_btn_*_clicked() / on_slider_* slots follow Qt's
    // on_<objectName>_<signal> convention and are auto-connected by setupUi(),
    // so no manual connect() calls are needed for them.

    // The user is not dragging the progress slider initially.
    sliderSeeking = false;
}

Widget::~Widget()
{
    // Shut the playback thread down BEFORE destroying the UI: its duration()/
    // seekOk() signals are connected to slots that dereference `ui`, so the
    // original order (delete ui first) could crash on a late signal.
    thread->stop();
    if (!thread->wait(3000)) {
        // The worker loop may not exit on its own; force-terminate as a
        // last resort so the destructor cannot hang.
        thread->terminate();
        thread->wait();
    }
    delete thread;   // previously leaked: the thread object was never freed
    delete ui;
}

// The playback thread confirmed the seek finished; allow onDuration() to
// move the progress slider again.
void Widget::onSeekOk()
{
    sliderSeeking = false;
}

void Widget::onDuration(int currentMs,int destMs)
{
  
    static int currentMs1=-1,destMs1=-1;

    if(currentMs1==currentMs&&destMs1==destMs)
    {
  
        return;
    }

    currentMs1 = currentMs;
    destMs1   =  destMs;

    qDebug()<<"onDuration:"<<currentMs<<destMs<<sliderSeeking;

    QString currentTime = QString("%1:%2:%3").arg(currentMs1/360000%60,2,10,QChar('0')).arg(currentMs1/6000%60,2,10,QChar('0')).arg(currentMs1/1000%60,2,10,QChar('0'));

    QString destTime = QString("%1:%2:%3").arg(destMs1/360000%60,2,10,QChar('0')).arg(destMs1/6000%60,2,10,QChar('0')).arg(destMs1/1000%60,2,10,QChar('0'));


    ui->label_duration->setText(currentTime+"/"+destTime);

if (! Sliderseking) // Not sliding
    {
  
        ui->slider->setMaximum(destMs);
        ui->slider->setValue(currentMs);
    }
}

// Accept only drags that carry URLs (e.g. files from a file manager);
// everything else is ignored. (The original line was mangled:
// `event-> mimedata ()-> hasurls ()` did not compile.)
void Widget::dragEnterEvent(QDragEnterEvent *event)
{
    if (event->mimeData()->hasUrls())
    {
        event->acceptProposedAction();
    }
    else
    {
        event->ignore();
    }
}

// A file was dropped: put the local path of the first URL into the path
// line edit so on_btn_start_clicked() can play it. (The original check
// line was mangled and did not compile.)
void Widget::dropEvent(QDropEvent *event)
{
    if (event->mimeData()->hasUrls())
    {
        const QList<QUrl> urls = event->mimeData()->urls();
        if (!urls.isEmpty())
        {
            ui->line_audioPath->setText(urls.first().toLocalFile());
        }
    }
    else
    {
        event->ignore();
    }
}


// Start (or restart) playback of the file named in the path line edit.
void Widget::on_btn_start_clicked()
{
    // A fresh start cancels any in-flight seek state.
    sliderSeeking = false;
    thread->play(ui->line_audioPath->text());
}

// Ask the playback thread to stop the current playback.
void Widget::on_btn_stop_clicked()
{
    thread->stop();
}

// Ask the playback thread to pause.
void Widget::on_btn_pause_clicked()
{
    thread->pause();
}

// Ask the playback thread to resume after a pause.
void Widget::on_btn_resume_clicked()
{
    thread->resume();
}


// The user grabbed the slider: suspend programmatic slider updates so
// onDuration() does not fight the drag.
void Widget::on_slider_sliderPressed()
{
    sliderSeeking = true;
}

// Drag finished: request a seek to the selected position in milliseconds.
// sliderSeeking stays true until the thread confirms via seekOk().
void Widget::on_slider_sliderReleased()
{
    const int targetMs = ui->slider->value();
    thread->seek(targetMs);
}
#ifndef WIDGET_H
#define WIDGET_H

#include <QWidget>
#include <QDragEnterEvent>
#include "playthread.h"

namespace Ui {
  
class Widget;
}

// Main window of the FFmpeg/QAudioOutput audio player.
// Owns the playback worker thread, routes its duration()/seekOk() signals
// to the UI, and accepts audio files dropped onto the window.
class Widget : public QWidget
{
  
    Q_OBJECT

public:
    explicit Widget(QWidget *parent = nullptr);
    ~Widget();

private:
    Ui::Widget *ui;

    // Decoding/playback worker (see playthread.h).
    playthread *thread;
    // True while the user drags the progress slider, so onDuration()
    // does not move the slider under the drag.
    bool sliderSeeking;

public slots:
    // Progress from the worker: current position / total length, both in ms.
    void onDuration(int currentMs,int destMs);
    void on_btn_stop_clicked();
    void on_btn_start_clicked();
    void on_btn_pause_clicked();
    void on_btn_resume_clicked();
    void on_slider_sliderPressed();
    void on_slider_sliderReleased();
    // Worker confirmed a seek completed; re-enables slider updates.
    void onSeekOk();

protected:
    // Drag & drop of audio files onto the window.
    virtual void dragEnterEvent(QDragEnterEvent *event);
    virtual void dropEvent(QDropEvent *event);
};

#endif // WIDGET_H

#ifndef PLAYTHREAD_H
#define PLAYTHREAD_H
#include <QThread>
#include <QObject>
#include <QAudioOutput>

// Control requests posted from the GUI thread to the playback thread.
// The worker polls `type` inside run()/runIsBreak() and reacts.
enum controlType
{
  
    control_none,
    control_stop,
    control_pause,
    control_resume,
    control_play,
    control_type,
    control_seek
};


// Worker thread that decodes an audio file with FFmpeg and plays the
// resampled PCM through QAudioOutput. The public control methods only
// post a request into `type`; the decoding loop picks it up.
class playthread : public QThread
{
  
    Q_OBJECT;
public:
    explicit playthread(QObject* parent = NULL);
    // Create the QAudioOutput for the given sample rate (stereo, 16-bit PCM).
    bool initAudio(int SampleRate);
    // Start (or restart) playback of filePath; starts the thread if needed.
    void play(QString filePath);
    void stop();
    void pause();
    void resume();
    // Jump to `value` milliseconds; seekOk() is emitted once done.
    void seek(int value);
    // Log an FFmpeg error code with a prefix and re-emit it via ERROR().
    void debugErr(QString prefix, int err);
    // Service pending control requests; true means "abort the playback loop".
    bool runIsBreak();
    // Decode/resample/play one file until EOF or a stop/replay request.
    void runPlay();
    void run();


private:
    QAudioOutput *audio;   // output device; NULL until initAudio() succeeds
    controlType type;      // pending control request, polled by the worker
    QString filePath;      // file currently requested for playback
    int seekMs;            // seek target in milliseconds

signals:
    void ERROR(QString qstrError);
    void seekOk();
    // Emitted during playback as (current position ms, total duration ms) —
    // see the emit sites in runPlay(); the parameter names are misleading.
    void duration(int ndestMs, int ndestMs1);


};

#endif // PLAYTHREAD_H
#include "playthread.h"
#include <QAudioFormat>
#include <QAudioDeviceInfo>
#include <QDebug>
extern "C"
{
#include <libavutil/error.h>
#include <libavutil/mathematics.h>   // av_rescale_rnd
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswresample/swresample.h>
}



// Construct the playback worker. Forwards `parent` to QThread so Qt's
// parent/child ownership works (the original silently ignored it).
playthread::playthread(QObject* parent) : QThread(parent)
{
    audio = NULL;          // created lazily in initAudio()
    type = control_none;   // no pending control request
}

bool playthread::initAudio(int SampleRate)
{
  
    QAudioFormat format;
    if(audio != NULL) {
  
        return true;
    }

    format.setSampleRate(SampleRate);
    format.setChannelCount(2);
    format.setSampleSize(16);
    format.setCodec("audio/pcm");
    format.setByteOrder(QAudioFormat::LittleEndian);
qaudiodeViceInfo Info (qaudiodeviceInfo :: defaultOutPutDevice ()); // Select the default output device
    foreach(int count,info.supportedChannelCounts())
    {
  
qdebug () << "The number of channels supported by the output device:" Count;
    }

    foreach(int count,info.supportedSampleRates())
    {
  
qdebug () << "The sampling rate supported by the output device:" Count;
    }

    foreach(int count,info.supportedSampleSizes())
    {
  
qdebug () << "The sample data digit supported by the output device:" << Count;
    }

    audio = new QAudioOutput(format, this);
    audio->setBufferSize(100000);

   return true;
}

// Request playback of `filePath`. Starts the worker thread if it is not
// running yet; otherwise the running loop picks up the control_play request.
void playthread::play(QString filePath)
{
    this->filePath = filePath;
    type = control_play;
    if(!isRunning())
        this->start();
}

// Request stop; only meaningful while the worker loop is running.
void playthread::stop()
{
    if(isRunning())
        type = control_stop;
}

// Request pause; picked up by runIsBreak() inside the playback loop.
void playthread::pause()
{
    if(isRunning())
        type = control_pause;
}


void playthread::resume()
{
  
    if(this->isRunning())
    {
  
        type = control_resume;
    }
}


void playthread::seek(int value)
{
  

    if(this->isRunning())
    {
  
        seekMs = value;
        type = control_seek;
    }
}

void playthread :: debugerr (qstring prefix, int err) // Obtain error information according to the error number and print and print it and print it
{
  
    char errbuf[512]={0};

    av_strerror(err,errbuf,sizeof(errbuf));

    qDebug()<<prefix<<":"<<errbuf;

    emit ERROR(prefix+":"+errbuf);
}

BOOL PLAYTHREAD :: Runisbreak () // Treatment control to determine whether it needs to be stopped
{
  

    bool ret = false;
// Treatment of Play Playing
    if(type == control_pause)
    {
  
        while(type == control_pause)
        {
  
             audio->suspend();
             msleep(500);
        }

        if(type == control_resume)
        {
  
             audio->resume();
        }
    }

if (type == Control_play) // Re -play
    {
  
        ret = true;
        if(audio->state()== QAudio::ActiveState)
            audio->stop();
    }

if (type == Control_stop) // Stop
    {
  
         ret = true;
         if(audio->state()== QAudio::ActiveState)
             audio->stop();
    }
    return ret;
}


void playthread::runPlay()
{
  
    int ret;
    int destMs, currentMs;
    if(audio==NULL) {
  
emit error ("The output device does not support the format, cannot play audio");
        return;
    }
// Initialize the network library (you can open the streaming video of the RTSP RTMP HTTP protocol)
    av_register_all();
    avformat_network_init();
    AVFormatContext *pFmtCtx = NULL;
    ret = avformat_open_input(&pFmtCtx, this->filePath.toLocal8Bit().data(), NULL, NULL);
    if (ret!= 0)
    {
  
        debugErr("avformat_open_input",ret);
        return ;
    }
    ret = avformat_find_stream_info(pFmtCtx, NULL);
    if (ret!= 0)
    {
  
        debugErr("avformat_find_stream_info",ret);
        return ;
    }
    int audioindex = -1;
    audioindex = av_find_best_stream(pFmtCtx, AVMEDIA_TYPE_AUDIO, -1, -1,
                                     NULL, 0);

    qDebug()<<"audioindex:"<<audioindex;
    AVCodec *acodec = avcodec_find_decoder(pFmtCtx->streams[audioindex]->codecpar->codec_id);

    AVCodecContext *acodecCtx = avcodec_alloc_context3(acodec);

avcodec_parameters_to_context (Acodecctx, PFMTCTX-> Streams [AudioIndex]-> Codecpar); // // initial AVCodeCContext
RET = AVCodec_open2 (Acodecctx, NULL, NULL); // Open the decoder, because the previously called AVCODEC_ALLOC_CONTEXT3 (VCodec) to initialize VC, then CODEC (second parameter) can fill NULL
    if (ret!= 0)
    {
  
        debugErr("avcodec_open2",ret);
        return ;
    }
    SwrContext *swrctx = NULL;
    swrctx = swr_alloc_set_opts(swrctx,
                                av_get_default_channel_layout(2),
                                AV_SAMPLE_FMT_S16,
                                44100,
                                acodecCtx->channel_layout,
                                acodecCtx->sample_fmt,
                                acodecCtx->sample_rate,
                                NULL, NULL);
    swr_init(swrctx);
    destMs = av_q2d(pFmtCtx->streams[audioindex]->time_base)*1000*pFmtCtx->streams[audioindex]->duration;
qdebug () << "Code rate:" << Acodecctx-> Bit_rate;
qdebug () << "Format:" << Acodecctx-> SAMPLE_FMT;
qdebug () << "channel:" << Acodecctx-> Channels;
qdebug () << "Sampling rate:" << Acodecctx-> SAMPLE_ATE;
qdebug () << "" duration: << Destms;
qdebug () << "decoder:" << Acodec-> name;


    AVPacket *packet = av_packet_alloc();
    AVFrame *frame = av_frame_alloc();
    audio->stop();
    QIODevice *io = audio->start();

    while(1) {
  
        if(runIsBreak()) {
  
            break;
        }
        if(type == control_seek)
        {
  
            av_seek_frame(pFmtCtx, audioindex, seekMs/(double)1000/av_q2d(pFmtCtx->streams[audioindex]->time_base),AVSEEK_FLAG_BACKWARD);
            type = control_none;
            emit seekOk();
        }
        ret = av_read_frame(pFmtCtx, packet);
        if(ret != 0) {
  
            debugErr("av_read_frame",ret);
            emit duration(destMs,destMs);
            break ;
        }

        if(packet->stream_index == audioindex) {
  
            //ret = avcodec_send_frame(acodecCtx, frame);
            av_packet_unref(packet);
            if(ret != 0){
  
                debugErr("avcodec_send_packet",ret);
                continue ;
            }
            while(avcodec_receive_frame(acodecCtx, frame) == 0) {
  
                if(runIsBreak()) {
  
                    break;
                }
                uint8_t *data[2] = {0};
                int byteCnt = frame->nb_samples * 2 * 2;
                unsigned char *pcm = new uint8_t[byteCnt];
                data[0] = pcm;
                ret = swr_convert(swrctx,
                                  data,
                                  frame->nb_samples,
                                  (const uint8_t**)frame->data,
                                  frame->nb_samples);
// Send the re -sampling data data to the output device for playback
                while (audio->bytesFree() < byteCnt)
                {
  
                    if(runIsBreak())
                        break;
                    msleep(10);
                }

                if(!runIsBreak())
                 io->write((const char *)pcm,byteCnt);

                currentMs = av_q2d(pFmtCtx->streams[audioindex]->time_base)*1000*frame->pts;
// qdebug () << "" duration: << Destms << Currentms;
                emit duration(currentMs,destMs);

                delete[] pcm;
            }
        }
    }
// Release memory
    av_frame_free(&frame);
    av_packet_free(&packet);
    swr_free(&swrctx);
    avcodec_free_context(&acodecCtx);
    avformat_close_input(&pFmtCtx);
}

void playthread::run()
{
  

    if(!initAudio(44100))
    {
  
emit error ("The output device does not support the format, cannot play audio");
    }

    while(1)
    {
  

        switch(type)
        {
  
            case control_none: msleep(100);    break;
case control_play: type = control_none; runplay (); break; // play
            default: type=control_none;   break;
        }
    }

}
Since my current Linux setup cannot take screenshots, I am unable to include images of the program running.

source

Related Posts

HDU5536 Chip Factory 【01 dictionary tree】

Parameter transmission (value transmission, reference transfer, pointer transmission) LC

Detailed explanation of GSON use (4) MAP object conversion

mysql database security backup

java API Simple Operation Hadoop

Random Posts

Plug

Simple Rubik’s Cube restoration method, Rubik’s cube restore formula, illustration

Drag -Drag progress bar display progress

[python] Pandas implements two tables to check highlights, go to kinglake

DWR3 Implementation Message Precise Push Detailed Step