Using rt2x00 wireless driver with hostapd

The problem

A few months ago I bought an rt73-based USB wifi dongle to make a low-cost access point from my old Linux box. Unfortunately I couldn't make it work until last week, when I finally figured out the root of the problem. First of all, I used the latest rt2x00 development kernel (how to get it), and the latest hostapd from the git repository. The official howto on the rt2x00 wiki is quite good, but it didn't work with my TL-WN321G wifi dongle, so I had to find out what the problem was. First the hostapd started up, and the clients could associate, but when I tried to send packets, hostapd printed error messages in an infinite loop:

wlan0: STA 00:12:f0:76:03:b9 IEEE 802.11: association OK (aid 1)MGMT (TX callback) failmgmt::assoc_resp cbwlan0: STA 00:12:f0:76:03:b9 IEEE 802.11: did not acknowledge associationresponseSending disassociation info to STA 00:12:f0:76:03:b9MGMT (TX callback) failunknown mgmt cb frame subtype 10

Investigation

And then it reassociates with the client, and does it again and again. So after a few days spent trying to figure out what the problem was, I found some interesting posts on the hostapd and rt2x00 mailing lists on this topic. The most interesting is this thread. The discussion is about three patches, and the first one is the important one. They say that the driver cannot acknowledge the completion of sending certain frames, because of hardware limitations. So the case is that the driver can't acknowledge these frames, but hostapd wants an acknowledgement, or it won't function properly.

The possible solutions

There are two solutions.

  • Patch the driver to acknowledge the frames even if it is not sure that they have been successfully sent.
  • Patch hostapd to ignore the lack of acknowledgement.

The second one seems to be the easier way, so I have chosen to patch hostapd.

Patching hostapd

You have to comment out two lines in the ieee802_11.c file. Search for “did not acknowledge” in the file, and comment out the “return;” command after the lines that contain the “did not acknowledge” string. So after commenting out the return lines, the two blocks look like this:

if (!ok) {
hostapd_logger(hapd, mgmt->da, HOSTAPD_MODULE_IEEE80211,
HOSTAPD_LEVEL_NOTICE,
"did not acknowledge authentication response");
//return;
}

if (!ok) {
hostapd_logger(hapd, mgmt->da, HOSTAPD_MODULE_IEEE80211,
HOSTAPD_LEVEL_DEBUG,
"did not acknowledge association response");
//return;
}

ref:http://eznemegy.blog.hu/2008/12/14/using_rt2x00_wireless_driver_with_hostapd/fullcommentlist

移植tslib到Android

环境变量

export  env
export TSLIB_TSEVENTTYPE=INPUT
export TSLIB_TSDEVICE=/dev/input/event4
export TSLIB_CALIBFILE=/system/etc/pointercal
export TSLIB_CONFFILE=/system/etc/ts.conf
export TSLIB_PLUGINDIR=/system/lib/ts/plugins
export TSLIB_FBDEVICE=/dev/fb0
export TSLIB_CONSOLEDEVICE=none
export TSTS_INFO_FILE=/sys/devices/virtual/input/input1/uevent
export QWS_MOUSE_PROTO=tslib:/dev/input/event4

交叉编译过程中的问题

arm-eabi/bin/ld: crt0.o: No such file: No such file or directory
解决:configure加 CFLAGS="-nostdlib"

我的Android.mk ,测试通过

# Android.mk: builds the tslib core library (libts), its input/filter
# plugins and the calibration/test executables.
#
# Fixes vs. original:
#  - removed the dangling line-continuation backslash after the last
#    libts source file (it swallowed the following blank line),
#  - ts_calibrate's LOCAL_C_INCLUDES listed a header FILE
#    (tests/ts_calibrate.h); include paths must be directories.
#
# NOTE(review): /usr/include/ is a host path and is suspicious in an
# Android build; kept because the author reports this file as tested,
# but verify it is actually needed.
LOCAL_PATH := $(call my-dir)

#
# libts (core library)
#
include $(CLEAR_VARS)
# Must match the TSLIB_PLUGINDIR environment variable used at runtime.
TSLIB_PLUGINDIR := /system/lib/ts/plugins
LOCAL_SRC_FILES := \
        src/ts_attach.c \
        src/ts_close.c \
        src/ts_config.c \
        src/ts_error.c \
        src/ts_fd.c \
        src/ts_load_module.c \
        src/ts_open.c \
        src/ts_parse_vars.c \
        src/ts_read.c \
        src/ts_option.c \
        src/ts_read_raw.c
LOCAL_C_INCLUDES += $(LOCAL_PATH)/src/ \
        /usr/include/
LOCAL_SHARED_LIBRARIES += libdl
LOCAL_MODULE := libts
include $(BUILD_SHARED_LIBRARY)

#
# plugin: input-raw
#
include $(CLEAR_VARS)
LOCAL_SRC_FILES := plugins/input-raw.c
LOCAL_C_INCLUDES += $(LOCAL_PATH)/src/
LOCAL_SHARED_LIBRARIES := libdl \
        libts
LOCAL_MODULE := ts/plugins/input-raw
include $(BUILD_SHARED_LIBRARY)

#
# plugin: pthres
#
include $(CLEAR_VARS)
LOCAL_SRC_FILES := plugins/pthres.c
LOCAL_C_INCLUDES += $(LOCAL_PATH)/src/
LOCAL_SHARED_LIBRARIES := libdl \
        libts
LOCAL_MODULE := ts/plugins/pthres
include $(BUILD_SHARED_LIBRARY)

#
# plugin: linear
#
include $(CLEAR_VARS)
LOCAL_SRC_FILES := plugins/linear.c
LOCAL_C_INCLUDES += $(LOCAL_PATH)/src/
LOCAL_SHARED_LIBRARIES := libdl \
        libts
LOCAL_MODULE := ts/plugins/linear
include $(BUILD_SHARED_LIBRARY)

#
# plugin: dejitter
#
include $(CLEAR_VARS)
LOCAL_SRC_FILES := plugins/dejitter.c
LOCAL_C_INCLUDES += $(LOCAL_PATH)/src/
LOCAL_SHARED_LIBRARIES := libdl \
        libts
LOCAL_MODULE := ts/plugins/dejitter
include $(BUILD_SHARED_LIBRARY)

#
# plugin: variance
#
include $(CLEAR_VARS)
LOCAL_SRC_FILES := plugins/variance.c
LOCAL_C_INCLUDES += $(LOCAL_PATH)/src/
LOCAL_SHARED_LIBRARIES := libdl \
        libts
LOCAL_MODULE := ts/plugins/variance
include $(BUILD_SHARED_LIBRARY)

#
# ts_calibrate
#
include $(CLEAR_VARS)
LOCAL_SRC_FILES := tests/testutils.c \
        tests/fbutils.c \
        tests/font_8x8.c \
        tests/ts_calibrate.c
LOCAL_C_INCLUDES += $(LOCAL_PATH)/src/ \
        $(LOCAL_PATH)/tests/ \
        /usr/include/
LOCAL_SHARED_LIBRARIES := libdl \
        libts
LOCAL_MODULE := ts_calibrate
include $(BUILD_EXECUTABLE)

#
# ts_test
#
include $(CLEAR_VARS)
LOCAL_SRC_FILES := tests/testutils.c \
        tests/fbutils.c \
        tests/font_8x8.c \
        tests/ts_test.c
LOCAL_C_INCLUDES += $(LOCAL_PATH)/src/ \
        /usr/include/
LOCAL_SHARED_LIBRARIES := libdl \
        libts
LOCAL_MODULE := ts_test
include $(BUILD_EXECUTABLE)

#
# ts_print
#
include $(CLEAR_VARS)
LOCAL_SRC_FILES := tests/testutils.c \
        tests/fbutils.c \
        tests/font_8x8.c \
        tests/ts_print.c
LOCAL_C_INCLUDES += $(LOCAL_PATH)/src/ \
        /usr/include/
LOCAL_SHARED_LIBRARIES := libdl \
        libts
LOCAL_MODULE := ts_print
include $(BUILD_EXECUTABLE)

#
# ts_print_raw
#
include $(CLEAR_VARS)
LOCAL_SRC_FILES := tests/testutils.c \
        tests/fbutils.c \
        tests/font_8x8.c \
        tests/ts_print_raw.c
LOCAL_C_INCLUDES += $(LOCAL_PATH)/src/ \
        /usr/include/
LOCAL_SHARED_LIBRARIES := libdl \
        libts
LOCAL_MODULE := ts_print_raw
include $(BUILD_EXECUTABLE)

#
# ts_harvest
#
include $(CLEAR_VARS)
LOCAL_SRC_FILES := tests/testutils.c \
        tests/fbutils.c \
        tests/font_8x8.c \
        tests/ts_harvest.c
LOCAL_C_INCLUDES += $(LOCAL_PATH)/src/ \
        /usr/include/
LOCAL_SHARED_LIBRARIES := libdl \
        libts
LOCAL_MODULE := ts_harvest
include $(BUILD_EXECUTABLE)

FFMPEG SDL音频播放分析

抽象流程:

设置SDL的音频参数 —-> 打开声音设备,播放静音 —-> ffmpeg读取音频流中数据放入队列 —-> SDL调用用户设置的函数来获取音频数据 —-> 播放音频

SDL内部维护了一个buffer来存放解码后的数据,这个buffer中的数据来源是我们注册的回调函数(audio_callback),audio_callback调用audio_decode_frame来做具体的音频解码工作,需要引起注意的是:从流中读取出的一个音频包(avpacket)可能含有多个音频桢(avframe),所以需要多次调用avcodec_decode_audio4来完成整个包的解码,解码出来的数据存放在我们自己的缓冲中(audio_buf2)。SDL每一次回调都会引起数据从audio_buf2拷贝到SDL内部缓冲区,当audio_buf2中的数据大于SDL的缓冲区大小时,需要分多次拷贝。

关键实现:

main()函数

/*
 * Entry point: allocate the player state, initialize SDL audio, spawn the
 * decode thread and run the SDL event loop until quit is requested.
 *
 * Fixes vs. original: `is_strlcpy` is not a real function (av_strlcpy);
 * the event loop was wrongly nested inside `if(!is->parse_tid)`, so it
 * only ran when thread creation FAILED — and only for a single event.
 */
int main(int argc, char **argv){
    SDL_Event event;  /* SDL event variable */
    VideoState *is;   /* big state struct: video, decoders, queues, ... */

    is = (VideoState*) av_mallocz(sizeof(VideoState));
    if(argc < 2){
        fprintf(stderr, "Usage: play <file>\n");
        exit(1);
    }
    av_register_all(); /* register all ffmpeg codecs */
    /* Initialize SDL; only AUDIO is used here. Add SDL_INIT_VIDEO etc. for video. */
    if(SDL_Init(SDL_INIT_AUDIO)){
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }
    av_strlcpy(is->filename, argv[1], sizeof(is->filename));
    /* Create an SDL thread for decoding; the main thread runs the event loop. */
    is->parse_tid = SDL_CreateThread(decode_thread, is);
    if(!is->parse_tid){
        av_free(is);
        return -1;
    }
    for(;;){
        SDL_WaitEvent(&event);
        switch(event.type){
            case FF_QUIT_EVENT:
            case SDL_QUIT:
                is->quit = 1;
                SDL_Quit();
                exit(0);
                break;
            default:
                break;
        }
    }
    return 0;
}

decode_thread()读取文件信息和音频包

static int decode_thread(void *arg){
    VideoState *is = (VideoState*)arg;
    AVFormatContext *ic = NULL;
    AVPacket pkt1, *packet = &pkt1;
    int ret, i, audio_index = -1;

    is->audioStream = -1;
    global_video_state = is; 
    /*  使用ffmpeg打开视频,解码器等 常规工作 */
    if(avFormat_open_input(&ic, is->filename, NULL,  NULL) != 0)  {
        fprintf(stderr, "open file error: %s\n", is->filename);
        return -1;
    }
    is->ic = ic;
    if(avformat_find_stream_info(ic, NULL) < 0){
        fprintf(stderr, "find stream info error\n");
        return -1;
    }
    av_dump_format(ic, 0, is->filename, 0);
    for(i  = 0; i < ic->nb_streams; i++){
         if(ic->streams[i])->codec->codec_type == AVMEDIA_TYPE_AUDIO && audio_index == -1){
            audio_index = i;
            break;
        }
    }
    if(audio_index >= 0) {
        /* 所有设置SDL音频流信息的步骤都在这个函数里完成 */
        stream_component_open(is, audio_index);
    }
    if(is->audioStream < 0){
        fprintf(stderr, "could not open codecs for file: %s\n", is->filename);
        goto fail;
    }
    /* 读包的主循环, av_read_frame不停的从文件中读取数据包(这里只取音频包)*/
    for(;;){
        if(is->quit) break;
        /* 这里audioq.size是指队列中的所有数据包带的音频数据的总量,并不是包的数量 */
        if(is->audioq.size > MAX_AUDIO_SIZE){
            SDL_Delay(10); // 毫秒
            continue;
        }
         ret = av_read_frame(is->ic, packet);
         if(ret < 0){
                if(ret == AVERROR_EOF || url_feof(is->ic->pb))    break;
                if(is->ic->pb && is->ic->pb->error)    break;
                contiue;                  
          }  
          if(packet->stream_index == is->audioStream){
                    packet_queue_put(&is->audioq, packet);
           } else{
                     av_free_packet(packet);
            }
    }
     while(!is->quit)    SDL_Delay(100);
fail: {
               SDL_Event event;
               event.type = FF_QUIT_EVENT;
               event.user.data1 = is;
               SDL_PushEvent(&event);
        }
        return 0;
}

stream_component_open():设置音频参数和打开设备

int stream_component_open(videoState *is, int stream_index){
    AVFormatContext *ic = is->ic;
    AVCodecContext *codecCtx;
    AVCodec *codec;
    /* 在用SDL_OpenAudio()打开音频设备的时候需要这两个参数*/
    /* wanted_spec是我们期望设置的属性,spec是系统最终接受的参数 */
    /* 我们需要检查系统接受的参数是否正确 */
    SDL_AudioSpec wanted_spec, spec;
    int64_t wanted_channel_layout = 0; // 声道布局(SDL中的具体定义见“FFMPEG结构体”部分) 
    int wanted_nb_channels; // 声道数
    /*  SDL支持的声道数为 1, 2, 4, 6 */
    /*  后面我们会使用这个数组来纠正不支持的声道数目 */
    const int next_nb_channels[] = { 0, 0, 1, 6,  2, 6, 4, 6 }; 

    if(stream_index < 0 || stream_index >= ic->nb_streams)    return -1;
    codecCtx = ic->streams[stream_index]->codec;
    wanted_nb_channels = codecCtx->channels;
    if(!wanted_channel_layout || wanted_nb_channels != av_get_channel_layout_nb_channels(wanted_channel_layout)) {
        wanted_channel_layout = av_get_default_channel_lauout(wanted_channel_nb_channels);
        wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
    }
    wanted_spec.channels = av_get_channels_layout_nb_channels(wanted_channel_layout);
    wanted_spec.freq = codecCtx->sample_rate;
    if(wanted_spec.freq <= 0 || wanted_spec.channels <=0){
           fprintf(stderr, "Invaild sample rate or channel count!\n");
            return -1;
    }
    wanted_spec.format = AUDIO_S16SYS; // 具体含义请查看“SDL宏定义”部分
    wanted_spec.silence = 0; // 0指示静音
    wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE; // 自定义SDL缓冲区大小
    wanted_spec.callback = audio_callback; // 音频解码的关键回调函数
    wanted_spec.userdata = is; // 传给上面回调函数的外带数据

    /*  打开音频设备,这里使用一个while来循环尝试打开不同的声道数(由上面 */
    /*  next_nb_channels数组指定)直到成功打开,或者全部失败 */
    while(SDL_OpenAudio(&wanted_spec, &spec) < 0){
        fprintf(stderr, "SDL_OpenAudio(%d channels): %s\n", wanted_spec.channels, SDL_GetError());
        wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)]; // FFMIN()由ffmpeg定义的宏,返回较小的数
        if(!wanted_spec.channels){
              fprintf(stderr, "No more channel to try\n");
              return -1;
        }
        wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
    }
    /* 检查实际使用的配置(保存在spec,由SDL_OpenAudio()填充) */
    if(spec.format != AUDIO_S16SYS){
        fprintf(stderr, "SDL advised audio format %d is not supported\n", spec.format);
        return -1;
    }
    if(spec.channels != wanted_spec.channels) {
        wanted_channel_layout = av_get_default_channel_layout(spec.channels);
        if(!wanted_channel_layout){
                fprintf(stderr, "SDL advised channel count %d is not support\n", spec.channels);
                return -1;
        }
    }
    /* 把设置好的参数保存到大结构中 */
    is->audio_src_fmt = is->audio_tgt_fmt = AV_SAMPLE_FMT_S16;
    is->audio_src_freq = is->audio_tgt_freq = spec.freq;
    is->audio_src_channel_layout = is->audio_tgt_layout = wanted_channel_layout;
    is->audio_src_channels = is->audio_tat_channels = spec.channels;

    codec = avcodec_find_decoder(codecCtx>codec_id);
    if(!codec || (avcodec_open2(codecCtx, codec, NULL) < 0)){
        fprintf(stderr, "Unsupported codec!\n");
        return -1;
    }
    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT; //具体含义请查看“FFMPEG宏定义”部分
    is->audioStream = stream_index;
    is->audio_st = ic->streams[stream_index];
    is->audio_buf_size = 0;
    is->audio_buf_index = 0;
    memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
    packet_queue_init(&is->audioq);
    SDL_PauseAudio(0); // 开始播放静音
}

audio_callback(): 回调函数,向SDL缓冲区填充数据

/*
 * SDL audio callback: fill SDL's buffer (stream, len bytes) with decoded
 * audio, calling audio_decode_frame() whenever our own buffer runs dry.
 *
 * Fixes vs. original: `is(audio_data_size < 0)` must be `if(...)`, and an
 * unterminated block comment has been repaired.
 */
void audio_callback(void *userdata, Uint8 *stream, int len){
    VideoState *is = (VideoState*)userdata;
    int len1, audio_data_size;

    /* len is the size of the SDL buffer; keep filling until it is full. */
    while(len > 0){
        /* audio_buf_index and audio_buf_size describe our own buffer of     */
        /* decoded data waiting to be copied into the SDL buffer. When       */
        /* audio_buf_index >= audio_buf_size the buffer is empty and we must */
        /* call audio_decode_frame() to decode more frames.                  */
        if(is->audio_buf_index >= is->audio_buf_size){
            audio_data_size = audio_decode_frame(is);
            /* audio_data_size < 0 means nothing could be decoded: play silence. */
            if(audio_data_size < 0){
                is->audio_buf_size = 1024;
                /* zeroed buffer -> silence */
                memset(is->audio_buf, 0, is->audio_buf_size);
            } else {
                is->audio_buf_size = audio_data_size;
            }
            is->audio_buf_index = 0;
        }
        /* Copy as much as fits into stream; the remainder goes next round. */
        len1 = is->audio_buf_size - is->audio_buf_index;
        if(len1 > len) len1 = len;

        memcpy(stream, (uint8_t*)is->audio_buf + is->audio_buf_index, len1);
        len -= len1;
        stream += len1;
        is->audio_buf_index += len1;
    }
}

audio_decode_frame():解码音频

/*
 * Decode one audio packet (a packet may contain several frames) and return
 * the number of playable bytes placed in is->audio_buf, resampling via
 * swresample when the source format differs from the negotiated SDL target.
 * Returns -1 on quit or when the packet queue yields an error.
 *
 * Fixes vs. original: `avacodec_alloc_frame` typo; avcodec_decode_audio4
 * was called with the nonexistent `is->audio_st_codec` and got_frame by
 * value (must be a pointer); `audio_frame_channels` etc. used `_` where
 * `->` member access is required.
 */
int audio_decode_frame(VideoState *is){
    int len1, len2, decoded_data_size;
    AVPacket *pkt = &is->audio_pkt;
    int got_frame = 0;
    int64_t dec_channel_layout;
    int wanted_nb_samples, resampled_data_size;

    for(;;){
      /* A packet may hold several frames: keep decoding until consumed. */
      while(is->audio_pkt_size > 0){
        if(!is->audio_frame){
            if(!(is->audio_frame = avcodec_alloc_frame())){
                return AVERROR(ENOMEM);
            }
        } else
          avcodec_get_frame_defaults(is->audio_frame);

        len1 = avcodec_decode_audio4(is->audio_st->codec, is->audio_frame, &got_frame, pkt);
        /* Decode error: skip the rest of the packet. */
        if(len1 < 0){
           is->audio_pkt_size = 0;
           break;
        }
        is->audio_pkt_data += len1;
        is->audio_pkt_size -= len1;
        if(!got_frame)   continue;
        /* Buffer size required by the decoded frame. */
        decoded_data_size = av_samples_get_buffer_size(NULL,
                            is->audio_frame->channels,
                            is->audio_frame->nb_samples,
                            is->audio_frame->format, 1);
        /* Trust the frame's layout only if it matches its channel count. */
        dec_channel_layout = (is->audio_frame->channel_layout && is->audio_frame->channels
                   == av_get_channel_layout_nb_channels(is->audio_frame->channel_layout))
                   ? is->audio_frame->channel_layout : av_get_default_channel_layout(is->audio_frame->channels);
        wanted_nb_samples =  is->audio_frame->nb_samples;
        /* (Re)build the resampler whenever the source parameters change. */
        if (is->audio_frame->format != is->audio_src_fmt ||
            dec_channel_layout != is->audio_src_channel_layout ||
            is->audio_frame->sample_rate != is->audio_src_freq ||
            (wanted_nb_samples != is->audio_frame->nb_samples && !is->swr_ctx)) {
                if (is->swr_ctx) swr_free(&is->swr_ctx);
                is->swr_ctx = swr_alloc_set_opts(NULL,
                                                 is->audio_tgt_channel_layout,
                                                 is->audio_tgt_fmt,
                                                 is->audio_tgt_freq,
                                                 dec_channel_layout,
                                                 is->audio_frame->format,
                                                 is->audio_frame->sample_rate,
                                                 0, NULL);
                 if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
                     fprintf(stderr, "swr_init() failed\n");
                     break;
                 }
                 is->audio_src_channel_layout = dec_channel_layout;
                 is->audio_src_channels = is->audio_st->codec->channels;
                 is->audio_src_freq = is->audio_st->codec->sample_rate;
                 is->audio_src_fmt = is->audio_st->codec->sample_fmt;
         }
         /* The sample count could be adjusted up/down here, e.g. for A/V sync. */
         if (is->swr_ctx) {
             const uint8_t **in = (const uint8_t **)is->audio_frame->extended_data;
             uint8_t *out[] = { is->audio_buf2 };
             if (wanted_nb_samples != is->audio_frame->nb_samples) {
                if(swr_set_compensation(is->swr_ctx,
                  (wanted_nb_samples - is->audio_frame->nb_samples)*is->audio_tgt_freq/is->audio_frame->sample_rate,
                   wanted_nb_samples * is->audio_tgt_freq/is->audio_frame->sample_rate) < 0) {
                        fprintf(stderr, "swr_set_compensation() failed\n");
                        break;
                   }
             }
             len2 = swr_convert(is->swr_ctx, out,
                  sizeof(is->audio_buf2)/is->audio_tgt_channels/av_get_bytes_per_sample(is->audio_tgt_fmt),
                  in, is->audio_frame->nb_samples);
             if (len2 < 0) {
                  fprintf(stderr, "swr_convert() failed\n");
                  break;
             }
             if(len2 == sizeof(is->audio_buf2)/is->audio_tgt_channels/av_get_bytes_per_sample(is->audio_tgt_fmt)) {
                 fprintf(stderr, "warning: audio buffer is probably too small\n");
                 swr_init(is->swr_ctx);
             }
             is->audio_buf = is->audio_buf2;
             resampled_data_size = len2*is->audio_tgt_channels*av_get_bytes_per_sample(is->audio_tgt_fmt);
           } else {
             resampled_data_size = decoded_data_size;
             is->audio_buf = is->audio_frame->data[0];
           }
           /* Return the amount of data obtained. */
           return resampled_data_size;
       }
       /* Packet consumed: release it and fetch the next audio packet. */
       if (pkt->data) av_free_packet(pkt);
       memset(pkt, 0, sizeof(*pkt));
       if (is->quit) return -1;
       if (packet_queue_get(&is->audioq, pkt, 1) < 0) return -1;
       is->audio_pkt_data = pkt->data;
       is->audio_pkt_size = pkt->size;

     }
}

FFMPEG结构体

channel_layout_map

/* Mapping between human-readable layout names, channel counts and
 * AV_CH_LAYOUT_* bit masks (mirrors libavutil's channel_layout_map).
 * NOTE: "6.1" appears twice (6POINT1 and 6POINT1_BACK); a lookup by
 * name returns whichever entry is matched first. */
static const struct {
const char *name;
int nb_channels;
uint64_t layout;
} channel_layout_map[] = {
{ "mono", 1, AV_CH_LAYOUT_MONO },
{ "stereo", 2, AV_CH_LAYOUT_STEREO },
{ "2.1", 3, AV_CH_LAYOUT_2POINT1 },
{ "3.0", 3, AV_CH_LAYOUT_SURROUND },
{ "3.0(back)", 3, AV_CH_LAYOUT_2_1 },
{ "4.0", 4, AV_CH_LAYOUT_4POINT0 },
{ "quad", 4, AV_CH_LAYOUT_QUAD },
{ "quad(side)", 4, AV_CH_LAYOUT_2_2 },
{ "3.1", 4, AV_CH_LAYOUT_3POINT1 },
{ "5.0", 5, AV_CH_LAYOUT_5POINT0_BACK },
{ "5.0(side)", 5, AV_CH_LAYOUT_5POINT0 },
{ "4.1", 5, AV_CH_LAYOUT_4POINT1 },
{ "5.1", 6, AV_CH_LAYOUT_5POINT1_BACK },
{ "5.1(side)", 6, AV_CH_LAYOUT_5POINT1 },
{ "6.0", 6, AV_CH_LAYOUT_6POINT0 },
{ "6.0(front)", 6, AV_CH_LAYOUT_6POINT0_FRONT },
{ "hexagonal", 6, AV_CH_LAYOUT_HEXAGONAL },
{ "6.1", 7, AV_CH_LAYOUT_6POINT1 },
{ "6.1", 7, AV_CH_LAYOUT_6POINT1_BACK },
{ "6.1(front)", 7, AV_CH_LAYOUT_6POINT1_FRONT },
{ "7.0", 7, AV_CH_LAYOUT_7POINT0 },
{ "7.0(front)", 7, AV_CH_LAYOUT_7POINT0_FRONT },
{ "7.1", 8, AV_CH_LAYOUT_7POINT1 },
{ "7.1(wide)", 8, AV_CH_LAYOUT_7POINT1_WIDE },
{ "octagonal", 8, AV_CH_LAYOUT_OCTAGONAL },
{ "downmix", 2, AV_CH_LAYOUT_STEREO_DOWNMIX, },
};

FFMPEG宏定义

Audio channel convenience macros

 /* AV_CH_LAYOUT_* convenience macros (from libavutil): each layout is a
  * bit mask of individual AV_CH_* speaker-position flags, built up from
  * the simpler layouts. */
 #define AV_CH_LAYOUT_MONO              (AV_CH_FRONT_CENTER)
 #define AV_CH_LAYOUT_STEREO            (AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT)
 #define AV_CH_LAYOUT_2POINT1           (AV_CH_LAYOUT_STEREO|AV_CH_LOW_FREQUENCY)
 #define AV_CH_LAYOUT_2_1               (AV_CH_LAYOUT_STEREO|AV_CH_BACK_CENTER)
 #define AV_CH_LAYOUT_SURROUND          (AV_CH_LAYOUT_STEREO|AV_CH_FRONT_CENTER)
 #define AV_CH_LAYOUT_3POINT1           (AV_CH_LAYOUT_SURROUND|AV_CH_LOW_FREQUENCY)
 #define AV_CH_LAYOUT_4POINT0           (AV_CH_LAYOUT_SURROUND|AV_CH_BACK_CENTER)
 #define AV_CH_LAYOUT_4POINT1           (AV_CH_LAYOUT_4POINT0|AV_CH_LOW_FREQUENCY)
 #define AV_CH_LAYOUT_2_2               (AV_CH_LAYOUT_STEREO|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT)
 #define AV_CH_LAYOUT_QUAD              (AV_CH_LAYOUT_STEREO|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT)
 #define AV_CH_LAYOUT_5POINT0           (AV_CH_LAYOUT_SURROUND|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT)
 #define AV_CH_LAYOUT_5POINT1           (AV_CH_LAYOUT_5POINT0|AV_CH_LOW_FREQUENCY)
 #define AV_CH_LAYOUT_5POINT0_BACK      (AV_CH_LAYOUT_SURROUND|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT)
 #define AV_CH_LAYOUT_5POINT1_BACK      (AV_CH_LAYOUT_5POINT0_BACK|AV_CH_LOW_FREQUENCY)
 #define AV_CH_LAYOUT_6POINT0           (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_CENTER)
 #define AV_CH_LAYOUT_6POINT0_FRONT     (AV_CH_LAYOUT_2_2|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER)
 #define AV_CH_LAYOUT_HEXAGONAL         (AV_CH_LAYOUT_5POINT0_BACK|AV_CH_BACK_CENTER)
 #define AV_CH_LAYOUT_6POINT1           (AV_CH_LAYOUT_5POINT1|AV_CH_BACK_CENTER)
 #define AV_CH_LAYOUT_6POINT1_BACK      (AV_CH_LAYOUT_5POINT1_BACK|AV_CH_BACK_CENTER)
 #define AV_CH_LAYOUT_6POINT1_FRONT     (AV_CH_LAYOUT_6POINT0_FRONT|AV_CH_LOW_FREQUENCY)
 #define AV_CH_LAYOUT_7POINT0           (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT)
 #define AV_CH_LAYOUT_7POINT0_FRONT     (AV_CH_LAYOUT_5POINT0|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER)
 #define AV_CH_LAYOUT_7POINT1           (AV_CH_LAYOUT_5POINT1|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT)
#define AV_CH_LAYOUT_7POINT1_WIDE      (AV_CH_LAYOUT_5POINT1|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER)
#define AV_CH_LAYOUT_7POINT1_WIDE_BACK (AV_CH_LAYOUT_5POINT1_BACK|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER)
#define AV_CH_LAYOUT_OCTAGONAL         (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_LEFT|AV_CH_BACK_CENTER|AV_CH_BACK_RIGHT)
#define AV_CH_LAYOUT_STEREO_DOWNMIX    (AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT)

SDL宏定义

SDL_AudioSpec format

AUDIO_U8           Unsigned 8-bit samples
AUDIO_S8            Signed 8-bit samples
AUDIO_U16LSB    Unsigned 16-bit samples, in little-endian byte order
AUDIO_S16LSB    Signed 16-bit samples, in little-endian byte order
AUDIO_U16MSB    Unsigned 16-bit samples, in big-endian byte order
AUDIO_S16MSB    Signed 16-bit samples, in big-endian byte order
AUDIO_U16           same as AUDIO_U16LSB (for backwards compatibility)
AUDIO_S16           same as AUDIO_S16LSB (for backwards compatibility)
AUDIO_U16SYS    Unsigned 16-bit samples, in system byte order
AUDIO_S16SYS     Signed 16-bit samples, in system byte order

git clone https://github.com/lnmcc/musicPlayer.git

天才de睡眠排序算法

#!/bin/bash
# "Sleep sort": spawn one background job per argument; each job sleeps
# for its value in seconds and then echoes it, so smaller numbers are
# printed first.
emit_after_delay() {
    sleep "$1"
    echo "$1"
}
until [ -z "$1" ]
do
    emit_after_delay "$1" &
    shift
done

使用方法

./sleepsort.bash 5 3 6 3 6 3 1 4 7

ref:http://dis.4chan.org/read/prog/1295544154

JPEG转换OpenGL Texture

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <jpeglib.h>
#include <GL/gl.h>
#include <GL/glut.h>

/*
 * Decode a JPEG file into a contiguous byte buffer of scanlines, suitable
 * for uploading as an OpenGL texture (e.g. via glTexImage2D()).
 *
 * Fixes vs. original: the file variable was spelled three different ways
 * (jpegFile / jpegfile / jpefile), so it did not compile; the decompress
 * object leaked on the fopen-failure path; malloc results were unchecked;
 * and texturebuf was never freed.
 */
void LoadJpgTextureGL(char *file)
{
	FILE *jpegfile;
	int sizebuf;                /* bytes per decoded scanline */
	unsigned char *buffer;      /* one scanline */
	unsigned char *texturebuf;  /* the whole decoded image */
	unsigned char *tempjpg;
	struct jpeg_decompress_struct cinfo;
	struct jpeg_error_mgr jerr;

	cinfo.err = jpeg_std_error(&jerr);
	jpeg_create_decompress(&cinfo);

	if((jpegfile = fopen(file, "rb")) == NULL)
	{
		perror("fopen jpeg");
		jpeg_destroy_decompress(&cinfo); /* don't leak the decompressor */
		return;
	}
	jpeg_stdio_src(&cinfo, jpegfile);
	jpeg_read_header(&cinfo, TRUE);
	jpeg_start_decompress(&cinfo);

	sizebuf = cinfo.output_width * cinfo.output_components;
	buffer = (unsigned char *)malloc(sizebuf);
	texturebuf = (unsigned char *)malloc(sizebuf * cinfo.output_height);
	if(buffer == NULL || texturebuf == NULL)
	{
		perror("malloc");
		free(buffer);
		free(texturebuf);
		jpeg_destroy_decompress(&cinfo);
		fclose(jpegfile);
		return;
	}
	/* Decode scanline by scanline, appending each into texturebuf. */
	tempjpg = texturebuf;
	while(cinfo.output_scanline < cinfo.output_height)
	{
		jpeg_read_scanlines(&cinfo, &buffer, 1);
		memcpy(texturebuf, buffer, sizebuf);
		texturebuf += sizebuf;
	}
	texturebuf = tempjpg;
	/* do something, e.g. glTexImage2D() */

	jpeg_finish_decompress(&cinfo);
	jpeg_destroy_decompress(&cinfo);

	free(buffer);
	free(texturebuf); /* was leaked in the original */
	fclose(jpegfile);
}