Testing the mid-black method


#1:

mediainfo ac.mp4

General

File size                                : 11.7 MiB

Duration                                 : 1mn 19s

Overall bit rate                         : 1 229 Kbps

For the instructions, please see: http://www.cnbeining.com/2014/04/reprint-sina-another-approach-the-black-law/

mediainfo acblack.flv

General

File size                                : 11.6 MiB

Duration                                 : 7mn 6s

Overall bit rate                         : 227 Kbps

http://video.sina.com.cn/v/b/132154892-1062265712.html

#2:

mediainfo a.mp4

File size                                : 134 MiB

Duration                                 : 24mn 16s

Overall bit rate                         : 771 Kbps

Transcoding succeeded. Whether it passes review is down to luck.
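The figures above are exactly what the definition of overall bit rate (total size divided by total duration) predicts: the black padding adds almost no size but a lot of duration, so the average drops. A rough sanity check of comment #1's numbers (approximate only, since mediainfo rounds both the size and the duration):

def overall_kbps(size_mib, seconds):
    """Overall bit rate in Kbps from a file size in MiB and a duration in seconds."""
    return size_mib * 1024 * 1024 * 8 / seconds / 1000.0

print(overall_kbps(11.7, 1 * 60 + 19))   # ac.mp4:      ~1242 Kbps (mediainfo says 1 229)
print(overall_kbps(11.6, 7 * 60 + 6))    # acblack.flv:  ~228 Kbps (mediainfo says 227)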

Biligrab 0.63: hotfix for the title sometimes not being fetched

Strictly speaking this should use XML parsing, but I don't think it's worth the trouble just yet.
Updating is recommended.
Same place as always: https://gist.github.com/superwbd/9605757
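For reference, if the lookup were switched to XML parsing as mentioned above, it could look roughly like the sketch below. This is only an illustration, not part of the released script; it assumes the view API response carries <cid>, <partname> and <title> elements, which is what the line-slicing in find_cid_api below relies on anyway.

from xml.dom.minidom import parseString
import urllib2

def find_cid_xml(vid, p):
    """Sketch: read cid/partname/title from the view API with minidom
    instead of slicing fixed character offsets out of each line."""
    biliurl = 'http://api.bilibili.tv/view?type=xml&appkey=876fe0ebd0e67a0f&id=' + str(vid) + '&page=' + str(p)
    dom = parseString(urllib2.urlopen(urllib2.Request(biliurl)).read())
    def text_of(tag):
        nodes = dom.getElementsByTagName(tag)
        if not nodes:
            return ''
        # join text and CDATA children so either form of the response works
        return ''.join(child.nodeValue for child in nodes[0].childNodes if child.nodeValue).strip()
    return text_of('cid'), text_of('partname'), text_of('title')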
 

'''
Biligrab 0.63
Beining@ACICFG
cnbeining[at]gmail.com
http://www.cnbeining.com
MIT licence
'''
import sys
import os
from StringIO import StringIO
import gzip
import urllib2
import sys
import commands
from xml.dom.minidom import parse, parseString
import xml.dom.minidom
reload(sys)
sys.setdefaultencoding('utf-8')
global vid
global cid
global partname
global title
global videourl
global part_now
def list_del_repeat(list):
    """delete repeating items in a list, and keep the order.
    http://www.cnblogs.com/infim/archive/2011/03/10/1979615.html"""
    l2 = []
    [l2.append(i) for i in list if not i in l2]
    return(l2)
#----------------------------------------------------------------------
def find_cid_api(vid, p):
    """find cid and print video detail"""
    global cid
    global partname
    global title
    global videourl
    cid = 0
    title = ''
    partname = ''
    biliurl = 'http://api.bilibili.tv/view?type=xml&appkey=876fe0ebd0e67a0f&id=' + str(vid) + '&page=' + str(p)
    videourl = 'http://www.bilibili.tv/video/av'+ str(vid)+'/index_'+ str(p)+'.html'
    print('Fetching webpage...')
    try:
        request = urllib2.Request(biliurl, headers={ 'User-Agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.63 Safari/537.36', 'Cache-Control': 'no-cache', 'Pragma': 'no-cache' })
        response = urllib2.urlopen(request)
        data = response.read()
        data_list = data.split('\n')
        for lines in data_list:
            if 'cid' in lines:
                cid = lines[7:-6]
                print('cid is ' + str(cid))
                break
        for lines in data_list:
            if 'partname' in lines:
                partname = lines[12:-11]
                print('partname is ' + str(partname))
                break
        for lines in data_list:
            if 'title' in lines:
                title = lines[9:-8]
                print('title is ' + str(title))
                break
    except:  #If API failed
        print('ERROR: Cannot connect to API server!')
#----------------------------------------------------------------------
def find_cid_flvcd(videourl):
    """Fallback: scrape the cid directly from the video page when the API lookup fails."""
    global vid
    global cid
    global partname
    global title
    print('Fetching webpage via Flvcd...')
    request = urllib2.Request(videourl, headers={ 'User-Agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.63 Safari/537.36', 'Cache-Control': 'no-cache', 'Pragma': 'no-cache' })
    request.add_header('Accept-encoding', 'gzip')
    response = urllib2.urlopen(request)
    if response.info().get('Content-Encoding') == 'gzip':
        buf = StringIO( response.read())
        f = gzip.GzipFile(fileobj=buf)
        data = f.read()
    data_list = data.split('\n')
    #Todo: read title
    for lines in data_list:
        if 'cid=' in lines:
            cid = lines.split('&')
            cid = cid[0].split('=')
            cid = cid[-1]
            print('cid is ' + str(cid))
            break
#----------------------------------------------------------------------
def main(vid, p, oversea):
    global cid
    global partname
    global title
    global videourl
    biliurl = 'http://api.bilibili.tv/view?type=xml&appkey=876fe0ebd0e67a0f&id=' + str(vid) + '&page=' + str(p)
    videourl = 'http://www.bilibili.tv/video/av'+ str(vid)+'/index_'+ str(p)+'.html'
    output = commands.getstatusoutput('ffmpeg --help')
    if str(output[0]) == '32512':
        print('FFmpeg does not exist! Trying to get you a binary, need root...')
        os.system('sudo curl -o /usr/bin/ffmpeg https://raw.githubusercontent.com/superwbd/ABPlayerHTML5-Py--nix/master/ffmpeg')
    output = commands.getstatusoutput('aria2c --help')
    if str(output[0]) == '32512':
        print('aria2c does not exist! Trying to get you a binary, need root... Thanks for @MartianZ \'s work.')
        os.system('sudo curl -o /usr/bin/aria2c https://raw.githubusercontent.com/MartianZ/fakeThunder/master/fakeThunder/aria2c')
    find_cid_api(vid, p)
    global cid
    if cid == 0:
        print('Cannot find cid, trying to do it brutely...')
        find_cid_flvcd(videourl)
    if cid == 0:
        is_black3 = str(raw_input('Strange, still cannot find cid... Type y for trying the unpredictable way, or input the cid by yourself, press ENTER to quit.'))
        if 'y' in str(is_black3):
            vid = int(vid) - 1
            p = 1
            find_cid_api(vid, p)  # vid was already decremented above
            cid = str(int(cid) + 1)  # guess: the previous video's last cid, plus one
        elif str(is_black3) == '':
            print('Cannot get cid anyway! Quit.')
            exit()
        else:
            cid = str(is_black3)
    #start to make folders...
    if title != '':
        folder = title
    else:
        folder = cid
    if partname != '':
        filename = partname
    elif title != '':
        filename = title
    else:
        filename = cid
    folder_to_make = os.getcwd() + '/' + folder
    if not os.path.exists(folder_to_make):
        os.makedirs(folder_to_make)
    os.chdir(folder_to_make)
    print('Fetching XML...')
    os.system('curl -o "'+filename+'.xml" --compressed  http://comment.bilibili.cn/'+cid+'.xml')
    #os.system('gzip -d '+cid+'.xml.gz')
    print('The XML file, ' + filename + '.xml should be ready...enjoy!')
    print('Finding video location...')
    #try api
    if oversea == '1':
        try:
            request = urllib2.Request('http://interface.bilibili.cn/v_cdn_play?cid='+cid, headers={ 'User-Agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.63 Safari/537.36', 'Cache-Control': 'no-cache', 'Pragma': 'no-cache' })
        except:
            print('ERROR: Cannot connect to API server!')
    else:
        try:
            request = urllib2.Request('http://interface.bilibili.tv/playurl?cid='+cid, headers={ 'User-Agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.63 Safari/537.36', 'Cache-Control': 'no-cache', 'Pragma': 'no-cache' })
        except:
            print('ERROR: Cannot connect to API server!')
    response = urllib2.urlopen(request)
    data = response.read()
    #print(data_list)
    rawurl = []
    dom = parseString(data)
    for node in dom.getElementsByTagName('url'):
        if node.parentNode.tagName == "durl":
            rawurl.append(node.toxml()[14:-9])
            #print(str(node.toxml()[14:-9]))
        pass
    if rawurl == []:  #hope this never happens
        request = urllib2.Request('http://www.flvcd.com/parse.php?kw='+videourl, headers={ 'User-Agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.63 Safari/537.36', 'Cache-Control': 'no-cache', 'Pragma': 'no-cache' })
        request.add_header('Accept-encoding', 'gzip')
        response = urllib2.urlopen(request)
        data = response.read()
        data_list = data.split('\n')
        for items in data_list:
            if 'name' in items and 'inf' in items and 'input' in items:
                c = items
                rawurl = c[39:-5]
                rawurl = rawurl.split('|')
                break
    #print(rawurl)
    vid_num = len(rawurl)
    #print(rawurl)
    print(str(vid_num) + ' videos in part ' + str(part_now) + ' to download, fetch yourself a cup of coffee...')
    for i in range(vid_num):
        print('Downloading ' + str(i+1) + ' of ' + str(vid_num) + ' videos in part ' + str(part_now) + '...')
        #print('aria2c -llog.txt -c -s16 -x16 -k1M --out '+str(i)+'.flv "'+rawurl[i]+'"')
        os.system('aria2c -larialog.txt -c -s16 -x16 -k1M --out '+str(i)+'.flv "'+rawurl[i]+'"')
        #os.system('aria2c -larialog.txt -c -s16 -x16 -k1M --out '+str(i)+'.flv "'+rawurl[i]+'"')
        #not debugging, not fun.
    f = open('ff.txt', 'w')
    ff = ''
    os.getcwd()
    for i in range(vid_num):
        ff = ff + 'file \'' + str(os.getcwd()) + '/'+ str(i) + '.flv\'\n'
    ff = ff.encode("utf8")
    f.write(ff)
    f.close()
    print('Concating videos...')
    os.system('ffmpeg -f concat -i ff.txt -c copy "'+filename+'".mp4')
    os.system('rm -r ff.txt')
    for i in range(vid_num):
        os.system('rm -r '+str(i)+'.flv')
    print('Done, enjoy yourself!')
    #
vid = str(raw_input('av'))
p_raw = str(raw_input('P'))
oversea = str(input('Oversea?'))
p_list = []
p_raw = p_raw.split(',')
for item in p_raw:
    if '~' in item:
        #print(item)
        lower = 0
        higher = 0
        item = item.split('~')
        try:
            lower = int(item[0])
        except:
            print('Cannot read lower!')
        try:
            higher = int(item[1])
        except:
            print('Cannot read higher!')
        if lower == 0 or higher == 0:
            if lower == 0 and higher != 0:
                lower = higher
            elif lower != 0 and higher == 0:
                higher = lower
            else:
                print('Cannot find any higher or lower, ignoring...')
                #break
        mid = 0
        if higher < lower:
            mid = higher
            higher = lower
            lower = mid
        p_list.append(lower)
        while lower < higher:
            lower = lower + 1
            p_list.append(lower)
        #break
    else:
        try:
            p_list.append(int(item))
        except:
            print('Cannot read "'+str(item)+'", abandon it.')
            #break
p_list = list_del_repeat(p_list)
part_now = '0'
print(p_list)
for p in p_list:
    part_now = str(p)
    main(vid, p, oversea)
exit()
'''
        data_list = data.split('\r')
        for lines in data_list:
            lines = str(lines)
            if '<url>' in lines:
                if 'youku'  in lines:
                    url = lines[17:-9]
                elif 'sina' in lines:
                    url = lines[16:-9]
                elif 'qq.com' in lines:
                    url = lines[17:-9]
                elif 'letv.com' in lines:
                    url = lines[17:-9]
                    break
                elif 'acgvideo' in lines:
                    url = lines[17:-9]
                    is_local = 1
                rawurl.append(url)
            if 'backup_url' in lines and is_local is 1:
                break'''

 

Looking for analysis: where do these sources come from?


http://interface.bilibili.cn/v_cdn_play?cid=1372959

<video>
<result>suee</result>
<timelength>2699638</timelength>
<framecount>67490950</framecount>
<src>400</src>
<stream>
<![CDATA[ cloud ]]>
</stream>
<letv-args>
<![CDATA[ ] ]]>
</letv-args>
<from>
<![CDATA[ sina ]]>
</from>
<vround>20</vround>
<ext>125559448</ext>
<ad>
<![CDATA[ ]]>
</ad>
<vstr>
<![CDATA[ 0a13 ]]>
</vstr>
<vip>
<![CDATA[ 16777343 ]]>
</vip>
<durl>
<order>1</order>
<length>361067</length>
<url>
<![CDATA[
http://119.188.72.54:8080/6/t/433/177/125559494.hlv?type=flash
]]>
</url>
</durl>
<durl>
<order>2</order>
<length>360072</length>
<url>
<![CDATA[
http://edge.v.iask.com.lxdns.com/125559699.hlv?KID=sina,viask&Expires=1398528000&ssig=8%2B7skCArq2
]]>
</url>
</durl>
<durl>
<order>3</order>
<length>364878</length>
<url>
<![CDATA[
http://119.188.72.25:8080/vod/1/t/712/200/125559496.hlv?type=flash
]]>
</url>
</durl>
<durl>
<order>4</order>
<length>360019</length>
<url>
<![CDATA[
http://119.188.72.23:8080/vod/1/t/917/149/125559701.hlv?type=flash
]]>
</url>
</durl>
<durl>
<order>5</order>
<length>360919</length>
<url>
<![CDATA[
http://218.9.147.205:8080/5/t/209/209/125559703.hlv?type=flash
]]>
</url>
</durl>
<durl>
<order>6</order>
<length>363028</length>
<url>
<![CDATA[
http://edge.v.iask.com.lxdns.com/125559502.hlv?KID=sina,viask&Expires=1398528000&ssig=1eqMZG0%2F1%2F
]]>
</url>
</durl>
<durl>
<order>7</order>
<length>361118</length>
<url>
<![CDATA[
http://119.188.72.50:8080/vod/10/t/921/153/125559705.hlv?type=flash
]]>
</url>
</durl>
<durl>
<order>8</order>
<length>168520</length>
<url>
<![CDATA[
http://edge.v.iask.com.lxdns.com/125559707.hlv?KID=sina,viask&Expires=1398528000&ssig=IEUkaMlxtC
]]>
</url>
</durl>
</video>

<![CDATA[ cloud ]]>
From this you can tell it is not a Bilibili-hosted source, and flvcd can resolve this kind of thing too, but it really does not look like ChinaCache's CDN.
Also, the address should originally be http://edge.v.iask.com/125559707.hlv?KID=sina,viask&Expires=1398528000&ssig=IEUkaMlxtC, which 302s to http://edge.v.iask.com.lxdns.com/125559707.hlv?KID=sina,viask&Expires=1398528000&ssig=IEUkaMlxtC&corp=2, then 302s again to http://171.111.152.31/edge.v.iask.com/125559707.hlv?KID=sina,viask&Expires=1398528000&ssig=IEUkaMlxtC&corp=2&wshc_tag=0&wsiphost=ipdbm, and finally returns 200.
http://edge.v.iask.com.lxdns.com/125559707.hlv?KID=sina,viask&Expires=1398528000&ssig=IEUkaMlxtC is missing a parameter compared with that chain.
http://119.188.72.50:8080/vod/10/t/921/153/125559705.hlv?type=flash - why port 8080?
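One way to check that redirect chain is to stop urllib2 from following redirects and print each hop by hand. A rough sketch (the signed URL from the dump above has long expired, so it is only for illustration):

import urllib2

class NoRedirect(urllib2.HTTPRedirectHandler):
    """Turn 3xx responses into HTTPError so each hop can be inspected by hand."""
    def http_error_302(self, req, fp, code, msg, headers):
        raise urllib2.HTTPError(req.get_full_url(), code, msg, headers, fp)
    http_error_301 = http_error_303 = http_error_307 = http_error_302

def trace(url, max_hops=5):
    opener = urllib2.build_opener(NoRedirect())
    for _ in range(max_hops):
        try:
            response = opener.open(url)
            print(str(response.getcode()) + '  ' + url)
            return
        except urllib2.HTTPError as e:
            print(str(e.code) + '  ' + url)
            if e.code in (301, 302, 303, 307):
                url = e.hdrs['Location']
            else:
                return

trace('http://edge.v.iask.com/125559707.hlv?KID=sina,viask&Expires=1398528000&ssig=IEUkaMlxtC')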
 
Thoughts and discussion welcome.
 

Rehashing old news: parsing Acfun's API


First you still need to get the aid.
I honestly have no idea how to fetch the aid through an API; I could not find any implementation that does.
So just grab it with urllib.
Then there is the video-info API:
http://www.acfun.com/video/getVideo.aspx?id=174887
Well, well - it hands over all the video info.
The familiar ykid, and the vid is there as well. Source leechers say heh heh heh.
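Fetching it is trivial; a minimal sketch that just prints whatever getVideo.aspx returns, with no assumptions about the exact field layout beyond what is described above:

import urllib2

def acfun_video_info(video_id):
    """Fetch the raw getVideo.aspx response for a given id so the fields can be inspected."""
    url = 'http://www.acfun.com/video/getVideo.aspx?id=' + str(video_id)
    request = urllib2.Request(url, headers={'User-Agent': 'Mozilla/5.0'})
    return urllib2.urlopen(request).read()

print(acfun_video_info(174887))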
Continuing from last time, the Sina resolver API:
http://2dland.sinaapp.com/video.php?action=xml&type=acfun_youku&vid=XNTQxMDkzMDky
http://2dland.sinaapp.com/video.php?action=xml&type=youku&vid=XNTQxMDkzMDky
Still reading? That's all.
 

Biligrab 0.6: rewrote the video URL fetching logic, fixed broken downloads from some self-hosted sources

 
XML is needed after all.
The video source fetching was rewritten, so downloads from Bilibili's own servers are reliable now.
Same place as always: https://gist.github.com/superwbd/9605757
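The core of the rewrite is in main() below: instead of slicing strings, it walks the playurl XML with minidom and keeps every <url> that sits under a <durl>. The same idea as a standalone sketch, reading the node text rather than slicing toxml():

from xml.dom.minidom import parseString

def extract_durl_urls(xml_data):
    """Collect the text of every <url> element inside a <durl> block,
    whether the content is plain text or a CDATA section."""
    urls = []
    dom = parseString(xml_data)
    for node in dom.getElementsByTagName('url'):
        if node.parentNode.tagName == 'durl':
            text = ''.join(child.nodeValue for child in node.childNodes if child.nodeValue)
            urls.append(text.strip())
    return urls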

'''
Biligrab 0.6
Beining@ACICFG
cnbeining[at]gmail.com
MIT licence
'''
import sys
import os
from StringIO import StringIO
import gzip
import urllib2
import sys
import commands
from xml.dom.minidom import parse, parseString
import xml.dom.minidom
reload(sys)
sys.setdefaultencoding('utf-8')
global vid
global cid
global partname
global title
global videourl
global part_now
def list_del_repeat(list):
    """delete repeating items in a list, and keep the order.
    http://www.cnblogs.com/infim/archive/2011/03/10/1979615.html"""
    l2 = []
    [l2.append(i) for i in list if not i in l2]
    return(l2)
#----------------------------------------------------------------------
def find_cid_api(vid, p):
    """find cid and print video detail"""
    global cid
    global partname
    global title
    global videourl
    cid = 0
    title = ''
    partname = ''
    biliurl = 'http://api.bilibili.tv/view?type=xml&appkey=876fe0ebd0e67a0f&id=' + str(vid) + '&page=' + str(p)
    videourl = 'http://www.bilibili.tv/video/av'+ str(vid)+'/index_'+ str(p)+'.html'
    print('Fetching webpage...')
    try:
        request = urllib2.Request(biliurl, headers={ 'User-Agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.63 Safari/537.36', 'Cache-Control': 'no-cache', 'Pragma': 'no-cache' })
        response = urllib2.urlopen(request)
        data = response.read()
        data_list = data.split('\n')
        for lines in data_list:
            if 'cid' in lines:
                cid = lines[7:-6]
                print('cid is ' + str(cid))
            if 'partname' in lines:
                partname = lines[12:-11]
                print('partname is ' + str(partname))
            if 'title' in lines:
                title = lines[9:-8]
                print('title is ' + str(title))
    except:  #If API failed
        print('ERROR: Cannot connect to API server!')
#----------------------------------------------------------------------
def find_cid_flvcd(videourl):
    """Fallback: scrape the cid directly from the video page when the API lookup fails."""
    global vid
    global cid
    global partname
    global title
    print('Fetching webpage via Flvcd...')
    request = urllib2.Request(videourl, headers={ 'User-Agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.63 Safari/537.36', 'Cache-Control': 'no-cache', 'Pragma': 'no-cache' })
    request.add_header('Accept-encoding', 'gzip')
    response = urllib2.urlopen(request)
    if response.info().get('Content-Encoding') == 'gzip':
        buf = StringIO( response.read())
        f = gzip.GzipFile(fileobj=buf)
        data = f.read()
    data_list = data.split('\n')
    #Todo: read title
    for lines in data_list:
        if 'cid=' in lines:
            cid = lines.split('&')
            cid = cid[0].split('=')
            cid = cid[-1]
            print('cid is ' + str(cid))
            break
#----------------------------------------------------------------------
def main(vid, p, oversea):
    global cid
    global partname
    global title
    global videourl
    output = commands.getstatusoutput('ffmpeg --help')
    if str(output[0]) == '32512':
        print('FFmpeg does not exist! Trying to get you a binary, need root...')
        os.system('sudo curl -o /usr/bin/ffmpeg https://raw.githubusercontent.com/superwbd/ABPlayerHTML5-Py--nix/master/ffmpeg')
    output = commands.getstatusoutput('aria2c --help')
    if str(output[0]) == '32512':
        print('aria2c does not exist! Trying to get you a binary, need root... Thanks for @MartianZ \'s work.')
        os.system('sudo curl -o /usr/bin/aria2c https://raw.githubusercontent.com/MartianZ/fakeThunder/master/fakeThunder/aria2c')
    find_cid_api(vid, p)
    global cid
    if cid == 0:
        print('Cannot find cid, trying to do it brutely...')
        find_cid_flvcd(videourl)
    if cid == 0:
        print('Strange, still cannot find cid... One last try, unpredictable')
        vid = int(vid) - 1
        p = 1
        find_cid_api(vid, p)  # vid was already decremented above
        cid = str(int(cid) + 1)  # guess: the previous video's last cid, plus one
    if cid == 0:
        cid = str(input('Cannot get cid anyway! If you know the cid, please type it in here, or I will just quit.'))
        exit()
    #start to make folders...
    if title != '':
        folder = title
    else:
        folder = cid
    if partname != '':
        filename = partname
    elif title != '':
        filename = title
    else:
        filename = cid
    folder_to_make = os.getcwd() + '/' + folder
    if not os.path.exists(folder_to_make):
        os.makedirs(folder_to_make)
    os.chdir(folder_to_make)
    print('Fetching XML...')
    os.system('curl -o "'+filename+'.xml" --compressed  http://comment.bilibili.cn/'+cid+'.xml')
    #os.system('gzip -d '+cid+'.xml.gz')
    print('The XML file, ' + filename + '.xml should be ready...enjoy!')
    print('Finding video location...')
    #try api
    if oversea == '1':
        try:
            request = urllib2.Request('http://interface.bilibili.cn/v_cdn_play?cid='+cid, headers={ 'User-Agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.63 Safari/537.36', 'Cache-Control': 'no-cache', 'Pragma': 'no-cache' })
        except:
            print('ERROR: Cannot connect to API server!')
    else:
        try:
            request = urllib2.Request('http://interface.bilibili.tv/playurl?cid='+cid, headers={ 'User-Agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.63 Safari/537.36', 'Cache-Control': 'no-cache', 'Pragma': 'no-cache' })
        except:
            print('ERROR: Cannot connect to API server!')
    response = urllib2.urlopen(request)
    data = response.read()
    #print(data_list)
    rawurl = []
    dom = parseString(data)
    for node in dom.getElementsByTagName('url'):
        if node.parentNode.tagName == "durl":
            rawurl.append(node.toxml()[14:-9])
            #print(str(node.toxml()[14:-9]))
        pass
    if rawurl == []:  #hope this never happens
        request = urllib2.Request('http://www.flvcd.com/parse.php?kw='+videourl, headers={ 'User-Agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.63 Safari/537.36', 'Cache-Control': 'no-cache', 'Pragma': 'no-cache' })
        request.add_header('Accept-encoding', 'gzip')
        response = urllib2.urlopen(request)
        data = response.read()
        data_list = data.split('\n')
        for items in data_list:
            if 'name' in items and 'inf' in items and 'input' in items:
                c = items
                rawurl = c[39:-5]
                rawurl = rawurl.split('|')
                break
    #print(rawurl)
    vid_num = len(rawurl)
    #print(rawurl)
    print(str(vid_num) + ' videos in part ' + str(part_now) + ' to download, fetch yourself a cup of coffee...')
    for i in range(vid_num):
        print('Downloading ' + str(i+1) + ' of ' + str(vid_num) + ' videos in part ' + str(part_now) + '...')
        #print('aria2c -llog.txt -c -s16 -x16 -k1M --out '+str(i)+'.flv "'+rawurl[i]+'"')
        os.system('aria2c -larialog.txt -c -s16 -x16 -k1M --out '+str(i)+'.flv "'+rawurl[i]+'"')
        #os.system('aria2c -larialog.txt -c -s16 -x16 -k1M --out '+str(i)+'.flv "'+rawurl[i]+'"')
        #not debugging, not fun.
    f = open('ff.txt', 'w')
    ff = ''
    os.getcwd()
    for i in range(vid_num):
        ff = ff + 'file \'' + str(os.getcwd()) + '/'+ str(i) + '.flv\'\n'
    ff = ff.encode("utf8")
    f.write(ff)
    f.close()
    print('Concating videos...')
    os.system('ffmpeg -f concat -i ff.txt -c copy "'+filename+'".mp4')
    os.system('rm -r ff.txt')
    for i in range(vid_num):
        os.system('rm -r '+str(i)+'.flv')
    print('Done, enjoy yourself!')
    exit()
vid = str(raw_input('av'))
p_raw = str(raw_input('P'))
oversea = str(input('Oversea?'))
p_list = []
p_raw = p_raw.split(',')
for item in p_raw:
    if '~' in item:
        #print(item)
        lower = 0
        higher = 0
        item = item.split('~')
        try:
            lower = int(item[0])
        except:
            print('Cannot read lower!')
        try:
            higher = int(item[1])
        except:
            print('Cannot read higher!')
        if lower == 0 or higher == 0:
            if lower == 0 and higher != 0:
                lower = higher
            elif lower != 0 and higher == 0:
                higher = lower
            else:
                print('Cannot find any higher or lower, ignoring...')
                break
        mid = 0
        if higher < lower:
            mid = higher
            higher = lower
            lower = mid
        p_list.append(lower)
        while lower < higher:
            lower = lower + 1
            p_list.append(lower)
        break
    try:
        p_list.append(int(item))
    except:
            print('Cannot read "'+str(item)+'", abandon it.')
        break
p_list = list_del_repeat(p_list)
part_now = '0'
for p in p_list:
    part_now = str(p)
    main(vid, p, oversea)
'''
        data_list = data.split('\r')
        for lines in data_list:
            lines = str(lines)
            if '<url>' in lines:
                if 'youku'  in lines:
                    url = lines[17:-9]
                elif 'sina' in lines:
                    url = lines[16:-9]
                elif 'qq.com' in lines:
                    url = lines[17:-9]
                elif 'letv.com' in lines:
                    url = lines[17:-9]
                    break
                elif 'acgvideo' in lines:
                    url = lines[17:-9]
                    is_local = 1
                rawurl.append(url)
            if 'backup_url' in lines and is_local is 1:
                break'''

 

Fighting crappy Sina natively on Linux and OS X!


I don't use RSS myself (never found a good reader... oh, my Google Reader), so when an old post gets updated, does RSS even notify you?
If it doesn't, I'll have to post a status update like this one.
This time it is a way to fight Sina natively on Linux and OS X, brought to you by dantmnf (http://danknest.org/).
Project: https://github.com/dantmnf/FlvPatcher/blob/master/blacker.sh
Usage:
First: chmod 777 ./blacker.sh
Then:
./blacker.sh -b 500000 /root/fff.mp4 /root/ff400.flv
-b takes the bitrate in bits, not kilobits - watch out! If the post-black target bitrate is higher than the original bitrate, it will refuse to run.
The input file must be something ffmpeg can convert straight to FLV.
Requires a recent ffmpeg, plus ffmsindex and mkvmerge.
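The -b value follows the same arithmetic as the mid-black tests earlier in this post: overall bitrate is size over duration, so the black padding has to stretch the duration far enough. A rough estimate of the required padding (a sketch that ignores the small amount of data the black frames themselves add):

def extra_black_seconds(size_bytes, duration_s, target_bps):
    """Seconds of black padding needed so that size/(duration+padding) drops to
    the target overall bitrate; the padding's own (tiny) size is ignored."""
    needed_duration = size_bytes * 8 / float(target_bps)
    return max(0.0, needed_duration - duration_s)

# Example with the numbers from comment #2 above (134 MiB, 24mn 16s) and the
# -b 500000 target from the usage line: roughly 13 more minutes of black.
print(extra_black_seconds(134 * 1024 * 1024, 24 * 60 + 16, 500000))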
 
Test video: http://video.sina.com.cn/v/b/131212996-1062265712.html
The original length is 4:54, and that is what the Sina player shows, but it has indeed been post-blacked.