Mirror of https://github.com/ytdl-org/youtube-dl.git (synced 2024-12-30 04:23:37 +00:00)
Compare commits
No commits in common. "b8f7389a712f0962a17927b8ce9557d50fb6a3ca" and "705925382023b3dc1253d7461e0a1e74553cec13" have entirely different histories.
b8f7389a71...7059253820
434
.github/workflows/ci.yml
vendored
@@ -1,445 +1,81 @@
|
||||
name: CI
|
||||
|
||||
env:
|
||||
all-cpython-versions: 2.6, 2.7, 3.2, 3.3, 3.4, 3.5, 3.6, 3.7, 3.8, 3.9, 3.10, 3.11, 3.12
|
||||
main-cpython-versions: 2.7, 3.2, 3.5, 3.9, 3.11
|
||||
pypy-versions: pypy-2.7, pypy-3.6, pypy-3.7
|
||||
cpython-versions: main
|
||||
test-set: core
|
||||
|
||||
on:
|
||||
push:
|
||||
inputs:
|
||||
cpython-versions:
|
||||
type: string
|
||||
default: all
|
||||
test-set:
|
||||
type: string
|
||||
default: core
|
||||
pull_request:
|
||||
inputs:
|
||||
cpython-versions:
|
||||
type: string
|
||||
default: main
|
||||
test-set:
|
||||
type: string
|
||||
default: both
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
cpython-versions:
|
||||
type: choice
|
||||
description: CPython versions (main = 2.7, 3.2, 3.5, 3.9, 3.11)
|
||||
options:
|
||||
- all
|
||||
- main
|
||||
required: true
|
||||
default: main
|
||||
test-set:
|
||||
type: choice
|
||||
description: core, download
|
||||
options:
|
||||
- both
|
||||
- core
|
||||
- download
|
||||
required: true
|
||||
default: both
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
on: [push, pull_request]
|
||||
jobs:
|
||||
select:
|
||||
name: Select tests from inputs
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
cpython-versions: ${{ steps.run.outputs.cpython-versions }}
|
||||
test-set: ${{ steps.run.outputs.test-set }}
|
||||
own-pip-versions: ${{ steps.run.outputs.own-pip-versions }}
|
||||
steps:
|
||||
- name: Make version array
|
||||
id: run
|
||||
run: |
|
||||
# Make a JSON Array from comma/space-separated string (no extra escaping)
|
||||
json_list() { \
|
||||
ret=""; IFS="${IFS},"; set -- $*; \
|
||||
for a in "$@"; do \
|
||||
ret=$(printf '%s"%s"' "${ret}${ret:+, }" "$a"); \
|
||||
done; \
|
||||
printf '[%s]' "$ret"; }
|
||||
tests="${{ inputs.test-set || env.test-set }}"
|
||||
[ $tests = both ] && tests="core download"
|
||||
printf 'test-set=%s\n' "$(json_list $tests)" >> "$GITHUB_OUTPUT"
|
||||
versions="${{ inputs.cpython-versions || env.cpython-versions }}"
|
||||
if [ "$versions" = all ]; then \
|
||||
versions="${{ env.all-cpython-versions }}"; else \
|
||||
versions="${{ env.main-cpython-versions }}"; \
|
||||
fi
|
||||
printf 'cpython-versions=%s\n' \
|
||||
"$(json_list ${versions}${versions:+, }${{ env.pypy-versions }})" >> "$GITHUB_OUTPUT"
|
||||
# versions with a special get-pip.py in a per-version subdirectory
|
||||
printf 'own-pip-versions=%s\n' \
|
||||
"$(json_list 2.6, 2.7, 3.2, 3.3, 3.4, 3.5, 3.6)" >> "$GITHUB_OUTPUT"
|
||||
tests:
|
||||
name: Run tests
|
||||
needs: select
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
name: Tests
|
||||
runs-on: ${{ matrix.os }}
|
||||
env:
|
||||
PIP: python -m pip
|
||||
PIP_DISABLE_PIP_VERSION_CHECK: true
|
||||
PIP_NO_PYTHON_VERSION_WARNING: true
|
||||
strategy:
|
||||
fail-fast: true
|
||||
matrix:
|
||||
os: [ubuntu-20.04]
|
||||
python-version: ${{ fromJSON(needs.select.outputs.cpython-versions) }}
|
||||
os: [ubuntu-18.04]
|
||||
# TODO: python 2.6
|
||||
python-version: [2.7, 3.3, 3.4, 3.5, 3.6, 3.7, 3.8, 3.9, pypy-2.7, pypy-3.6, pypy-3.7]
|
||||
python-impl: [cpython]
|
||||
ytdl-test-set: ${{ fromJSON(needs.select.outputs.test-set) }}
|
||||
ytdl-test-set: [core, download]
|
||||
run-tests-ext: [sh]
|
||||
include:
|
||||
# python 3.2 is only available on windows via setup-python
|
||||
- os: windows-2019
|
||||
python-version: 3.4
|
||||
python-version: 3.2
|
||||
python-impl: cpython
|
||||
ytdl-test-set: ${{ contains(needs.select.outputs.test-set, 'core') && 'core' || 'nocore' }}
|
||||
ytdl-test-set: core
|
||||
run-tests-ext: bat
|
||||
- os: windows-2019
|
||||
python-version: 3.4
|
||||
python-version: 3.2
|
||||
python-impl: cpython
|
||||
ytdl-test-set: ${{ contains(needs.select.outputs.test-set, 'download') && 'download' || 'nodownload' }}
|
||||
ytdl-test-set: download
|
||||
run-tests-ext: bat
|
||||
# jython
|
||||
- os: ubuntu-20.04
|
||||
python-version: 2.7
|
||||
- os: ubuntu-18.04
|
||||
python-impl: jython
|
||||
ytdl-test-set: ${{ contains(needs.select.outputs.test-set, 'core') && 'core' || 'nocore' }}
|
||||
ytdl-test-set: core
|
||||
run-tests-ext: sh
|
||||
- os: ubuntu-20.04
|
||||
python-version: 2.7
|
||||
- os: ubuntu-18.04
|
||||
python-impl: jython
|
||||
ytdl-test-set: ${{ contains(needs.select.outputs.test-set, 'download') && 'download' || 'nodownload' }}
|
||||
ytdl-test-set: download
|
||||
run-tests-ext: sh
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
#-------- Python 3 -----
|
||||
- name: Set up supported Python ${{ matrix.python-version }}
|
||||
id: setup-python
|
||||
if: ${{ matrix.python-impl == 'cpython' && matrix.python-version != '2.6' && matrix.python-version != '2.7' && matrix.python-version != '3.12'}}
|
||||
# wrap broken actions/setup-python@v4
|
||||
uses: ytdl-org/setup-python@v1
|
||||
- uses: actions/checkout@v2
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
uses: actions/setup-python@v2
|
||||
if: ${{ matrix.python-impl == 'cpython' }}
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
cache-build: true
|
||||
allow-build: info
|
||||
- name: Locate supported Python ${{ matrix.python-version }}
|
||||
if: ${{ env.pythonLocation }}
|
||||
shell: bash
|
||||
run: |
|
||||
echo "PYTHONHOME=${pythonLocation}" >> "$GITHUB_ENV"
|
||||
export expected="${{ steps.setup-python.outputs.python-path }}"
|
||||
dirname() { printf '%s\n' \
|
||||
'import os, sys' \
|
||||
'print(os.path.dirname(sys.argv[1]))' \
|
||||
| ${expected} - "$1"; }
|
||||
expd="$(dirname "$expected")"
|
||||
export python="$(command -v python)"
|
||||
[ "$expd" = "$(dirname "$python")" ] || echo "PATH=$expd:${PATH}" >> "$GITHUB_ENV"
|
||||
[ -x "$python" ] || printf '%s\n' \
|
||||
'import os' \
|
||||
'exp = os.environ["expected"]' \
|
||||
'python = os.environ["python"]' \
|
||||
'exps = os.path.split(exp)' \
|
||||
'if python and (os.path.dirname(python) == exp[0]):' \
|
||||
' exit(0)' \
|
||||
'exps[1] = "python" + os.path.splitext(exps[1])[1]' \
|
||||
'python = os.path.join(*exps)' \
|
||||
'try:' \
|
||||
' os.symlink(exp, python)' \
|
||||
'except AttributeError:' \
|
||||
' os.rename(exp, python)' \
|
||||
| ${expected} -
|
||||
printf '%s\n' \
|
||||
'import sys' \
|
||||
'print(sys.path)' \
|
||||
| ${expected} -
|
||||
#-------- Python 3.12 -
|
||||
- name: Set up CPython 3.12 environment
|
||||
if: ${{ matrix.python-impl == 'cpython' && matrix.python-version == '3.12' }}
|
||||
shell: bash
|
||||
run: |
|
||||
PYENV_ROOT=$HOME/.local/share/pyenv
|
||||
echo "PYENV_ROOT=${PYENV_ROOT}" >> "$GITHUB_ENV"
|
||||
- name: Cache Python 3.12
|
||||
id: cache312
|
||||
if: ${{ matrix.python-impl == 'cpython' && matrix.python-version == '3.12' }}
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
key: python-3.12
|
||||
path: |
|
||||
${{ env.PYENV_ROOT }}
|
||||
- name: Build and set up Python 3.12
|
||||
if: ${{ matrix.python-impl == 'cpython' && matrix.python-version == '3.12' && ! steps.cache312.outputs.cache-hit }}
|
||||
# dl and build locally
|
||||
shell: bash
|
||||
run: |
|
||||
# Install build environment
|
||||
sudo apt-get install -y build-essential llvm libssl-dev tk-dev \
|
||||
libncursesw5-dev libreadline-dev libsqlite3-dev \
|
||||
libffi-dev xz-utils zlib1g-dev libbz2-dev liblzma-dev
|
||||
# Download PyEnv from its GitHub repository.
|
||||
export PYENV_ROOT=${{ env.PYENV_ROOT }}
|
||||
export PATH=$PYENV_ROOT/bin:$PATH
|
||||
git clone "https://github.com/pyenv/pyenv.git" "$PYENV_ROOT"
|
||||
pyenv install 3.12.0b4
|
||||
- name: Locate Python 3.12
|
||||
if: ${{ matrix.python-impl == 'cpython' && matrix.python-version == '3.12' }}
|
||||
shell: bash
|
||||
run: |
|
||||
PYTHONHOME="${{ env.PYENV_ROOT }}/versions/3.12.0b4"
|
||||
echo "PYTHONHOME=$PYTHONHOME" >> "$GITHUB_ENV"
|
||||
echo "PATH=${PYTHONHOME}/bin:$PATH" >> "$GITHUB_ENV"
|
||||
#-------- Python 2.7 --
|
||||
- name: Set up Python 2.7
|
||||
if: ${{ matrix.python-impl == 'cpython' && matrix.python-version == '2.7' }}
|
||||
# install 2.7
|
||||
shell: bash
|
||||
run: |
|
||||
sudo apt-get install -y python2 python-is-python2
|
||||
echo "PYTHONHOME=/usr" >> "$GITHUB_ENV"
|
||||
#-------- Python 2.6 --
|
||||
- name: Set up Python 2.6 environment
|
||||
if: ${{ matrix.python-impl == 'cpython' && matrix.python-version == '2.6' }}
|
||||
shell: bash
|
||||
run: |
|
||||
openssl_name=openssl-1.0.2u
|
||||
echo "openssl_name=${openssl_name}" >> "$GITHUB_ENV"
|
||||
openssl_dir=$HOME/.local/opt/$openssl_name
|
||||
echo "openssl_dir=${openssl_dir}" >> "$GITHUB_ENV"
|
||||
PYENV_ROOT=$HOME/.local/share/pyenv
|
||||
echo "PYENV_ROOT=${PYENV_ROOT}" >> "$GITHUB_ENV"
|
||||
sudo apt-get install -y openssl ca-certificates
|
||||
- name: Cache Python 2.6
|
||||
id: cache26
|
||||
if: ${{ matrix.python-version == '2.6' }}
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
key: python-2.6.9
|
||||
path: |
|
||||
${{ env.openssl_dir }}
|
||||
${{ env.PYENV_ROOT }}
|
||||
- name: Build and set up Python 2.6
|
||||
if: ${{ matrix.python-impl == 'cpython' && matrix.python-version == '2.6' && ! steps.cache26.outputs.cache-hit }}
|
||||
# dl and build locally
|
||||
shell: bash
|
||||
run: |
|
||||
# Install build environment
|
||||
sudo apt-get install -y build-essential llvm libssl-dev tk-dev \
|
||||
libncursesw5-dev libreadline-dev libsqlite3-dev \
|
||||
libffi-dev xz-utils zlib1g-dev libbz2-dev liblzma-dev
|
||||
# Download and install OpenSSL 1.0.2, back in time
|
||||
openssl_name=${{ env.openssl_name }}
|
||||
openssl_targz=${openssl_name}.tar.gz
|
||||
openssl_dir=${{ env.openssl_dir }}
|
||||
openssl_inc=$openssl_dir/include
|
||||
openssl_lib=$openssl_dir/lib
|
||||
openssl_ssl=$openssl_dir/ssl
|
||||
curl -L "https://www.openssl.org/source/$openssl_targz" -o $openssl_targz
|
||||
tar -xf $openssl_targz
|
||||
( cd $openssl_name; \
|
||||
./config --prefix=$openssl_dir --openssldir=${openssl_dir}/ssl \
|
||||
--libdir=lib -Wl,-rpath=${openssl_dir}/lib shared zlib-dynamic && \
|
||||
make && \
|
||||
make install )
|
||||
rm -rf $openssl_name
|
||||
rmdir $openssl_ssl/certs && ln -s /etc/ssl/certs $openssl_ssl/certs
|
||||
# Download PyEnv from its GitHub repository.
|
||||
export PYENV_ROOT=${{ env.PYENV_ROOT }}
|
||||
export PATH=$PYENV_ROOT/bin:$PATH
|
||||
git clone "https://github.com/pyenv/pyenv.git" "$PYENV_ROOT"
|
||||
# Prevent pyenv build trying (and failing) to update pip
|
||||
export GET_PIP=get-pip-2.6.py
|
||||
echo 'import sys; sys.exit(0)' > ${GET_PIP}
|
||||
GET_PIP=$(realpath $GET_PIP)
|
||||
# Build and install Python
|
||||
export CFLAGS="-I$openssl_inc"
|
||||
export LDFLAGS="-L$openssl_lib"
|
||||
export LD_LIBRARY_PATH="$openssl_lib"
|
||||
pyenv install 2.6.9
|
||||
- name: Locate Python 2.6
|
||||
if: ${{ matrix.python-impl == 'cpython' && matrix.python-version == '2.6' }}
|
||||
shell: bash
|
||||
run: |
|
||||
PYTHONHOME="${{ env.PYENV_ROOT }}/versions/2.6.9"
|
||||
echo "PYTHONHOME=$PYTHONHOME" >> "$GITHUB_ENV"
|
||||
echo "PATH=${PYTHONHOME}/bin:$PATH" >> "$GITHUB_ENV"
|
||||
echo "LD_LIBRARY_PATH=${{ env.openssl_dir }}/lib${LD_LIBRARY_PATH:+:}${LD_LIBRARY_PATH}" >> "$GITHUB_ENV"
|
||||
#-------- Jython ------
|
||||
- name: Set up Java 8
|
||||
if: ${{ matrix.python-impl == 'jython' }}
|
||||
uses: actions/setup-java@v3
|
||||
uses: actions/setup-java@v1
|
||||
with:
|
||||
java-version: 8
|
||||
distribution: 'zulu'
|
||||
- name: Setup Jython environment
|
||||
if: ${{ matrix.python-impl == 'jython' }}
|
||||
shell: bash
|
||||
run: |
|
||||
echo "JYTHON_ROOT=${HOME}/jython" >> "$GITHUB_ENV"
|
||||
echo "PIP=pip" >> "$GITHUB_ENV"
|
||||
- name: Cache Jython
|
||||
id: cachejy
|
||||
if: ${{ matrix.python-impl == 'jython' && matrix.python-version == '2.7' }}
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
# 2.7.3 now available, may solve SNI issue
|
||||
key: jython-2.7.1
|
||||
path: |
|
||||
${{ env.JYTHON_ROOT }}
|
||||
- name: Install Jython
|
||||
if: ${{ matrix.python-impl == 'jython' && matrix.python-version == '2.7' && ! steps.cachejy.outputs.cache-hit }}
|
||||
shell: bash
|
||||
if: ${{ matrix.python-impl == 'jython' }}
|
||||
run: |
|
||||
JYTHON_ROOT="${{ env.JYTHON_ROOT }}"
|
||||
curl -L "https://repo1.maven.org/maven2/org/python/jython-installer/2.7.1/jython-installer-2.7.1.jar" -o jython-installer.jar
|
||||
java -jar jython-installer.jar -s -d "${JYTHON_ROOT}"
|
||||
echo "${JYTHON_ROOT}/bin" >> "$GITHUB_PATH"
|
||||
- name: Set up cached Jython
|
||||
if: ${{ steps.cachejy.outputs.cache-hit }}
|
||||
shell: bash
|
||||
wget https://repo1.maven.org/maven2/org/python/jython-installer/2.7.1/jython-installer-2.7.1.jar -O jython-installer.jar
|
||||
java -jar jython-installer.jar -s -d "$HOME/jython"
|
||||
echo "$HOME/jython/bin" >> $GITHUB_PATH
|
||||
- name: Install nose
|
||||
if: ${{ matrix.python-impl != 'jython' }}
|
||||
run: pip install nose
|
||||
- name: Install nose (Jython)
|
||||
if: ${{ matrix.python-impl == 'jython' }}
|
||||
# Working around deprecation of support for non-SNI clients at PyPI CDN (see https://status.python.org/incidents/hzmjhqsdjqgb)
|
||||
run: |
|
||||
JYTHON_ROOT="${{ env.JYTHON_ROOT }}"
|
||||
echo "${JYTHON_ROOT}/bin" >> $GITHUB_PATH
|
||||
- name: Install supporting Python 2.7 if possible
|
||||
if: ${{ steps.cachejy.outputs.cache-hit }}
|
||||
shell: bash
|
||||
run: |
|
||||
sudo apt-get install -y python2.7 || true
|
||||
#-------- pip ---------
|
||||
- name: Set up supported Python ${{ matrix.python-version }} pip
|
||||
if: ${{ (matrix.python-version != '3.2' && steps.setup-python.outputs.python-path) || matrix.python-version == '2.7' }}
|
||||
# This step may run in either Linux or Windows
|
||||
shell: bash
|
||||
run: |
|
||||
echo "$PATH"
|
||||
echo "$PYTHONHOME"
|
||||
# curl is available on both Windows and Linux, -L follows redirects, -O gets name
|
||||
python -m ensurepip || python -m pip --version || { \
|
||||
get_pip="${{ contains(needs.select.outputs.own-pip-versions, matrix.python-version) && format('{0}/', matrix.python-version) || '' }}"; \
|
||||
curl -L -O "https://bootstrap.pypa.io/pip/${get_pip}get-pip.py"; \
|
||||
python get-pip.py; }
|
||||
- name: Set up Python 2.6 pip
|
||||
if: ${{ matrix.python-version == '2.6' }}
|
||||
shell: bash
|
||||
run: |
|
||||
python -m pip --version || { \
|
||||
curl -L -O "https://bootstrap.pypa.io/pip/2.6/get-pip.py"; \
|
||||
curl -L -O "https://files.pythonhosted.org/packages/ac/95/a05b56bb975efa78d3557efa36acaf9cf5d2fd0ee0062060493687432e03/pip-9.0.3-py2.py3-none-any.whl"; \
|
||||
python get-pip.py --no-setuptools --no-wheel pip-9.0.3-py2.py3-none-any.whl; }
|
||||
# work-around to invoke pip module on 2.6: https://bugs.python.org/issue2751
|
||||
echo "PIP=python -m pip.__main__" >> "$GITHUB_ENV"
|
||||
- name: Set up other Python ${{ matrix.python-version }} pip
|
||||
if: ${{ matrix.python-version == '3.2' && steps.setup-python.outputs.python-path }}
|
||||
shell: bash
|
||||
run: |
|
||||
python -m pip --version || { \
|
||||
curl -L -O "https://bootstrap.pypa.io/pip/3.2/get-pip.py"; \
|
||||
curl -L -O "https://files.pythonhosted.org/packages/b2/d0/cd115fe345dd6f07ec1c780020a7dfe74966fceeb171e0f20d1d4905b0b7/pip-7.1.2-py2.py3-none-any.whl"; \
|
||||
python get-pip.py --no-setuptools --no-wheel pip-7.1.2-py2.py3-none-any.whl; }
|
||||
#-------- unittest ----
|
||||
- name: Upgrade Unittest for Python 2.6
|
||||
if: ${{ matrix.python-version == '2.6' }}
|
||||
shell: bash
|
||||
run: |
|
||||
# Work around deprecation of support for non-SNI clients at PyPI CDN (see https://status.python.org/incidents/hzmjhqsdjqgb)
|
||||
$PIP -qq show unittest2 || { \
|
||||
for u in "65/26/32b8464df2a97e6dd1b656ed26b2c194606c16fe163c695a992b36c11cdf/six-1.13.0-py2.py3-none-any.whl" \
|
||||
"f2/94/3af39d34be01a24a6e65433d19e107099374224905f1e0cc6bbe1fd22a2f/argparse-1.4.0-py2.py3-none-any.whl" \
|
||||
"c7/a3/c5da2a44c85bfbb6eebcfc1dde24933f8704441b98fdde6528f4831757a6/linecache2-1.0.0-py2.py3-none-any.whl" \
|
||||
"17/0a/6ac05a3723017a967193456a2efa0aa9ac4b51456891af1e2353bb9de21e/traceback2-1.4.0-py2.py3-none-any.whl" \
|
||||
"72/20/7f0f433060a962200b7272b8c12ba90ef5b903e218174301d0abfd523813/unittest2-1.1.0-py2.py3-none-any.whl"; do \
|
||||
curl -L -O "https://files.pythonhosted.org/packages/${u}"; \
|
||||
$PIP install ${u##*/}; \
|
||||
done; }
|
||||
# make tests use unittest2
|
||||
for test in ./test/test_*.py ./test/helper.py; do
|
||||
sed -r -i -e '/^import unittest$/s/test/test2 as unittest/' "$test"
|
||||
done
|
||||
#-------- nose --------
|
||||
- name: Install nose for Python ${{ matrix.python-version }}
|
||||
if: ${{ (matrix.python-version != '3.2' && steps.setup-python.outputs.python-path) || (matrix.python-impl == 'cpython' && (matrix.python-version == '2.7' || matrix.python-version == '3.12')) }}
|
||||
shell: bash
|
||||
run: |
|
||||
echo "$PATH"
|
||||
echo "$PYTHONHOME"
|
||||
# Use PyNose for recent Pythons instead of Nose
|
||||
py3ver="${{ matrix.python-version }}"
|
||||
py3ver=${py3ver#3.}
|
||||
[ "$py3ver" != "${{ matrix.python-version }}" ] && py3ver=${py3ver%.*} || py3ver=0
|
||||
[ "$py3ver" -ge 9 ] && nose=pynose || nose=nose
|
||||
$PIP -qq show $nose || $PIP install $nose
|
||||
- name: Install nose for other Python 2
|
||||
if: ${{ matrix.python-impl == 'jython' || (matrix.python-impl == 'cpython' && matrix.python-version == '2.6') }}
|
||||
shell: bash
|
||||
run: |
|
||||
# Work around deprecation of support for non-SNI clients at PyPI CDN (see https://status.python.org/incidents/hzmjhqsdjqgb)
|
||||
$PIP -qq show nose || { \
|
||||
curl -L -O "https://files.pythonhosted.org/packages/99/4f/13fb671119e65c4dce97c60e67d3fd9e6f7f809f2b307e2611f4701205cb/nose-1.3.7-py2-none-any.whl"; \
|
||||
$PIP install nose-1.3.7-py2-none-any.whl; }
|
||||
- name: Install nose for other Python 3
|
||||
if: ${{ matrix.python-version == '3.2' && steps.setup-python.outputs.python-path }}
|
||||
shell: bash
|
||||
run: |
|
||||
$PIP -qq show nose || { \
|
||||
curl -L -O "https://files.pythonhosted.org/packages/15/d8/dd071918c040f50fa1cf80da16423af51ff8ce4a0f2399b7bf8de45ac3d9/nose-1.3.7-py3-none-any.whl"; \
|
||||
$PIP install nose-1.3.7-py3-none-any.whl; }
|
||||
- name: Set up nosetest test
|
||||
if: ${{ contains(needs.select.outputs.test-set, matrix.ytdl-test-set ) }}
|
||||
shell: bash
|
||||
run: |
|
||||
# set PYTHON_VER
|
||||
PYTHON_VER=${{ matrix.python-version }}
|
||||
[ "${PYTHON_VER#*-}" != "$PYTHON_VER" ] || PYTHON_VER="${{ matrix.python-impl }}-${PYTHON_VER}"
|
||||
echo "PYTHON_VER=$PYTHON_VER" >> "$GITHUB_ENV"
|
||||
echo "PYTHON_IMPL=${{ matrix.python-impl }}" >> "$GITHUB_ENV"
|
||||
# define a test to validate the Python version used by nosetests
|
||||
printf '%s\n' \
|
||||
'from __future__ import unicode_literals' \
|
||||
'import sys, os, platform' \
|
||||
'try:' \
|
||||
' import unittest2 as unittest' \
|
||||
'except ImportError:' \
|
||||
' import unittest' \
|
||||
'class TestPython(unittest.TestCase):' \
|
||||
' def setUp(self):' \
|
||||
' self.ver = os.environ["PYTHON_VER"].split("-")' \
|
||||
' def test_python_ver(self):' \
|
||||
' self.assertEqual(["%d" % v for v in sys.version_info[:2]], self.ver[-1].split(".")[:2])' \
|
||||
' self.assertTrue(sys.version.startswith(self.ver[-1]))' \
|
||||
' self.assertIn(self.ver[0], ",".join((sys.version, platform.python_implementation())).lower())' \
|
||||
' def test_python_impl(self):' \
|
||||
' self.assertIn(platform.python_implementation().lower(), (os.environ["PYTHON_IMPL"], self.ver[0]))' \
|
||||
> test/test_python.py
|
||||
#-------- TESTS -------
|
||||
wget https://files.pythonhosted.org/packages/99/4f/13fb671119e65c4dce97c60e67d3fd9e6f7f809f2b307e2611f4701205cb/nose-1.3.7-py2-none-any.whl
|
||||
pip install nose-1.3.7-py2-none-any.whl
|
||||
- name: Run tests
|
||||
if: ${{ contains(needs.select.outputs.test-set, matrix.ytdl-test-set ) }}
|
||||
continue-on-error: ${{ matrix.ytdl-test-set == 'download' || matrix.python-impl == 'jython' }}
|
||||
env:
|
||||
YTDL_TEST_SET: ${{ matrix.ytdl-test-set }}
|
||||
run: |
|
||||
./devscripts/run_tests.${{ matrix.run-tests-ext }}
|
||||
run: ./devscripts/run_tests.${{ matrix.run-tests-ext }}
|
||||
flake8:
|
||||
name: Linter
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@v2
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v4
|
||||
uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: 3.9
|
||||
- name: Install flake8
|
||||
run: pip install flake8
|
||||
- name: Run flake8
|
||||
run: flake8 .
|
||||
|
||||
|
113
README.md
@@ -33,7 +33,7 @@ Windows users can [download an .exe file](https://yt-dl.org/latest/youtube-dl.ex
|
||||
You can also use pip:
|
||||
|
||||
sudo -H pip install --upgrade youtube-dl
|
||||
|
||||
|
||||
This command will update youtube-dl if you have already installed it. See the [pypi page](https://pypi.python.org/pypi/youtube_dl) for more information.
|
||||
|
||||
macOS users can install youtube-dl with [Homebrew](https://brew.sh/):
|
||||
@@ -563,7 +563,7 @@ The basic usage is not to set any template arguments when downloading a single f
|
||||
- `is_live` (boolean): Whether this video is a live stream or a fixed-length video
|
||||
- `start_time` (numeric): Time in seconds where the reproduction should start, as specified in the URL
|
||||
- `end_time` (numeric): Time in seconds where the reproduction should end, as specified in the URL
|
||||
- `format` (string): A human-readable description of the format
|
||||
- `format_id` (string): Format code specified by `--format`
|
||||
- `format_note` (string): Additional info about the format
|
||||
- `width` (numeric): Width of the video
|
||||
@@ -675,7 +675,7 @@ The general syntax for format selection is `--format FORMAT` or shorter `-f FORM
|
||||
|
||||
**tl;dr:** [navigate me to examples](#format-selection-examples).
|
||||
|
||||
The simplest case is requesting a specific format, for example with `-f 22` you can download the format with format code equal to 22. You can get the list of available format codes for particular video using `--list-formats` or `-F`. Note that these format codes are extractor specific.
|
||||
|
||||
You can also use a file extension (currently `3gp`, `aac`, `flv`, `m4a`, `mp3`, `mp4`, `ogg`, `wav`, `webm` are supported) to download the best quality format of a particular file extension served as a single file, e.g. `-f webm` will download the best quality format with the `webm` extension served as a single file.
|
||||
|
||||
@@ -760,7 +760,7 @@ Videos can be filtered by their upload date using the options `--date`, `--dateb
|
||||
|
||||
- Absolute dates: Dates in the format `YYYYMMDD`.
|
||||
- Relative dates: Dates in the format `(now|today)[+-][0-9](day|week|month|year)(s)?`
|
||||
|
||||
|
||||
Examples:
|
||||
|
||||
```bash
|
||||
@@ -1000,8 +1000,6 @@ To run the test, simply invoke your favorite test runner, or execute a test file
|
||||
python test/test_download.py
|
||||
nosetests
|
||||
|
||||
For Python versions 3.6 and later, you can use [pynose](https://pypi.org/project/pynose/) to implement `nosetests`. The original [nose](https://pypi.org/project/nose/) has not been upgraded for 3.10 and later.
|
||||
|
||||
See item 6 of [new extractor tutorial](#adding-support-for-a-new-site) for how to run extractor specific test cases.
|
||||
|
||||
If you want to create a build of youtube-dl yourself, you'll need
|
||||
@@ -1093,7 +1091,7 @@ In any case, thank you very much for your contributions!
|
||||
|
||||
## youtube-dl coding conventions
|
||||
|
||||
This section introduces guidelines for writing idiomatic, robust and future-proof extractor code.
|
||||
This section introduces a guide lines for writing idiomatic, robust and future-proof extractor code.
|
||||
|
||||
Extractors are very fragile by nature since they depend on the layout of the source data provided by 3rd party media hosters out of your control and this layout tends to change. As an extractor implementer your task is not only to write code that will extract media links and metadata correctly but also to minimize dependency on the source's layout and even to make the code foresee potential future changes and be ready for that. This is important because it will allow the extractor not to break on minor layout changes thus keeping old youtube-dl versions working. Even though this breakage issue is easily fixed by emitting a new version of youtube-dl with a fix incorporated, all the previous versions become broken in all repositories and distros' packages that may not be so prompt in fetching the update from us. Needless to say, some non rolling release distros may never receive an update at all.
|
||||
|
||||
@@ -1116,7 +1114,7 @@ Say you have some source dictionary `meta` that you've fetched as JSON with HTTP
|
||||
```python
|
||||
meta = self._download_json(url, video_id)
|
||||
```
|
||||
|
||||
|
||||
Assume at this point `meta`'s layout is:
|
||||
|
||||
```python
|
||||
@@ -1160,7 +1158,7 @@ description = self._search_regex(
|
||||
```
|
||||
|
||||
On failure this code will silently continue the extraction with `description` set to `None`. That is useful for metafields that may or may not be present.
|
||||
|
||||
|
||||
### Provide fallbacks
|
||||
|
||||
When extracting metadata try to do so from multiple sources. For example if `title` is present in several places, try extracting from at least some of them. This makes it more future-proof in case some of the sources become unavailable.
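A minimal sketch of that pattern, assuming `meta` and `webpage` were already fetched earlier in the extractor (the key names and regex here are illustrative, not taken from a real site):

```python
# Try structured metadata first, then Open Graph, then raw markup; every
# step is non-fatal, so losing one source does not break title extraction.
title = (
    traverse_obj(meta, ('videoDetails', 'title'))
    or self._og_search_title(webpage, default=None)
    or self._html_search_regex(
        r'<h1[^>]*>([^<]+)</h1>', webpage, 'title', fatal=False))
```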
@@ -1208,7 +1206,7 @@ r'(id|ID)=(?P<id>\d+)'
|
||||
#### Make regular expressions relaxed and flexible
|
||||
|
||||
When using regular expressions try to write them fuzzy, relaxed and flexible, skipping insignificant parts that are more likely to change, allowing both single and double quotes for quoted values and so on.
|
||||
|
||||
|
||||
##### Example
|
||||
|
||||
Say you need to extract `title` from the following HTML code:
|
||||
@@ -1232,7 +1230,7 @@ title = self._search_regex(
|
||||
webpage, 'title', group='title')
|
||||
```
|
||||
|
||||
Note how you tolerate potential changes in the `style` attribute's value or switch from using double quotes to single for `class` attribute:
|
||||
|
||||
The code definitely should not look like:
|
||||
|
||||
@@ -1333,114 +1331,27 @@ Wrap all extracted numeric data into safe functions from [`youtube_dl/utils.py`]
|
||||
|
||||
Use `url_or_none` for safe URL processing.
|
||||
|
||||
Use `traverse_obj` for safe metadata extraction from parsed JSON.
|
||||
Use `try_get` for safe metadata extraction from parsed JSON.
|
||||
|
||||
Use `unified_strdate` for uniform `upload_date` or any `YYYYMMDD` meta field extraction, `unified_timestamp` for uniform `timestamp` extraction, `parse_filesize` for `filesize` extraction, `parse_count` for count meta fields extraction, `parse_resolution`, `parse_duration` for `duration` extraction, `parse_age_limit` for `age_limit` extraction.
|
||||
|
||||
Explore [`youtube_dl/utils.py`](https://github.com/ytdl-org/youtube-dl/blob/master/youtube_dl/utils.py) for more useful convenience functions.
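As a rough illustration of the helpers above (the `meta` field names are invented for the example):

```python
from youtube_dl.utils import (
    parse_age_limit, parse_count, parse_duration, unified_strdate, url_or_none)

# Each helper returns None on missing or malformed input instead of raising,
# so optional fields degrade gracefully when the source layout changes.
info_dict = {
    'url': url_or_none(meta.get('streamUrl')),
    'upload_date': unified_strdate(meta.get('publishedAt')),
    'duration': parse_duration(meta.get('length')),
    'view_count': parse_count(meta.get('views')),
    'age_limit': parse_age_limit(meta.get('rating')),
}
```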
#### More examples
|
||||
|
||||
##### Safely extract optional description from parsed JSON
|
||||
|
||||
When processing complex JSON, as often returned by site API requests or stashed in web pages for "hydration", you can use the `traverse_obj()` utility function to handle multiple fallback values and to ensure the expected type of metadata items. The function's docstring defines how the function works: also review usage in the codebase for more examples.
|
||||
|
||||
In this example, a text `description`, or `None`, is pulled from the `.result.video[0].summary` member of the parsed JSON `response`, if available.
|
||||
|
||||
```python
|
||||
description = traverse_obj(response, ('result', 'video', 0, 'summary', T(compat_str)))
|
||||
```
|
||||
`T(...)` is a shorthand for a set literal; if you hate people who still run Python 2.6, `T(type_or_transformation)` could be written as a set literal `{type_or_transformation}`.
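For example, these two calls are equivalent; the second form just needs set-literal syntax, which Python 2.6 lacks:

```python
# T(compat_str) builds the one-element set {compat_str} used by traverse_obj
description = traverse_obj(response, ('result', 'video', 0, 'summary', T(compat_str)))
description = traverse_obj(response, ('result', 'video', 0, 'summary', {compat_str}))
```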
Some extractors use the older and less capable `try_get()` function in the same way.
|
||||
|
||||
```python
|
||||
description = try_get(response, lambda x: x['result']['video'][0]['summary'], compat_str)
|
||||
```
|
||||
|
||||
##### Safely extract more optional metadata
|
||||
|
||||
In this example, various optional metadata values are extracted from the `.result.video[0]` member of the parsed JSON `response`, which is expected to be a JS object, parsed into a `dict`, with no crash if that isn't so, or if any of the target values are missing or invalid.
|
||||
|
||||
```python
|
||||
video = traverse_obj(response, ('result', 'video', 0, T(dict))) or {}
|
||||
# formerly:
|
||||
# video = try_get(response, lambda x: x['result']['video'][0], dict) or {}
|
||||
video = try_get(response, lambda x: x['result']['video'][0], dict) or {}
|
||||
description = video.get('summary')
|
||||
duration = float_or_none(video.get('durationMs'), scale=1000)
|
||||
view_count = int_or_none(video.get('views'))
|
||||
```
|
||||
|
||||
#### Safely extract nested lists
|
||||
|
||||
Suppose you've extracted JSON like this into a Python data structure named `media_json` using, say, the `_download_json()` or `_parse_json()` methods of `InfoExtractor`:
|
||||
```json
|
||||
{
|
||||
"title": "Example video",
|
||||
"comment": "try extracting this",
|
||||
"media": [{
|
||||
"type": "bad",
|
||||
"size": 320,
|
||||
"url": "https://some.cdn.site/bad.mp4"
|
||||
}, {
|
||||
"type": "streaming",
|
||||
"url": "https://some.cdn.site/hls.m3u8"
|
||||
}, {
|
||||
"type": "super",
|
||||
"size": 1280,
|
||||
"url": "https://some.cdn.site/good.webm"
|
||||
}],
|
||||
"moreStuff": "more values",
|
||||
...
|
||||
}
|
||||
```
|
||||
|
||||
Then extractor code like this can collect the various fields of the JSON:
|
||||
```python
|
||||
...
|
||||
from ..utils import (
|
||||
determine_ext,
|
||||
int_or_none,
|
||||
T,
|
||||
traverse_obj,
|
||||
txt_or_none,
|
||||
url_or_none,
|
||||
)
|
||||
...
|
||||
...
|
||||
info_dict = {}
|
||||
# extract title and description if valid and not empty
|
||||
info_dict.update(traverse_obj(media_json, {
|
||||
'title': ('title', T(txt_or_none)),
|
||||
'description': ('comment', T(txt_or_none)),
|
||||
}))
|
||||
|
||||
# extract any recognisable media formats
|
||||
fmts = []
|
||||
# traverse into "media" list, extract `dict`s with desired keys
|
||||
for fmt in traverse_obj(media_json, ('media', Ellipsis, {
|
||||
'format_id': ('type', T(txt_or_none)),
|
||||
'url': ('url', T(url_or_none)),
|
||||
'width': ('size', T(int_or_none)), })):
|
||||
# bad `fmt` values were `None` and removed
|
||||
if 'url' not in fmt:
|
||||
continue
|
||||
fmt_url = fmt['url'] # known to be valid URL
|
||||
ext = determine_ext(fmt_url)
|
||||
if ext == 'm3u8':
|
||||
fmts.extend(self._extract_m3u8_formats(fmt_url, video_id, 'mp4', fatal=False))
|
||||
else:
|
||||
fmt['ext'] = ext
|
||||
fmts.append(fmt)
|
||||
|
||||
# sort, raise if no formats
|
||||
self._sort_formats(fmts)
|
||||
|
||||
info_dict['formats'] = fmts
|
||||
...
|
||||
```
|
||||
The extractor raises an exception rather than random crashes if the JSON structure changes so that no formats are found.
|
||||
|
||||
# EMBEDDING YOUTUBE-DL
|
||||
|
||||
youtube-dl makes the best effort to be a good command-line program, and thus should be callable from any programming language. If you encounter any problems parsing its output, feel free to [create a report](https://github.com/ytdl-org/youtube-dl/issues/new).
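A minimal sketch of embedding youtube-dl from Python (the options dict and URL are only placeholders):

```python
from __future__ import unicode_literals
import youtube_dl

ydl_opts = {}  # any YoutubeDL options, e.g. {'format': 'bestaudio/best'}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
    ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])
```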
@ -1 +0,0 @@
|
||||
# Empty file needed to make devscripts.utils properly importable from outside
|
@ -5,12 +5,8 @@ import os
|
||||
from os.path import dirname as dirn
|
||||
import sys
|
||||
|
||||
sys.path.insert(0, dirn(dirn(os.path.abspath(__file__))))
|
||||
|
||||
sys.path.insert(0, dirn(dirn((os.path.abspath(__file__)))))
|
||||
import youtube_dl
|
||||
from youtube_dl.compat import compat_open as open
|
||||
|
||||
from utils import read_file
|
||||
|
||||
BASH_COMPLETION_FILE = "youtube-dl.bash-completion"
|
||||
BASH_COMPLETION_TEMPLATE = "devscripts/bash-completion.in"
|
||||
@ -22,8 +18,9 @@ def build_completion(opt_parser):
|
||||
for option in group.option_list:
|
||||
# for every long flag
|
||||
opts_flag.append(option.get_opt_string())
|
||||
template = read_file(BASH_COMPLETION_TEMPLATE)
|
||||
with open(BASH_COMPLETION_FILE, "w", encoding='utf-8') as f:
|
||||
with open(BASH_COMPLETION_TEMPLATE) as f:
|
||||
template = f.read()
|
||||
with open(BASH_COMPLETION_FILE, "w") as f:
|
||||
# just using the special char
|
||||
filled_template = template.replace("{{flags}}", " ".join(opts_flag))
|
||||
f.write(filled_template)
|
||||
|
@ -49,34 +49,15 @@ def cli_to_api(*opts):
|
||||
|
||||
# from https://github.com/yt-dlp/yt-dlp/issues/5859#issuecomment-1363938900
|
||||
default = parsed_options([])
|
||||
|
||||
def neq_opt(a, b):
|
||||
if a == b:
|
||||
return False
|
||||
if a is None and repr(type(b)).endswith(".utils.DateRange'>"):
|
||||
return '0001-01-01 - 9999-12-31' != '{0}'.format(b)
|
||||
return a != b
|
||||
|
||||
diff = dict((k, v) for k, v in parsed_options(opts).items() if neq_opt(default[k], v))
|
||||
diff = dict((k, v) for k, v in parsed_options(opts).items() if default[k] != v)
|
||||
if 'postprocessors' in diff:
|
||||
diff['postprocessors'] = [pp for pp in diff['postprocessors'] if pp not in default['postprocessors']]
|
||||
return diff
|
||||
|
||||
|
||||
def main():
|
||||
from pprint import PrettyPrinter
|
||||
|
||||
pprint = PrettyPrinter()
|
||||
super_format = pprint.format
|
||||
|
||||
def format(object, context, maxlevels, level):
|
||||
if repr(type(object)).endswith(".utils.DateRange'>"):
|
||||
return '{0}: {1}>'.format(repr(object)[:-2], object), True, False
|
||||
return super_format(object, context, maxlevels, level)
|
||||
|
||||
pprint.format = format
|
||||
|
||||
pprint.pprint(cli_to_api(*sys.argv))
|
||||
from pprint import pprint
|
||||
pprint(cli_to_api(*sys.argv))
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
@ -1,6 +1,7 @@
|
||||
#!/usr/bin/env python
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import io
|
||||
import json
|
||||
import mimetypes
|
||||
import netrc
|
||||
@ -9,9 +10,7 @@ import os
|
||||
import re
|
||||
import sys
|
||||
|
||||
dirn = os.path.dirname
|
||||
|
||||
sys.path.insert(0, dirn(dirn(os.path.abspath(__file__))))
|
||||
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
|
||||
from youtube_dl.compat import (
|
||||
compat_basestring,
|
||||
@ -23,7 +22,6 @@ from youtube_dl.utils import (
|
||||
make_HTTPS_handler,
|
||||
sanitized_Request,
|
||||
)
|
||||
from utils import read_file
|
||||
|
||||
|
||||
class GitHubReleaser(object):
|
||||
@ -91,7 +89,8 @@ def main():
|
||||
|
||||
changelog_file, version, build_path = args
|
||||
|
||||
changelog = read_file(changelog_file)
|
||||
with io.open(changelog_file, encoding='utf-8') as inf:
|
||||
changelog = inf.read()
|
||||
|
||||
mobj = re.search(r'(?s)version %s\n{2}(.+?)\n{3}' % version, changelog)
|
||||
body = mobj.group(1) if mobj else ''
|
||||
|
@ -6,13 +6,10 @@ import os
|
||||
from os.path import dirname as dirn
|
||||
import sys
|
||||
|
||||
sys.path.insert(0, dirn(dirn(os.path.abspath(__file__))))
|
||||
|
||||
sys.path.insert(0, dirn(dirn((os.path.abspath(__file__)))))
|
||||
import youtube_dl
|
||||
from youtube_dl.utils import shell_quote
|
||||
|
||||
from utils import read_file, write_file
|
||||
|
||||
FISH_COMPLETION_FILE = 'youtube-dl.fish'
|
||||
FISH_COMPLETION_TEMPLATE = 'devscripts/fish-completion.in'
|
||||
|
||||
@ -41,9 +38,11 @@ def build_completion(opt_parser):
|
||||
complete_cmd.extend(EXTRA_ARGS.get(long_option, []))
|
||||
commands.append(shell_quote(complete_cmd))
|
||||
|
||||
template = read_file(FISH_COMPLETION_TEMPLATE)
|
||||
with open(FISH_COMPLETION_TEMPLATE) as f:
|
||||
template = f.read()
|
||||
filled_template = template.replace('{{commands}}', '\n'.join(commands))
|
||||
write_file(FISH_COMPLETION_FILE, filled_template)
|
||||
with open(FISH_COMPLETION_FILE, 'w') as f:
|
||||
f.write(filled_template)
|
||||
|
||||
|
||||
parser = youtube_dl.parseOpts()[0]
|
||||
|
@ -6,21 +6,16 @@ import sys
|
||||
import hashlib
|
||||
import os.path
|
||||
|
||||
dirn = os.path.dirname
|
||||
|
||||
sys.path.insert(0, dirn(dirn(dirn(os.path.abspath(__file__)))))
|
||||
|
||||
from devscripts.utils import read_file, write_file
|
||||
from youtube_dl.compat import compat_open as open
|
||||
|
||||
if len(sys.argv) <= 1:
|
||||
print('Specify the version number as parameter')
|
||||
sys.exit()
|
||||
version = sys.argv[1]
|
||||
|
||||
write_file('update/LATEST_VERSION', version)
|
||||
with open('update/LATEST_VERSION', 'w') as f:
|
||||
f.write(version)
|
||||
|
||||
versions_info = json.loads(read_file('update/versions.json'))
|
||||
versions_info = json.load(open('update/versions.json'))
|
||||
if 'signature' in versions_info:
|
||||
del versions_info['signature']
|
||||
|
||||
@ -44,5 +39,5 @@ for key, filename in filenames.items():
|
||||
versions_info['versions'][version] = new_version
|
||||
versions_info['latest'] = version
|
||||
|
||||
with open('update/versions.json', 'w', encoding='utf-8') as jsonf:
|
||||
json.dump(versions_info, jsonf, indent=4, sort_keys=True)
|
||||
with open('update/versions.json', 'w') as jsonf:
|
||||
json.dump(versions_info, jsonf, indent=4, sort_keys=True)
|
||||
|
@ -2,21 +2,14 @@
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import json
|
||||
import os.path
|
||||
import sys
|
||||
|
||||
dirn = os.path.dirname
|
||||
|
||||
sys.path.insert(0, dirn(dirn((os.path.abspath(__file__)))))
|
||||
|
||||
from utils import read_file, write_file
|
||||
|
||||
versions_info = json.loads(read_file('update/versions.json'))
|
||||
versions_info = json.load(open('update/versions.json'))
|
||||
version = versions_info['latest']
|
||||
version_dict = versions_info['versions'][version]
|
||||
|
||||
# Read template page
|
||||
template = read_file('download.html.in')
|
||||
with open('download.html.in', 'r', encoding='utf-8') as tmplf:
|
||||
template = tmplf.read()
|
||||
|
||||
template = template.replace('@PROGRAM_VERSION@', version)
|
||||
template = template.replace('@PROGRAM_URL@', version_dict['bin'][0])
|
||||
@ -25,5 +18,5 @@ template = template.replace('@EXE_URL@', version_dict['exe'][0])
|
||||
template = template.replace('@EXE_SHA256SUM@', version_dict['exe'][1])
|
||||
template = template.replace('@TAR_URL@', version_dict['tar'][0])
|
||||
template = template.replace('@TAR_SHA256SUM@', version_dict['tar'][1])
|
||||
|
||||
write_file('download.html', template)
|
||||
with open('download.html', 'w', encoding='utf-8') as dlf:
|
||||
dlf.write(template)
|
||||
|
@ -5,22 +5,17 @@ from __future__ import with_statement, unicode_literals
|
||||
|
||||
import datetime
|
||||
import glob
|
||||
import io # For Python 2 compatibility
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
|
||||
dirn = os.path.dirname
|
||||
|
||||
sys.path.insert(0, dirn(dirn(dirn(os.path.abspath(__file__)))))
|
||||
|
||||
from devscripts.utils import read_file, write_file
|
||||
from youtube_dl import compat_str
|
||||
|
||||
year = compat_str(datetime.datetime.now().year)
|
||||
year = str(datetime.datetime.now().year)
|
||||
for fn in glob.glob('*.html*'):
|
||||
content = read_file(fn)
|
||||
with io.open(fn, encoding='utf-8') as f:
|
||||
content = f.read()
|
||||
newc = re.sub(r'(?P<copyright>Copyright © 2011-)(?P<year>[0-9]{4})', 'Copyright © 2011-' + year, content)
|
||||
if content != newc:
|
||||
tmpFn = fn + '.part'
|
||||
write_file(tmpFn, newc)
|
||||
with io.open(tmpFn, 'wt', encoding='utf-8') as outf:
|
||||
outf.write(newc)
|
||||
os.rename(tmpFn, fn)
|
||||
|
@ -2,16 +2,10 @@
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import datetime
|
||||
import io
|
||||
import json
|
||||
import os.path
|
||||
import textwrap
|
||||
import sys
|
||||
|
||||
dirn = os.path.dirname
|
||||
|
||||
sys.path.insert(0, dirn(dirn(os.path.abspath(__file__))))
|
||||
|
||||
from utils import write_file
|
||||
|
||||
atom_template = textwrap.dedent("""\
|
||||
<?xml version="1.0" encoding="utf-8"?>
|
||||
@ -78,4 +72,5 @@ for v in versions:
|
||||
entries_str = textwrap.indent(''.join(entries), '\t')
|
||||
atom_template = atom_template.replace('@ENTRIES@', entries_str)
|
||||
|
||||
write_file('update/releases.atom', atom_template)
|
||||
with io.open('update/releases.atom', 'w', encoding='utf-8') as atom_file:
|
||||
atom_file.write(atom_template)
|
||||
|
@ -5,17 +5,15 @@ import sys
|
||||
import os
|
||||
import textwrap
|
||||
|
||||
dirn = os.path.dirname
|
||||
|
||||
# We must be able to import youtube_dl
|
||||
sys.path.insert(0, dirn(dirn(dirn(os.path.abspath(__file__)))))
|
||||
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
|
||||
|
||||
import youtube_dl
|
||||
from devscripts.utils import read_file, write_file
|
||||
|
||||
|
||||
def main():
|
||||
template = read_file('supportedsites.html.in')
|
||||
with open('supportedsites.html.in', 'r', encoding='utf-8') as tmplf:
|
||||
template = tmplf.read()
|
||||
|
||||
ie_htmls = []
|
||||
for ie in youtube_dl.list_extractors(age_limit=None):
|
||||
@ -31,7 +29,8 @@ def main():
|
||||
|
||||
template = template.replace('@SITES@', textwrap.indent('\n'.join(ie_htmls), '\t'))
|
||||
|
||||
write_file('supportedsites.html', template)
|
||||
with open('supportedsites.html', 'w', encoding='utf-8') as sitesf:
|
||||
sitesf.write(template)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
@ -1,11 +1,10 @@
|
||||
#!/usr/bin/env python
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import io
|
||||
import optparse
|
||||
import re
|
||||
|
||||
from utils import read_file, write_file
|
||||
|
||||
|
||||
def main():
|
||||
parser = optparse.OptionParser(usage='%prog INFILE OUTFILE')
|
||||
@ -15,7 +14,8 @@ def main():
|
||||
|
||||
infile, outfile = args
|
||||
|
||||
readme = read_file(infile)
|
||||
with io.open(infile, encoding='utf-8') as inf:
|
||||
readme = inf.read()
|
||||
|
||||
bug_text = re.search(
|
||||
r'(?s)#\s*BUGS\s*[^\n]*\s*(.*?)#\s*COPYRIGHT', readme).group(1)
|
||||
@ -25,7 +25,8 @@ def main():
|
||||
|
||||
out = bug_text + dev_text
|
||||
|
||||
write_file(outfile, out)
|
||||
with io.open(outfile, 'w', encoding='utf-8') as outf:
|
||||
outf.write(out)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
@ -1,11 +1,8 @@
|
||||
#!/usr/bin/env python
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import io
|
||||
import optparse
|
||||
import os.path
|
||||
import sys
|
||||
|
||||
from utils import read_file, read_version, write_file
|
||||
|
||||
|
||||
def main():
|
||||
@ -16,11 +13,17 @@ def main():
|
||||
|
||||
infile, outfile = args
|
||||
|
||||
issue_template_tmpl = read_file(infile)
|
||||
with io.open(infile, encoding='utf-8') as inf:
|
||||
issue_template_tmpl = inf.read()
|
||||
|
||||
out = issue_template_tmpl % {'version': read_version()}
|
||||
# Get the version from youtube_dl/version.py without importing the package
|
||||
exec(compile(open('youtube_dl/version.py').read(),
|
||||
'youtube_dl/version.py', 'exec'))
|
||||
|
||||
write_file(outfile, out)
|
||||
out = issue_template_tmpl % {'version': locals()['__version__']}
|
||||
|
||||
with io.open(outfile, 'w', encoding='utf-8') as outf:
|
||||
outf.write(out)
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
|
@ -1,48 +1,35 @@
|
||||
from __future__ import unicode_literals, print_function
|
||||
|
||||
from inspect import getsource
|
||||
import io
|
||||
import os
|
||||
from os.path import dirname as dirn
|
||||
import re
|
||||
import sys
|
||||
|
||||
print('WARNING: Lazy loading extractors is an experimental feature that may not always work', file=sys.stderr)
|
||||
|
||||
sys.path.insert(0, dirn(dirn(os.path.abspath(__file__))))
|
||||
sys.path.insert(0, dirn(dirn((os.path.abspath(__file__)))))
|
||||
|
||||
lazy_extractors_filename = sys.argv[1]
|
||||
if os.path.exists(lazy_extractors_filename):
|
||||
os.remove(lazy_extractors_filename)
|
||||
# Py2: may be confused by leftover lazy_extractors.pyc
|
||||
if sys.version_info[0] < 3:
|
||||
for c in ('c', 'o'):
|
||||
try:
|
||||
os.remove(lazy_extractors_filename + c)
|
||||
except OSError:
|
||||
pass
|
||||
|
||||
from devscripts.utils import read_file, write_file
|
||||
from youtube_dl.compat import compat_register_utf8
|
||||
|
||||
compat_register_utf8()
|
||||
try:
|
||||
os.remove(lazy_extractors_filename + 'c')
|
||||
except OSError:
|
||||
pass
|
||||
|
||||
from youtube_dl.extractor import _ALL_CLASSES
|
||||
from youtube_dl.extractor.common import InfoExtractor, SearchInfoExtractor
|
||||
|
||||
module_template = read_file('devscripts/lazy_load_template.py')
|
||||
|
||||
|
||||
def get_source(m):
|
||||
return re.sub(r'(?m)^\s*#.*\n', '', getsource(m))
|
||||
|
||||
with open('devscripts/lazy_load_template.py', 'rt') as f:
|
||||
module_template = f.read()
|
||||
|
||||
module_contents = [
|
||||
module_template,
|
||||
get_source(InfoExtractor.suitable),
|
||||
get_source(InfoExtractor._match_valid_url) + '\n',
|
||||
module_template + '\n' + getsource(InfoExtractor.suitable) + '\n',
|
||||
'class LazyLoadSearchExtractor(LazyLoadExtractor):\n pass\n',
|
||||
# needed for suitable() methods of Youtube extractor (see #28780)
|
||||
'from youtube_dl.utils import parse_qs, variadic\n',
|
||||
'from youtube_dl.utils import parse_qs\n',
|
||||
]
|
||||
|
||||
ie_template = '''
|
||||
@ -75,7 +62,7 @@ def build_lazy_ie(ie, name):
|
||||
valid_url=valid_url,
|
||||
module=ie.__module__)
|
||||
if ie.suitable.__func__ is not InfoExtractor.suitable.__func__:
|
||||
s += '\n' + get_source(ie.suitable)
|
||||
s += '\n' + getsource(ie.suitable)
|
||||
if hasattr(ie, '_make_valid_url'):
|
||||
# search extractors
|
||||
s += make_valid_template.format(valid_url=ie._make_valid_url())
|
||||
@ -115,17 +102,7 @@ for ie in ordered_cls:
|
||||
module_contents.append(
|
||||
'_ALL_CLASSES = [{0}]'.format(', '.join(names)))
|
||||
|
||||
module_src = '\n'.join(module_contents)
|
||||
module_src = '\n'.join(module_contents) + '\n'
|
||||
|
||||
write_file(lazy_extractors_filename, module_src + '\n')
|
||||
|
||||
# work around JVM byte code module limit in Jython
|
||||
if sys.platform.startswith('java') and sys.version_info[:2] == (2, 7):
|
||||
import subprocess
|
||||
from youtube_dl.compat import compat_subprocess_get_DEVNULL
|
||||
# if Python 2.7 is available, use it to compile the module for Jython
|
||||
try:
|
||||
# if Python 2.7 is available, use it to compile the module for Jython
|
||||
subprocess.check_call(['python2.7', '-m', 'py_compile', lazy_extractors_filename], stdout=compat_subprocess_get_DEVNULL())
|
||||
except Exception:
|
||||
pass
|
||||
with io.open(lazy_extractors_filename, 'wt', encoding='utf-8') as f:
|
||||
f.write(module_src)
|
||||
|
@ -1,14 +1,8 @@
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import os.path
|
||||
import re
|
||||
import io
|
||||
import sys
|
||||
dirn = os.path.dirname
|
||||
|
||||
sys.path.insert(0, dirn(dirn(os.path.abspath(__file__))))
|
||||
|
||||
from utils import read_file
|
||||
from youtube_dl.compat import compat_open as open
|
||||
import re
|
||||
|
||||
README_FILE = 'README.md'
|
||||
helptext = sys.stdin.read()
|
||||
@ -16,7 +10,8 @@ helptext = sys.stdin.read()
|
||||
if isinstance(helptext, bytes):
|
||||
helptext = helptext.decode('utf-8')
|
||||
|
||||
oldreadme = read_file(README_FILE)
|
||||
with io.open(README_FILE, encoding='utf-8') as f:
|
||||
oldreadme = f.read()
|
||||
|
||||
header = oldreadme[:oldreadme.index('# OPTIONS')]
|
||||
footer = oldreadme[oldreadme.index('# CONFIGURATION'):]
|
||||
@ -25,7 +20,7 @@ options = helptext[helptext.index(' General Options:') + 19:]
|
||||
options = re.sub(r'(?m)^ (\w.+)$', r'## \1', options)
|
||||
options = '# OPTIONS\n' + options + '\n'
|
||||
|
||||
with open(README_FILE, 'w', encoding='utf-8') as f:
|
||||
with io.open(README_FILE, 'w', encoding='utf-8') as f:
|
||||
f.write(header)
|
||||
f.write(options)
|
||||
f.write(footer)
|
||||
|
@ -1,19 +1,17 @@
|
||||
#!/usr/bin/env python
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import io
|
||||
import optparse
|
||||
import os.path
|
||||
import os
|
||||
import sys
|
||||
|
||||
|
||||
# Import youtube_dl
|
||||
dirn = os.path.dirname
|
||||
|
||||
sys.path.insert(0, dirn(dirn(os.path.abspath(__file__))))
|
||||
|
||||
ROOT_DIR = os.path.join(os.path.dirname(__file__), '..')
|
||||
sys.path.insert(0, ROOT_DIR)
|
||||
import youtube_dl
|
||||
|
||||
from utils import write_file
|
||||
|
||||
|
||||
def main():
|
||||
parser = optparse.OptionParser(usage='%prog OUTFILE.md')
|
||||
@ -40,7 +38,8 @@ def main():
|
||||
' - ' + md + '\n'
|
||||
for md in gen_ies_md(ies))
|
||||
|
||||
write_file(outfile, out)
|
||||
with io.open(outfile, 'w', encoding='utf-8') as outf:
|
||||
outf.write(out)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
@ -1,13 +1,13 @@
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import io
|
||||
import optparse
|
||||
import os.path
|
||||
import re
|
||||
|
||||
from utils import read_file, write_file
|
||||
|
||||
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
|
||||
README_FILE = os.path.join(ROOT_DIR, 'README.md')
|
||||
|
||||
PREFIX = r'''%YOUTUBE-DL(1)
|
||||
|
||||
# NAME
|
||||
@ -29,7 +29,8 @@ def main():
|
||||
|
||||
outfile, = args
|
||||
|
||||
readme = read_file(README_FILE)
|
||||
with io.open(README_FILE, encoding='utf-8') as f:
|
||||
readme = f.read()
|
||||
|
||||
readme = re.sub(r'(?s)^.*?(?=# DESCRIPTION)', '', readme)
|
||||
readme = re.sub(r'\s+youtube-dl \[OPTIONS\] URL \[URL\.\.\.\]', '', readme)
|
||||
@ -37,7 +38,8 @@ def main():
|
||||
|
||||
readme = filter_options(readme)
|
||||
|
||||
write_file(outfile, readme)
|
||||
with io.open(outfile, 'w', encoding='utf-8') as outf:
|
||||
outf.write(readme)
|
||||
|
||||
|
||||
def filter_options(readme):
|
||||
|
@ -1,62 +0,0 @@
|
||||
# coding: utf-8
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import argparse
|
||||
import functools
|
||||
import os.path
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
dirn = os.path.dirname
|
||||
|
||||
sys.path.insert(0, dirn(dirn(os.path.abspath(__file__))))
|
||||
|
||||
from youtube_dl.compat import (
|
||||
compat_kwargs,
|
||||
compat_open as open,
|
||||
)
|
||||
|
||||
|
||||
def read_file(fname):
|
||||
with open(fname, encoding='utf-8') as f:
|
||||
return f.read()
|
||||
|
||||
|
||||
def write_file(fname, content, mode='w'):
|
||||
with open(fname, mode, encoding='utf-8') as f:
|
||||
return f.write(content)
|
||||
|
||||
|
||||
def read_version(fname='youtube_dl/version.py'):
|
||||
"""Get the version without importing the package"""
|
||||
exec(compile(read_file(fname), fname, 'exec'))
|
||||
return locals()['__version__']
|
||||
|
||||
|
||||
def get_filename_args(has_infile=False, default_outfile=None):
|
||||
parser = argparse.ArgumentParser()
|
||||
if has_infile:
|
||||
parser.add_argument('infile', help='Input file')
|
||||
kwargs = {'nargs': '?', 'default': default_outfile} if default_outfile else {}
|
||||
kwargs['help'] = 'Output file'
|
||||
parser.add_argument('outfile', **compat_kwargs(kwargs))
|
||||
|
||||
opts = parser.parse_args()
|
||||
if has_infile:
|
||||
return opts.infile, opts.outfile
|
||||
return opts.outfile
|
||||
|
||||
|
||||
def compose_functions(*functions):
|
||||
return lambda x: functools.reduce(lambda y, f: f(y), functions, x)
|
||||
|
||||
|
||||
def run_process(*args, **kwargs):
|
||||
kwargs.setdefault('text', True)
|
||||
kwargs.setdefault('check', True)
|
||||
kwargs.setdefault('capture_output', True)
|
||||
if kwargs['text']:
|
||||
kwargs.setdefault('encoding', 'utf-8')
|
||||
kwargs.setdefault('errors', 'replace')
|
||||
kwargs = compat_kwargs(kwargs)
|
||||
return subprocess.run(args, **kwargs)
|
@ -7,8 +7,6 @@ import sys
|
||||
|
||||
sys.path.insert(0, dirn(dirn((os.path.abspath(__file__)))))
|
||||
import youtube_dl
|
||||
from utils import read_file, write_file
|
||||
|
||||
|
||||
ZSH_COMPLETION_FILE = "youtube-dl.zsh"
|
||||
ZSH_COMPLETION_TEMPLATE = "devscripts/zsh-completion.in"
|
||||
@ -36,13 +34,15 @@ def build_completion(opt_parser):
|
||||
|
||||
flags = [opt.get_opt_string() for opt in opts]
|
||||
|
||||
template = read_file(ZSH_COMPLETION_TEMPLATE)
|
||||
with open(ZSH_COMPLETION_TEMPLATE) as f:
|
||||
template = f.read()
|
||||
|
||||
template = template.replace("{{fileopts}}", "|".join(fileopts))
|
||||
template = template.replace("{{diropts}}", "|".join(diropts))
|
||||
template = template.replace("{{flags}}", " ".join(flags))
|
||||
|
||||
write_file(ZSH_COMPLETION_FILE, template)
|
||||
with open(ZSH_COMPLETION_FILE, "w") as f:
|
||||
f.write(template)
|
||||
|
||||
|
||||
parser = youtube_dl.parseOpts()[0]
|
||||
|
@ -1,6 +1,7 @@
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import errno
|
||||
import io
|
||||
import hashlib
|
||||
import json
|
||||
import os.path
|
||||
@ -8,17 +9,14 @@ import re
|
||||
import types
|
||||
import ssl
|
||||
import sys
|
||||
import unittest
|
||||
|
||||
import youtube_dl.extractor
|
||||
from youtube_dl import YoutubeDL
|
||||
from youtube_dl.compat import (
|
||||
compat_open as open,
|
||||
compat_os_name,
|
||||
compat_str,
|
||||
)
|
||||
from youtube_dl.utils import (
|
||||
IDENTITY,
|
||||
preferredencoding,
|
||||
write_string,
|
||||
)
|
||||
@ -29,10 +27,10 @@ def get_params(override=None):
|
||||
"parameters.json")
|
||||
LOCAL_PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)),
|
||||
"local_parameters.json")
|
||||
with open(PARAMETERS_FILE, encoding='utf-8') as pf:
|
||||
with io.open(PARAMETERS_FILE, encoding='utf-8') as pf:
|
||||
parameters = json.load(pf)
|
||||
if os.path.exists(LOCAL_PARAMETERS_FILE):
|
||||
with open(LOCAL_PARAMETERS_FILE, encoding='utf-8') as pf:
|
||||
with io.open(LOCAL_PARAMETERS_FILE, encoding='utf-8') as pf:
|
||||
parameters.update(json.load(pf))
|
||||
if override:
|
||||
parameters.update(override)
|
||||
@ -74,8 +72,7 @@ class FakeYDL(YoutubeDL):
|
||||
def to_screen(self, s, skip_eol=None):
|
||||
print(s)
|
||||
|
||||
def trouble(self, *args, **kwargs):
|
||||
s = args[0] if len(args) > 0 else kwargs.get('message', 'Missing message')
|
||||
def trouble(self, s, tb=None):
|
||||
raise Exception(s)
|
||||
|
||||
def download(self, x):
|
||||
@ -142,7 +139,7 @@ def expect_value(self, got, expected, field):
|
||||
self.assertTrue(
|
||||
contains_str in got,
|
||||
'field %s (value: %r) should contain %r' % (field, got, contains_str))
|
||||
elif isinstance(expected, compat_str) and re.match(r'lambda \w+:', expected):
|
||||
elif isinstance(expected, compat_str) and re.match(r'^lambda \w+:', expected):
|
||||
fn = eval(expected)
|
||||
suite = expected.split(':', 1)[1].strip()
|
||||
self.assertTrue(
|
||||
@ -300,7 +297,3 @@ def http_server_port(httpd):
|
||||
else:
|
||||
sock = httpd.socket
|
||||
return sock.getsockname()[1]
|
||||
|
||||
|
||||
def expectedFailureIf(cond):
|
||||
return unittest.expectedFailure if cond else IDENTITY
|
||||
|
@@ -3,36 +3,18 @@
from __future__ import unicode_literals

# Allow direct execution
import io
import os
import sys
import unittest

sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import threading

from test.helper import (
expect_dict,
expect_value,
FakeYDL,
http_server_port,
)
from youtube_dl.compat import (
compat_etree_fromstring,
compat_http_server,
compat_open as open,
)
from test.helper import FakeYDL, expect_dict, expect_value, http_server_port
from youtube_dl.compat import compat_etree_fromstring, compat_http_server
from youtube_dl.extractor.common import InfoExtractor
from youtube_dl.extractor import (
get_info_extractor,
YoutubeIE,
)
from youtube_dl.utils import (
encode_data_uri,
ExtractorError,
RegexNotFoundError,
strip_jsonp,
)
from youtube_dl.extractor import YoutubeIE, get_info_extractor
from youtube_dl.utils import encode_data_uri, strip_jsonp, ExtractorError, RegexNotFoundError
import threading


TEAPOT_RESPONSE_STATUS = 418
@@ -118,71 +100,6 @@ class TestInfoExtractor(unittest.TestCase):
self.assertRaises(RegexNotFoundError, ie._html_search_meta, 'z', html, None, fatal=True)
self.assertRaises(RegexNotFoundError, ie._html_search_meta, ('z', 'x'), html, None, fatal=True)

def test_search_nextjs_data(self):
html = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content=
"text/html; charset=utf-8">
<meta name="viewport" content="width=device-width">
<title>Test _search_nextjs_data()</title>
</head>
<body>
<div id="__next">
<div style="background-color:#17171E" class="FU" dir="ltr">
<div class="sc-93de261d-0 dyzzYE">
<div>
<header class="HD"></header>
<main class="MN">
<div style="height:0" class="HT0">
<div style="width:NaN%" data-testid=
"stream-container" class="WDN"></div>
</div>
</main>
</div>
<footer class="sc-6e5faf91-0 dEGaHS"></footer>
</div>
</div>
</div>
<script id="__NEXT_DATA__" type="application/json">
{"props":{"pageProps":{"video":{"id":"testid"}}}}
</script>
</body>
</html>
'''
search = self.ie._search_nextjs_data(html, 'testID')
self.assertEqual(search['props']['pageProps']['video']['id'], 'testid')

def test_search_nuxt_data(self):
html = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content=
"text/html; charset=utf-8">
<title>Nuxt.js Test Page</title>
<meta name="viewport" content=
"width=device-width, initial-scale=1">
<meta data-hid="robots" name="robots" content="all">
</head>
<body class="BD">
<div id="__layout">
<h1 class="H1">Example heading</h1>
<div class="IN">
<p>Decoy text</p>
</div>
</div>
<script>
window.__NUXT__=(function(a,b,c,d,e,f,g,h){return {decoy:" default",data:[{track:{id:f,title:g}}]}}(null,null,"c",null,null,"testid","Nuxt.js title",null));
</script>
<script src="/_nuxt/a12345b.js" defer="defer"></script>
</body>
</html>
'''
search = self.ie._search_nuxt_data(html, 'testID')
self.assertEqual(search['track']['id'], 'testid')

def test_search_json_ld_realworld(self):
# https://github.com/ytdl-org/youtube-dl/issues/23306
expect_dict(
@@ -431,24 +348,6 @@ class TestInfoExtractor(unittest.TestCase):
}],
})

# from https://0000.studio/
# with type attribute but without extension in URL
expect_dict(
self,
self.ie._parse_html5_media_entries(
'https://0000.studio',
r'''
<video src="https://d1ggyt9m8pwf3g.cloudfront.net/protected/ap-northeast-1:1864af40-28d5-492b-b739-b32314b1a527/archive/clip/838db6a7-8973-4cd6-840d-8517e4093c92"
controls="controls" type="video/mp4" preload="metadata" autoplay="autoplay" playsinline class="object-contain">
</video>
''', None)[0],
{
'formats': [{
'url': 'https://d1ggyt9m8pwf3g.cloudfront.net/protected/ap-northeast-1:1864af40-28d5-492b-b739-b32314b1a527/archive/clip/838db6a7-8973-4cd6-840d-8517e4093c92',
'ext': 'mp4',
}],
})

def test_extract_jwplayer_data_realworld(self):
# from http://www.suffolk.edu/sjc/
expect_dict(
@@ -902,8 +801,8 @@ jwplayer("mediaplayer").setup({"abouttext":"Visit Indie DB","aboutlink":"http:\/
]

for m3u8_file, m3u8_url, expected_formats in _TEST_CASES:
with open('./test/testdata/m3u8/%s.m3u8' % m3u8_file,
mode='r', encoding='utf-8') as f:
with io.open('./test/testdata/m3u8/%s.m3u8' % m3u8_file,
mode='r', encoding='utf-8') as f:
formats = self.ie._parse_m3u8_formats(
f.read(), m3u8_url, ext='mp4')
self.ie._sort_formats(formats)
@@ -1127,8 +1026,8 @@ jwplayer("mediaplayer").setup({"abouttext":"Visit Indie DB","aboutlink":"http:\/
]

for mpd_file, mpd_url, mpd_base_url, expected_formats in _TEST_CASES:
with open('./test/testdata/mpd/%s.mpd' % mpd_file,
mode='r', encoding='utf-8') as f:
with io.open('./test/testdata/mpd/%s.mpd' % mpd_file,
mode='r', encoding='utf-8') as f:
formats = self.ie._parse_mpd_formats(
compat_etree_fromstring(f.read().encode('utf-8')),
mpd_base_url=mpd_base_url, mpd_url=mpd_url)
@@ -1154,8 +1053,8 @@ jwplayer("mediaplayer").setup({"abouttext":"Visit Indie DB","aboutlink":"http:\/
]

for f4m_file, f4m_url, expected_formats in _TEST_CASES:
with open('./test/testdata/f4m/%s.f4m' % f4m_file,
mode='r', encoding='utf-8') as f:
with io.open('./test/testdata/f4m/%s.f4m' % f4m_file,
mode='r', encoding='utf-8') as f:
formats = self.ie._parse_f4m_formats(
compat_etree_fromstring(f.read().encode('utf-8')),
f4m_url, None)
@@ -1202,8 +1101,8 @@ jwplayer("mediaplayer").setup({"abouttext":"Visit Indie DB","aboutlink":"http:\/
]

for xspf_file, xspf_url, expected_entries in _TEST_CASES:
with open('./test/testdata/xspf/%s.xspf' % xspf_file,
mode='r', encoding='utf-8') as f:
with io.open('./test/testdata/xspf/%s.xspf' % xspf_file,
mode='r', encoding='utf-8') as f:
entries = self.ie._parse_xspf(
compat_etree_fromstring(f.read().encode('utf-8')),
xspf_file, xspf_url=xspf_url, xspf_base_url=xspf_url)
@@ -10,31 +10,14 @@ import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import copy
import json

from test.helper import (
FakeYDL,
assertRegexpMatches,
try_rm,
)
from test.helper import FakeYDL, assertRegexpMatches
from youtube_dl import YoutubeDL
from youtube_dl.compat import (
compat_http_cookiejar_Cookie,
compat_http_cookies_SimpleCookie,
compat_kwargs,
compat_open as open,
compat_str,
compat_urllib_error,
)

from youtube_dl.compat import compat_str, compat_urllib_error
from youtube_dl.extractor import YoutubeIE
from youtube_dl.extractor.common import InfoExtractor
from youtube_dl.postprocessor.common import PostProcessor
from youtube_dl.utils import (
ExtractorError,
match_filter_func,
traverse_obj,
)
from youtube_dl.utils import ExtractorError, match_filter_func

TEST_URL = 'http://localhost/sample.mp4'

@@ -46,14 +29,11 @@ class YDL(FakeYDL):
self.msgs = []

def process_info(self, info_dict):
self.downloaded_info_dicts.append(info_dict.copy())
self.downloaded_info_dicts.append(info_dict)

def to_screen(self, msg):
self.msgs.append(msg)

def dl(self, *args, **kwargs):
assert False, 'Downloader must not be invoked for test_YoutubeDL'


def _make_result(formats, **kwargs):
res = {
@@ -62,9 +42,8 @@ def _make_result(formats, **kwargs):
'title': 'testttitle',
'extractor': 'testex',
'extractor_key': 'TestEx',
'webpage_url': 'http://example.com/watch?v=shenanigans',
}
res.update(**compat_kwargs(kwargs))
res.update(**kwargs)
return res

@@ -702,12 +681,12 @@ class TestYoutubeDL(unittest.TestCase):

class SimplePP(PostProcessor):
def run(self, info):
with open(audiofile, 'w') as f:
with open(audiofile, 'wt') as f:
f.write('EXAMPLE')
return [info['filepath']], info

def run_pp(params, PP):
with open(filename, 'w') as f:
with open(filename, 'wt') as f:
f.write('EXAMPLE')
ydl = YoutubeDL(params)
ydl.add_post_processor(PP())
@@ -726,7 +705,7 @@ class TestYoutubeDL(unittest.TestCase):

class ModifierPP(PostProcessor):
def run(self, info):
with open(info['filepath'], 'w') as f:
with open(info['filepath'], 'wt') as f:
f.write('MODIFIED')
return [], info

@@ -951,11 +930,17 @@ class TestYoutubeDL(unittest.TestCase):
# Test case for https://github.com/ytdl-org/youtube-dl/issues/27064
def test_ignoreerrors_for_playlist_with_url_transparent_iterable_entries(self):

ydl = YDL({
class _YDL(YDL):
def __init__(self, *args, **kwargs):
super(_YDL, self).__init__(*args, **kwargs)

def trouble(self, s, tb=None):
pass

ydl = _YDL({
'format': 'extra',
'ignoreerrors': True,
})
ydl.trouble = lambda *_, **__: None

class VideoIE(InfoExtractor):
_VALID_URL = r'video:(?P<id>\d+)'
@@ -1032,160 +1017,5 @@ class TestYoutubeDL(unittest.TestCase):
self.assertEqual(out_info['release_date'], '20210930')


class TestYoutubeDLCookies(unittest.TestCase):

@staticmethod
def encode_cookie(cookie):
if not isinstance(cookie, dict):
cookie = vars(cookie)
for name, value in cookie.items():
yield name, compat_str(value)

@classmethod
def comparable_cookies(cls, cookies):
# Work around cookiejar cookies not being unicode strings
return sorted(map(tuple, map(sorted, map(cls.encode_cookie, cookies))))

def assertSameCookies(self, c1, c2, msg=None):
return self.assertEqual(
*map(self.comparable_cookies, (c1, c2)),
msg=msg)

def assertSameCookieStrings(self, c1, c2, msg=None):
return self.assertSameCookies(
*map(lambda c: compat_http_cookies_SimpleCookie(c).values(), (c1, c2)),
msg=msg)

def test_header_cookies(self):

ydl = FakeYDL()
ydl.report_warning = lambda *_, **__: None

def cookie(name, value, version=None, domain='', path='', secure=False, expires=None):
return compat_http_cookiejar_Cookie(
version or 0, name, value, None, False,
domain, bool(domain), bool(domain), path, bool(path),
secure, expires, False, None, None, rest={})

test_url, test_domain = (t % ('yt.dl',) for t in ('https://%s/test', '.%s'))

def test(encoded_cookies, cookies, headers=False, round_trip=None, error_re=None):
def _test():
ydl.cookiejar.clear()
ydl._load_cookies(encoded_cookies, autoscope=headers)
if headers:
ydl._apply_header_cookies(test_url)
data = {'url': test_url}
ydl._calc_headers(data)
self.assertSameCookies(
cookies, ydl.cookiejar,
'Extracted cookiejar.Cookie is not the same')
if not headers:
self.assertSameCookieStrings(
data.get('cookies'), round_trip or encoded_cookies,
msg='Cookie is not the same as round trip')
ydl.__dict__['_YoutubeDL__header_cookies'] = []

try:
_test()
except AssertionError:
raise
except Exception as e:
if not error_re:
raise
assertRegexpMatches(self, e.args[0], error_re.join(('.*',) * 2))

test('test=value; Domain=' + test_domain, [cookie('test', 'value', domain=test_domain)])
test('test=value', [cookie('test', 'value')], error_re='Unscoped cookies are not allowed')
test('cookie1=value1; Domain={0}; Path=/test; cookie2=value2; Domain={0}; Path=/'.format(test_domain), [
cookie('cookie1', 'value1', domain=test_domain, path='/test'),
cookie('cookie2', 'value2', domain=test_domain, path='/')])
cookie_kw = compat_kwargs(
{'domain': test_domain, 'path': '/test', 'secure': True, 'expires': '9999999999', })
test('test=value; Domain={domain}; Path={path}; Secure; Expires={expires}'.format(**cookie_kw), [
cookie('test', 'value', **cookie_kw)])
test('test="value; "; path=/test; domain=' + test_domain, [
cookie('test', 'value; ', domain=test_domain, path='/test')],
round_trip='test="value\\073 "; Domain={0}; Path=/test'.format(test_domain))
test('name=; Domain=' + test_domain, [cookie('name', '', domain=test_domain)],
round_trip='name=""; Domain=' + test_domain)
test('test=value', [cookie('test', 'value', domain=test_domain)], headers=True)
test('cookie1=value; Domain={0}; cookie2=value'.format(test_domain), [],
headers=True, error_re='Invalid syntax')
ydl.report_warning = ydl.report_error
test('test=value', [], headers=True, error_re='Passing cookies as a header is a potential security risk')

def test_infojson_cookies(self):
TEST_FILE = 'test_infojson_cookies.info.json'
TEST_URL = 'https://example.com/example.mp4'
COOKIES = 'a=b; Domain=.example.com; c=d; Domain=.example.com'
COOKIE_HEADER = {'Cookie': 'a=b; c=d'}

ydl = FakeYDL()
ydl.process_info = lambda x: ydl._write_info_json('test', x, TEST_FILE)

def make_info(info_header_cookies=False, fmts_header_cookies=False, cookies_field=False):
fmt = {'url': TEST_URL}
if fmts_header_cookies:
fmt['http_headers'] = COOKIE_HEADER
if cookies_field:
fmt['cookies'] = COOKIES
return _make_result([fmt], http_headers=COOKIE_HEADER if info_header_cookies else None)

def test(initial_info, note):

def failure_msg(why):
return ' when '.join((why, note))

result = {}
result['processed'] = ydl.process_ie_result(initial_info)
self.assertTrue(ydl.cookiejar.get_cookies_for_url(TEST_URL),
msg=failure_msg('No cookies set in cookiejar after initial process'))
ydl.cookiejar.clear()
with open(TEST_FILE) as infojson:
result['loaded'] = ydl.sanitize_info(json.load(infojson), True)
result['final'] = ydl.process_ie_result(result['loaded'].copy(), download=False)
self.assertTrue(ydl.cookiejar.get_cookies_for_url(TEST_URL),
msg=failure_msg('No cookies set in cookiejar after final process'))
ydl.cookiejar.clear()
for key in ('processed', 'loaded', 'final'):
info = result[key]
self.assertIsNone(
traverse_obj(info, ((None, ('formats', 0)), 'http_headers', 'Cookie'), casesense=False, get_all=False),
msg=failure_msg('Cookie header not removed in {0} result'.format(key)))
self.assertSameCookieStrings(
traverse_obj(info, ((None, ('formats', 0)), 'cookies'), get_all=False), COOKIES,
msg=failure_msg('No cookies field found in {0} result'.format(key)))

test({'url': TEST_URL, 'http_headers': COOKIE_HEADER, 'id': '1', 'title': 'x'}, 'no formats field')
test(make_info(info_header_cookies=True), 'info_dict header cokies')
test(make_info(fmts_header_cookies=True), 'format header cookies')
test(make_info(info_header_cookies=True, fmts_header_cookies=True), 'info_dict and format header cookies')
test(make_info(info_header_cookies=True, fmts_header_cookies=True, cookies_field=True), 'all cookies fields')
test(make_info(cookies_field=True), 'cookies format field')
test({'url': TEST_URL, 'cookies': COOKIES, 'id': '1', 'title': 'x'}, 'info_dict cookies field only')

try_rm(TEST_FILE)

def test_add_headers_cookie(self):
def check_for_cookie_header(result):
return traverse_obj(result, ((None, ('formats', 0)), 'http_headers', 'Cookie'), casesense=False, get_all=False)

ydl = FakeYDL({'http_headers': {'Cookie': 'a=b'}})
ydl._apply_header_cookies(_make_result([])['webpage_url']) # Scope to input webpage URL: .example.com

fmt = {'url': 'https://example.com/video.mp4'}
result = ydl.process_ie_result(_make_result([fmt]), download=False)
self.assertIsNone(check_for_cookie_header(result), msg='http_headers cookies in result info_dict')
self.assertEqual(result.get('cookies'), 'a=b; Domain=.example.com', msg='No cookies were set in cookies field')
self.assertIn('a=b', ydl.cookiejar.get_cookie_header(fmt['url']), msg='No cookies were set in cookiejar')

fmt = {'url': 'https://wrong.com/video.mp4'}
result = ydl.process_ie_result(_make_result([fmt]), download=False)
self.assertIsNone(check_for_cookie_header(result), msg='http_headers cookies for wrong domain')
self.assertFalse(result.get('cookies'), msg='Cookies set in cookies field for wrong domain')
self.assertFalse(ydl.cookiejar.get_cookie_header(fmt['url']), msg='Cookies set in cookiejar for wrong domain')


if __name__ == '__main__':
unittest.main()
@@ -46,20 +46,6 @@ class TestYoutubeDLCookieJar(unittest.TestCase):
# will be ignored
self.assertFalse(cookiejar._cookies)

def test_get_cookie_header(self):
cookiejar = YoutubeDLCookieJar('./test/testdata/cookies/httponly_cookies.txt')
cookiejar.load(ignore_discard=True, ignore_expires=True)
header = cookiejar.get_cookie_header('https://www.foobar.foobar')
self.assertIn('HTTPONLY_COOKIE', header)

def test_get_cookies_for_url(self):
cookiejar = YoutubeDLCookieJar('./test/testdata/cookies/session_cookies.txt')
cookiejar.load(ignore_discard=True, ignore_expires=True)
cookies = cookiejar.get_cookies_for_url('https://www.foobar.foobar/')
self.assertEqual(len(cookies), 2)
cookies = cookiejar.get_cookies_for_url('https://foobar.foobar/')
self.assertFalse(cookies)


if __name__ == '__main__':
unittest.main()
@@ -20,15 +20,15 @@ from test.helper import (


import hashlib
import io
import json
import socket

import youtube_dl.YoutubeDL
from youtube_dl.compat import (
compat_http_client,
compat_HTTPError,
compat_open as open,
compat_urllib_error,
compat_HTTPError,
)
from youtube_dl.utils import (
DownloadError,
@@ -245,7 +245,7 @@ def generator(test_case, tname):
self.assertTrue(
os.path.exists(info_json_fn),
'Missing info file %s' % info_json_fn)
with open(info_json_fn, encoding='utf-8') as infof:
with io.open(info_json_fn, encoding='utf-8') as infof:
info_dict = json.load(infof)
expect_info_dict(self, info_dict, tc.get('info_dict', {}))
finally:
@ -12,65 +12,20 @@ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
|
||||
from test.helper import (
|
||||
FakeLogger,
|
||||
FakeYDL,
|
||||
http_server_port,
|
||||
try_rm,
|
||||
)
|
||||
from youtube_dl import YoutubeDL
|
||||
from youtube_dl.compat import (
|
||||
compat_http_cookiejar_Cookie,
|
||||
compat_http_server,
|
||||
compat_kwargs,
|
||||
)
|
||||
from youtube_dl.utils import (
|
||||
encodeFilename,
|
||||
join_nonempty,
|
||||
)
|
||||
from youtube_dl.downloader.external import (
|
||||
Aria2cFD,
|
||||
Aria2pFD,
|
||||
AxelFD,
|
||||
CurlFD,
|
||||
FFmpegFD,
|
||||
HttpieFD,
|
||||
WgetFD,
|
||||
)
|
||||
from youtube_dl.compat import compat_http_server
|
||||
from youtube_dl.utils import encodeFilename
|
||||
from youtube_dl.downloader.external import Aria2pFD
|
||||
import threading
|
||||
|
||||
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
|
||||
|
||||
|
||||
TEST_SIZE = 10 * 1024
|
||||
|
||||
TEST_COOKIE = {
|
||||
'version': 0,
|
||||
'name': 'test',
|
||||
'value': 'ytdlp',
|
||||
'port': None,
|
||||
'port_specified': False,
|
||||
'domain': '.example.com',
|
||||
'domain_specified': True,
|
||||
'domain_initial_dot': False,
|
||||
'path': '/',
|
||||
'path_specified': True,
|
||||
'secure': False,
|
||||
'expires': None,
|
||||
'discard': False,
|
||||
'comment': None,
|
||||
'comment_url': None,
|
||||
'rest': {},
|
||||
}
|
||||
|
||||
TEST_COOKIE_VALUE = join_nonempty('name', 'value', delim='=', from_dict=TEST_COOKIE)
|
||||
|
||||
TEST_INFO = {'url': 'http://www.example.com/'}
|
||||
|
||||
|
||||
def cookiejar_Cookie(**cookie_args):
|
||||
return compat_http_cookiejar_Cookie(**compat_kwargs(cookie_args))
|
||||
|
||||
|
||||
def ifExternalFDAvailable(externalFD):
|
||||
return unittest.skipUnless(externalFD.available(),
|
||||
externalFD.get_basename() + ' not found')
|
||||
|
||||
|
||||
class HTTPTestRequestHandler(compat_http_server.BaseHTTPRequestHandler):
|
||||
def log_message(self, format, *args):
|
||||
@ -115,7 +70,7 @@ class HTTPTestRequestHandler(compat_http_server.BaseHTTPRequestHandler):
|
||||
assert False, 'unrecognised server path'
|
||||
|
||||
|
||||
@ifExternalFDAvailable(Aria2pFD)
|
||||
@unittest.skipUnless(Aria2pFD.available(), 'aria2p module not found')
|
||||
class TestAria2pFD(unittest.TestCase):
|
||||
def setUp(self):
|
||||
self.httpd = compat_http_server.HTTPServer(
|
||||
@ -156,103 +111,5 @@ class TestAria2pFD(unittest.TestCase):
|
||||
})
|
||||
|
||||
|
||||
@ifExternalFDAvailable(HttpieFD)
|
||||
class TestHttpieFD(unittest.TestCase):
|
||||
def test_make_cmd(self):
|
||||
with FakeYDL() as ydl:
|
||||
downloader = HttpieFD(ydl, {})
|
||||
self.assertEqual(
|
||||
downloader._make_cmd('test', TEST_INFO),
|
||||
['http', '--download', '--output', 'test', 'http://www.example.com/'])
|
||||
|
||||
# Test cookie header is added
|
||||
ydl.cookiejar.set_cookie(cookiejar_Cookie(**TEST_COOKIE))
|
||||
self.assertEqual(
|
||||
downloader._make_cmd('test', TEST_INFO),
|
||||
['http', '--download', '--output', 'test',
|
||||
'http://www.example.com/', 'Cookie:' + TEST_COOKIE_VALUE])
|
||||
|
||||
|
||||
@ifExternalFDAvailable(AxelFD)
|
||||
class TestAxelFD(unittest.TestCase):
|
||||
def test_make_cmd(self):
|
||||
with FakeYDL() as ydl:
|
||||
downloader = AxelFD(ydl, {})
|
||||
self.assertEqual(
|
||||
downloader._make_cmd('test', TEST_INFO),
|
||||
['axel', '-o', 'test', '--', 'http://www.example.com/'])
|
||||
|
||||
# Test cookie header is added
|
||||
ydl.cookiejar.set_cookie(cookiejar_Cookie(**TEST_COOKIE))
|
||||
self.assertEqual(
|
||||
downloader._make_cmd('test', TEST_INFO),
|
||||
['axel', '-o', 'test', '-H', 'Cookie: ' + TEST_COOKIE_VALUE,
|
||||
'--max-redirect=0', '--', 'http://www.example.com/'])
|
||||
|
||||
|
||||
@ifExternalFDAvailable(WgetFD)
|
||||
class TestWgetFD(unittest.TestCase):
|
||||
def test_make_cmd(self):
|
||||
with FakeYDL() as ydl:
|
||||
downloader = WgetFD(ydl, {})
|
||||
self.assertNotIn('--load-cookies', downloader._make_cmd('test', TEST_INFO))
|
||||
# Test cookiejar tempfile arg is added
|
||||
ydl.cookiejar.set_cookie(cookiejar_Cookie(**TEST_COOKIE))
|
||||
self.assertIn('--load-cookies', downloader._make_cmd('test', TEST_INFO))
|
||||
|
||||
|
||||
@ifExternalFDAvailable(CurlFD)
|
||||
class TestCurlFD(unittest.TestCase):
|
||||
def test_make_cmd(self):
|
||||
with FakeYDL() as ydl:
|
||||
downloader = CurlFD(ydl, {})
|
||||
self.assertNotIn('--cookie', downloader._make_cmd('test', TEST_INFO))
|
||||
# Test cookie header is added
|
||||
ydl.cookiejar.set_cookie(cookiejar_Cookie(**TEST_COOKIE))
|
||||
self.assertIn('--cookie', downloader._make_cmd('test', TEST_INFO))
|
||||
self.assertIn(TEST_COOKIE_VALUE, downloader._make_cmd('test', TEST_INFO))
|
||||
|
||||
|
||||
@ifExternalFDAvailable(Aria2cFD)
|
||||
class TestAria2cFD(unittest.TestCase):
|
||||
def test_make_cmd(self):
|
||||
with FakeYDL() as ydl:
|
||||
downloader = Aria2cFD(ydl, {})
|
||||
downloader._make_cmd('test', TEST_INFO)
|
||||
self.assertFalse(hasattr(downloader, '_cookies_tempfile'))
|
||||
|
||||
# Test cookiejar tempfile arg is added
|
||||
ydl.cookiejar.set_cookie(cookiejar_Cookie(**TEST_COOKIE))
|
||||
cmd = downloader._make_cmd('test', TEST_INFO)
|
||||
self.assertIn('--load-cookies=%s' % downloader._cookies_tempfile, cmd)
|
||||
|
||||
|
||||
@ifExternalFDAvailable(FFmpegFD)
|
||||
class TestFFmpegFD(unittest.TestCase):
|
||||
_args = []
|
||||
|
||||
def _test_cmd(self, args):
|
||||
self._args = args
|
||||
|
||||
def test_make_cmd(self):
|
||||
with FakeYDL() as ydl:
|
||||
downloader = FFmpegFD(ydl, {})
|
||||
downloader._debug_cmd = self._test_cmd
|
||||
info_dict = TEST_INFO.copy()
|
||||
info_dict['ext'] = 'mp4'
|
||||
|
||||
downloader._call_downloader('test', info_dict)
|
||||
self.assertEqual(self._args, [
|
||||
'ffmpeg', '-y', '-i', 'http://www.example.com/',
|
||||
'-c', 'copy', '-f', 'mp4', 'file:test'])
|
||||
|
||||
# Test cookies arg is added
|
||||
ydl.cookiejar.set_cookie(cookiejar_Cookie(**TEST_COOKIE))
|
||||
downloader._call_downloader('test', info_dict)
|
||||
self.assertEqual(self._args, [
|
||||
'ffmpeg', '-y', '-cookies', TEST_COOKIE_VALUE + '; path=/; domain=.example.com;\r\n',
|
||||
'-i', 'http://www.example.com/', '-c', 'copy', '-f', 'mp4', 'file:test'])
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
||||
|
@ -88,7 +88,7 @@ class TestHttpFD(unittest.TestCase):
|
||||
self.assertTrue(downloader.real_download(filename, {
|
||||
'url': 'http://127.0.0.1:%d/%s' % (self.port, ep),
|
||||
}))
|
||||
self.assertEqual(os.path.getsize(encodeFilename(filename)), TEST_SIZE, ep)
|
||||
self.assertEqual(os.path.getsize(encodeFilename(filename)), TEST_SIZE)
|
||||
try_rm(encodeFilename(filename))
|
||||
|
||||
def download_all(self, params):
|
||||
|
@ -8,53 +8,47 @@ import unittest
|
||||
import sys
|
||||
import os
|
||||
import subprocess
|
||||
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
|
||||
from youtube_dl.utils import encodeArgument
|
||||
|
||||
rootDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
|
||||
|
||||
sys.path.insert(0, rootDir)
|
||||
|
||||
from youtube_dl.compat import compat_register_utf8, compat_subprocess_get_DEVNULL
|
||||
from youtube_dl.utils import encodeArgument
|
||||
|
||||
compat_register_utf8()
|
||||
|
||||
|
||||
_DEV_NULL = compat_subprocess_get_DEVNULL()
|
||||
try:
|
||||
_DEV_NULL = subprocess.DEVNULL
|
||||
except AttributeError:
|
||||
_DEV_NULL = open(os.devnull, 'wb')
|
||||
|
||||
|
||||
class TestExecution(unittest.TestCase):
|
||||
def setUp(self):
|
||||
self.module = 'youtube_dl'
|
||||
if sys.version_info < (2, 7):
|
||||
self.module += '.__main__'
|
||||
|
||||
def test_import(self):
|
||||
subprocess.check_call([sys.executable, '-c', 'import youtube_dl'], cwd=rootDir)
|
||||
|
||||
def test_module_exec(self):
|
||||
subprocess.check_call([sys.executable, '-m', self.module, '--version'], cwd=rootDir, stdout=_DEV_NULL)
|
||||
if sys.version_info >= (2, 7): # Python 2.6 doesn't support package execution
|
||||
subprocess.check_call([sys.executable, '-m', 'youtube_dl', '--version'], cwd=rootDir, stdout=_DEV_NULL)
|
||||
|
||||
def test_main_exec(self):
|
||||
subprocess.check_call([sys.executable, os.path.normpath('youtube_dl/__main__.py'), '--version'], cwd=rootDir, stdout=_DEV_NULL)
|
||||
subprocess.check_call([sys.executable, 'youtube_dl/__main__.py', '--version'], cwd=rootDir, stdout=_DEV_NULL)
|
||||
|
||||
def test_cmdline_umlauts(self):
|
||||
os.environ['PYTHONIOENCODING'] = 'utf-8'
|
||||
p = subprocess.Popen(
|
||||
[sys.executable, '-m', self.module, encodeArgument('ä'), '--version'],
|
||||
[sys.executable, 'youtube_dl/__main__.py', encodeArgument('ä'), '--version'],
|
||||
cwd=rootDir, stdout=_DEV_NULL, stderr=subprocess.PIPE)
|
||||
_, stderr = p.communicate()
|
||||
self.assertFalse(stderr)
|
||||
|
||||
def test_lazy_extractors(self):
|
||||
lazy_extractors = os.path.normpath('youtube_dl/extractor/lazy_extractors.py')
|
||||
lazy_extractors = 'youtube_dl/extractor/lazy_extractors.py'
|
||||
try:
|
||||
subprocess.check_call([sys.executable, os.path.normpath('devscripts/make_lazy_extractors.py'), lazy_extractors], cwd=rootDir, stdout=_DEV_NULL)
|
||||
subprocess.check_call([sys.executable, os.path.normpath('test/test_all_urls.py')], cwd=rootDir, stdout=_DEV_NULL)
|
||||
subprocess.check_call([sys.executable, 'devscripts/make_lazy_extractors.py', lazy_extractors], cwd=rootDir, stdout=_DEV_NULL)
|
||||
subprocess.check_call([sys.executable, 'test/test_all_urls.py'], cwd=rootDir, stdout=_DEV_NULL)
|
||||
finally:
|
||||
for x in ('', 'c') if sys.version_info[0] < 3 else ('',):
|
||||
for x in ['', 'c'] if sys.version_info[0] < 3 else ['']:
|
||||
try:
|
||||
os.remove(lazy_extractors + x)
|
||||
except OSError:
|
||||
except (IOError, OSError):
|
||||
pass
|
||||
|
||||
|
||||
|
@ -8,163 +8,33 @@ import sys
|
||||
import unittest
|
||||
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
|
||||
import contextlib
|
||||
import gzip
|
||||
import io
|
||||
import ssl
|
||||
import tempfile
|
||||
import threading
|
||||
import zlib
|
||||
|
||||
# avoid deprecated alias assertRaisesRegexp
|
||||
if hasattr(unittest.TestCase, 'assertRaisesRegex'):
|
||||
unittest.TestCase.assertRaisesRegexp = unittest.TestCase.assertRaisesRegex
|
||||
|
||||
try:
|
||||
import brotli
|
||||
except ImportError:
|
||||
brotli = None
|
||||
try:
|
||||
from urllib.request import pathname2url
|
||||
except ImportError:
|
||||
from urllib import pathname2url
|
||||
|
||||
from youtube_dl.compat import (
|
||||
compat_http_cookiejar_Cookie,
|
||||
compat_http_server,
|
||||
compat_str as str,
|
||||
compat_urllib_error,
|
||||
compat_urllib_HTTPError,
|
||||
compat_urllib_parse,
|
||||
compat_urllib_request,
|
||||
)
|
||||
|
||||
from youtube_dl.utils import (
|
||||
sanitized_Request,
|
||||
update_Request,
|
||||
urlencode_postdata,
|
||||
)
|
||||
|
||||
from test.helper import (
|
||||
expectedFailureIf,
|
||||
FakeYDL,
|
||||
FakeLogger,
|
||||
http_server_port,
|
||||
)
|
||||
from youtube_dl import YoutubeDL
|
||||
from youtube_dl.compat import compat_http_server, compat_urllib_request
|
||||
import ssl
|
||||
import threading
|
||||
|
||||
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
|
||||
|
||||
|
||||
class HTTPTestRequestHandler(compat_http_server.BaseHTTPRequestHandler):
|
||||
protocol_version = 'HTTP/1.1'
|
||||
|
||||
# work-around old/new -style class inheritance
|
||||
def super(self, meth_name, *args, **kwargs):
|
||||
from types import MethodType
|
||||
try:
|
||||
super()
|
||||
fn = lambda s, m, *a, **k: getattr(super(), m)(*a, **k)
|
||||
except TypeError:
|
||||
fn = lambda s, m, *a, **k: getattr(compat_http_server.BaseHTTPRequestHandler, m)(s, *a, **k)
|
||||
self.super = MethodType(fn, self)
|
||||
return self.super(meth_name, *args, **kwargs)
|
||||
|
||||
def log_message(self, format, *args):
|
||||
pass
|
||||
|
||||
def _headers(self):
|
||||
payload = str(self.headers).encode('utf-8')
|
||||
self.send_response(200)
|
||||
self.send_header('Content-Type', 'application/json')
|
||||
self.send_header('Content-Length', str(len(payload)))
|
||||
self.end_headers()
|
||||
self.wfile.write(payload)
|
||||
|
||||
def _redirect(self):
|
||||
self.send_response(int(self.path[len('/redirect_'):]))
|
||||
self.send_header('Location', '/method')
|
||||
self.send_header('Content-Length', '0')
|
||||
self.end_headers()
|
||||
|
||||
def _method(self, method, payload=None):
|
||||
self.send_response(200)
|
||||
self.send_header('Content-Length', str(len(payload or '')))
|
||||
self.send_header('Method', method)
|
||||
self.end_headers()
|
||||
if payload:
|
||||
self.wfile.write(payload)
|
||||
|
||||
def _status(self, status):
|
||||
payload = '<html>{0} NOT FOUND</html>'.format(status).encode('utf-8')
|
||||
self.send_response(int(status))
|
||||
self.send_header('Content-Type', 'text/html; charset=utf-8')
|
||||
self.send_header('Content-Length', str(len(payload)))
|
||||
self.end_headers()
|
||||
self.wfile.write(payload)
|
||||
|
||||
def _read_data(self):
|
||||
if 'Content-Length' in self.headers:
|
||||
return self.rfile.read(int(self.headers['Content-Length']))
|
||||
|
||||
def _test_url(self, path, host='127.0.0.1', scheme='http', port=None):
|
||||
return '{0}://{1}:{2}/{3}'.format(
|
||||
scheme, host,
|
||||
port if port is not None
|
||||
else http_server_port(self.server), path)
|
||||
|
||||
def do_POST(self):
|
||||
data = self._read_data()
|
||||
if self.path.startswith('/redirect_'):
|
||||
self._redirect()
|
||||
elif self.path.startswith('/method'):
|
||||
self._method('POST', data)
|
||||
elif self.path.startswith('/headers'):
|
||||
self._headers()
|
||||
else:
|
||||
self._status(404)
|
||||
|
||||
def do_HEAD(self):
|
||||
if self.path.startswith('/redirect_'):
|
||||
self._redirect()
|
||||
elif self.path.startswith('/method'):
|
||||
self._method('HEAD')
|
||||
else:
|
||||
self._status(404)
|
||||
|
||||
def do_PUT(self):
|
||||
data = self._read_data()
|
||||
if self.path.startswith('/redirect_'):
|
||||
self._redirect()
|
||||
elif self.path.startswith('/method'):
|
||||
self._method('PUT', data)
|
||||
else:
|
||||
self._status(404)
|
||||
|
||||
def do_GET(self):
|
||||
|
||||
def respond(payload=b'<html><video src="/vid.mp4" /></html>',
|
||||
payload_type='text/html; charset=utf-8',
|
||||
payload_encoding=None,
|
||||
resp_code=200):
|
||||
self.send_response(resp_code)
|
||||
self.send_header('Content-Type', payload_type)
|
||||
if payload_encoding:
|
||||
self.send_header('Content-Encoding', payload_encoding)
|
||||
self.send_header('Content-Length', str(len(payload))) # required for persistent connections
|
||||
self.end_headers()
|
||||
self.wfile.write(payload)
|
||||
|
||||
def gzip_compress(p):
|
||||
buf = io.BytesIO()
|
||||
with contextlib.closing(gzip.GzipFile(fileobj=buf, mode='wb')) as f:
|
||||
f.write(p)
|
||||
return buf.getvalue()
|
||||
|
||||
if self.path == '/video.html':
|
||||
respond()
|
||||
self.send_response(200)
|
||||
self.send_header('Content-Type', 'text/html; charset=utf-8')
|
||||
self.end_headers()
|
||||
self.wfile.write(b'<html><video src="/vid.mp4" /></html>')
|
||||
elif self.path == '/vid.mp4':
|
||||
respond(b'\x00\x00\x00\x00\x20\x66\x74[video]', 'video/mp4')
|
||||
self.send_response(200)
|
||||
self.send_header('Content-Type', 'video/mp4')
|
||||
self.end_headers()
|
||||
self.wfile.write(b'\x00\x00\x00\x00\x20\x66\x74[video]')
|
||||
elif self.path == '/302':
|
||||
if sys.version_info[0] == 3:
|
||||
# XXX: Python 3 http server does not allow non-ASCII header values
|
||||
@ -172,336 +42,60 @@ class HTTPTestRequestHandler(compat_http_server.BaseHTTPRequestHandler):
|
||||
self.end_headers()
|
||||
return
|
||||
|
||||
new_url = self._test_url('中文.html')
|
||||
new_url = 'http://127.0.0.1:%d/中文.html' % http_server_port(self.server)
|
||||
self.send_response(302)
|
||||
self.send_header(b'Location', new_url.encode('utf-8'))
|
||||
self.end_headers()
|
||||
elif self.path == '/%E4%B8%AD%E6%96%87.html':
|
||||
respond()
|
||||
elif self.path == '/%c7%9f':
|
||||
respond()
|
||||
elif self.path == '/redirect_dotsegments':
|
||||
self.send_response(301)
|
||||
# redirect to /headers but with dot segments before
|
||||
self.send_header('Location', '/a/b/./../../headers')
|
||||
self.send_header('Content-Length', '0')
|
||||
self.send_response(200)
|
||||
self.send_header('Content-Type', 'text/html; charset=utf-8')
|
||||
self.end_headers()
|
||||
elif self.path.startswith('/redirect_'):
|
||||
self._redirect()
|
||||
elif self.path.startswith('/method'):
|
||||
self._method('GET')
|
||||
elif self.path.startswith('/headers'):
|
||||
self._headers()
|
||||
elif self.path.startswith('/308-to-headers'):
|
||||
self.send_response(308)
|
||||
self.send_header('Location', '/headers')
|
||||
self.send_header('Content-Length', '0')
|
||||
self.end_headers()
|
||||
elif self.path == '/trailing_garbage':
|
||||
payload = b'<html><video src="/vid.mp4" /></html>'
|
||||
compressed = gzip_compress(payload) + b'trailing garbage'
|
||||
respond(compressed, payload_encoding='gzip')
|
||||
elif self.path == '/302-non-ascii-redirect':
|
||||
new_url = self._test_url('中文.html')
|
||||
# actually respond with permanent redirect
|
||||
self.send_response(301)
|
||||
self.send_header('Location', new_url)
|
||||
self.send_header('Content-Length', '0')
|
||||
self.end_headers()
|
||||
elif self.path == '/content-encoding':
|
||||
encodings = self.headers.get('ytdl-encoding', '')
|
||||
payload = b'<html><video src="/vid.mp4" /></html>'
|
||||
for encoding in filter(None, (e.strip() for e in encodings.split(','))):
|
||||
if encoding == 'br' and brotli:
|
||||
payload = brotli.compress(payload)
|
||||
elif encoding == 'gzip':
|
||||
payload = gzip_compress(payload)
|
||||
elif encoding == 'deflate':
|
||||
payload = zlib.compress(payload)
|
||||
elif encoding == 'unsupported':
|
||||
payload = b'raw'
|
||||
break
|
||||
else:
|
||||
self._status(415)
|
||||
return
|
||||
respond(payload, payload_encoding=encodings)
|
||||
|
||||
self.wfile.write(b'<html><video src="/vid.mp4" /></html>')
|
||||
else:
|
||||
self._status(404)
|
||||
|
||||
def send_header(self, keyword, value):
|
||||
"""
|
||||
Forcibly allow HTTP server to send non percent-encoded non-ASCII characters in headers.
|
||||
This is against what is defined in RFC 3986: but we need to test that we support this
|
||||
since some sites incorrectly do this.
|
||||
"""
|
||||
if keyword.lower() == 'connection':
|
||||
return self.super('send_header', keyword, value)
|
||||
|
||||
if not hasattr(self, '_headers_buffer'):
|
||||
self._headers_buffer = []
|
||||
|
||||
self._headers_buffer.append('{0}: {1}\r\n'.format(keyword, value).encode('utf-8'))
|
||||
|
||||
def end_headers(self):
|
||||
if hasattr(self, '_headers_buffer'):
|
||||
self.wfile.write(b''.join(self._headers_buffer))
|
||||
self._headers_buffer = []
|
||||
self.super('end_headers')
|
||||
assert False
|
||||
|
||||
|
||||
class TestHTTP(unittest.TestCase):
|
||||
# when does it make sense to check the SSL certificate?
|
||||
_check_cert = (
|
||||
sys.version_info >= (3, 2)
|
||||
or (sys.version_info[0] == 2 and sys.version_info[1:] >= (7, 19)))
|
||||
|
||||
def setUp(self):
|
||||
# HTTP server
|
||||
self.http_httpd = compat_http_server.HTTPServer(
|
||||
self.httpd = compat_http_server.HTTPServer(
|
||||
('127.0.0.1', 0), HTTPTestRequestHandler)
|
||||
self.http_port = http_server_port(self.http_httpd)
|
||||
|
||||
self.http_server_thread = threading.Thread(target=self.http_httpd.serve_forever)
|
||||
self.http_server_thread.daemon = True
|
||||
self.http_server_thread.start()
|
||||
|
||||
try:
|
||||
from http.server import ThreadingHTTPServer
|
||||
except ImportError:
|
||||
try:
|
||||
from socketserver import ThreadingMixIn
|
||||
except ImportError:
|
||||
from SocketServer import ThreadingMixIn
|
||||
|
||||
class ThreadingHTTPServer(ThreadingMixIn, compat_http_server.HTTPServer):
|
||||
pass
|
||||
|
||||
# HTTPS server
|
||||
certfn = os.path.join(TEST_DIR, 'testcert.pem')
|
||||
self.https_httpd = ThreadingHTTPServer(
|
||||
('127.0.0.1', 0), HTTPTestRequestHandler)
|
||||
try:
|
||||
sslctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
|
||||
sslctx.verify_mode = ssl.CERT_NONE
|
||||
sslctx.check_hostname = False
|
||||
sslctx.load_cert_chain(certfn, None)
|
||||
self.https_httpd.socket = sslctx.wrap_socket(
|
||||
self.https_httpd.socket, server_side=True)
|
||||
except AttributeError:
|
||||
self.https_httpd.socket = ssl.wrap_socket(
|
||||
self.https_httpd.socket, certfile=certfn, server_side=True)
|
||||
|
||||
self.https_port = http_server_port(self.https_httpd)
|
||||
self.https_server_thread = threading.Thread(target=self.https_httpd.serve_forever)
|
||||
self.https_server_thread.daemon = True
|
||||
self.https_server_thread.start()
|
||||
|
||||
def tearDown(self):
|
||||
|
||||
def closer(svr):
|
||||
def _closer():
|
||||
svr.shutdown()
|
||||
svr.server_close()
|
||||
return _closer
|
||||
|
||||
shutdown_thread = threading.Thread(target=closer(self.http_httpd))
|
||||
shutdown_thread.start()
|
||||
self.http_server_thread.join(2.0)
|
||||
|
||||
shutdown_thread = threading.Thread(target=closer(self.https_httpd))
|
||||
shutdown_thread.start()
|
||||
self.https_server_thread.join(2.0)
|
||||
|
||||
def _test_url(self, path, host='127.0.0.1', scheme='http', port=None):
|
||||
return '{0}://{1}:{2}/{3}'.format(
|
||||
scheme, host,
|
||||
port if port is not None
|
||||
else self.https_port if scheme == 'https'
|
||||
else self.http_port, path)
|
||||
|
||||
@unittest.skipUnless(_check_cert, 'No support for certificate check in SSL')
|
||||
def test_nocheckcertificate(self):
|
||||
with FakeYDL({'logger': FakeLogger()}) as ydl:
|
||||
with self.assertRaises(compat_urllib_error.URLError):
|
||||
ydl.urlopen(sanitized_Request(self._test_url('headers', scheme='https')))
|
||||
|
||||
with FakeYDL({'logger': FakeLogger(), 'nocheckcertificate': True}) as ydl:
|
||||
r = ydl.urlopen(sanitized_Request(self._test_url('headers', scheme='https')))
|
||||
self.assertEqual(r.getcode(), 200)
|
||||
r.close()
|
||||
|
||||
def test_percent_encode(self):
|
||||
with FakeYDL() as ydl:
|
||||
# Unicode characters should be encoded with uppercase percent-encoding
|
||||
res = ydl.urlopen(sanitized_Request(self._test_url('中文.html')))
|
||||
self.assertEqual(res.getcode(), 200)
|
||||
res.close()
|
||||
# don't normalize existing percent encodings
|
||||
res = ydl.urlopen(sanitized_Request(self._test_url('%c7%9f')))
|
||||
self.assertEqual(res.getcode(), 200)
|
||||
res.close()
|
||||
self.port = http_server_port(self.httpd)
|
||||
self.server_thread = threading.Thread(target=self.httpd.serve_forever)
|
||||
self.server_thread.daemon = True
|
||||
self.server_thread.start()
|
||||
|
||||
def test_unicode_path_redirection(self):
|
||||
with FakeYDL() as ydl:
|
||||
r = ydl.urlopen(sanitized_Request(self._test_url('302-non-ascii-redirect')))
|
||||
self.assertEqual(r.url, self._test_url('%E4%B8%AD%E6%96%87.html'))
|
||||
r.close()
|
||||
# XXX: Python 3 http server does not allow non-ASCII header values
|
||||
if sys.version_info[0] == 3:
|
||||
return
|
||||
|
||||
def test_redirect(self):
|
||||
with FakeYDL() as ydl:
|
||||
def do_req(redirect_status, method, check_no_content=False):
|
||||
data = b'testdata' if method in ('POST', 'PUT') else None
|
||||
res = ydl.urlopen(sanitized_Request(
|
||||
self._test_url('redirect_{0}'.format(redirect_status)),
|
||||
method=method, data=data))
|
||||
if check_no_content:
|
||||
self.assertNotIn('Content-Type', res.headers)
|
||||
return res.read().decode('utf-8'), res.headers.get('method', '')
|
||||
# A 303 must either use GET or HEAD for subsequent request
|
||||
self.assertEqual(do_req(303, 'POST'), ('', 'GET'))
|
||||
self.assertEqual(do_req(303, 'HEAD'), ('', 'HEAD'))
|
||||
ydl = YoutubeDL({'logger': FakeLogger()})
|
||||
r = ydl.extract_info('http://127.0.0.1:%d/302' % self.port)
|
||||
self.assertEqual(r['entries'][0]['url'], 'http://127.0.0.1:%d/vid.mp4' % self.port)
|
||||
|
||||
self.assertEqual(do_req(303, 'PUT'), ('', 'GET'))
|
||||
|
||||
# 301 and 302 turn POST only into a GET, with no Content-Type
|
||||
self.assertEqual(do_req(301, 'POST', True), ('', 'GET'))
|
||||
self.assertEqual(do_req(301, 'HEAD'), ('', 'HEAD'))
|
||||
self.assertEqual(do_req(302, 'POST', True), ('', 'GET'))
|
||||
self.assertEqual(do_req(302, 'HEAD'), ('', 'HEAD'))
|
||||
class TestHTTPS(unittest.TestCase):
|
||||
def setUp(self):
|
||||
certfn = os.path.join(TEST_DIR, 'testcert.pem')
|
||||
self.httpd = compat_http_server.HTTPServer(
|
||||
('127.0.0.1', 0), HTTPTestRequestHandler)
|
||||
self.httpd.socket = ssl.wrap_socket(
|
||||
self.httpd.socket, certfile=certfn, server_side=True)
|
||||
self.port = http_server_port(self.httpd)
|
||||
self.server_thread = threading.Thread(target=self.httpd.serve_forever)
|
||||
self.server_thread.daemon = True
|
||||
self.server_thread.start()
|
||||
|
||||
self.assertEqual(do_req(301, 'PUT'), ('testdata', 'PUT'))
|
||||
self.assertEqual(do_req(302, 'PUT'), ('testdata', 'PUT'))
|
||||
def test_nocheckcertificate(self):
|
||||
if sys.version_info >= (2, 7, 9): # No certificate checking anyways
|
||||
ydl = YoutubeDL({'logger': FakeLogger()})
|
||||
self.assertRaises(
|
||||
Exception,
|
||||
ydl.extract_info, 'https://127.0.0.1:%d/video.html' % self.port)
|
||||
|
||||
# 307 and 308 should not change method
|
||||
for m in ('POST', 'PUT'):
|
||||
self.assertEqual(do_req(307, m), ('testdata', m))
|
||||
self.assertEqual(do_req(308, m), ('testdata', m))
|
||||
|
||||
self.assertEqual(do_req(307, 'HEAD'), ('', 'HEAD'))
|
||||
self.assertEqual(do_req(308, 'HEAD'), ('', 'HEAD'))
|
||||
|
||||
# These should not redirect and instead raise an HTTPError
|
||||
for code in (300, 304, 305, 306):
|
||||
with self.assertRaises(compat_urllib_HTTPError):
|
||||
do_req(code, 'GET')
|
||||
|
||||
# Jython 2.7.1 times out for some reason
|
||||
@expectedFailureIf(sys.platform.startswith('java') and sys.version_info < (2, 7, 2))
|
||||
def test_content_type(self):
|
||||
# https://github.com/yt-dlp/yt-dlp/commit/379a4f161d4ad3e40932dcf5aca6e6fb9715ab28
|
||||
with FakeYDL({'nocheckcertificate': True}) as ydl:
|
||||
# method should be auto-detected as POST
|
||||
r = sanitized_Request(self._test_url('headers', scheme='https'), data=urlencode_postdata({'test': 'test'}))
|
||||
|
||||
headers = ydl.urlopen(r).read().decode('utf-8')
|
||||
self.assertIn('Content-Type: application/x-www-form-urlencoded', headers)
|
||||
|
||||
# test http
|
||||
r = sanitized_Request(self._test_url('headers'), data=urlencode_postdata({'test': 'test'}))
|
||||
headers = ydl.urlopen(r).read().decode('utf-8')
|
||||
self.assertIn('Content-Type: application/x-www-form-urlencoded', headers)
|
||||
|
||||
def test_update_req(self):
|
||||
req = sanitized_Request('http://example.com')
|
||||
assert req.data is None
|
||||
assert req.get_method() == 'GET'
|
||||
assert not req.has_header('Content-Type')
|
||||
# Test that zero-byte payloads will be sent
|
||||
req = update_Request(req, data=b'')
|
||||
assert req.data == b''
|
||||
assert req.get_method() == 'POST'
|
||||
# yt-dl expects data to be encoded and Content-Type to be added by sender
|
||||
# assert req.get_header('Content-Type') == 'application/x-www-form-urlencoded'
|
||||
|
||||
def test_cookiejar(self):
|
||||
with FakeYDL() as ydl:
|
||||
ydl.cookiejar.set_cookie(compat_http_cookiejar_Cookie(
|
||||
0, 'test', 'ytdl', None, False, '127.0.0.1', True,
|
||||
False, '/headers', True, False, None, False, None, None, {}))
|
||||
data = ydl.urlopen(sanitized_Request(
|
||||
self._test_url('headers'))).read().decode('utf-8')
|
||||
self.assertIn('Cookie: test=ytdl', data)
|
||||
|
||||
def test_passed_cookie_header(self):
|
||||
# We should accept a Cookie header being passed as in normal headers and handle it appropriately.
|
||||
with FakeYDL() as ydl:
|
||||
# Specified Cookie header should be used
|
||||
res = ydl.urlopen(sanitized_Request(
|
||||
self._test_url('headers'), headers={'Cookie': 'test=test'})).read().decode('utf-8')
|
||||
self.assertIn('Cookie: test=test', res)
|
||||
|
||||
# Specified Cookie header should be removed on any redirect
|
||||
res = ydl.urlopen(sanitized_Request(
|
||||
self._test_url('308-to-headers'), headers={'Cookie': 'test=test'})).read().decode('utf-8')
|
||||
self.assertNotIn('Cookie: test=test', res)
|
||||
|
||||
# Specified Cookie header should override global cookiejar for that request
|
||||
ydl.cookiejar.set_cookie(compat_http_cookiejar_Cookie(
|
||||
0, 'test', 'ytdlp', None, False, '127.0.0.1', True,
|
||||
False, '/headers', True, False, None, False, None, None, {}))
|
||||
data = ydl.urlopen(sanitized_Request(
|
||||
self._test_url('headers'), headers={'Cookie': 'test=test'})).read().decode('utf-8')
|
||||
self.assertNotIn('Cookie: test=ytdlp', data)
|
||||
self.assertIn('Cookie: test=test', data)
|
||||
|
||||
def test_no_compression_compat_header(self):
|
||||
with FakeYDL() as ydl:
|
||||
data = ydl.urlopen(
|
||||
sanitized_Request(
|
||||
self._test_url('headers'),
|
||||
headers={'Youtubedl-no-compression': True})).read()
|
||||
self.assertIn(b'Accept-Encoding: identity', data)
|
||||
self.assertNotIn(b'youtubedl-no-compression', data.lower())
|
||||
|
||||
def test_gzip_trailing_garbage(self):
|
||||
# https://github.com/ytdl-org/youtube-dl/commit/aa3e950764337ef9800c936f4de89b31c00dfcf5
|
||||
# https://github.com/ytdl-org/youtube-dl/commit/6f2ec15cee79d35dba065677cad9da7491ec6e6f
|
||||
with FakeYDL() as ydl:
|
||||
data = ydl.urlopen(sanitized_Request(self._test_url('trailing_garbage'))).read().decode('utf-8')
|
||||
self.assertEqual(data, '<html><video src="/vid.mp4" /></html>')
|
||||
|
||||
def __test_compression(self, encoding):
|
||||
with FakeYDL() as ydl:
|
||||
res = ydl.urlopen(
|
||||
sanitized_Request(
|
||||
self._test_url('content-encoding'),
|
||||
headers={'ytdl-encoding': encoding}))
|
||||
# decoded encodings are removed: only check for valid decompressed data
|
||||
self.assertEqual(res.read(), b'<html><video src="/vid.mp4" /></html>')
|
||||
|
||||
@unittest.skipUnless(brotli, 'brotli support is not installed')
|
||||
def test_brotli(self):
|
||||
self.__test_compression('br')
|
||||
|
||||
def test_deflate(self):
|
||||
self.__test_compression('deflate')
|
||||
|
||||
def test_gzip(self):
|
||||
self.__test_compression('gzip')
|
||||
|
||||
def test_multiple_encodings(self):
|
||||
# https://www.rfc-editor.org/rfc/rfc9110.html#section-8.4
|
||||
for pair in ('gzip,deflate', 'deflate, gzip', 'gzip, gzip', 'deflate, deflate'):
|
||||
self.__test_compression(pair)
|
||||
|
||||
def test_unsupported_encoding(self):
|
||||
# it should return the raw content
|
||||
with FakeYDL() as ydl:
|
||||
res = ydl.urlopen(
|
||||
sanitized_Request(
|
||||
self._test_url('content-encoding'),
|
||||
headers={'ytdl-encoding': 'unsupported'}))
|
||||
self.assertEqual(res.headers.get('Content-Encoding'), 'unsupported')
|
||||
self.assertEqual(res.read(), b'raw')
|
||||
|
||||
def test_remove_dot_segments(self):
|
||||
with FakeYDL() as ydl:
|
||||
res = ydl.urlopen(sanitized_Request(self._test_url('a/b/./../../headers')))
|
||||
self.assertEqual(compat_urllib_parse.urlparse(res.geturl()).path, '/headers')
|
||||
|
||||
res = ydl.urlopen(sanitized_Request(self._test_url('redirect_dotsegments')))
|
||||
self.assertEqual(compat_urllib_parse.urlparse(res.geturl()).path, '/headers')
|
||||
ydl = YoutubeDL({'logger': FakeLogger(), 'nocheckcertificate': True})
|
||||
r = ydl.extract_info('https://127.0.0.1:%d/video.html' % self.port)
|
||||
self.assertEqual(r['entries'][0]['url'], 'https://127.0.0.1:%d/vid.mp4' % self.port)
|
||||
|
||||
|
||||
def _build_proxy_handler(name):
|
||||
@ -515,7 +109,7 @@ def _build_proxy_handler(name):
|
||||
self.send_response(200)
|
||||
self.send_header('Content-Type', 'text/plain; charset=utf-8')
|
||||
self.end_headers()
|
||||
self.wfile.write('{0}: {1}'.format(self.proxy_name, self.path).encode('utf-8'))
|
||||
self.wfile.write('{self.proxy_name}: {self.path}'.format(self=self).encode('utf-8'))
|
||||
return HTTPTestRequestHandler
|
||||
|
||||
|
||||
@ -535,30 +129,10 @@ class TestProxy(unittest.TestCase):
|
||||
self.geo_proxy_thread.daemon = True
|
||||
self.geo_proxy_thread.start()
|
||||
|
||||
def tearDown(self):
|
||||
|
||||
def closer(svr):
|
||||
def _closer():
|
||||
svr.shutdown()
|
||||
svr.server_close()
|
||||
return _closer
|
||||
|
||||
shutdown_thread = threading.Thread(target=closer(self.proxy))
|
||||
shutdown_thread.start()
|
||||
self.proxy_thread.join(2.0)
|
||||
|
||||
shutdown_thread = threading.Thread(target=closer(self.geo_proxy))
|
||||
shutdown_thread.start()
|
||||
self.geo_proxy_thread.join(2.0)
|
||||
|
||||
def _test_proxy(self, host='127.0.0.1', port=None):
|
||||
return '{0}:{1}'.format(
|
||||
host, port if port is not None else self.port)
|
||||
|
||||
def test_proxy(self):
|
||||
geo_proxy = self._test_proxy(port=self.geo_port)
|
||||
geo_proxy = '127.0.0.1:{0}'.format(self.geo_port)
|
||||
ydl = YoutubeDL({
|
||||
'proxy': self._test_proxy(),
|
||||
'proxy': '127.0.0.1:{0}'.format(self.port),
|
||||
'geo_verification_proxy': geo_proxy,
|
||||
})
|
||||
url = 'http://foo.com/bar'
|
||||
@ -572,7 +146,7 @@ class TestProxy(unittest.TestCase):
|
||||
|
||||
def test_proxy_with_idn(self):
|
||||
ydl = YoutubeDL({
|
||||
'proxy': self._test_proxy(),
|
||||
'proxy': '127.0.0.1:{0}'.format(self.port),
|
||||
})
|
||||
url = 'http://中文.tw/'
|
||||
response = ydl.urlopen(url).read().decode('utf-8')
|
||||
@ -580,25 +154,5 @@ class TestProxy(unittest.TestCase):
|
||||
self.assertEqual(response, 'normal: http://xn--fiq228c.tw/')
|
||||
|
||||
|
||||
class TestFileURL(unittest.TestCase):
|
||||
# See https://github.com/ytdl-org/youtube-dl/issues/8227
|
||||
def test_file_urls(self):
|
||||
tf = tempfile.NamedTemporaryFile(delete=False)
|
||||
tf.write(b'foobar')
|
||||
tf.close()
|
||||
url = compat_urllib_parse.urljoin('file://', pathname2url(tf.name))
|
||||
with FakeYDL() as ydl:
|
||||
self.assertRaisesRegexp(
|
||||
compat_urllib_error.URLError, 'file:// scheme is explicitly disabled in youtube-dl for security reasons', ydl.urlopen, url)
|
||||
# not yet implemented
|
||||
"""
|
||||
with FakeYDL({'enable_file_urls': True}) as ydl:
|
||||
res = ydl.urlopen(url)
|
||||
self.assertEqual(res.read(), b'foobar')
|
||||
res.close()
|
||||
"""
|
||||
os.unlink(tf.name)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
||||
|
@@ -18,7 +18,6 @@ class TestJSInterpreter(unittest.TestCase):
    def test_basic(self):
        jsi = JSInterpreter('function x(){;}')
        self.assertEqual(jsi.call_function('x'), None)
        self.assertEqual(repr(jsi.extract_function('x')), 'F<x>')

        jsi = JSInterpreter('function x3(){return 42;}')
        self.assertEqual(jsi.call_function('x3'), 42)
@@ -33,55 +32,6 @@ class TestJSInterpreter(unittest.TestCase):
        jsi = JSInterpreter('function x4(a){return 2*a+1;}')
        self.assertEqual(jsi.call_function('x4', 3), 7)

    def test_add(self):
        jsi = JSInterpreter('function f(){return 42 + 7;}')
        self.assertEqual(jsi.call_function('f'), 49)
        jsi = JSInterpreter('function f(){return 42 + undefined;}')
        self.assertTrue(math.isnan(jsi.call_function('f')))
        jsi = JSInterpreter('function f(){return 42 + null;}')
        self.assertEqual(jsi.call_function('f'), 42)

    def test_sub(self):
        jsi = JSInterpreter('function f(){return 42 - 7;}')
        self.assertEqual(jsi.call_function('f'), 35)
        jsi = JSInterpreter('function f(){return 42 - undefined;}')
        self.assertTrue(math.isnan(jsi.call_function('f')))
        jsi = JSInterpreter('function f(){return 42 - null;}')
        self.assertEqual(jsi.call_function('f'), 42)

    def test_mul(self):
        jsi = JSInterpreter('function f(){return 42 * 7;}')
        self.assertEqual(jsi.call_function('f'), 294)
        jsi = JSInterpreter('function f(){return 42 * undefined;}')
        self.assertTrue(math.isnan(jsi.call_function('f')))
        jsi = JSInterpreter('function f(){return 42 * null;}')
        self.assertEqual(jsi.call_function('f'), 0)

    def test_div(self):
        jsi = JSInterpreter('function f(a, b){return a / b;}')
        self.assertTrue(math.isnan(jsi.call_function('f', 0, 0)))
        self.assertTrue(math.isnan(jsi.call_function('f', JS_Undefined, 1)))
        self.assertTrue(math.isinf(jsi.call_function('f', 2, 0)))
        self.assertEqual(jsi.call_function('f', 0, 3), 0)

    def test_mod(self):
        jsi = JSInterpreter('function f(){return 42 % 7;}')
        self.assertEqual(jsi.call_function('f'), 0)
        jsi = JSInterpreter('function f(){return 42 % 0;}')
        self.assertTrue(math.isnan(jsi.call_function('f')))
        jsi = JSInterpreter('function f(){return 42 % undefined;}')
        self.assertTrue(math.isnan(jsi.call_function('f')))

    def test_exp(self):
        jsi = JSInterpreter('function f(){return 42 ** 2;}')
        self.assertEqual(jsi.call_function('f'), 1764)
        jsi = JSInterpreter('function f(){return 42 ** undefined;}')
        self.assertTrue(math.isnan(jsi.call_function('f')))
        jsi = JSInterpreter('function f(){return 42 ** null;}')
        self.assertEqual(jsi.call_function('f'), 1)
        jsi = JSInterpreter('function f(){return undefined ** 42;}')
        self.assertTrue(math.isnan(jsi.call_function('f')))

    def test_empty_return(self):
        jsi = JSInterpreter('function f(){return; y()}')
        self.assertEqual(jsi.call_function('f'), None)
@@ -492,12 +442,10 @@ class TestJSInterpreter(unittest.TestCase):
        jsi = JSInterpreter('''
            function x() { let a=/,,[/,913,/](,)}/; "".replace(a, ""); return a; }
        ''')
        attrs = set(('findall', 'finditer', 'match', 'scanner', 'search',
                     'split', 'sub', 'subn'))
        if sys.version_info >= (2, 7):
            # documented for 2.6 but may not be found
            attrs.update(('flags', 'groupindex', 'groups', 'pattern'))
        self.assertSetEqual(set(dir(jsi.call_function('x'))) & attrs, attrs)
        attrs = set(('findall', 'finditer', 'flags', 'groupindex',
                     'groups', 'match', 'pattern', 'scanner',
                     'search', 'split', 'sub', 'subn'))
        self.assertTrue(set(dir(jsi.call_function('x'))) > attrs)

        jsi = JSInterpreter('''
            function x() { let a=/,,[/,913,/](,)}/i; return a; }
@@ -557,30 +505,6 @@ class TestJSInterpreter(unittest.TestCase):
        jsi = JSInterpreter('function x(){return 1236566549 << 5}')
        self.assertEqual(jsi.call_function('x'), 915423904)

    def test_bitwise_operators_madness(self):
        jsi = JSInterpreter('function x(){return null << 5}')
        self.assertEqual(jsi.call_function('x'), 0)

        jsi = JSInterpreter('function x(){return undefined >> 5}')
        self.assertEqual(jsi.call_function('x'), 0)

        jsi = JSInterpreter('function x(){return 42 << NaN}')
        self.assertEqual(jsi.call_function('x'), 42)

        jsi = JSInterpreter('function x(){return 42 << Infinity}')
        self.assertEqual(jsi.call_function('x'), 42)

    def test_32066(self):
        jsi = JSInterpreter("function x(){return Math.pow(3, 5) + new Date('1970-01-01T08:01:42.000+08:00') / 1000 * -239 - -24205;}")
        self.assertEqual(jsi.call_function('x'), 70)

    def test_unary_operators(self):
        jsi = JSInterpreter('function f(){return 2 - - - 2;}')
        self.assertEqual(jsi.call_function('f'), 0)
        # fails
        # jsi = JSInterpreter('function f(){return 2 + - + - - 2;}')
        # self.assertEqual(jsi.call_function('f'), 0)

    """ # fails so far
    def test_packed(self):
        jsi = JSInterpreter('''function x(p,a,c,k,e,d){while(c--)if(k[c])p=p.replace(new RegExp('\\b'+c.toString(a)+'\\b','g'),k[c]);return p}''')
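The operator tests above pin down JavaScript-style arithmetic coercion: `undefined` poisons arithmetic to NaN, while `null` behaves like 0. A minimal sketch of those coercion rules in plain Python follows; `js_add` and `_UNDEFINED` are illustrative names only and are not part of youtube_dl.jsinterp.

import math

_UNDEFINED = object()  # stands in for JS `undefined` (illustrative, not JS_Undefined)

def js_add(a, b):
    # undefined + anything -> NaN; null (None) coerces to 0
    if a is _UNDEFINED or b is _UNDEFINED:
        return float('nan')
    a = 0 if a is None else a
    b = 0 if b is None else b
    return a + b

assert js_add(42, 7) == 49
assert js_add(42, None) == 42               # 42 + null == 42
assert math.isnan(js_add(42, _UNDEFINED))   # 42 + undefined is NaN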
@@ -5,18 +5,16 @@ from __future__ import unicode_literals
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

dirn = os.path.dirname

sys.path.insert(0, dirn(dirn(os.path.abspath(__file__))))

import errno
import io
import json
import re
import subprocess

from youtube_dl.swfinterp import SWFInterpreter
from youtube_dl.compat import compat_open as open


TEST_DIR = os.path.join(
@@ -45,7 +43,7 @@ def _make_testfunc(testfile):
                '-static-link-runtime-shared-libraries', as_file])
        except OSError as ose:
            if ose.errno == errno.ENOENT:
                self.skipTest('mxmlc not found!')
                print('mxmlc not found! Skipping test.')
                return
            raise

@@ -53,7 +51,7 @@ def _make_testfunc(testfile):
            swf_content = swf_f.read()
        swfi = SWFInterpreter(swf_content)

        with open(as_file, 'r', encoding='utf-8') as as_f:
        with io.open(as_file, 'r', encoding='utf-8') as as_f:
            as_content = as_f.read()

        def _find_spec(key):
@@ -2,21 +2,19 @@ from __future__ import unicode_literals

# Allow direct execution
import os
import re
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

dirn = os.path.dirname
import io
import re

rootDir = dirn(dirn(os.path.abspath(__file__)))

sys.path.insert(0, rootDir)
rootDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

IGNORED_FILES = [
    'setup.py',  # http://bugs.python.org/issue13943
    'conf.py',
    'buildserver.py',
    'get-pip.py',
]

IGNORED_DIRS = [
@@ -25,7 +23,6 @@ IGNORED_DIRS = [
]

from test.helper import assertRegexpMatches
from youtube_dl.compat import compat_open as open


class TestUnicodeLiterals(unittest.TestCase):
@@ -43,7 +40,7 @@ class TestUnicodeLiterals(unittest.TestCase):
                continue

            fn = os.path.join(dirpath, basename)
            with open(fn, encoding='utf-8') as inf:
            with io.open(fn, encoding='utf-8') as inf:
                code = inf.read()

            if "'" not in code and '"' not in code:
@@ -20,7 +20,7 @@ import xml.etree.ElementTree
from youtube_dl.utils import (
    age_restricted,
    args_to_str,
    base_url,
    encode_base_n,
    caesar,
    clean_html,
    clean_podcast_url,
@@ -29,12 +29,10 @@ from youtube_dl.utils import (
    detect_exe_version,
    determine_ext,
    dict_get,
    encode_base_n,
    encode_compat_str,
    encodeFilename,
    escape_rfc3986,
    escape_url,
    expand_path,
    extract_attributes,
    ExtractorError,
    find_xpath_attr,
@@ -53,7 +51,6 @@ from youtube_dl.utils import (
    js_to_json,
    LazyList,
    limit_length,
    lowercase_escape,
    merge_dicts,
    mimetype2ext,
    month_by_name,
@@ -62,33 +59,30 @@ from youtube_dl.utils import (
    OnDemandPagedList,
    orderedSet,
    parse_age_limit,
    parse_bitrate,
    parse_duration,
    parse_filesize,
    parse_codecs,
    parse_count,
    parse_iso8601,
    parse_resolution,
    parse_qs,
    parse_bitrate,
    pkcs1pad,
    prepend_extension,
    read_batch_urls,
    remove_start,
    remove_end,
    remove_quotes,
    replace_extension,
    rot47,
    sanitize_filename,
    sanitize_path,
    sanitize_url,
    expand_path,
    prepend_extension,
    replace_extension,
    remove_start,
    remove_end,
    remove_quotes,
    rot47,
    shell_quote,
    smuggle_url,
    str_or_none,
    str_to_int,
    strip_jsonp,
    strip_or_none,
    subtitles_filename,
    T,
    timeconvert,
    traverse_obj,
    try_call,
@@ -97,8 +91,10 @@ from youtube_dl.utils import (
    unified_timestamp,
    unsmuggle_url,
    uppercase_escape,
    lowercase_escape,
    url_basename,
    url_or_none,
    base_url,
    urljoin,
    urlencode_postdata,
    urshift,
@@ -116,7 +112,7 @@ from youtube_dl.utils import (
    cli_option,
    cli_valueless_option,
    cli_bool_option,
    YoutubeDLHandler,
    parse_codecs,
)
from youtube_dl.compat import (
    compat_chr,
@@ -126,6 +122,7 @@ from youtube_dl.compat import (
    compat_setenv,
    compat_str,
    compat_urlparse,
    compat_parse_qs,
)


@@ -683,36 +680,38 @@ class TestUtil(unittest.TestCase):
        self.assertTrue(isinstance(data, bytes))

    def test_update_url_query(self):
        self.assertEqual(parse_qs(update_url_query(
        def query_dict(url):
            return compat_parse_qs(compat_urlparse.urlparse(url).query)
        self.assertEqual(query_dict(update_url_query(
            'http://example.com/path', {'quality': ['HD'], 'format': ['mp4']})),
            parse_qs('http://example.com/path?quality=HD&format=mp4'))
        self.assertEqual(parse_qs(update_url_query(
            query_dict('http://example.com/path?quality=HD&format=mp4'))
        self.assertEqual(query_dict(update_url_query(
            'http://example.com/path', {'system': ['LINUX', 'WINDOWS']})),
            parse_qs('http://example.com/path?system=LINUX&system=WINDOWS'))
        self.assertEqual(parse_qs(update_url_query(
            query_dict('http://example.com/path?system=LINUX&system=WINDOWS'))
        self.assertEqual(query_dict(update_url_query(
            'http://example.com/path', {'fields': 'id,formats,subtitles'})),
            parse_qs('http://example.com/path?fields=id,formats,subtitles'))
        self.assertEqual(parse_qs(update_url_query(
            query_dict('http://example.com/path?fields=id,formats,subtitles'))
        self.assertEqual(query_dict(update_url_query(
            'http://example.com/path', {'fields': ('id,formats,subtitles', 'thumbnails')})),
            parse_qs('http://example.com/path?fields=id,formats,subtitles&fields=thumbnails'))
        self.assertEqual(parse_qs(update_url_query(
            query_dict('http://example.com/path?fields=id,formats,subtitles&fields=thumbnails'))
        self.assertEqual(query_dict(update_url_query(
            'http://example.com/path?manifest=f4m', {'manifest': []})),
            parse_qs('http://example.com/path'))
        self.assertEqual(parse_qs(update_url_query(
            query_dict('http://example.com/path'))
        self.assertEqual(query_dict(update_url_query(
            'http://example.com/path?system=LINUX&system=WINDOWS', {'system': 'LINUX'})),
            parse_qs('http://example.com/path?system=LINUX'))
        self.assertEqual(parse_qs(update_url_query(
            query_dict('http://example.com/path?system=LINUX'))
        self.assertEqual(query_dict(update_url_query(
            'http://example.com/path', {'fields': b'id,formats,subtitles'})),
            parse_qs('http://example.com/path?fields=id,formats,subtitles'))
        self.assertEqual(parse_qs(update_url_query(
            query_dict('http://example.com/path?fields=id,formats,subtitles'))
        self.assertEqual(query_dict(update_url_query(
            'http://example.com/path', {'width': 1080, 'height': 720})),
            parse_qs('http://example.com/path?width=1080&height=720'))
        self.assertEqual(parse_qs(update_url_query(
            query_dict('http://example.com/path?width=1080&height=720'))
        self.assertEqual(query_dict(update_url_query(
            'http://example.com/path', {'bitrate': 5020.43})),
            parse_qs('http://example.com/path?bitrate=5020.43'))
        self.assertEqual(parse_qs(update_url_query(
            query_dict('http://example.com/path?bitrate=5020.43'))
        self.assertEqual(query_dict(update_url_query(
            'http://example.com/path', {'test': '第二行тест'})),
            parse_qs('http://example.com/path?test=%E7%AC%AC%E4%BA%8C%E8%A1%8C%D1%82%D0%B5%D1%81%D1%82'))
            query_dict('http://example.com/path?test=%E7%AC%AC%E4%BA%8C%E8%A1%8C%D1%82%D0%B5%D1%81%D1%82'))

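The assertions above all rely on the same round trip: build a URL with update_url_query, then parse its query string back into a dict of lists for comparison. A minimal sketch of that round trip using only the standard library follows; `naive_update_url_query` is an illustrative stand-in and not youtube_dl's update_url_query.

try:
    from urllib.parse import parse_qs, urlparse, urlencode, urlunparse  # Python 3
except ImportError:
    from urlparse import parse_qs, urlparse, urlunparse                 # Python 2
    from urllib import urlencode

def naive_update_url_query(url, query):
    # illustrative only: merge `query` into the URL's existing query string
    parts = urlparse(url)
    qs = parse_qs(parts.query)
    qs.update({k: v if isinstance(v, list) else [v] for k, v in query.items()})
    return urlunparse(parts._replace(query=urlencode(qs, doseq=True)))

url = naive_update_url_query('http://example.com/path', {'quality': ['HD'], 'format': ['mp4']})
assert parse_qs(urlparse(url).query) == {'quality': ['HD'], 'format': ['mp4']}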
    def test_multipart_encode(self):
        self.assertEqual(
@ -904,111 +903,6 @@ class TestUtil(unittest.TestCase):
|
||||
)
|
||||
self.assertEqual(escape_url('http://vimeo.com/56015672#at=0'), 'http://vimeo.com/56015672#at=0')
|
||||
|
||||
def test_remove_dot_segments(self):
|
||||
|
||||
def remove_dot_segments(p):
|
||||
q = '' if p.startswith('/') else '/'
|
||||
p = 'http://example.com' + q + p
|
||||
p = compat_urlparse.urlsplit(YoutubeDLHandler._fix_path(p)).path
|
||||
return p[1:] if q else p
|
||||
|
||||
self.assertEqual(remove_dot_segments('/a/b/c/./../../g'), '/a/g')
|
||||
self.assertEqual(remove_dot_segments('mid/content=5/../6'), 'mid/6')
|
||||
self.assertEqual(remove_dot_segments('/ad/../cd'), '/cd')
|
||||
self.assertEqual(remove_dot_segments('/ad/../cd/'), '/cd/')
|
||||
self.assertEqual(remove_dot_segments('/..'), '/')
|
||||
self.assertEqual(remove_dot_segments('/./'), '/')
|
||||
self.assertEqual(remove_dot_segments('/./a'), '/a')
|
||||
self.assertEqual(remove_dot_segments('/abc/./.././d/././e/.././f/./../../ghi'), '/ghi')
|
||||
self.assertEqual(remove_dot_segments('/'), '/')
|
||||
self.assertEqual(remove_dot_segments('/t'), '/t')
|
||||
self.assertEqual(remove_dot_segments('t'), 't')
|
||||
self.assertEqual(remove_dot_segments(''), '')
|
||||
self.assertEqual(remove_dot_segments('/../a/b/c'), '/a/b/c')
|
||||
self.assertEqual(remove_dot_segments('../a'), 'a')
|
||||
self.assertEqual(remove_dot_segments('./a'), 'a')
|
||||
self.assertEqual(remove_dot_segments('.'), '')
|
||||
self.assertEqual(remove_dot_segments('////'), '////')
|
||||
|
||||
def test_js_to_json_vars_strings(self):
|
||||
self.assertDictEqual(
|
||||
json.loads(js_to_json(
|
||||
'''{
|
||||
'null': a,
|
||||
'nullStr': b,
|
||||
'true': c,
|
||||
'trueStr': d,
|
||||
'false': e,
|
||||
'falseStr': f,
|
||||
'unresolvedVar': g,
|
||||
}''',
|
||||
{
|
||||
'a': 'null',
|
||||
'b': '"null"',
|
||||
'c': 'true',
|
||||
'd': '"true"',
|
||||
'e': 'false',
|
||||
'f': '"false"',
|
||||
'g': 'var',
|
||||
}
|
||||
)),
|
||||
{
|
||||
'null': None,
|
||||
'nullStr': 'null',
|
||||
'true': True,
|
||||
'trueStr': 'true',
|
||||
'false': False,
|
||||
'falseStr': 'false',
|
||||
'unresolvedVar': 'var'
|
||||
}
|
||||
)
|
||||
|
||||
self.assertDictEqual(
|
||||
json.loads(js_to_json(
|
||||
'''{
|
||||
'int': a,
|
||||
'intStr': b,
|
||||
'float': c,
|
||||
'floatStr': d,
|
||||
}''',
|
||||
{
|
||||
'a': '123',
|
||||
'b': '"123"',
|
||||
'c': '1.23',
|
||||
'd': '"1.23"',
|
||||
}
|
||||
)),
|
||||
{
|
||||
'int': 123,
|
||||
'intStr': '123',
|
||||
'float': 1.23,
|
||||
'floatStr': '1.23',
|
||||
}
|
||||
)
|
||||
|
||||
self.assertDictEqual(
|
||||
json.loads(js_to_json(
|
||||
'''{
|
||||
'object': a,
|
||||
'objectStr': b,
|
||||
'array': c,
|
||||
'arrayStr': d,
|
||||
}''',
|
||||
{
|
||||
'a': '{}',
|
||||
'b': '"{}"',
|
||||
'c': '[]',
|
||||
'd': '"[]"',
|
||||
}
|
||||
)),
|
||||
{
|
||||
'object': {},
|
||||
'objectStr': '{}',
|
||||
'array': [],
|
||||
'arrayStr': '[]',
|
||||
}
|
||||
)
|
||||
|
||||
def test_js_to_json_realworld(self):
|
||||
inp = '''{
|
||||
'clip':{'provider':'pseudo'}
|
||||
@ -1079,10 +973,10 @@ class TestUtil(unittest.TestCase):
|
||||
!42: 42
|
||||
}''')
|
||||
self.assertEqual(json.loads(on), {
|
||||
'a': True,
|
||||
'b': False,
|
||||
'c': False,
|
||||
'd': True,
|
||||
'a': 0,
|
||||
'b': 1,
|
||||
'c': 0,
|
||||
'd': 42.42,
|
||||
'e': [],
|
||||
'f': "abc",
|
||||
'g': "",
|
||||
@ -1152,26 +1046,10 @@ class TestUtil(unittest.TestCase):
|
||||
on = js_to_json('{ "040": "040" }')
|
||||
self.assertEqual(json.loads(on), {'040': '040'})
|
||||
|
||||
on = js_to_json('[1,//{},\n2]')
|
||||
self.assertEqual(json.loads(on), [1, 2])
|
||||
|
||||
on = js_to_json(r'"\^\$\#"')
|
||||
self.assertEqual(json.loads(on), R'^$#', msg='Unnecessary escapes should be stripped')
|
||||
|
||||
on = js_to_json('\'"\\""\'')
|
||||
self.assertEqual(json.loads(on), '"""', msg='Unnecessary quote escape should be escaped')
|
||||
|
||||
def test_js_to_json_malformed(self):
|
||||
self.assertEqual(js_to_json('42a1'), '42"a1"')
|
||||
self.assertEqual(js_to_json('42a-1'), '42"a"-1')
|
||||
|
||||
def test_js_to_json_template_literal(self):
|
||||
self.assertEqual(js_to_json('`Hello ${name}`', {'name': '"world"'}), '"Hello world"')
|
||||
self.assertEqual(js_to_json('`${name}${name}`', {'name': '"X"'}), '"XX"')
|
||||
self.assertEqual(js_to_json('`${name}${name}`', {'name': '5'}), '"55"')
|
||||
self.assertEqual(js_to_json('`${name}"${name}"`', {'name': '5'}), '"5\\"5\\""')
|
||||
self.assertEqual(js_to_json('`${name}`', {}), '"name"')
|
||||
|
||||
def test_extract_attributes(self):
|
||||
self.assertEqual(extract_attributes('<e x="y">'), {'x': 'y'})
|
||||
self.assertEqual(extract_attributes("<e x='y'>"), {'x': 'y'})
|
||||
@ -1685,10 +1563,8 @@ Line 1
|
||||
self.assertEqual(variadic(None), (None, ))
|
||||
self.assertEqual(variadic('spam'), ('spam', ))
|
||||
self.assertEqual(variadic('spam', allowed_types=dict), 'spam')
|
||||
self.assertEqual(variadic('spam', allowed_types=[dict]), 'spam')
|
||||
|
||||
def test_traverse_obj(self):
|
||||
str = compat_str
|
||||
_TEST_DATA = {
|
||||
100: 100,
|
||||
1.2: 1.2,
|
||||
@ -1706,11 +1582,6 @@ Line 1
|
||||
'dict': {},
|
||||
}
|
||||
|
||||
# define a pukka Iterable
|
||||
def iter_range(stop):
|
||||
for from_ in range(stop):
|
||||
yield from_
|
||||
|
||||
# Test base functionality
|
||||
self.assertEqual(traverse_obj(_TEST_DATA, ('str',)), 'str',
|
||||
msg='allow tuple path')
|
||||
@ -1726,60 +1597,22 @@ Line 1
|
||||
|
||||
# Test Ellipsis behavior
|
||||
self.assertCountEqual(traverse_obj(_TEST_DATA, Ellipsis),
|
||||
(item for item in _TEST_DATA.values() if item not in (None, {})),
|
||||
msg='`...` should give all non-discarded values')
|
||||
(item for item in _TEST_DATA.values() if item is not None),
|
||||
msg='`...` should give all values except `None`')
|
||||
self.assertCountEqual(traverse_obj(_TEST_DATA, ('urls', 0, Ellipsis)), _TEST_DATA['urls'][0].values(),
|
||||
msg='`...` selection for dicts should select all values')
|
||||
self.assertEqual(traverse_obj(_TEST_DATA, (Ellipsis, Ellipsis, 'url')),
|
||||
['https://www.example.com/0', 'https://www.example.com/1'],
|
||||
msg='nested `...` queries should work')
|
||||
self.assertCountEqual(traverse_obj(_TEST_DATA, (Ellipsis, Ellipsis, 'index')), iter_range(4),
|
||||
self.assertCountEqual(traverse_obj(_TEST_DATA, (Ellipsis, Ellipsis, 'index')), range(4),
|
||||
msg='`...` query result should be flattened')
|
||||
self.assertEqual(traverse_obj(iter(range(4)), Ellipsis), list(range(4)),
|
||||
msg='`...` should accept iterables')
|
||||
|
||||
# Test function as key
|
||||
self.assertEqual(traverse_obj(_TEST_DATA, lambda x, y: x == 'urls' and isinstance(y, list)),
|
||||
[_TEST_DATA['urls']],
|
||||
msg='function as query key should perform a filter based on (key, value)')
|
||||
self.assertCountEqual(traverse_obj(_TEST_DATA, lambda _, x: isinstance(x[0], str)), set(('str',)),
|
||||
self.assertCountEqual(traverse_obj(_TEST_DATA, lambda _, x: isinstance(x[0], compat_str)), {'str'},
|
||||
msg='exceptions in the query function should be caught')
|
||||
self.assertEqual(traverse_obj(iter(range(4)), lambda _, x: x % 2 == 0), [0, 2],
|
||||
msg='function key should accept iterables')
|
||||
if __debug__:
|
||||
with self.assertRaises(Exception, msg='Wrong function signature should raise in debug'):
|
||||
traverse_obj(_TEST_DATA, lambda a: Ellipsis)
|
||||
with self.assertRaises(Exception, msg='Wrong function signature should raise in debug'):
|
||||
traverse_obj(_TEST_DATA, lambda a, b, c: Ellipsis)
|
||||
|
||||
# Test set as key (transformation/type, like `expected_type`)
|
||||
self.assertEqual(traverse_obj(_TEST_DATA, (Ellipsis, T(str.upper), )), ['STR'],
|
||||
msg='Function in set should be a transformation')
|
||||
self.assertEqual(traverse_obj(_TEST_DATA, (Ellipsis, T(str))), ['str'],
|
||||
msg='Type in set should be a type filter')
|
||||
self.assertEqual(traverse_obj(_TEST_DATA, T(dict)), _TEST_DATA,
|
||||
msg='A single set should be wrapped into a path')
|
||||
self.assertEqual(traverse_obj(_TEST_DATA, (Ellipsis, T(str.upper))), ['STR'],
|
||||
msg='Transformation function should not raise')
|
||||
self.assertEqual(traverse_obj(_TEST_DATA, (Ellipsis, T(str_or_none))),
|
||||
[item for item in map(str_or_none, _TEST_DATA.values()) if item is not None],
|
||||
msg='Function in set should be a transformation')
|
||||
if __debug__:
|
||||
with self.assertRaises(Exception, msg='Sets with length != 1 should raise in debug'):
|
||||
traverse_obj(_TEST_DATA, set())
|
||||
with self.assertRaises(Exception, msg='Sets with length != 1 should raise in debug'):
|
||||
traverse_obj(_TEST_DATA, set((str.upper, str)))
|
||||
|
||||
# Test `slice` as a key
|
||||
_SLICE_DATA = [0, 1, 2, 3, 4]
|
||||
self.assertEqual(traverse_obj(_TEST_DATA, ('dict', slice(1))), None,
|
||||
msg='slice on a dictionary should not throw')
|
||||
self.assertEqual(traverse_obj(_SLICE_DATA, slice(1)), _SLICE_DATA[:1],
|
||||
msg='slice key should apply slice to sequence')
|
||||
self.assertEqual(traverse_obj(_SLICE_DATA, slice(1, 2)), _SLICE_DATA[1:2],
|
||||
msg='slice key should apply slice to sequence')
|
||||
self.assertEqual(traverse_obj(_SLICE_DATA, slice(1, 4, 2)), _SLICE_DATA[1:4:2],
|
||||
msg='slice key should apply slice to sequence')
|
||||
|
||||
# Test alternative paths
|
||||
self.assertEqual(traverse_obj(_TEST_DATA, 'fail', 'str'), 'str',
|
||||
@ -1825,23 +1658,15 @@ Line 1
|
||||
{0: ['https://www.example.com/1', 'https://www.example.com/0']},
|
||||
msg='triple nesting in dict path should be treated as branches')
|
||||
self.assertEqual(traverse_obj(_TEST_DATA, {0: 'fail'}), {},
|
||||
msg='remove `None` values when top level dict key fails')
|
||||
msg='remove `None` values when dict key')
|
||||
self.assertEqual(traverse_obj(_TEST_DATA, {0: 'fail'}, default=Ellipsis), {0: Ellipsis},
|
||||
msg='use `default` if key fails and `default`')
|
||||
self.assertEqual(traverse_obj(_TEST_DATA, {0: 'dict'}), {},
|
||||
msg='remove empty values when dict key')
|
||||
self.assertEqual(traverse_obj(_TEST_DATA, {0: 'dict'}, default=Ellipsis), {0: Ellipsis},
|
||||
msg='use `default` when dict key and a default')
|
||||
self.assertEqual(traverse_obj(_TEST_DATA, {0: {0: 'fail'}}), {},
|
||||
msg='remove empty values when nested dict key fails')
|
||||
self.assertEqual(traverse_obj(None, {0: 'fail'}), {},
|
||||
msg='default to dict if pruned')
|
||||
self.assertEqual(traverse_obj(None, {0: 'fail'}, default=Ellipsis), {0: Ellipsis},
|
||||
msg='default to dict if pruned and default is given')
|
||||
self.assertEqual(traverse_obj(_TEST_DATA, {0: {0: 'fail'}}, default=Ellipsis), {0: {0: Ellipsis}},
|
||||
msg='use nested `default` when nested dict key fails and `default`')
|
||||
self.assertEqual(traverse_obj(_TEST_DATA, {0: ('dict', Ellipsis)}), {},
|
||||
msg='remove key if branch in dict key not successful')
|
||||
msg='do not remove `None` values if `default`')
|
||||
self.assertEqual(traverse_obj(_TEST_DATA, {0: 'dict'}), {0: {}},
|
||||
msg='do not remove empty values when dict key')
|
||||
self.assertEqual(traverse_obj(_TEST_DATA, {0: 'dict'}, default=Ellipsis), {0: {}},
|
||||
msg='do not remove empty values when dict key and a default')
|
||||
self.assertEqual(traverse_obj(_TEST_DATA, {0: ('dict', Ellipsis)}), {0: []},
|
||||
msg='if branch in dict key not successful, return `[]`')
|
||||
|
||||
# Testing default parameter behavior
|
||||
_DEFAULT_DATA = {'None': None, 'int': 0, 'list': []}
|
||||
@ -1865,55 +1690,20 @@ Line 1
|
||||
msg='if branched but not successful return `[]`, not `default`')
|
||||
self.assertEqual(traverse_obj(_DEFAULT_DATA, ('list', Ellipsis)), [],
|
||||
msg='if branched but object is empty return `[]`, not `default`')
|
||||
self.assertEqual(traverse_obj(None, Ellipsis), [],
|
||||
msg='if branched but object is `None` return `[]`, not `default`')
|
||||
self.assertEqual(traverse_obj({0: None}, (0, Ellipsis)), [],
|
||||
msg='if branched but state is `None` return `[]`, not `default`')
|
||||
|
||||
branching_paths = [
|
||||
('fail', Ellipsis),
|
||||
(Ellipsis, 'fail'),
|
||||
100 * ('fail',) + (Ellipsis,),
|
||||
(Ellipsis,) + 100 * ('fail',),
|
||||
]
|
||||
for branching_path in branching_paths:
|
||||
self.assertEqual(traverse_obj({}, branching_path), [],
|
||||
msg='if branched but state is `None`, return `[]` (not `default`)')
|
||||
self.assertEqual(traverse_obj({}, 'fail', branching_path), [],
|
||||
msg='if branching in last alternative and previous did not match, return `[]` (not `default`)')
|
||||
self.assertEqual(traverse_obj({0: 'x'}, 0, branching_path), 'x',
|
||||
msg='if branching in last alternative and previous did match, return single value')
|
||||
self.assertEqual(traverse_obj({0: 'x'}, branching_path, 0), 'x',
|
||||
msg='if branching in first alternative and non-branching path does match, return single value')
|
||||
self.assertEqual(traverse_obj({}, branching_path, 'fail'), None,
|
||||
msg='if branching in first alternative and non-branching path does not match, return `default`')
|
||||
|
||||
# Testing expected_type behavior
|
||||
_EXPECTED_TYPE_DATA = {'str': 'str', 'int': 0}
|
||||
self.assertEqual(traverse_obj(_EXPECTED_TYPE_DATA, 'str', expected_type=str),
|
||||
'str', msg='accept matching `expected_type` type')
|
||||
self.assertEqual(traverse_obj(_EXPECTED_TYPE_DATA, 'str', expected_type=int),
|
||||
None, msg='reject non-matching `expected_type` type')
|
||||
self.assertEqual(traverse_obj(_EXPECTED_TYPE_DATA, 'int', expected_type=lambda x: str(x)),
|
||||
'0', msg='transform type using type function')
|
||||
self.assertEqual(traverse_obj(_EXPECTED_TYPE_DATA, 'str', expected_type=lambda _: 1 / 0),
|
||||
None, msg='wrap expected_type function in try_call')
|
||||
self.assertEqual(traverse_obj(_EXPECTED_TYPE_DATA, Ellipsis, expected_type=str),
|
||||
['str'], msg='eliminate items that expected_type fails on')
|
||||
self.assertEqual(traverse_obj(_TEST_DATA, {0: 100, 1: 1.2}, expected_type=int),
|
||||
{0: 100}, msg='type as expected_type should filter dict values')
|
||||
self.assertEqual(traverse_obj(_TEST_DATA, {0: 100, 1: 1.2, 2: 'None'}, expected_type=str_or_none),
|
||||
{0: '100', 1: '1.2'}, msg='function as expected_type should transform dict values')
|
||||
self.assertEqual(traverse_obj(_TEST_DATA, ({0: 1.2}, 0, set((int_or_none,))), expected_type=int),
|
||||
1, msg='expected_type should not filter non-final dict values')
|
||||
self.assertEqual(traverse_obj(_TEST_DATA, {0: {0: 100, 1: 'str'}}, expected_type=int),
|
||||
{0: {0: 100}}, msg='expected_type should transform deep dict values')
|
||||
self.assertEqual(traverse_obj(_TEST_DATA, [({0: '...'}, {0: '...'})], expected_type=type(Ellipsis)),
|
||||
[{0: Ellipsis}, {0: Ellipsis}], msg='expected_type should transform branched dict values')
|
||||
self.assertEqual(traverse_obj({1: {3: 4}}, [(1, 2), 3], expected_type=int),
|
||||
[4], msg='expected_type regression for type matching in tuple branching')
|
||||
self.assertEqual(traverse_obj(_TEST_DATA, ['data', Ellipsis], expected_type=int),
|
||||
[], msg='expected_type regression for type matching in dict result')
|
||||
self.assertEqual(traverse_obj(_EXPECTED_TYPE_DATA, 'str', expected_type=compat_str), 'str',
|
||||
msg='accept matching `expected_type` type')
|
||||
self.assertEqual(traverse_obj(_EXPECTED_TYPE_DATA, 'str', expected_type=int), None,
|
||||
msg='reject non matching `expected_type` type')
|
||||
self.assertEqual(traverse_obj(_EXPECTED_TYPE_DATA, 'int', expected_type=lambda x: compat_str(x)), '0',
|
||||
msg='transform type using type function')
|
||||
self.assertEqual(traverse_obj(_EXPECTED_TYPE_DATA, 'str',
|
||||
expected_type=lambda _: 1 / 0), None,
|
||||
msg='wrap expected_type function in try_call')
|
||||
self.assertEqual(traverse_obj(_EXPECTED_TYPE_DATA, Ellipsis, expected_type=compat_str), ['str'],
|
||||
msg='eliminate items that expected_type fails on')
|
||||
|
||||
# Test get_all behavior
|
||||
_GET_ALL_DATA = {'key': [0, 1, 2]}
|
||||
@ -1958,23 +1748,14 @@ Line 1
|
||||
_traverse_string=True), '.',
|
||||
msg='traverse into converted data if `traverse_string`')
|
||||
self.assertEqual(traverse_obj(_TRAVERSE_STRING_DATA, ('str', Ellipsis),
|
||||
_traverse_string=True), 'str',
|
||||
msg='`...` should result in string (same value) if `traverse_string`')
|
||||
self.assertEqual(traverse_obj(_TRAVERSE_STRING_DATA, ('str', slice(0, None, 2)),
|
||||
_traverse_string=True), 'sr',
|
||||
msg='`slice` should result in string if `traverse_string`')
|
||||
self.assertEqual(traverse_obj(_TRAVERSE_STRING_DATA, ('str', lambda i, v: i or v == 's'),
|
||||
_traverse_string=True), 'str',
|
||||
msg='function should result in string if `traverse_string`')
|
||||
_traverse_string=True), list('str'),
|
||||
msg='`...` branching into string should result in list')
|
||||
self.assertEqual(traverse_obj(_TRAVERSE_STRING_DATA, ('str', (0, 2)),
|
||||
_traverse_string=True), ['s', 'r'],
|
||||
msg='branching should result in list if `traverse_string`')
|
||||
self.assertEqual(traverse_obj({}, (0, Ellipsis), _traverse_string=True), [],
|
||||
msg='branching should result in list if `traverse_string`')
|
||||
self.assertEqual(traverse_obj({}, (0, lambda x, y: True), _traverse_string=True), [],
|
||||
msg='branching should result in list if `traverse_string`')
|
||||
self.assertEqual(traverse_obj({}, (0, slice(1)), _traverse_string=True), [],
|
||||
msg='branching should result in list if `traverse_string`')
|
||||
msg='branching into string should result in list')
|
||||
self.assertEqual(traverse_obj(_TRAVERSE_STRING_DATA, ('str', lambda _, x: x),
|
||||
_traverse_string=True), list('str'),
|
||||
msg='function branching into string should result in list')
|
||||
|
||||
# Test is_user_input behavior
|
||||
_IS_USER_INPUT_DATA = {'range8': list(range(8))}
|
||||
@ -2011,8 +1792,6 @@ Line 1
|
||||
msg='failing str key on a `re.Match` should return `default`')
|
||||
self.assertEqual(traverse_obj(mobj, 8), None,
|
||||
msg='failing int key on a `re.Match` should return `default`')
|
||||
self.assertEqual(traverse_obj(mobj, lambda k, _: k in (0, 'group')), ['0123', '3'],
|
||||
msg='function on a `re.Match` should give group name as well')
|
||||
|
||||
def test_get_first(self):
|
||||
self.assertEqual(get_first([{'a': None}, {'a': 'spam'}], 'a'), 'spam')
|
||||
|
@@ -11,11 +11,12 @@ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from test.helper import get_params, try_rm


import io

import xml.etree.ElementTree

import youtube_dl.YoutubeDL
import youtube_dl.extractor
from youtube_dl.compat import compat_open as open


class YoutubeDL(youtube_dl.YoutubeDL):
@@ -50,7 +51,7 @@ class TestAnnotations(unittest.TestCase):
        ydl.download([TEST_ID])
        self.assertTrue(os.path.exists(ANNOTATIONS_FILE))
        annoxml = None
        with open(ANNOTATIONS_FILE, 'r', encoding='utf-8') as annof:
        with io.open(ANNOTATIONS_FILE, 'r', encoding='utf-8') as annof:
            annoxml = xml.etree.ElementTree.parse(annof)
        self.assertTrue(annoxml is not None, 'Failed to parse annotations XML')
        root = annoxml.getroot()
@@ -8,14 +8,11 @@ import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import io
import re
import string

from youtube_dl.compat import (
    compat_open as open,
    compat_str,
    compat_urlretrieve,
)
from youtube_dl.compat import compat_str, compat_urlretrieve

from test.helper import FakeYDL
from youtube_dl.extractor import YoutubeIE
@@ -146,18 +143,6 @@ _NSIG_TESTS = [
        'https://www.youtube.com/s/player/dac945fd/player_ias.vflset/en_US/base.js',
        'o8BkRxXhuYsBCWi6RplPdP', '3Lx32v_hmzTm6A',
    ),
    (
        'https://www.youtube.com/s/player/6f20102c/player_ias.vflset/en_US/base.js',
        'lE8DhoDmKqnmJJ', 'pJTTX6XyJP2BYw',
    ),
    (
        'https://www.youtube.com/s/player/cfa9e7cb/player_ias.vflset/en_US/base.js',
        'qO0NiMtYQ7TeJnfFG2', 'k9cuJDHNS5O7kQ',
    ),
    (
        'https://www.youtube.com/s/player/b7910ca8/player_ias.vflset/en_US/base.js',
        '_hXMCwMt9qE310D', 'LoZMgkkofRMCZQ',
    ),
]


@@ -211,7 +196,7 @@ def t_factory(name, sig_func, url_pattern):

        if not os.path.exists(fn):
            compat_urlretrieve(url, fn)
        with open(fn, encoding='utf-8') as testf:
        with io.open(fn, encoding='utf-8') as testf:
            jscode = testf.read()
        self.assertEqual(sig_func(jscode, sig_input), expected_sig)

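Each _NSIG_TESTS row above pairs a player base.js URL with an input n-parameter and the value the challenge code should produce, and t_factory turns such rows into generated test methods. A hedged sketch of that table-driven pattern follows; `make_nsig_test` and `download_player_js` are illustrative names, not the module's real helpers.

def make_nsig_test(player_url, n_in, n_out, sig_func, download_player_js):
    # Build one test method from a (player_url, n_in, n_out) row.
    # `download_player_js` is an assumed callable that fetches and caches base.js.
    def test(self):
        jscode = download_player_js(player_url)
        self.assertEqual(sig_func(jscode, n_in), n_out)
    return test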
@ -4,9 +4,11 @@
|
||||
from __future__ import absolute_import, unicode_literals
|
||||
|
||||
import collections
|
||||
import contextlib
|
||||
import copy
|
||||
import datetime
|
||||
import errno
|
||||
import fileinput
|
||||
import io
|
||||
import itertools
|
||||
import json
|
||||
@ -24,26 +26,15 @@ import tokenize
|
||||
import traceback
|
||||
import random
|
||||
|
||||
try:
|
||||
from ssl import OPENSSL_VERSION
|
||||
except ImportError:
|
||||
# Must be Python 2.6, should be built against 1.0.2
|
||||
OPENSSL_VERSION = 'OpenSSL 1.0.2(?)'
|
||||
from string import ascii_letters
|
||||
|
||||
from .compat import (
|
||||
compat_basestring,
|
||||
compat_collections_chain_map as ChainMap,
|
||||
compat_filter as filter,
|
||||
compat_cookiejar,
|
||||
compat_get_terminal_size,
|
||||
compat_http_client,
|
||||
compat_http_cookiejar_Cookie,
|
||||
compat_http_cookies_SimpleCookie,
|
||||
compat_integer_types,
|
||||
compat_kwargs,
|
||||
compat_map as map,
|
||||
compat_numeric_types,
|
||||
compat_open as open,
|
||||
compat_os_name,
|
||||
compat_str,
|
||||
compat_tokenize_tokenize,
|
||||
@ -55,7 +46,6 @@ from .compat import (
|
||||
from .utils import (
|
||||
age_restricted,
|
||||
args_to_str,
|
||||
bug_reports_message,
|
||||
ContentTooShortError,
|
||||
date_from_str,
|
||||
DateRange,
|
||||
@ -71,11 +61,10 @@ from .utils import (
|
||||
format_bytes,
|
||||
formatSeconds,
|
||||
GeoRestrictedError,
|
||||
HEADRequest,
|
||||
int_or_none,
|
||||
ISO3166Utils,
|
||||
join_nonempty,
|
||||
locked_file,
|
||||
LazyList,
|
||||
make_HTTPS_handler,
|
||||
MaxDownloadsReached,
|
||||
orderedSet,
|
||||
@ -87,6 +76,7 @@ from .utils import (
|
||||
preferredencoding,
|
||||
prepend_extension,
|
||||
process_communicate_or_kill,
|
||||
PUTRequest,
|
||||
register_socks_protocols,
|
||||
render_table,
|
||||
replace_extension,
|
||||
@ -98,7 +88,6 @@ from .utils import (
|
||||
std_headers,
|
||||
str_or_none,
|
||||
subtitles_filename,
|
||||
traverse_obj,
|
||||
UnavailableVideoError,
|
||||
url_basename,
|
||||
version_tuple,
|
||||
@ -108,7 +97,6 @@ from .utils import (
|
||||
YoutubeDLCookieProcessor,
|
||||
YoutubeDLHandler,
|
||||
YoutubeDLRedirectHandler,
|
||||
ytdl_is_updateable,
|
||||
)
|
||||
from .cache import Cache
|
||||
from .extractor import get_info_extractor, gen_extractor_classes, _LAZY_LOADER
|
||||
@ -378,9 +366,6 @@ class YoutubeDL(object):
|
||||
self.params.update(params)
|
||||
self.cache = Cache(self)
|
||||
|
||||
self._header_cookies = []
|
||||
self._load_cookies_from_headers(self.params.get('http_headers'))
|
||||
|
||||
def check_deprecated(param, option, suggestion):
|
||||
if self.params.get(param) is not None:
|
||||
self.report_warning(
|
||||
@ -587,7 +572,7 @@ class YoutubeDL(object):
|
||||
if self.params.get('cookiefile') is not None:
|
||||
self.cookiejar.save(ignore_discard=True, ignore_expires=True)
|
||||
|
||||
def trouble(self, *args, **kwargs):
|
||||
def trouble(self, message=None, tb=None):
|
||||
"""Determine action to take when a download problem appears.
|
||||
|
||||
Depending on if the downloader has been configured to ignore
|
||||
@ -596,11 +581,6 @@ class YoutubeDL(object):
|
||||
|
||||
tb, if given, is additional traceback information.
|
||||
"""
|
||||
# message=None, tb=None, is_error=True
|
||||
message = args[0] if len(args) > 0 else kwargs.get('message', None)
|
||||
tb = args[1] if len(args) > 1 else kwargs.get('tb', None)
|
||||
is_error = args[2] if len(args) > 2 else kwargs.get('is_error', True)
|
||||
|
||||
if message is not None:
|
||||
self.to_stderr(message)
|
||||
if self.params.get('verbose'):
|
||||
@ -613,10 +593,7 @@ class YoutubeDL(object):
|
||||
else:
|
||||
tb_data = traceback.format_list(traceback.extract_stack())
|
||||
tb = ''.join(tb_data)
|
||||
if tb:
|
||||
self.to_stderr(tb)
|
||||
if not is_error:
|
||||
return
|
||||
self.to_stderr(tb)
|
||||
if not self.params.get('ignoreerrors', False):
|
||||
if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
|
||||
exc_info = sys.exc_info()[1].exc_info
|
||||
@ -625,18 +602,11 @@ class YoutubeDL(object):
|
||||
raise DownloadError(message, exc_info)
|
||||
self._download_retcode = 1
|
||||
|
||||
def report_warning(self, message, only_once=False, _cache={}):
|
||||
def report_warning(self, message):
|
||||
'''
|
||||
Print the message to stderr, it will be prefixed with 'WARNING:'
|
||||
If stderr is a tty file the 'WARNING:' will be colored
|
||||
'''
|
||||
if only_once:
|
||||
m_hash = hash((self, message))
|
||||
m_cnt = _cache.setdefault(m_hash, 0)
|
||||
_cache[m_hash] = m_cnt + 1
|
||||
if m_cnt > 0:
|
||||
return
|
||||
|
||||
if self.params.get('logger') is not None:
|
||||
self.params['logger'].warning(message)
|
||||
else:
|
||||
@ -649,7 +619,7 @@ class YoutubeDL(object):
|
||||
warning_message = '%s %s' % (_msg_header, message)
|
||||
self.to_stderr(warning_message)
|
||||
|
||||
def report_error(self, message, *args, **kwargs):
|
||||
def report_error(self, message, tb=None):
|
||||
'''
|
||||
Do the same as trouble, but prefixes the message with 'ERROR:', colored
|
||||
in red if stderr is a tty file.
|
||||
@ -658,18 +628,8 @@ class YoutubeDL(object):
|
||||
_msg_header = '\033[0;31mERROR:\033[0m'
|
||||
else:
|
||||
_msg_header = 'ERROR:'
|
||||
kwargs['message'] = '%s %s' % (_msg_header, message)
|
||||
self.trouble(*args, **kwargs)
|
||||
|
||||
def report_unscoped_cookies(self, *args, **kwargs):
|
||||
# message=None, tb=False, is_error=False
|
||||
if len(args) <= 2:
|
||||
kwargs.setdefault('is_error', False)
|
||||
if len(args) <= 0:
|
||||
kwargs.setdefault(
|
||||
'message',
|
||||
'Unscoped cookies are not allowed: please specify some sort of scoping')
|
||||
self.report_error(*args, **kwargs)
|
||||
error_message = '%s %s' % (_msg_header, message)
|
||||
self.trouble(error_message, tb)
|
||||
|
||||
def report_file_already_downloaded(self, file_name):
|
||||
"""Report file has already been fully downloaded."""
|
||||
@ -865,7 +825,7 @@ class YoutubeDL(object):
|
||||
msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to workaround.'
|
||||
self.report_error(msg)
|
||||
except ExtractorError as e: # An error we somewhat expected
|
||||
self.report_error(compat_str(e), tb=e.format_traceback())
|
||||
self.report_error(compat_str(e), e.format_traceback())
|
||||
except MaxDownloadsReached:
|
||||
raise
|
||||
except Exception as e:
|
||||
@@ -875,83 +835,8 @@ class YoutubeDL(object):
                raise
        return wrapper

    def _remove_cookie_header(self, http_headers):
        """Filters out `Cookie` header from an `http_headers` dict
        The `Cookie` header is removed to prevent leaks as a result of unscoped cookies.
        See: https://github.com/yt-dlp/yt-dlp/security/advisories/GHSA-v8mc-9377-rwjj

        @param http_headers      An `http_headers` dict from which any `Cookie` header
                                 should be removed, or None
        """
        return dict(filter(lambda pair: pair[0].lower() != 'cookie', (http_headers or {}).items()))

    def _load_cookies(self, data, **kwargs):
        """Loads cookies from a `Cookie` header

        This tries to work around the security vulnerability of passing cookies to every domain.

        @param data         The Cookie header as a string to load the cookies from
        @param autoscope    If `False`, scope cookies using Set-Cookie syntax and error for cookie without domains
                            If `True`, save cookies for later to be stored in the jar with a limited scope
                            If a URL, save cookies in the jar with the domain of the URL
        """
        # autoscope=True (kw-only)
        autoscope = kwargs.get('autoscope', True)

        for cookie in compat_http_cookies_SimpleCookie(data).values() if data else []:
            if autoscope and any(cookie.values()):
                raise ValueError('Invalid syntax in Cookie Header')

            domain = cookie.get('domain') or ''
            expiry = cookie.get('expires')
            if expiry == '':  # 0 is valid so we check for `''` explicitly
                expiry = None
            prepared_cookie = compat_http_cookiejar_Cookie(
                cookie.get('version') or 0, cookie.key, cookie.value, None, False,
                domain, True, True, cookie.get('path') or '', bool(cookie.get('path')),
                bool(cookie.get('secure')), expiry, False, None, None, {})

            if domain:
                self.cookiejar.set_cookie(prepared_cookie)
            elif autoscope is True:
                self.report_warning(
                    'Passing cookies as a header is a potential security risk; '
                    'they will be scoped to the domain of the downloaded urls. '
                    'Please consider loading cookies from a file or browser instead.',
                    only_once=True)
                self._header_cookies.append(prepared_cookie)
            elif autoscope:
                self.report_warning(
                    'The extractor result contains an unscoped cookie as an HTTP header. '
                    'If you are specifying an input URL, ' + bug_reports_message(),
                    only_once=True)
                self._apply_header_cookies(autoscope, [prepared_cookie])
            else:
                self.report_unscoped_cookies()

    def _load_cookies_from_headers(self, headers):
        self._load_cookies(traverse_obj(headers, 'cookie', casesense=False))

    def _apply_header_cookies(self, url, cookies=None):
        """This method applies stray header cookies to the provided url

        This loads header cookies and scopes them to the domain provided in `url`.
        While this is not ideal, it helps reduce the risk of them being sent to
        an unintended destination.
        """
        parsed = compat_urllib_parse.urlparse(url)
        if not parsed.hostname:
            return

        for cookie in map(copy.copy, cookies or self._header_cookies):
            cookie.domain = '.' + parsed.hostname
            self.cookiejar.set_cookie(cookie)

    @__handle_extraction_exceptions
    def __extract_info(self, url, ie, download, extra_info, process):
        # Compat with passing cookies in http headers
        self._apply_header_cookies(url)

        ie_result = ie.extract(url)
        if ie_result is None:  # Finished already (backwards compatibility; listformats and friends should be moved here)
            return
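The docstrings above describe the mitigation: a cookie supplied via an HTTP header has no domain, so before any request it is copied into the jar with its domain forced to the host that is actually being downloaded. A self-contained sketch of that scoping idea, using only the standard library cookiejar rather than youtube-dl's compat wrappers, with `scope_header_cookie_to_url` as an illustrative name:

try:
    from http.cookiejar import Cookie, CookieJar   # Python 3
    from urllib.parse import urlparse
except ImportError:                                 # Python 2
    from cookielib import Cookie, CookieJar
    from urlparse import urlparse

def scope_header_cookie_to_url(jar, name, value, url):
    # Give a bare `name=value` header cookie a domain taken from `url`,
    # so it is only ever sent back to that host.
    host = urlparse(url).hostname
    if not host:
        return
    jar.set_cookie(Cookie(
        0, name, value, None, False,
        '.' + host, True, True, '/', True,
        False, None, False, None, None, {}))

jar = CookieJar()
scope_header_cookie_to_url(jar, 'session', 'abc123', 'http://example.com/watch')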
@ -977,7 +862,7 @@ class YoutubeDL(object):
|
||||
|
||||
def process_ie_result(self, ie_result, download=True, extra_info={}):
|
||||
"""
|
||||
Take the result of the ie (may be modified) and resolve all unresolved
|
||||
Take the result of the ie(may be modified) and resolve all unresolved
|
||||
references (URLs, playlist items).
|
||||
|
||||
It will also download the videos if 'download'.
|
||||
@ -1504,16 +1389,17 @@ class YoutubeDL(object):
|
||||
'abr': formats_info[1].get('abr'),
|
||||
'ext': output_ext,
|
||||
}
|
||||
video_selector, audio_selector = map(_build_selector_function, selector.selector)
|
||||
|
||||
def selector_function(ctx):
|
||||
selector_fn = lambda x: _build_selector_function(x)(ctx)
|
||||
for pair in itertools.product(*map(selector_fn, selector.selector)):
|
||||
for pair in itertools.product(
|
||||
video_selector(copy.deepcopy(ctx)), audio_selector(copy.deepcopy(ctx))):
|
||||
yield _merge(pair)
|
||||
|
||||
filters = [self._build_format_filter(f) for f in selector.filters]
|
||||
|
||||
def final_selector(ctx):
|
||||
ctx_copy = dict(ctx)
|
||||
ctx_copy = copy.deepcopy(ctx)
|
||||
for _filter in filters:
|
||||
ctx_copy['formats'] = list(filter(_filter, ctx_copy['formats']))
|
||||
return selector_function(ctx_copy)
|
||||
@@ -1548,45 +1434,23 @@ class YoutubeDL(object):
        parsed_selector = _parse_format_selection(iter(TokenIterator(tokens)))
        return _build_selector_function(parsed_selector)

    def _calc_headers(self, info_dict, load_cookies=False):
        if load_cookies:  # For --load-info-json
            # load cookies from http_headers in legacy info.json
            self._load_cookies(traverse_obj(info_dict, ('http_headers', 'Cookie'), casesense=False),
                               autoscope=info_dict['url'])
            # load scoped cookies from info.json
            self._load_cookies(info_dict.get('cookies'), autoscope=False)

        cookies = self.cookiejar.get_cookies_for_url(info_dict['url'])
        if cookies:
            # Make a string like name1=val1; attr1=a_val1; ...name2=val2; ...
            # By convention a cookie name can't be a well-known attribute name
            # so this syntax is unambiguous and can be parsed by (eg) SimpleCookie
            encoder = compat_http_cookies_SimpleCookie()
            values = []
            attributes = (('Domain', '='), ('Path', '='), ('Secure',), ('Expires', '='), ('Version', '='))
            attributes = tuple([x[0].lower()] + list(x) for x in attributes)
            for cookie in cookies:
                _, value = encoder.value_encode(cookie.value)
                # Py 2 '' --> '', Py 3 '' --> '""'
                if value == '':
                    value = '""'
                values.append('='.join((cookie.name, value)))
                for attr in attributes:
                    value = getattr(cookie, attr[0], None)
                    if value:
                        values.append('%s%s' % (''.join(attr[1:]), value if len(attr) == 3 else ''))
            info_dict['cookies'] = '; '.join(values)

    def _calc_headers(self, info_dict):
        res = std_headers.copy()
        res.update(info_dict.get('http_headers') or {})
        res = self._remove_cookie_header(res)

        add_headers = info_dict.get('http_headers')
        if add_headers:
            res.update(add_headers)

        cookies = self._calc_cookies(info_dict)
        if cookies:
            res['Cookie'] = cookies

        if 'X-Forwarded-For' not in res:
            x_forwarded_for_ip = info_dict.get('__x_forwarded_for_ip')
            if x_forwarded_for_ip:
                res['X-Forwarded-For'] = x_forwarded_for_ip

        return res or None
        return res

    def _calc_cookies(self, info_dict):
        pr = sanitized_Request(info_dict['url'])
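The comments above describe serialising the matching cookies into a single "name1=val1; Domain=...; ..." string, relying on SimpleCookie's value_encode() so that the result stays parseable. A minimal sketch of just that quoting step, standard library only; `cookie_header_fragment` is an illustrative name:

try:
    from http.cookies import SimpleCookie   # Python 3
except ImportError:
    from Cookie import SimpleCookie         # Python 2

def cookie_header_fragment(name, value, domain=None):
    encoder = SimpleCookie()
    _, quoted = encoder.value_encode(value)  # quotes values that need it; '' becomes '""' on Python 3
    parts = ['%s=%s' % (name, quoted or '""')]
    if domain:
        parts.append('Domain=%s' % domain)
    return '; '.join(parts)

assert cookie_header_fragment('sid', 'abc', '.example.com') == 'sid=abc; Domain=.example.com'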
@ -1765,13 +1629,10 @@ class YoutubeDL(object):
|
||||
format['protocol'] = determine_protocol(format)
|
||||
# Add HTTP headers, so that external programs can use them from the
|
||||
# json output
|
||||
format['http_headers'] = self._calc_headers(ChainMap(format, info_dict), load_cookies=True)
|
||||
|
||||
# Safeguard against old/insecure infojson when using --load-info-json
|
||||
info_dict['http_headers'] = self._remove_cookie_header(
|
||||
info_dict.get('http_headers') or {}) or None
|
||||
|
||||
# Remove private housekeeping stuff (copied to http_headers in _calc_headers())
|
||||
full_format_info = info_dict.copy()
|
||||
full_format_info.update(format)
|
||||
format['http_headers'] = self._calc_headers(full_format_info)
|
||||
# Remove private housekeeping stuff
|
||||
if '__x_forwarded_for_ip' in info_dict:
|
||||
del info_dict['__x_forwarded_for_ip']
|
||||
|
||||
@ -1914,7 +1775,7 @@ class YoutubeDL(object):
|
||||
self.to_stdout(formatSeconds(info_dict['duration']))
|
||||
print_mandatory('format')
|
||||
if self.params.get('forcejson', False):
|
||||
self.to_stdout(json.dumps(self.sanitize_info(info_dict)))
|
||||
self.to_stdout(json.dumps(info_dict))
|
||||
|
||||
def process_info(self, info_dict):
|
||||
"""Process a single resolved IE result."""
|
||||
@ -1974,7 +1835,7 @@ class YoutubeDL(object):
|
||||
else:
|
||||
try:
|
||||
self.to_screen('[info] Writing video description to: ' + descfn)
|
||||
with open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
|
||||
with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
|
||||
descfile.write(info_dict['description'])
|
||||
except (OSError, IOError):
|
||||
self.report_error('Cannot write description file ' + descfn)
|
||||
@ -1989,7 +1850,7 @@ class YoutubeDL(object):
|
||||
else:
|
||||
try:
|
||||
self.to_screen('[info] Writing video annotations to: ' + annofn)
|
||||
with open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
|
||||
with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
|
||||
annofile.write(info_dict['annotations'])
|
||||
except (KeyError, TypeError):
|
||||
self.report_warning('There are no annotations to write.')
|
||||
@ -2016,7 +1877,7 @@ class YoutubeDL(object):
|
||||
try:
|
||||
# Use newline='' to prevent conversion of newline characters
|
||||
# See https://github.com/ytdl-org/youtube-dl/issues/10268
|
||||
with open(encodeFilename(sub_filename), 'w', encoding='utf-8', newline='') as subfile:
|
||||
with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8', newline='') as subfile:
|
||||
subfile.write(sub_info['data'])
|
||||
except (OSError, IOError):
|
||||
self.report_error('Cannot write subtitles file ' + sub_filename)
|
||||
@ -2025,16 +1886,24 @@ class YoutubeDL(object):
|
||||
try:
|
||||
sub_data = ie._request_webpage(
|
||||
sub_info['url'], info_dict['id'], note=False).read()
|
||||
with open(encodeFilename(sub_filename), 'wb') as subfile:
|
||||
with io.open(encodeFilename(sub_filename), 'wb') as subfile:
|
||||
subfile.write(sub_data)
|
||||
except (ExtractorError, IOError, OSError, ValueError) as err:
|
||||
self.report_warning('Unable to download subtitle for "%s": %s' %
|
||||
(sub_lang, error_to_compat_str(err)))
|
||||
continue
|
||||
|
||||
self._write_info_json(
|
||||
'video description', info_dict,
|
||||
replace_extension(filename, 'info.json', info_dict.get('ext')))
|
||||
if self.params.get('writeinfojson', False):
|
||||
infofn = replace_extension(filename, 'info.json', info_dict.get('ext'))
|
||||
if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(infofn)):
|
||||
self.to_screen('[info] Video description metadata is already present')
|
||||
else:
|
||||
self.to_screen('[info] Writing video description metadata as JSON to: ' + infofn)
|
||||
try:
|
||||
write_json_file(self.filter_requested_info(info_dict), infofn)
|
||||
except (OSError, IOError):
|
||||
self.report_error('Cannot write metadata to JSON file ' + infofn)
|
||||
return
|
||||
|
||||
self._write_thumbnails(info_dict, filename)
|
||||
|
||||
@ -2055,11 +1924,7 @@ class YoutubeDL(object):
|
||||
fd.add_progress_hook(ph)
|
||||
if self.params.get('verbose'):
|
||||
self.to_screen('[debug] Invoking downloader on %r' % info.get('url'))
|
||||
|
||||
new_info = dict((k, v) for k, v in info.items() if not k.startswith('__p'))
|
||||
new_info['http_headers'] = self._calc_headers(new_info)
|
||||
|
||||
return fd.download(name, new_info)
|
||||
return fd.download(name, info)
|
||||
|
||||
if info_dict.get('requested_formats') is not None:
|
||||
downloaded = []
|
||||
@ -2224,13 +2089,16 @@ class YoutubeDL(object):
|
||||
raise
|
||||
else:
|
||||
if self.params.get('dump_single_json', False):
|
||||
self.to_stdout(json.dumps(self.sanitize_info(res)))
|
||||
self.to_stdout(json.dumps(res))
|
||||
|
||||
return self._download_retcode
|
||||
|
||||
def download_with_info_file(self, info_filename):
|
||||
with open(info_filename, encoding='utf-8') as f:
|
||||
info = self.filter_requested_info(json.load(f))
|
||||
with contextlib.closing(fileinput.FileInput(
|
||||
[info_filename], mode='r',
|
||||
openhook=fileinput.hook_encoded('utf-8'))) as f:
|
||||
# FileInput doesn't have a read method, we can't call json.load
|
||||
info = self.filter_requested_info(json.loads('\n'.join(f)))
|
||||
try:
|
||||
self.process_ie_result(info, download=True)
|
||||
except DownloadError:
|
||||
@ -2243,36 +2111,10 @@ class YoutubeDL(object):
|
||||
return self._download_retcode
|
||||
|
||||
@staticmethod
|
||||
def sanitize_info(info_dict, remove_private_keys=False):
|
||||
''' Sanitize the infodict for converting to json '''
|
||||
if info_dict is None:
|
||||
return info_dict
|
||||
|
||||
if remove_private_keys:
|
||||
reject = lambda k, v: (v is None
|
||||
or k.startswith('__')
|
||||
or k in ('requested_formats',
|
||||
'requested_subtitles'))
|
||||
else:
|
||||
reject = lambda k, v: False
|
||||
|
||||
def filter_fn(obj):
|
||||
if isinstance(obj, dict):
|
||||
return dict((k, filter_fn(v)) for k, v in obj.items() if not reject(k, v))
|
||||
elif isinstance(obj, (list, tuple, set, LazyList)):
|
||||
return list(map(filter_fn, obj))
|
||||
elif obj is None or any(isinstance(obj, c)
|
||||
for c in (compat_integer_types,
|
||||
(compat_str, float, bool))):
|
||||
return obj
|
||||
else:
|
||||
return repr(obj)
|
||||
|
||||
return filter_fn(info_dict)
|
||||
|
||||
@classmethod
|
||||
def filter_requested_info(cls, info_dict):
|
||||
return cls.sanitize_info(info_dict, True)
|
||||
def filter_requested_info(info_dict):
|
||||
return dict(
|
||||
(k, v) for k, v in info_dict.items()
|
||||
if k not in ['requested_formats', 'requested_subtitles'])
|
||||
|
||||
def post_process(self, filename, ie_info):
|
||||
"""Run all the postprocessors on the given file."""
|
||||
@@ -2458,6 +2300,27 @@ class YoutubeDL(object):
        """ Start an HTTP download """
        if isinstance(req, compat_basestring):
            req = sanitized_Request(req)
        # an embedded /../ sequence is not automatically handled by urllib2
        # see https://github.com/yt-dlp/yt-dlp/issues/3355
        url = req.get_full_url()
        parts = url.partition('/../')
        if parts[1]:
            url = compat_urllib_parse.urljoin(parts[0] + parts[1][:1], parts[1][1:] + parts[2])
        if url:
            # worse, URL path may have initial /../ against RFCs: work-around
            # by stripping such prefixes, like eg Firefox
            parts = compat_urllib_parse.urlsplit(url)
            path = parts.path
            while path.startswith('/../'):
                path = path[3:]
            url = parts._replace(path=path).geturl()
            # get a new Request with the munged URL
            if url != req.get_full_url():
                req_type = {'HEAD': HEADRequest, 'PUT': PUTRequest}.get(
                    req.get_method(), compat_urllib_request.Request)
                req = req_type(
                    url, data=req.data, headers=dict(req.header_items()),
                    origin_req_host=req.origin_req_host, unverifiable=req.unverifiable)
        return self._opener.open(req, timeout=self._socket_timeout)

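The hunk above works around urllib not collapsing an embedded "/../" in a request URL: it rejoins the URL against itself so urljoin applies RFC 3986 dot-segment removal, then strips any illegal leading "/../" the way browsers do. A standalone sketch of the same idea, standard library only; `collapse_dot_segments` is an illustrative name:

try:
    from urllib.parse import urljoin, urlsplit   # Python 3
except ImportError:
    from urlparse import urljoin, urlsplit       # Python 2

def collapse_dot_segments(url):
    head, sep, tail = url.partition('/../')
    if sep:
        # rejoin so urljoin resolves the dot segments
        url = urljoin(head + sep[:1], sep[1:] + tail)
    # a leading "/../" is not legal per RFC 3986: strip it like browsers do
    parts = urlsplit(url)
    path = parts.path
    while path.startswith('/../'):
        path = path[3:]
    return parts._replace(path=path).geturl()

assert collapse_dot_segments('http://example.com/a/b/../c') == 'http://example.com/a/c'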
def print_debug_header(self):
|
||||
@ -2479,12 +2342,9 @@ class YoutubeDL(object):
|
||||
self.get_encoding()))
|
||||
write_string(encoding_str, encoding=None)
|
||||
|
||||
writeln_debug = lambda *s: self._write_string('[debug] %s\n' % (''.join(s), ))
|
||||
writeln_debug('youtube-dl version ', __version__)
|
||||
self._write_string('[debug] youtube-dl version ' + __version__ + '\n')
|
||||
if _LAZY_LOADER:
|
||||
writeln_debug('Lazy loading extractors enabled')
|
||||
if ytdl_is_updateable():
|
||||
writeln_debug('Single file build')
|
||||
self._write_string('[debug] Lazy loading extractors enabled' + '\n')
|
||||
try:
|
||||
sp = subprocess.Popen(
|
||||
['git', 'rev-parse', '--short', 'HEAD'],
|
||||
@ -2493,7 +2353,7 @@ class YoutubeDL(object):
|
||||
out, err = process_communicate_or_kill(sp)
|
||||
out = out.decode().strip()
|
||||
if re.match('[0-9a-f]+', out):
|
||||
writeln_debug('Git HEAD: ', out)
|
||||
self._write_string('[debug] Git HEAD: ' + out + '\n')
|
||||
except Exception:
|
||||
try:
|
||||
sys.exc_clear()
|
||||
@ -2506,22 +2366,9 @@ class YoutubeDL(object):
|
||||
return impl_name + ' version %d.%d.%d' % sys.pypy_version_info[:3]
|
||||
return impl_name
|
||||
|
||||
def libc_ver():
|
||||
try:
|
||||
return platform.libc_ver()
|
||||
except OSError: # We may not have access to the executable
|
||||
return []
|
||||
|
||||
libc = join_nonempty(*libc_ver(), delim=' ')
|
||||
writeln_debug('Python %s (%s %s %s) - %s - %s%s' % (
|
||||
platform.python_version(),
|
||||
python_implementation(),
|
||||
platform.machine(),
|
||||
platform.architecture()[0],
|
||||
platform_name(),
|
||||
OPENSSL_VERSION,
|
||||
(' - %s' % (libc, )) if libc else ''
|
||||
))
|
||||
self._write_string('[debug] Python version %s (%s) - %s\n' % (
|
||||
platform.python_version(), python_implementation(),
|
||||
platform_name()))
|
||||
|
||||
exe_versions = FFmpegPostProcessor.get_versions(self)
|
||||
exe_versions['rtmpdump'] = rtmpdump_version()
|
||||
@ -2533,17 +2380,17 @@ class YoutubeDL(object):
|
||||
)
|
||||
if not exe_str:
|
||||
exe_str = 'none'
|
||||
writeln_debug('exe versions: %s' % (exe_str, ))
|
||||
self._write_string('[debug] exe versions: %s\n' % exe_str)
|
||||
|
||||
proxy_map = {}
|
||||
for handler in self._opener.handlers:
|
||||
if hasattr(handler, 'proxies'):
|
||||
proxy_map.update(handler.proxies)
|
||||
writeln_debug('Proxy map: ', compat_str(proxy_map))
|
||||
self._write_string('[debug] Proxy map: ' + compat_str(proxy_map) + '\n')
|
||||
|
||||
if self.params.get('call_home', False):
|
||||
ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode('utf-8')
|
||||
writeln_debug('Public IP address: %s' % (ipaddr, ))
|
||||
self._write_string('[debug] Public IP address: %s\n' % ipaddr)
|
||||
latest_version = self.urlopen(
|
||||
'https://yt-dl.org/latest/version').read().decode('utf-8')
|
||||
if version_tuple(latest_version) > version_tuple(__version__):
|
||||
@ -2560,7 +2407,7 @@ class YoutubeDL(object):
|
||||
opts_proxy = self.params.get('proxy')
|
||||
|
||||
if opts_cookiefile is None:
|
||||
self.cookiejar = YoutubeDLCookieJar()
|
||||
self.cookiejar = compat_cookiejar.CookieJar()
|
||||
else:
|
||||
opts_cookiefile = expand_path(opts_cookiefile)
|
||||
self.cookiejar = YoutubeDLCookieJar(opts_cookiefile)
|
||||
@ -2621,28 +2468,6 @@ class YoutubeDL(object):
|
||||
encoding = preferredencoding()
|
||||
return encoding
|
||||
|
||||
def _write_info_json(self, label, info_dict, infofn, overwrite=None):
|
||||
if not self.params.get('writeinfojson', False):
|
||||
return False
|
||||
|
||||
def msg(fmt, lbl):
|
||||
return fmt % (lbl + ' metadata',)
|
||||
|
||||
if overwrite is None:
|
||||
overwrite = not self.params.get('nooverwrites', False)
|
||||
|
||||
if not overwrite and os.path.exists(encodeFilename(infofn)):
|
||||
self.to_screen(msg('[info] %s is already present', label.title()))
|
||||
return 'exists'
|
||||
else:
|
||||
self.to_screen(msg('[info] Writing %s as JSON to: ' + infofn, label))
|
||||
try:
|
||||
write_json_file(self.filter_requested_info(info_dict), infofn)
|
||||
return True
|
||||
except (OSError, IOError):
|
||||
self.report_error(msg('Cannot write %s to JSON file ' + infofn, label))
|
||||
return
|
||||
|
||||
def _write_thumbnails(self, info_dict, filename):
|
||||
if self.params.get('writethumbnail', False):
|
||||
thumbnails = info_dict.get('thumbnails')
|
||||
|
@ -5,6 +5,7 @@ from __future__ import unicode_literals
|
||||
|
||||
__license__ = 'Public Domain'
|
||||
|
||||
import codecs
|
||||
import io
|
||||
import os
|
||||
import random
|
||||
@ -16,7 +17,6 @@ from .options import (
|
||||
)
|
||||
from .compat import (
|
||||
compat_getpass,
|
||||
compat_register_utf8,
|
||||
compat_shlex_split,
|
||||
workaround_optparse_bug9161,
|
||||
)
|
||||
@ -46,8 +46,10 @@ from .YoutubeDL import YoutubeDL
|
||||
|
||||
|
||||
def _real_main(argv=None):
|
||||
# Compatibility fix for Windows
|
||||
compat_register_utf8()
|
||||
# Compatibility fixes for Windows
|
||||
if sys.platform == 'win32':
|
||||
# https://github.com/ytdl-org/youtube-dl/issues/820
|
||||
codecs.register(lambda name: codecs.lookup('utf-8') if name == 'cp65001' else None)
|
||||
|
||||
workaround_optparse_bug9161()
|
||||
|
||||
|
@ -1,16 +1,14 @@
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import errno
|
||||
import io
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import shutil
|
||||
import traceback
|
||||
|
||||
from .compat import (
|
||||
compat_getenv,
|
||||
compat_open as open,
|
||||
)
|
||||
from .compat import compat_getenv
|
||||
from .utils import (
|
||||
error_to_compat_str,
|
||||
expand_path,
|
||||
@ -85,7 +83,7 @@ class Cache(object):
|
||||
cache_fn = self._get_cache_fn(section, key, dtype)
|
||||
try:
|
||||
try:
|
||||
with open(cache_fn, 'r', encoding='utf-8') as cachef:
|
||||
with io.open(cache_fn, 'r', encoding='utf-8') as cachef:
|
||||
return self._validate(json.load(cachef), min_ver)
|
||||
except ValueError:
|
||||
try:
|
||||
|
@ -1663,5 +1663,5 @@ def casefold(s):
|
||||
|
||||
|
||||
__all__ = [
|
||||
'casefold',
|
||||
casefold
|
||||
]
|
||||
|
@ -1,12 +1,10 @@
|
||||
# coding: utf-8
|
||||
from __future__ import unicode_literals
|
||||
from __future__ import division
|
||||
|
||||
import base64
|
||||
import binascii
|
||||
import collections
|
||||
import ctypes
|
||||
import datetime
|
||||
import email
|
||||
import getpass
|
||||
import io
|
||||
@ -21,7 +19,6 @@ import socket
|
||||
import struct
|
||||
import subprocess
|
||||
import sys
|
||||
import types
|
||||
import xml.etree.ElementTree
|
||||
|
||||
# naming convention
|
||||
@ -34,17 +31,13 @@ try:
|
||||
compat_str, compat_basestring, compat_chr = (
|
||||
unicode, basestring, unichr
|
||||
)
|
||||
from .casefold import casefold as compat_casefold
|
||||
|
||||
except NameError:
|
||||
compat_str, compat_basestring, compat_chr = (
|
||||
str, (str, bytes), chr
|
||||
str, str, chr
|
||||
)
|
||||
|
||||
# casefold
try:
    compat_str.casefold
    compat_casefold = lambda s: s.casefold()
except AttributeError:
    from .casefold import casefold as compat_casefold
|
||||
try:
|
||||
import collections.abc as compat_collections_abc
|
||||
@ -56,22 +49,6 @@ try:
|
||||
except ImportError: # Python 2
|
||||
import urllib2 as compat_urllib_request
|
||||
|
||||
# Also fix up lack of method arg in old Pythons
|
||||
try:
|
||||
_req = compat_urllib_request.Request
|
||||
_req('http://127.0.0.1', method='GET')
|
||||
except TypeError:
|
||||
class _request(object):
|
||||
def __new__(cls, url, *args, **kwargs):
|
||||
method = kwargs.pop('method', None)
|
||||
r = _req(url, *args, **kwargs)
|
||||
if method:
|
||||
r.get_method = types.MethodType(lambda _: method, r)
|
||||
return r
|
||||
|
||||
compat_urllib_request.Request = _request
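As an aside, a self-contained sketch of the same trick in isolation (not part of the diff; Python 3 is used purely to illustrate, since its Request already accepts method=): get_method() is rebound on the instance so a caller-supplied method wins.

# illustration only: force the HTTP method by rebinding get_method on one Request instance
import types
import urllib.request

req = urllib.request.Request('http://127.0.0.1/')  # placeholder URL
req.get_method = types.MethodType(lambda _self: 'HEAD', req)
print(req.get_method())  # HEAD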
|
||||
|
||||
|
||||
try:
|
||||
import urllib.error as compat_urllib_error
|
||||
except ImportError: # Python 2
|
||||
@ -96,12 +73,6 @@ try:
|
||||
except ImportError: # Python 2
|
||||
import urllib as compat_urllib_response
|
||||
|
||||
try:
|
||||
compat_urllib_response.addinfourl.status
|
||||
except AttributeError:
|
||||
# .getcode() is deprecated in Py 3.
|
||||
compat_urllib_response.addinfourl.status = property(lambda self: self.getcode())
|
||||
|
||||
try:
|
||||
import http.cookiejar as compat_cookiejar
|
||||
except ImportError: # Python 2
|
||||
@ -126,24 +97,12 @@ except ImportError: # Python 2
|
||||
import Cookie as compat_cookies
|
||||
compat_http_cookies = compat_cookies
|
||||
|
||||
if sys.version_info[0] == 2 or sys.version_info < (3, 3):
|
||||
if sys.version_info[0] == 2:
|
||||
class compat_cookies_SimpleCookie(compat_cookies.SimpleCookie):
|
||||
def load(self, rawdata):
|
||||
must_have_value = 0
|
||||
if not isinstance(rawdata, dict):
|
||||
if sys.version_info[:2] != (2, 7) or sys.platform.startswith('java'):
|
||||
# attribute must have value for parsing
|
||||
rawdata, must_have_value = re.subn(
|
||||
r'(?i)(;\s*)(secure|httponly)(\s*(?:;|$))', r'\1\2=\2\3', rawdata)
|
||||
if sys.version_info[0] == 2:
|
||||
if isinstance(rawdata, compat_str):
|
||||
rawdata = str(rawdata)
|
||||
super(compat_cookies_SimpleCookie, self).load(rawdata)
|
||||
if must_have_value > 0:
|
||||
for morsel in self.values():
|
||||
for attr in ('secure', 'httponly'):
|
||||
if morsel.get(attr):
|
||||
morsel[attr] = True
|
||||
if isinstance(rawdata, compat_str):
|
||||
rawdata = str(rawdata)
|
||||
return super(compat_cookies_SimpleCookie, self).load(rawdata)
|
||||
else:
|
||||
compat_cookies_SimpleCookie = compat_cookies.SimpleCookie
|
||||
compat_http_cookies_SimpleCookie = compat_cookies_SimpleCookie
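For reference, a standalone sketch of what the re.subn() rewrite above does to a raw cookie string before SimpleCookie parses it (not part of the diff; the cookie data is made up):

# illustration only: old SimpleCookie parsers need attr=value, so value-less
# 'secure'/'httponly' attributes are temporarily given themselves as a value
import re

rawdata = 'id=abc123; secure; Path=/; HttpOnly'
fixed, must_have_value = re.subn(
    r'(?i)(;\s*)(secure|httponly)(\s*(?:;|$))', r'\1\2=\2\3', rawdata)
print(fixed)            # id=abc123; secure=secure; Path=/; HttpOnly=HttpOnly
print(must_have_value)  # 2 - load() above then resets those morsel attributes to True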
|
||||
@ -2395,11 +2354,6 @@ try:
|
||||
import http.client as compat_http_client
|
||||
except ImportError: # Python 2
|
||||
import httplib as compat_http_client
|
||||
try:
|
||||
compat_http_client.HTTPResponse.getcode
|
||||
except AttributeError:
|
||||
# Py < 3.1
|
||||
compat_http_client.HTTPResponse.getcode = lambda self: self.status
|
||||
|
||||
try:
|
||||
from urllib.error import HTTPError as compat_HTTPError
|
||||
@ -3173,46 +3127,6 @@ else:
|
||||
return ctypes.WINFUNCTYPE(*args, **kwargs)
|
||||
|
||||
|
||||
if sys.version_info < (3, 0):
    # open(file, mode='r', buffering=- 1, encoding=None, errors=None, newline=None, closefd=True) not: opener=None
    def compat_open(file_, *args, **kwargs):
        if len(args) > 6 or 'opener' in kwargs:
            raise ValueError('open: unsupported argument "opener"')
        return io.open(file_, *args, **kwargs)
else:
    compat_open = open


# compat_register_utf8
def compat_register_utf8():
    if sys.platform == 'win32':
        # https://github.com/ytdl-org/youtube-dl/issues/820
        from codecs import register, lookup
        register(
            lambda name: lookup('utf-8') if name == 'cp65001' else None)


# compat_datetime_timedelta_total_seconds
try:
    compat_datetime_timedelta_total_seconds = datetime.timedelta.total_seconds
except AttributeError:
    # Py 2.6
    def compat_datetime_timedelta_total_seconds(td):
        return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 10**6

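A quick sanity check that the Py2.6 fallback above agrees with the built-in (illustration only, not part of the patch; assumes Python 3 division semantics):

# illustration only: the manual formula matches datetime.timedelta.total_seconds()
import datetime

def td_total_seconds(td):
    return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 10**6

td = datetime.timedelta(days=1, seconds=3, microseconds=500000)
assert td_total_seconds(td) == td.total_seconds() == 86403.5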
# optional decompression packages
# PyPi brotli package implements 'br' Content-Encoding
try:
    import brotli as compat_brotli
except ImportError:
    compat_brotli = None
# PyPi ncompress package implements 'compress' Content-Encoding
try:
    import ncompress as compat_ncompress
except ImportError:
    compat_ncompress = None
|
||||
|
||||
legacy = [
|
||||
'compat_HTMLParseError',
|
||||
'compat_HTMLParser',
|
||||
@ -3246,12 +3160,10 @@ __all__ = [
|
||||
'compat_Struct',
|
||||
'compat_base64_b64decode',
|
||||
'compat_basestring',
|
||||
'compat_brotli',
|
||||
'compat_casefold',
|
||||
'compat_chr',
|
||||
'compat_collections_abc',
|
||||
'compat_collections_chain_map',
|
||||
'compat_datetime_timedelta_total_seconds',
|
||||
'compat_http_cookiejar',
|
||||
'compat_http_cookiejar_Cookie',
|
||||
'compat_http_cookies',
|
||||
@ -3272,9 +3184,7 @@ __all__ = [
|
||||
'compat_itertools_zip_longest',
|
||||
'compat_kwargs',
|
||||
'compat_map',
|
||||
'compat_ncompress',
|
||||
'compat_numeric_types',
|
||||
'compat_open',
|
||||
'compat_ord',
|
||||
'compat_os_name',
|
||||
'compat_os_path_expanduser',
|
||||
@ -3282,7 +3192,6 @@ __all__ = [
|
||||
'compat_print',
|
||||
'compat_re_Match',
|
||||
'compat_re_Pattern',
|
||||
'compat_register_utf8',
|
||||
'compat_setenv',
|
||||
'compat_shlex_quote',
|
||||
'compat_shlex_split',
|
||||
|
@ -96,7 +96,7 @@ class FileDownloader(object):
|
||||
return None
|
||||
return int(float(remaining) / rate)
|
||||
start, now = (start_or_rate, now_or_remaining)
|
||||
total, current = args[:2]
|
||||
total, current = args
|
||||
if total is None:
|
||||
return None
|
||||
if now is None:
|
||||
@ -339,10 +339,6 @@ class FileDownloader(object):
|
||||
def download(self, filename, info_dict):
|
||||
"""Download to a filename using the info from info_dict
|
||||
Return True on success and False otherwise
|
||||
|
||||
This method filters the `Cookie` header from the info_dict to prevent leaks.
|
||||
Downloaders have their own way of handling cookies.
|
||||
See: https://github.com/yt-dlp/yt-dlp/security/advisories/GHSA-v8mc-9377-rwjj
|
||||
"""
|
||||
|
||||
nooverwrites_and_exists = (
|
||||
|
@ -1,7 +1,5 @@
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import itertools
|
||||
|
||||
from .fragment import FragmentFD
|
||||
from ..compat import compat_urllib_error
|
||||
from ..utils import (
|
||||
@ -32,23 +30,25 @@ class DashSegmentsFD(FragmentFD):
|
||||
fragment_retries = self.params.get('fragment_retries', 0)
|
||||
skip_unavailable_fragments = self.params.get('skip_unavailable_fragments', True)
|
||||
|
||||
for frag_index, fragment in enumerate(fragments, 1):
|
||||
frag_index = 0
|
||||
for i, fragment in enumerate(fragments):
|
||||
frag_index += 1
|
||||
if frag_index <= ctx['fragment_index']:
|
||||
continue
|
||||
# In DASH, the first segment contains necessary headers to
|
||||
# generate a valid MP4 file, so always abort for the first segment
|
||||
fatal = frag_index == 1 or not skip_unavailable_fragments
|
||||
fragment_url = fragment.get('url')
|
||||
if not fragment_url:
|
||||
assert fragment_base_url
|
||||
fragment_url = urljoin(fragment_base_url, fragment['path'])
|
||||
success = False
|
||||
for count in itertools.count():
|
||||
fatal = i == 0 or not skip_unavailable_fragments
|
||||
for count in range(fragment_retries + 1):
|
||||
try:
|
||||
fragment_url = fragment.get('url')
|
||||
if not fragment_url:
|
||||
assert fragment_base_url
|
||||
fragment_url = urljoin(fragment_base_url, fragment['path'])
|
||||
success, frag_content = self._download_fragment(ctx, fragment_url, info_dict)
|
||||
if not success:
|
||||
return False
|
||||
self._append_fragment(ctx, frag_content)
|
||||
break
|
||||
except compat_urllib_error.HTTPError as err:
|
||||
# YouTube may often return 404 HTTP error for a fragment causing the
|
||||
# whole download to fail. However if the same fragment is immediately
|
||||
@ -58,19 +58,19 @@ class DashSegmentsFD(FragmentFD):
|
||||
# HTTP error.
|
||||
if count < fragment_retries:
|
||||
self.report_retry_fragment(err, frag_index, count + 1, fragment_retries)
|
||||
continue
|
||||
except DownloadError:
|
||||
# Don't retry fragment if error occurred during HTTP downloading
|
||||
# itself since it has its own retry settings
|
||||
if fatal:
|
||||
raise
|
||||
break
|
||||
# itself since it has own retry settings
|
||||
if not fatal:
|
||||
self.report_skip_fragment(frag_index)
|
||||
break
|
||||
raise
|
||||
|
||||
if not success:
|
||||
if count >= fragment_retries:
|
||||
if not fatal:
|
||||
self.report_skip_fragment(frag_index)
|
||||
continue
|
||||
self.report_error('giving up after %s fragment retries' % count)
|
||||
self.report_error('giving up after %s fragment retries' % fragment_retries)
|
||||
return False
|
||||
|
||||
self._finish_frag_download(ctx)
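For orientation, a minimal sketch of the retry/skip shape used by the fragment loop above (not part of the diff; fetch and the exception type are stand-ins for the real downloader calls):

# illustration only: retry a fragment up to fragment_retries times, abort if fatal,
# otherwise skip it and let the caller continue with the next fragment
def download_fragment(fetch, frag_index, fragment_retries=3, fatal=False):
    for count in range(fragment_retries + 1):
        try:
            return fetch(frag_index)
        except IOError:
            if count < fragment_retries:
                continue  # report_retry_fragment() in the real code
    if fatal:
        raise RuntimeError('giving up after %d fragment retries' % fragment_retries)
    return None  # report_skip_fragment() in the real code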
|
||||
|
@ -1,10 +1,9 @@
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import os
|
||||
import os.path
|
||||
import re
|
||||
import subprocess
|
||||
import sys
|
||||
import tempfile
|
||||
import time
|
||||
|
||||
from .common import FileDownloader
|
||||
@ -24,8 +23,6 @@ from ..utils import (
|
||||
check_executable,
|
||||
is_outdated_version,
|
||||
process_communicate_or_kill,
|
||||
T,
|
||||
traverse_obj,
|
||||
)
|
||||
|
||||
|
||||
@ -33,7 +30,6 @@ class ExternalFD(FileDownloader):
|
||||
def real_download(self, filename, info_dict):
|
||||
self.report_destination(filename)
|
||||
tmpfilename = self.temp_name(filename)
|
||||
self._cookies_tempfile = None
|
||||
|
||||
try:
|
||||
started = time.time()
|
||||
@ -46,13 +42,6 @@ class ExternalFD(FileDownloader):
|
||||
# should take place
|
||||
retval = 0
|
||||
self.to_screen('[%s] Interrupted by user' % self.get_basename())
|
||||
finally:
|
||||
if self._cookies_tempfile and os.path.isfile(self._cookies_tempfile):
|
||||
try:
|
||||
os.remove(self._cookies_tempfile)
|
||||
except OSError:
|
||||
self.report_warning(
|
||||
'Unable to delete temporary cookies file "{0}"'.format(self._cookies_tempfile))
|
||||
|
||||
if retval == 0:
|
||||
status = {
|
||||
@ -108,16 +97,6 @@ class ExternalFD(FileDownloader):
|
||||
def _configuration_args(self, default=[]):
|
||||
return cli_configuration_args(self.params, 'external_downloader_args', default)
|
||||
|
||||
def _write_cookies(self):
|
||||
if not self.ydl.cookiejar.filename:
|
||||
tmp_cookies = tempfile.NamedTemporaryFile(suffix='.cookies', delete=False)
|
||||
tmp_cookies.close()
|
||||
self._cookies_tempfile = tmp_cookies.name
|
||||
self.to_screen('[download] Writing temporary cookies file to "{0}"'.format(self._cookies_tempfile))
|
||||
# real_download resets _cookies_tempfile; if it's None, save() will write to cookiejar.filename
|
||||
self.ydl.cookiejar.save(self._cookies_tempfile, ignore_discard=True, ignore_expires=True)
|
||||
return self.ydl.cookiejar.filename or self._cookies_tempfile
|
||||
|
||||
def _call_downloader(self, tmpfilename, info_dict):
|
||||
""" Either overwrite this or implement _make_cmd """
|
||||
cmd = [encodeArgument(a) for a in self._make_cmd(tmpfilename, info_dict)]
|
||||
@ -131,21 +110,13 @@ class ExternalFD(FileDownloader):
|
||||
self.to_stderr(stderr.decode('utf-8', 'replace'))
|
||||
return p.returncode
|
||||
|
||||
@staticmethod
|
||||
def _header_items(info_dict):
|
||||
return traverse_obj(
|
||||
info_dict, ('http_headers', T(dict.items), Ellipsis))
|
||||
|
||||
|
||||
class CurlFD(ExternalFD):
|
||||
AVAILABLE_OPT = '-V'
|
||||
|
||||
def _make_cmd(self, tmpfilename, info_dict):
|
||||
cmd = [self.exe, '--location', '-o', tmpfilename, '--compressed']
|
||||
cookie_header = self.ydl.cookiejar.get_cookie_header(info_dict['url'])
|
||||
if cookie_header:
|
||||
cmd += ['--cookie', cookie_header]
|
||||
for key, val in self._header_items(info_dict):
|
||||
cmd = [self.exe, '--location', '-o', tmpfilename]
|
||||
for key, val in info_dict['http_headers'].items():
|
||||
cmd += ['--header', '%s: %s' % (key, val)]
|
||||
cmd += self._bool_option('--continue-at', 'continuedl', '-', '0')
|
||||
cmd += self._valueless_option('--silent', 'noprogress')
|
||||
@ -180,11 +151,8 @@ class AxelFD(ExternalFD):
|
||||
|
||||
def _make_cmd(self, tmpfilename, info_dict):
|
||||
cmd = [self.exe, '-o', tmpfilename]
|
||||
for key, val in self._header_items(info_dict):
|
||||
for key, val in info_dict['http_headers'].items():
|
||||
cmd += ['-H', '%s: %s' % (key, val)]
|
||||
cookie_header = self.ydl.cookiejar.get_cookie_header(info_dict['url'])
|
||||
if cookie_header:
|
||||
cmd += ['-H', 'Cookie: {0}'.format(cookie_header), '--max-redirect=0']
|
||||
cmd += self._configuration_args()
|
||||
cmd += ['--', info_dict['url']]
|
||||
return cmd
|
||||
@ -194,10 +162,8 @@ class WgetFD(ExternalFD):
|
||||
AVAILABLE_OPT = '--version'
|
||||
|
||||
def _make_cmd(self, tmpfilename, info_dict):
|
||||
cmd = [self.exe, '-O', tmpfilename, '-nv', '--compression=auto']
|
||||
if self.ydl.cookiejar.get_cookie_header(info_dict['url']):
|
||||
cmd += ['--load-cookies', self._write_cookies()]
|
||||
for key, val in self._header_items(info_dict):
|
||||
cmd = [self.exe, '-O', tmpfilename, '-nv', '--no-cookies']
|
||||
for key, val in info_dict['http_headers'].items():
|
||||
cmd += ['--header', '%s: %s' % (key, val)]
|
||||
cmd += self._option('--limit-rate', 'ratelimit')
|
||||
retry = self._option('--tries', 'retries')
|
||||
@ -216,58 +182,21 @@ class WgetFD(ExternalFD):
|
||||
class Aria2cFD(ExternalFD):
|
||||
AVAILABLE_OPT = '-v'
|
||||
|
||||
@staticmethod
|
||||
def _aria2c_filename(fn):
|
||||
return fn if os.path.isabs(fn) else os.path.join('.', fn)
|
||||
|
||||
def _make_cmd(self, tmpfilename, info_dict):
|
||||
cmd = [self.exe, '-c',
|
||||
'--console-log-level=warn', '--summary-interval=0', '--download-result=hide',
|
||||
'--http-accept-gzip=true', '--file-allocation=none', '-x16', '-j16', '-s16']
|
||||
if 'fragments' in info_dict:
|
||||
cmd += ['--allow-overwrite=true', '--allow-piece-length-change=true']
|
||||
else:
|
||||
cmd += ['--min-split-size', '1M']
|
||||
|
||||
if self.ydl.cookiejar.get_cookie_header(info_dict['url']):
|
||||
cmd += ['--load-cookies={0}'.format(self._write_cookies())]
|
||||
for key, val in self._header_items(info_dict):
|
||||
cmd += ['--header', '%s: %s' % (key, val)]
|
||||
cmd += self._configuration_args(['--max-connection-per-server', '4'])
|
||||
cmd = [self.exe, '-c']
|
||||
cmd += self._configuration_args([
|
||||
'--min-split-size', '1M', '--max-connection-per-server', '4'])
|
||||
dn = os.path.dirname(tmpfilename)
|
||||
if dn:
|
||||
cmd += ['--dir', dn]
|
||||
cmd += ['--out', os.path.basename(tmpfilename)]
|
||||
cmd += self._option('--max-overall-download-limit', 'ratelimit')
|
||||
for key, val in info_dict['http_headers'].items():
|
||||
cmd += ['--header', '%s: %s' % (key, val)]
|
||||
cmd += self._option('--interface', 'source_address')
|
||||
cmd += self._option('--all-proxy', 'proxy')
|
||||
cmd += self._bool_option('--check-certificate', 'nocheckcertificate', 'false', 'true', '=')
|
||||
cmd += self._bool_option('--remote-time', 'updatetime', 'true', 'false', '=')
|
||||
cmd += self._bool_option('--show-console-readout', 'noprogress', 'false', 'true', '=')
|
||||
cmd += self._configuration_args()
|
||||
|
||||
        # aria2c strips out spaces from the beginning/end of filenames and paths.
        # We work around this issue by adding a "./" to the beginning of the
        # filename and relative path, and adding a "/" at the end of the path.
        # See: https://github.com/yt-dlp/yt-dlp/issues/276
        # https://github.com/ytdl-org/youtube-dl/issues/20312
        # https://github.com/aria2/aria2/issues/1373
        dn = os.path.dirname(tmpfilename)
        if dn:
            cmd += ['--dir', self._aria2c_filename(dn) + os.path.sep]
        if 'fragments' not in info_dict:
            cmd += ['--out', self._aria2c_filename(os.path.basename(tmpfilename))]
        cmd += ['--auto-file-renaming=false']
        if 'fragments' in info_dict:
            cmd += ['--file-allocation=none', '--uri-selector=inorder']
            url_list_file = '%s.frag.urls' % (tmpfilename, )
            url_list = []
            for frag_index, fragment in enumerate(info_dict['fragments']):
                fragment_filename = '%s-Frag%d' % (os.path.basename(tmpfilename), frag_index)
                url_list.append('%s\n\tout=%s' % (fragment['url'], self._aria2c_filename(fragment_filename)))
            stream, _ = self.sanitize_open(url_list_file, 'wb')
            stream.write('\n'.join(url_list).encode())
            stream.close()
            cmd += ['-i', self._aria2c_filename(url_list_file)]
        else:
            cmd += ['--', info_dict['url']]
        cmd += ['--', info_dict['url']]
        return cmd

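A standalone illustration of why _aria2c_filename() anchors relative paths to './' (not part of the diff; file names are invented and output comments assume POSIX path separators): aria2c trims leading/trailing spaces from the names it is given, so the './' prefix keeps them significant.

# illustration only: preserve significant leading spaces in relative paths
import os.path

def aria2c_filename(fn):
    return fn if os.path.isabs(fn) else os.path.join('.', fn)

print(aria2c_filename(' weird name.mp4'))  # ./ weird name.mp4 - leading space kept after ./
print(aria2c_filename('/tmp/out.mp4'))     # /tmp/out.mp4 - absolute paths left alone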
@ -306,10 +235,8 @@ class Aria2pFD(ExternalFD):
|
||||
}
|
||||
options['dir'] = os.path.dirname(tmpfilename) or os.path.abspath('.')
|
||||
options['out'] = os.path.basename(tmpfilename)
|
||||
if self.ydl.cookiejar.get_cookie_header(info_dict['url']):
|
||||
options['load-cookies'] = self._write_cookies()
|
||||
options['header'] = []
|
||||
for key, val in self._header_items(info_dict):
|
||||
for key, val in info_dict['http_headers'].items():
|
||||
options['header'].append('{0}: {1}'.format(key, val))
|
||||
download = aria2.add_uris([info_dict['url']], options)
|
||||
status = {
|
||||
@ -338,16 +265,8 @@ class HttpieFD(ExternalFD):
|
||||
|
||||
def _make_cmd(self, tmpfilename, info_dict):
|
||||
cmd = ['http', '--download', '--output', tmpfilename, info_dict['url']]
|
||||
for key, val in self._header_items(info_dict):
|
||||
for key, val in info_dict['http_headers'].items():
|
||||
cmd += ['%s:%s' % (key, val)]
|
||||
|
||||
# httpie 3.1.0+ removes the Cookie header on redirect, so this should be safe for now. [1]
|
||||
# If we ever need cookie handling for redirects, we can export the cookiejar into a session. [2]
|
||||
# 1: https://github.com/httpie/httpie/security/advisories/GHSA-9w4w-cpc8-h2fq
|
||||
# 2: https://httpie.io/docs/cli/sessions
|
||||
cookie_header = self.ydl.cookiejar.get_cookie_header(info_dict['url'])
|
||||
if cookie_header:
|
||||
cmd += ['Cookie:%s' % cookie_header]
|
||||
return cmd
|
||||
|
||||
|
||||
@ -393,14 +312,7 @@ class FFmpegFD(ExternalFD):
|
||||
# if end_time:
|
||||
# args += ['-t', compat_str(end_time - start_time)]
|
||||
|
||||
cookies = self.ydl.cookiejar.get_cookies_for_url(url)
|
||||
if cookies:
|
||||
args.extend(['-cookies', ''.join(
|
||||
'{0}={1}; path={2}; domain={3};\r\n'.format(
|
||||
cookie.name, cookie.value, cookie.path, cookie.domain)
|
||||
for cookie in cookies)])
|
||||
|
||||
if info_dict.get('http_headers') and re.match(r'^https?://', url):
|
||||
if info_dict['http_headers'] and re.match(r'^https?://', url):
|
||||
# Trailing \r\n after each HTTP header is important to prevent warning from ffmpeg/avconv:
|
||||
# [http @ 00000000003d2fa0] No trailing CRLF found in HTTP header.
|
||||
headers = handle_youtubedl_headers(info_dict['http_headers'])
|
||||
|
@ -141,8 +141,7 @@ class HttpFD(FileDownloader):
|
||||
# Content-Range is either not present or invalid. Assuming remote webserver is
|
||||
# trying to send the whole file, resume is not possible, so wiping the local file
|
||||
# and performing entire redownload
|
||||
if range_start > 0:
|
||||
self.report_unable_to_resume()
|
||||
self.report_unable_to_resume()
|
||||
ctx.resume_len = 0
|
||||
ctx.open_mode = 'wb'
|
||||
ctx.data_len = int_or_none(ctx.data.info().get('Content-length', None))
|
||||
@ -294,7 +293,7 @@ class HttpFD(FileDownloader):
|
||||
|
||||
# Progress message
|
||||
speed = self.calc_speed(start, now, byte_counter - ctx.resume_len)
|
||||
eta = self.calc_eta(speed, ctx.data_len and (ctx.data_len - byte_counter))
|
||||
eta = self.calc_eta(speed, ctx.data_len and (ctx.data_len - ctx.resume_len))
|
||||
|
||||
self._hook_progress({
|
||||
'status': 'downloading',
|
||||
|
@ -14,7 +14,7 @@ class ChelseafcIE(InfoExtractor):
|
||||
_VALID_URL = r'https?://(?:www\.)?chelseafc\.com(?:/[a-z]+)?/video/(?P<id>[a-z0-9]+(?:-[a-z0-9]+)*)'
|
||||
_TESTS = [{
|
||||
'url': 'https://www.chelseafc.com/en/video/full-match-chelsea-2-2-everton',
|
||||
'md5': '2fda617911b7148a2a19bec55b75d30a',
|
||||
'md5': '16c0df4e6909ad5ae96eab68fc584d75',
|
||||
'info_dict': {
|
||||
'id': 'full-match-chelsea-2-2-everton',
|
||||
'ext': 'mp4',
|
||||
@ -29,7 +29,7 @@ class ChelseafcIE(InfoExtractor):
|
||||
},
|
||||
{
|
||||
'url': 'https://www.chelseafc.com/en/video/manchester-city-vs-chelsea-2-0-or-highlights-or-efl-cup',
|
||||
'md5': '2905365c3c9cf4612f303fbb99c2f4ca',
|
||||
'md5': 'c20c52f3954d938bc5da4d6fe358c81d',
|
||||
'info_dict': {
|
||||
'id': 'manchester-city-vs-chelsea-2-0-or-highlights-or-efl-cup',
|
||||
'ext': 'mp4',
|
||||
|
@ -1,69 +0,0 @@
|
||||
# coding: utf-8
|
||||
from __future__ import unicode_literals
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..compat import compat_str
|
||||
from ..utils import (
|
||||
ExtractorError,
|
||||
merge_dicts,
|
||||
T,
|
||||
traverse_obj,
|
||||
unified_timestamp,
|
||||
url_or_none,
|
||||
)
|
||||
|
||||
|
||||
class ClipchampIE(InfoExtractor):
|
||||
_VALID_URL = r'https?://(?:www\.)?clipchamp\.com/watch/(?P<id>[\w-]+)'
|
||||
_TESTS = [{
|
||||
'url': 'https://clipchamp.com/watch/gRXZ4ZhdDaU',
|
||||
'info_dict': {
|
||||
'id': 'gRXZ4ZhdDaU',
|
||||
'ext': 'mp4',
|
||||
'title': 'Untitled video',
|
||||
'uploader': 'Alexander Schwartz',
|
||||
'timestamp': 1680805580,
|
||||
'upload_date': '20230406',
|
||||
'thumbnail': r're:^https?://.+\.jpg',
|
||||
},
|
||||
'params': {
|
||||
'skip_download': 'm3u8',
|
||||
'format': 'bestvideo',
|
||||
},
|
||||
}]
|
||||
|
||||
_STREAM_URL_TMPL = 'https://%s.cloudflarestream.com/%s/manifest/video.%s'
|
||||
_STREAM_URL_QUERY = {'parentOrigin': 'https://clipchamp.com'}
|
||||
|
||||
def _real_extract(self, url):
|
||||
video_id = self._match_id(url)
|
||||
webpage = self._download_webpage(url, video_id)
|
||||
data = self._search_nextjs_data(webpage, video_id)['props']['pageProps']['video']
|
||||
|
||||
storage_location = data.get('storage_location')
|
||||
if storage_location != 'cf_stream':
|
||||
raise ExtractorError('Unsupported clip storage location "%s"' % (storage_location,))
|
||||
|
||||
path = data['download_url']
|
||||
iframe = self._download_webpage(
|
||||
'https://iframe.cloudflarestream.com/' + path, video_id, 'Downloading player iframe')
|
||||
subdomain = self._search_regex(
|
||||
r'''\bcustomer-domain-prefix\s*=\s*("|')(?P<sd>[\w-]+)\1''', iframe,
|
||||
'subdomain', group='sd', fatal=False) or 'customer-2ut9yn3y6fta1yxe'
|
||||
|
||||
formats = self._extract_mpd_formats(
|
||||
self._STREAM_URL_TMPL % (subdomain, path, 'mpd'), video_id,
|
||||
query=self._STREAM_URL_QUERY, fatal=False, mpd_id='dash')
|
||||
formats.extend(self._extract_m3u8_formats(
|
||||
self._STREAM_URL_TMPL % (subdomain, path, 'm3u8'), video_id, 'mp4',
|
||||
query=self._STREAM_URL_QUERY, fatal=False, m3u8_id='hls'))
|
||||
|
||||
return merge_dicts({
|
||||
'id': video_id,
|
||||
'formats': formats,
|
||||
'uploader': ' '.join(traverse_obj(data, ('creator', ('first_name', 'last_name'), T(compat_str)))) or None,
|
||||
}, traverse_obj(data, {
|
||||
'title': ('project', 'project_name', T(compat_str)),
|
||||
'timestamp': ('created_at', T(unified_timestamp)),
|
||||
'thumbnail': ('thumbnail_url', T(url_or_none)),
|
||||
}), rev=True)
|
@ -3,7 +3,6 @@ from __future__ import unicode_literals
|
||||
|
||||
import base64
|
||||
import datetime
|
||||
import functools
|
||||
import hashlib
|
||||
import json
|
||||
import netrc
|
||||
@ -24,8 +23,6 @@ from ..compat import (
|
||||
compat_getpass,
|
||||
compat_integer_types,
|
||||
compat_http_client,
|
||||
compat_map as map,
|
||||
compat_open as open,
|
||||
compat_os_name,
|
||||
compat_str,
|
||||
compat_urllib_error,
|
||||
@ -34,7 +31,6 @@ from ..compat import (
|
||||
compat_urllib_request,
|
||||
compat_urlparse,
|
||||
compat_xml_parse_error,
|
||||
compat_zip as zip,
|
||||
)
|
||||
from ..downloader.f4m import (
|
||||
get_base_url,
|
||||
@ -74,7 +70,6 @@ from ..utils import (
|
||||
str_or_none,
|
||||
str_to_int,
|
||||
strip_or_none,
|
||||
traverse_obj,
|
||||
try_get,
|
||||
unescapeHTML,
|
||||
unified_strdate,
|
||||
@ -84,7 +79,6 @@ from ..utils import (
|
||||
urljoin,
|
||||
url_basename,
|
||||
url_or_none,
|
||||
variadic,
|
||||
xpath_element,
|
||||
xpath_text,
|
||||
xpath_with_ns,
|
||||
@ -373,22 +367,9 @@ class InfoExtractor(object):
|
||||
title, description etc.
|
||||
|
||||
|
||||
A subclass of InfoExtractor must be defined to handle each specific site (or
|
||||
several sites). Such a concrete subclass should be added to the list of
|
||||
extractors. It should also:
|
||||
* define its _VALID_URL attribute as a regexp, or a Sequence of alternative
|
||||
regexps (but see below)
|
||||
* re-define the _real_extract() method
|
||||
* optionally re-define the _real_initialize() method.
|
||||
|
||||
An extractor subclass may also override suitable() if necessary, but the
|
||||
function signature must be preserved and the function must import everything
|
||||
it needs (except other extractors), so that lazy_extractors works correctly.
|
||||
If the subclass's suitable() and _real_extract() functions avoid using
|
||||
_VALID_URL, the subclass need not set that class attribute.
|
||||
|
||||
An abstract subclass of InfoExtractor may be used to simplify implementation
|
||||
within an extractor module; it should not be added to the list of extractors.
|
||||
Subclasses of this one should re-define the _real_initialize() and
|
||||
_real_extract() methods and define a _VALID_URL regexp.
|
||||
Probably, they should also be added to the list of extractors.
|
||||
|
||||
_GEO_BYPASS attribute may be set to False in order to disable
|
||||
geo restriction bypass mechanisms for a particular extractor.
|
||||
@ -423,33 +404,22 @@ class InfoExtractor(object):
|
||||
self._x_forwarded_for_ip = None
|
||||
self.set_downloader(downloader)
|
||||
|
||||
@classmethod
|
||||
def __match_valid_url(cls, url):
|
||||
# This does not use has/getattr intentionally - we want to know whether
|
||||
# we have cached the regexp for cls, whereas getattr would also
|
||||
# match its superclass
|
||||
if '_VALID_URL_RE' not in cls.__dict__:
|
||||
# _VALID_URL can now be a list/tuple of patterns
|
||||
cls._VALID_URL_RE = tuple(map(re.compile, variadic(cls._VALID_URL)))
|
||||
# 20% faster than next(filter(None, (p.match(url) for p in cls._VALID_URL_RE)), None) in 2.7
|
||||
for p in cls._VALID_URL_RE:
|
||||
p = p.match(url)
|
||||
if p:
|
||||
return p
|
||||
|
||||
# The public alias can safely be overridden, as in some back-ports
|
||||
_match_valid_url = __match_valid_url
|
||||
|
||||
@classmethod
|
||||
def suitable(cls, url):
|
||||
"""Receives a URL and returns True if suitable for this IE."""
|
||||
# This function must import everything it needs (except other extractors),
|
||||
# so that lazy_extractors works correctly
|
||||
return cls.__match_valid_url(url) is not None
|
||||
|
||||
# This does not use has/getattr intentionally - we want to know whether
|
||||
# we have cached the regexp for *this* class, whereas getattr would also
|
||||
# match the superclass
|
||||
if '_VALID_URL_RE' not in cls.__dict__:
|
||||
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
|
||||
return cls._VALID_URL_RE.match(url) is not None
|
||||
|
||||
@classmethod
|
||||
def _match_id(cls, url):
|
||||
m = cls.__match_valid_url(url)
|
||||
if '_VALID_URL_RE' not in cls.__dict__:
|
||||
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
|
||||
m = cls._VALID_URL_RE.match(url)
|
||||
assert m
|
||||
return compat_str(m.group('id'))
|
||||
|
||||
@ -1035,8 +1005,6 @@ class InfoExtractor(object):
|
||||
if group is None:
|
||||
# return the first matching group
|
||||
return next(g for g in mobj.groups() if g is not None)
|
||||
elif isinstance(group, (list, tuple)):
|
||||
return tuple(mobj.group(g) for g in group)
|
||||
else:
|
||||
return mobj.group(group)
|
||||
elif default is not NO_DEFAULT:
|
||||
@ -1052,9 +1020,10 @@ class InfoExtractor(object):
|
||||
Like _search_regex, but strips HTML tags and unescapes entities.
|
||||
"""
|
||||
res = self._search_regex(pattern, string, name, default, fatal, flags, group)
|
||||
if isinstance(res, tuple):
|
||||
return tuple(map(clean_html, res))
|
||||
return clean_html(res)
|
||||
if res:
|
||||
return clean_html(res).strip()
|
||||
else:
|
||||
return res
|
||||
|
||||
def _get_netrc_login_info(self, netrc_machine=None):
|
||||
username = None
|
||||
@ -1379,44 +1348,6 @@ class InfoExtractor(object):
|
||||
break
|
||||
return dict((k, v) for k, v in info.items() if v is not None)
|
||||
|
||||
    def _search_nextjs_data(self, webpage, video_id, **kw):
        nkw = dict((k, v) for k, v in kw.items() if k in ('transform_source', 'fatal'))
        kw.pop('transform_source', None)
        next_data = self._search_regex(
            r'''<script[^>]+\bid\s*=\s*('|")__NEXT_DATA__\1[^>]*>(?P<nd>[^<]+)</script>''',
            webpage, 'next.js data', group='nd', **kw)
        if not next_data:
            return {}
        return self._parse_json(next_data, video_id, **nkw)
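For context, a self-contained sketch of what _search_nextjs_data() pulls out of a page (not part of the diff; the HTML snippet is a made-up example): Next.js embeds its page props as JSON in a script tag with id __NEXT_DATA__.

# illustration only: extract and parse the __NEXT_DATA__ JSON blob
import json
import re

webpage = '<script id="__NEXT_DATA__" type="application/json">{"props": {"pageProps": {"video": {"id": "x1"}}}}</script>'
mobj = re.search(
    r'''<script[^>]+\bid\s*=\s*('|")__NEXT_DATA__\1[^>]*>(?P<nd>[^<]+)</script>''', webpage)
data = json.loads(mobj.group('nd'))
print(data['props']['pageProps']['video']['id'])  # x1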
|
||||
def _search_nuxt_data(self, webpage, video_id, *args, **kwargs):
|
||||
"""Parses Nuxt.js metadata. This works as long as the function __NUXT__ invokes is a pure function"""
|
||||
|
||||
# self, webpage, video_id, context_name='__NUXT__', *, fatal=True, traverse=('data', 0)
|
||||
context_name = args[0] if len(args) > 0 else kwargs.get('context_name', '__NUXT__')
|
||||
fatal = kwargs.get('fatal', True)
|
||||
traverse = kwargs.get('traverse', ('data', 0))
|
||||
|
||||
re_ctx = re.escape(context_name)
|
||||
|
||||
FUNCTION_RE = (r'\(\s*function\s*\((?P<arg_keys>[\s\S]*?)\)\s*\{\s*'
|
||||
r'return\s+(?P<js>\{[\s\S]*?})\s*;?\s*}\s*\((?P<arg_vals>[\s\S]*?)\)')
|
||||
|
||||
js, arg_keys, arg_vals = self._search_regex(
|
||||
(p.format(re_ctx, FUNCTION_RE) for p in
|
||||
(r'<script>\s*window\s*\.\s*{0}\s*=\s*{1}\s*\)\s*;?\s*</script>',
|
||||
r'{0}\s*\([\s\S]*?{1}')),
|
||||
webpage, context_name, group=('js', 'arg_keys', 'arg_vals'),
|
||||
default=NO_DEFAULT if fatal else (None, None, None))
|
||||
if js is None:
|
||||
return {}
|
||||
|
||||
args = dict(zip(arg_keys.split(','), map(json.dumps, self._parse_json(
|
||||
'[{0}]'.format(arg_vals), video_id, transform_source=js_to_json, fatal=fatal) or ())))
|
||||
|
||||
ret = self._parse_json(js, video_id, transform_source=functools.partial(js_to_json, vars=args), fatal=fatal)
|
||||
return traverse_obj(ret, traverse) or {}
|
||||
|
||||
@staticmethod
|
||||
def _hidden_inputs(html):
|
||||
html = re.sub(r'<!--(?:(?!<!--).)*-->', '', html)
|
||||
@ -2564,8 +2495,7 @@ class InfoExtractor(object):
|
||||
return f
|
||||
return {}
|
||||
|
||||
def _media_formats(src, cur_media_type, type_info=None):
|
||||
type_info = type_info or {}
|
||||
def _media_formats(src, cur_media_type, type_info={}):
|
||||
full_url = absolute_url(src)
|
||||
ext = type_info.get('ext') or determine_ext(full_url)
|
||||
if ext == 'm3u8':
|
||||
@ -2583,7 +2513,6 @@ class InfoExtractor(object):
|
||||
formats = [{
|
||||
'url': full_url,
|
||||
'vcodec': 'none' if cur_media_type == 'audio' else None,
|
||||
'ext': ext,
|
||||
}]
|
||||
return is_plain_url, formats
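One reading of the signature change above, where type_info={} becomes an explicit None check: a mutable default is shared across calls, so any mutation would leak state between invocations. A toy demonstration follows (not part of the diff; the function names are invented):

# illustration only: why `def f(key, acc={})` is risky - the default dict persists
def risky(key, acc={}):
    acc[key] = True
    return acc

def safe(key, acc=None):
    acc = acc or {}
    acc[key] = True
    return acc

print(risky('a'), risky('b'))  # {'a': True, 'b': True} {'a': True, 'b': True} - shared
print(safe('a'), safe('b'))    # {'a': True} {'b': True}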
|
||||
|
||||
@ -2592,7 +2521,7 @@ class InfoExtractor(object):
|
||||
# so we wll include them right here (see
|
||||
# https://www.ampproject.org/docs/reference/components/amp-video)
|
||||
# For dl8-* tags see https://delight-vr.com/documentation/dl8-video/
|
||||
_MEDIA_TAG_NAME_RE = r'(?:(?:amp|dl8(?:-live)?)-)?(video(?:-js)?|audio)'
|
||||
_MEDIA_TAG_NAME_RE = r'(?:(?:amp|dl8(?:-live)?)-)?(video|audio)'
|
||||
media_tags = [(media_tag, media_tag_name, media_type, '')
|
||||
for media_tag, media_tag_name, media_type
|
||||
in re.findall(r'(?s)(<(%s)[^>]*/>)' % _MEDIA_TAG_NAME_RE, webpage)]
|
||||
@ -2610,8 +2539,7 @@ class InfoExtractor(object):
|
||||
media_attributes = extract_attributes(media_tag)
|
||||
src = strip_or_none(media_attributes.get('src'))
|
||||
if src:
|
||||
f = parse_content_type(media_attributes.get('type'))
|
||||
_, formats = _media_formats(src, media_type, f)
|
||||
_, formats = _media_formats(src, media_type)
|
||||
media_info['formats'].extend(formats)
|
||||
media_info['thumbnail'] = absolute_url(media_attributes.get('poster'))
|
||||
if media_content:
|
||||
|
@ -1,204 +0,0 @@
|
||||
# coding: utf-8
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import re
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..compat import (
|
||||
compat_str,
|
||||
)
|
||||
from ..utils import (
|
||||
determine_ext,
|
||||
extract_attributes,
|
||||
int_or_none,
|
||||
merge_dicts,
|
||||
traverse_obj,
|
||||
url_or_none,
|
||||
variadic,
|
||||
)
|
||||
|
||||
|
||||
class DLFBaseIE(InfoExtractor):
|
||||
_VALID_URL_BASE = r'https?://(?:www\.)?deutschlandfunk\.de/'
|
||||
_BUTTON_REGEX = r'(<button[^>]+alt="Anhören"[^>]+data-audio-diraid[^>]*>)'
|
||||
|
||||
def _parse_button_attrs(self, button, audio_id=None):
|
||||
attrs = extract_attributes(button)
|
||||
audio_id = audio_id or attrs['data-audio-diraid']
|
||||
|
||||
url = traverse_obj(
|
||||
attrs, 'data-audio-download-src', 'data-audio', 'data-audioreference',
|
||||
'data-audio-src', expected_type=url_or_none)
|
||||
ext = determine_ext(url)
|
||||
formats = (self._extract_m3u8_formats(url, audio_id, fatal=False)
|
||||
if ext == 'm3u8' else [{'url': url, 'ext': ext, 'vcodec': 'none'}])
|
||||
self._sort_formats(formats)
|
||||
|
||||
def traverse_attrs(path):
|
||||
path = list(variadic(path))
|
||||
t = path.pop() if callable(path[-1]) else None
|
||||
return traverse_obj(attrs, path, expected_type=t, get_all=False)
|
||||
|
||||
def txt_or_none(v, default=None):
|
||||
return default if v is None else (compat_str(v).strip() or default)
|
||||
|
||||
return merge_dicts(*reversed([{
|
||||
'id': audio_id,
|
||||
# 'extractor_key': DLFIE.ie_key(),
|
||||
# 'extractor': DLFIE.IE_NAME,
|
||||
'formats': formats,
|
||||
}, dict((k, traverse_attrs(v)) for k, v in {
|
||||
'title': (('data-audiotitle', 'data-audio-title', 'data-audio-download-tracking-title'), txt_or_none),
|
||||
'duration': (('data-audioduration', 'data-audio-duration'), int_or_none),
|
||||
'thumbnail': ('data-audioimage', url_or_none),
|
||||
'uploader': 'data-audio-producer',
|
||||
'series': 'data-audio-series',
|
||||
'channel': 'data-audio-origin-site-name',
|
||||
'webpage_url': ('data-audio-download-tracking-path', url_or_none),
|
||||
}.items())]))
|
||||
|
||||
|
||||
class DLFIE(DLFBaseIE):
|
||||
IE_NAME = 'dlf'
|
||||
_VALID_URL = DLFBaseIE._VALID_URL_BASE + r'[\w-]+-dlf-(?P<id>[\da-f]{8})-100\.html'
|
||||
_TESTS = [
|
||||
# Audio as an HLS stream
|
||||
{
|
||||
'url': 'https://www.deutschlandfunk.de/tanz-der-saiteninstrumente-das-wild-strings-trio-aus-slowenien-dlf-03a3eb19-100.html',
|
||||
'info_dict': {
|
||||
'id': '03a3eb19',
|
||||
'title': r're:Tanz der Saiteninstrumente [-/] Das Wild Strings Trio aus Slowenien',
|
||||
'ext': 'm4a',
|
||||
'duration': 3298,
|
||||
'thumbnail': 'https://assets.deutschlandfunk.de/FALLBACK-IMAGE-AUDIO/512x512.png?t=1603714364673',
|
||||
'uploader': 'Deutschlandfunk',
|
||||
'series': 'On Stage',
|
||||
'channel': 'deutschlandfunk'
|
||||
},
|
||||
'params': {
|
||||
'skip_download': 'm3u8'
|
||||
},
|
||||
'skip': 'This webpage no longer exists'
|
||||
}, {
|
||||
'url': 'https://www.deutschlandfunk.de/russische-athleten-kehren-zurueck-auf-die-sportbuehne-ein-gefaehrlicher-tueroeffner-dlf-d9cc1856-100.html',
|
||||
'info_dict': {
|
||||
'id': 'd9cc1856',
|
||||
'title': 'Russische Athleten kehren zurück auf die Sportbühne: Ein gefährlicher Türöffner',
|
||||
'ext': 'mp3',
|
||||
'duration': 291,
|
||||
'thumbnail': 'https://assets.deutschlandfunk.de/FALLBACK-IMAGE-AUDIO/512x512.png?t=1603714364673',
|
||||
'uploader': 'Deutschlandfunk',
|
||||
'series': 'Kommentare und Themen der Woche',
|
||||
'channel': 'deutschlandfunk'
|
||||
}
|
||||
},
|
||||
]
|
||||
|
||||
def _real_extract(self, url):
|
||||
audio_id = self._match_id(url)
|
||||
webpage = self._download_webpage(url, audio_id)
|
||||
|
||||
return self._parse_button_attrs(
|
||||
self._search_regex(self._BUTTON_REGEX, webpage, 'button'), audio_id)
|
||||
|
||||
|
||||
class DLFCorpusIE(DLFBaseIE):
|
||||
IE_NAME = 'dlf:corpus'
|
||||
IE_DESC = 'DLF Multi-feed Archives'
|
||||
_VALID_URL = DLFBaseIE._VALID_URL_BASE + r'(?P<id>(?![\w-]+-dlf-[\da-f]{8})[\w-]+-\d+)\.html'
|
||||
_TESTS = [
|
||||
# Recorded news broadcast with referrals to related broadcasts
|
||||
{
|
||||
'url': 'https://www.deutschlandfunk.de/fechten-russland-belarus-ukraine-protest-100.html',
|
||||
'info_dict': {
|
||||
'id': 'fechten-russland-belarus-ukraine-protest-100',
|
||||
'title': r're:Wiederzulassung als neutrale Athleten [-/] Was die Rückkehr russischer und belarussischer Sportler beim Fechten bedeutet',
|
||||
'description': 'md5:91340aab29c71aa7518ad5be13d1e8ad'
|
||||
},
|
||||
'playlist_mincount': 5,
|
||||
'playlist': [{
|
||||
'info_dict': {
|
||||
'id': '1fc5d64a',
|
||||
'title': r're:Wiederzulassung als neutrale Athleten [-/] Was die Rückkehr russischer und belarussischer Sportler beim Fechten bedeutet',
|
||||
'ext': 'mp3',
|
||||
'duration': 252,
|
||||
'thumbnail': 'https://assets.deutschlandfunk.de/aad16241-6b76-4a09-958b-96d0ee1d6f57/512x512.jpg?t=1679480020313',
|
||||
'uploader': 'Deutschlandfunk',
|
||||
'series': 'Sport',
|
||||
'channel': 'deutschlandfunk'
|
||||
}
|
||||
}, {
|
||||
'info_dict': {
|
||||
'id': '2ada145f',
|
||||
'title': r're:(?:Sportpolitik / )?Fechtverband votiert für Rückkehr russischer Athleten',
|
||||
'ext': 'mp3',
|
||||
'duration': 336,
|
||||
'thumbnail': 'https://assets.deutschlandfunk.de/FILE_93982766f7317df30409b8a184ac044a/512x512.jpg?t=1678547581005',
|
||||
'uploader': 'Deutschlandfunk',
|
||||
'series': 'Deutschlandfunk Nova',
|
||||
'channel': 'deutschlandfunk-nova'
|
||||
}
|
||||
}, {
|
||||
'info_dict': {
|
||||
'id': '5e55e8c9',
|
||||
'title': r're:Wiederzulassung von Russland und Belarus [-/] "Herumlavieren" des Fechter-Bundes sorgt für Unverständnis',
|
||||
'ext': 'mp3',
|
||||
'duration': 187,
|
||||
'thumbnail': 'https://assets.deutschlandfunk.de/a595989d-1ed1-4a2e-8370-b64d7f11d757/512x512.jpg?t=1679173825412',
|
||||
'uploader': 'Deutschlandfunk',
|
||||
'series': 'Sport am Samstag',
|
||||
'channel': 'deutschlandfunk'
|
||||
}
|
||||
}, {
|
||||
'info_dict': {
|
||||
'id': '47e1a096',
|
||||
'title': r're:Rückkehr Russlands im Fechten [-/] "Fassungslos, dass es einfach so passiert ist"',
|
||||
'ext': 'mp3',
|
||||
'duration': 602,
|
||||
'thumbnail': 'https://assets.deutschlandfunk.de/da4c494a-21cc-48b4-9cc7-40e09fd442c2/512x512.jpg?t=1678562155770',
|
||||
'uploader': 'Deutschlandfunk',
|
||||
'series': 'Sport am Samstag',
|
||||
'channel': 'deutschlandfunk'
|
||||
}
|
||||
}, {
|
||||
'info_dict': {
|
||||
'id': '5e55e8c9',
|
||||
'title': r're:Wiederzulassung von Russland und Belarus [-/] "Herumlavieren" des Fechter-Bundes sorgt für Unverständnis',
|
||||
'ext': 'mp3',
|
||||
'duration': 187,
|
||||
'thumbnail': 'https://assets.deutschlandfunk.de/a595989d-1ed1-4a2e-8370-b64d7f11d757/512x512.jpg?t=1679173825412',
|
||||
'uploader': 'Deutschlandfunk',
|
||||
'series': 'Sport am Samstag',
|
||||
'channel': 'deutschlandfunk'
|
||||
}
|
||||
}]
|
||||
},
|
||||
# Podcast feed with tag buttons, playlist count fluctuates
|
||||
{
|
||||
'url': 'https://www.deutschlandfunk.de/kommentare-und-themen-der-woche-100.html',
|
||||
'info_dict': {
|
||||
'id': 'kommentare-und-themen-der-woche-100',
|
||||
'title': 'Meinung - Kommentare und Themen der Woche',
|
||||
'description': 'md5:2901bbd65cd2d45e116d399a099ce5d5',
|
||||
},
|
||||
'playlist_mincount': 10,
|
||||
},
|
||||
# Podcast feed with no description
|
||||
{
|
||||
'url': 'https://www.deutschlandfunk.de/podcast-tolle-idee-100.html',
|
||||
'info_dict': {
|
||||
'id': 'podcast-tolle-idee-100',
|
||||
'title': 'Wissenschaftspodcast - Tolle Idee! - Was wurde daraus?',
|
||||
},
|
||||
'playlist_mincount': 11,
|
||||
},
|
||||
]
|
||||
|
||||
def _real_extract(self, url):
|
||||
playlist_id = self._match_id(url)
|
||||
webpage = self._download_webpage(url, playlist_id)
|
||||
|
||||
return self.playlist_result(
|
||||
map(self._parse_button_attrs, re.findall(self._BUTTON_REGEX, webpage)),
|
||||
playlist_id, self._html_search_meta(['og:title', 'twitter:title'], webpage, default=None),
|
||||
self._html_search_meta(['description', 'og:description', 'twitter:description'], webpage, default=None))
|
@ -227,7 +227,6 @@ from .ciscolive import (
|
||||
CiscoLiveSearchIE,
|
||||
)
|
||||
from .cjsw import CJSWIE
|
||||
from .clipchamp import ClipchampIE
|
||||
from .cliphunter import CliphunterIE
|
||||
from .clippit import ClippitIE
|
||||
from .cliprs import ClipRsIE
|
||||
@ -297,10 +296,6 @@ from .dbtv import DBTVIE
|
||||
from .dctp import DctpTvIE
|
||||
from .deezer import DeezerPlaylistIE
|
||||
from .democracynow import DemocracynowIE
|
||||
from .dlf import (
|
||||
DLFCorpusIE,
|
||||
DLFIE,
|
||||
)
|
||||
from .dfb import DFBIE
|
||||
from .dhm import DHMIE
|
||||
from .digg import DiggIE
|
||||
@ -450,13 +445,6 @@ from .gfycat import GfycatIE
|
||||
from .giantbomb import GiantBombIE
|
||||
from .giga import GigaIE
|
||||
from .glide import GlideIE
|
||||
from .globalplayer import (
|
||||
GlobalPlayerLiveIE,
|
||||
GlobalPlayerLivePlaylistIE,
|
||||
GlobalPlayerAudioIE,
|
||||
GlobalPlayerAudioEpisodeIE,
|
||||
GlobalPlayerVideoIE
|
||||
)
|
||||
from .globo import (
|
||||
GloboIE,
|
||||
GloboArticleIE,
|
||||
@ -988,10 +976,6 @@ from .pornhub import (
|
||||
from .pornotube import PornotubeIE
|
||||
from .pornovoisines import PornoVoisinesIE
|
||||
from .pornoxo import PornoXOIE
|
||||
from .pr0gramm import (
|
||||
Pr0grammIE,
|
||||
Pr0grammStaticIE,
|
||||
)
|
||||
from .puhutv import (
|
||||
PuhuTVIE,
|
||||
PuhuTVSerieIE,
|
||||
@ -1092,10 +1076,6 @@ from .rutube import (
|
||||
from .rutv import RUTVIE
|
||||
from .ruutu import RuutuIE
|
||||
from .ruv import RuvIE
|
||||
from .s4c import (
|
||||
S4CIE,
|
||||
S4CSeriesIE,
|
||||
)
|
||||
from .safari import (
|
||||
SafariIE,
|
||||
SafariApiIE,
|
||||
@ -1590,7 +1570,6 @@ from .weibo import (
|
||||
WeiboMobileIE
|
||||
)
|
||||
from .weiqitv import WeiqiTVIE
|
||||
from .whyp import WhypIE
|
||||
from .wistia import (
|
||||
WistiaIE,
|
||||
WistiaPlaylistIE,
|
||||
|
@ -1,273 +0,0 @@
|
||||
# coding: utf-8
|
||||
from __future__ import unicode_literals
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..utils import (
|
||||
clean_html,
|
||||
join_nonempty,
|
||||
merge_dicts,
|
||||
parse_duration,
|
||||
str_or_none,
|
||||
T,
|
||||
traverse_obj,
|
||||
unified_strdate,
|
||||
unified_timestamp,
|
||||
urlhandle_detect_ext,
|
||||
)
|
||||
|
||||
|
||||
class GlobalPlayerBaseIE(InfoExtractor):
|
||||
|
||||
def _get_page_props(self, url, video_id):
|
||||
webpage = self._download_webpage(url, video_id)
|
||||
return self._search_nextjs_data(webpage, video_id)['props']['pageProps']
|
||||
|
||||
def _request_ext(self, url, video_id):
|
||||
return urlhandle_detect_ext(self._request_webpage( # Server rejects HEAD requests
|
||||
url, video_id, note='Determining source extension'))
|
||||
|
||||
@staticmethod
|
||||
def _clean_desc(x):
|
||||
x = clean_html(x)
|
||||
if x:
|
||||
x = x.replace('\xa0', ' ')
|
||||
return x
|
||||
|
||||
def _extract_audio(self, episode, series):
|
||||
|
||||
return merge_dicts({
|
||||
'vcodec': 'none',
|
||||
}, traverse_obj(series, {
|
||||
'series': 'title',
|
||||
'series_id': 'id',
|
||||
'thumbnail': 'imageUrl',
|
||||
'uploader': 'itunesAuthor', # podcasts only
|
||||
}), traverse_obj(episode, {
|
||||
'id': 'id',
|
||||
'description': ('description', T(self._clean_desc)),
|
||||
'duration': ('duration', T(parse_duration)),
|
||||
'thumbnail': 'imageUrl',
|
||||
'url': 'streamUrl',
|
||||
'timestamp': (('pubDate', 'startDate'), T(unified_timestamp)),
|
||||
'title': 'title',
|
||||
}, get_all=False), rev=True)
|
||||
|
||||
|
||||
class GlobalPlayerLiveIE(GlobalPlayerBaseIE):
|
||||
_VALID_URL = r'https?://www\.globalplayer\.com/live/(?P<id>\w+)/\w+'
|
||||
_TESTS = [{
|
||||
'url': 'https://www.globalplayer.com/live/smoothchill/uk/',
|
||||
'info_dict': {
|
||||
'id': '2mx1E',
|
||||
'ext': 'aac',
|
||||
'display_id': 'smoothchill-uk',
|
||||
'title': 're:^Smooth Chill.+$',
|
||||
'thumbnail': 'https://herald.musicradio.com/media/f296ade8-50c9-4f60-911f-924e96873620.png',
|
||||
'description': 'Music To Chill To',
|
||||
# 'live_status': 'is_live',
|
||||
'is_live': True,
|
||||
},
|
||||
}, {
|
||||
# national station
|
||||
'url': 'https://www.globalplayer.com/live/heart/uk/',
|
||||
'info_dict': {
|
||||
'id': '2mwx4',
|
||||
'ext': 'aac',
|
||||
'description': 'turn up the feel good!',
|
||||
'thumbnail': 'https://herald.musicradio.com/media/49b9e8cb-15bf-4bf2-8c28-a4850cc6b0f3.png',
|
||||
# 'live_status': 'is_live',
|
||||
'is_live': True,
|
||||
'title': 're:^Heart UK.+$',
|
||||
'display_id': 'heart-uk',
|
||||
},
|
||||
}, {
|
||||
# regional variation
|
||||
'url': 'https://www.globalplayer.com/live/heart/london/',
|
||||
'info_dict': {
|
||||
'id': 'AMqg',
|
||||
'ext': 'aac',
|
||||
'thumbnail': 'https://herald.musicradio.com/media/49b9e8cb-15bf-4bf2-8c28-a4850cc6b0f3.png',
|
||||
'title': 're:^Heart London.+$',
|
||||
# 'live_status': 'is_live',
|
||||
'is_live': True,
|
||||
'display_id': 'heart-london',
|
||||
'description': 'turn up the feel good!',
|
||||
},
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
video_id = self._match_id(url)
|
||||
station = self._get_page_props(url, video_id)['station']
|
||||
stream_url = station['streamUrl']
|
||||
|
||||
return merge_dicts({
|
||||
'id': station['id'],
|
||||
'display_id': (
|
||||
join_nonempty('brandSlug', 'slug', from_dict=station)
|
||||
or station.get('legacyStationPrefix')),
|
||||
'url': stream_url,
|
||||
'ext': self._request_ext(stream_url, video_id),
|
||||
'vcodec': 'none',
|
||||
'is_live': True,
|
||||
}, {
|
||||
'title': self._live_title(traverse_obj(
|
||||
station, (('name', 'brandName'), T(str_or_none)),
|
||||
get_all=False)),
|
||||
}, traverse_obj(station, {
|
||||
'description': 'tagline',
|
||||
'thumbnail': 'brandLogo',
|
||||
}), rev=True)
|
||||
|
||||
|
||||
class GlobalPlayerLivePlaylistIE(GlobalPlayerBaseIE):
|
||||
_VALID_URL = r'https?://www\.globalplayer\.com/playlists/(?P<id>\w+)'
|
||||
_TESTS = [{
|
||||
# "live playlist"
|
||||
'url': 'https://www.globalplayer.com/playlists/8bLk/',
|
||||
'info_dict': {
|
||||
'id': '8bLk',
|
||||
'ext': 'aac',
|
||||
# 'live_status': 'is_live',
|
||||
'is_live': True,
|
||||
'description': r're:(?s).+\bclassical\b.+\bClassic FM Hall [oO]f Fame\b',
|
||||
'thumbnail': 'https://images.globalplayer.com/images/551379?width=450&signature=oMLPZIoi5_dBSHnTMREW0Xg76mA=',
|
||||
'title': 're:Classic FM Hall of Fame.+$'
|
||||
},
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
video_id = self._match_id(url)
|
||||
station = self._get_page_props(url, video_id)['playlistData']
|
||||
stream_url = station['streamUrl']
|
||||
|
||||
return merge_dicts({
|
||||
'id': video_id,
|
||||
'url': stream_url,
|
||||
'ext': self._request_ext(stream_url, video_id),
|
||||
'vcodec': 'none',
|
||||
'is_live': True,
|
||||
}, traverse_obj(station, {
|
||||
'title': 'title',
|
||||
'description': ('description', T(self._clean_desc)),
|
||||
'thumbnail': 'image',
|
||||
}), rev=True)
|
||||
|
||||
|
||||
class GlobalPlayerAudioIE(GlobalPlayerBaseIE):
|
||||
_VALID_URL = r'https?://www\.globalplayer\.com/(?:(?P<podcast>podcasts)/|catchup/\w+/\w+/)(?P<id>\w+)/?(?:$|[?#])'
|
||||
_TESTS = [{
|
||||
# podcast
|
||||
'url': 'https://www.globalplayer.com/podcasts/42KuaM/',
|
||||
'playlist_mincount': 5,
|
||||
'info_dict': {
|
||||
'id': '42KuaM',
|
||||
'title': 'Filthy Ritual',
|
||||
'thumbnail': 'md5:60286e7d12d795bd1bbc9efc6cee643e',
|
||||
'categories': ['Society & Culture', 'True Crime'],
|
||||
'uploader': 'Global',
|
||||
'description': r're:(?s).+\bscam\b.+?\bseries available now\b',
|
||||
},
|
||||
}, {
|
||||
# radio catchup
|
||||
'url': 'https://www.globalplayer.com/catchup/lbc/uk/46vyD7z/',
|
||||
        'playlist_mincount': 2,
        'info_dict': {
            'id': '46vyD7z',
            'description': 'Nick Ferrari At Breakfast is Leading Britain\'s Conversation.',
            'title': 'Nick Ferrari',
            'thumbnail': 'md5:4df24d8a226f5b2508efbcc6ae874ebf',
        },
    }]

    def _real_extract(self, url):
        video_id, podcast = self._match_valid_url(url).group('id', 'podcast')
        props = self._get_page_props(url, video_id)
        series = props['podcastInfo'] if podcast else props['catchupInfo']

        return merge_dicts({
            '_type': 'playlist',
            'id': video_id,
            'entries': [self._extract_audio(ep, series) for ep in traverse_obj(
                series, ('episodes', lambda _, v: v['id'] and v['streamUrl']))],
            'categories': traverse_obj(series, ('categories', Ellipsis, 'name')) or None,
        }, traverse_obj(series, {
            'description': ('description', T(self._clean_desc)),
            'thumbnail': 'imageUrl',
            'title': 'title',
            'uploader': 'itunesAuthor', # podcasts only
        }), rev=True)


class GlobalPlayerAudioEpisodeIE(GlobalPlayerBaseIE):
    _VALID_URL = r'https?://www\.globalplayer\.com/(?:(?P<podcast>podcasts)|catchup/\w+/\w+)/episodes/(?P<id>\w+)/?(?:$|[?#])'
    _TESTS = [{
        # podcast
        'url': 'https://www.globalplayer.com/podcasts/episodes/7DrfNnE/',
        'info_dict': {
            'id': '7DrfNnE',
            'ext': 'mp3',
            'title': 'Filthy Ritual - Trailer',
            'description': 'md5:1f1562fd0f01b4773b590984f94223e0',
            'thumbnail': 'md5:60286e7d12d795bd1bbc9efc6cee643e',
            'duration': 225.0,
            'timestamp': 1681254900,
            'series': 'Filthy Ritual',
            'series_id': '42KuaM',
            'upload_date': '20230411',
            'uploader': 'Global',
        },
    }, {
        # radio catchup
        'url': 'https://www.globalplayer.com/catchup/lbc/uk/episodes/2zGq26Vcv1fCWhddC4JAwETXWe/',
        'only_matching': True,
        # expired: refresh the details with a current show for a full test
        'info_dict': {
            'id': '2zGq26Vcv1fCWhddC4JAwETXWe',
            'ext': 'm4a',
            'timestamp': 1682056800,
            'series': 'Nick Ferrari',
            'thumbnail': 'md5:4df24d8a226f5b2508efbcc6ae874ebf',
            'upload_date': '20230421',
            'series_id': '46vyD7z',
            'description': 'Nick Ferrari At Breakfast is Leading Britain\'s Conversation.',
            'title': 'Nick Ferrari',
            'duration': 10800.0,
        },
    }]

    def _real_extract(self, url):
        video_id, podcast = self._match_valid_url(url).group('id', 'podcast')
        props = self._get_page_props(url, video_id)
        episode = props['podcastEpisode'] if podcast else props['catchupEpisode']

        return self._extract_audio(
            episode, traverse_obj(episode, 'podcast', 'show', expected_type=dict) or {})


class GlobalPlayerVideoIE(GlobalPlayerBaseIE):
    _VALID_URL = r'https?://www\.globalplayer\.com/videos/(?P<id>\w+)'
    _TESTS = [{
        'url': 'https://www.globalplayer.com/videos/2JsSZ7Gm2uP/',
        'info_dict': {
            'id': '2JsSZ7Gm2uP',
            'ext': 'mp4',
            'description': 'md5:6a9f063c67c42f218e42eee7d0298bfd',
            'thumbnail': 'md5:d4498af48e15aae4839ce77b97d39550',
            'upload_date': '20230420',
            'title': 'Treble Malakai Bayoh sings a sublime Handel aria at Classic FM Live',
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        meta = self._get_page_props(url, video_id)['videoData']

        return merge_dicts({
            'id': video_id,
        }, traverse_obj(meta, {
            'url': 'url',
            'thumbnail': ('image', 'url'),
            'title': 'title',
            'upload_date': ('publish_date', T(unified_strdate)),
            'description': 'description',
        }), rev=True)
@@ -59,7 +59,7 @@ class ITVBaseIE(InfoExtractor):

    @staticmethod
    def _vanilla_ua_header():
        return {'User-Agent': 'Mozilla/5.0'}
        return {'User-agent': 'Mozilla/5.0'}

    def _download_webpage_handle(self, url, video_id, *args, **kwargs):
        # specialised to (a) use vanilla UA (b) detect geo-block
@@ -69,7 +69,7 @@ class ITVBaseIE(InfoExtractor):
                'user_agent' not in params
                and not any(re.match(r'(?i)user-agent\s*:', h)
                            for h in (params.get('headers') or []))
                and 'User-Agent' not in (kwargs.get('headers') or {})):
                and 'User-agent' not in (kwargs.get('headers') or {})):

            kwargs.setdefault('headers', {})
            kwargs['headers'] = self._vanilla_ua_header()
@@ -7,7 +7,6 @@ import subprocess
import tempfile

from ..compat import (
    compat_open as open,
    compat_urlparse,
    compat_kwargs,
)
@@ -1,124 +0,0 @@
# coding: utf-8

from __future__ import unicode_literals

from functools import partial as partial_f

from .common import InfoExtractor
from ..utils import (
    float_or_none,
    merge_dicts,
    T,
    traverse_obj,
    txt_or_none,
    url_or_none,
)


class S4CIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?s4c\.cymru/clic/programme/(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://www.s4c.cymru/clic/programme/861362209',
        'info_dict': {
            'id': '861362209',
            'ext': 'mp4',
            'title': 'Y Swn',
            'description': 'md5:f7681a30e4955b250b3224aa9fe70cf0',
            'duration': 5340,
            'thumbnail': 'https://www.s4c.cymru/amg/1920x1080/Y_Swn_2023S4C_099_ii.jpg',
        },
    }, {
        'url': 'https://www.s4c.cymru/clic/programme/856636948',
        'info_dict': {
            'id': '856636948',
            'ext': 'mp4',
            'title': 'Am Dro',
            'duration': 2880,
            'description': 'md5:100d8686fc9a632a0cb2db52a3433ffe',
            'thumbnail': 'https://www.s4c.cymru/amg/1920x1080/Am_Dro_2022-23S4C_P6_4005.jpg',
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        details = self._download_json(
            'https://www.s4c.cymru/df/full_prog_details',
            video_id, query={
                'lang': 'e',
                'programme_id': video_id,
            }, fatal=False)

        player_config = self._download_json(
            'https://player-api.s4c-cdn.co.uk/player-configuration/prod', video_id, query={
                'programme_id': video_id,
                'signed': '0',
                'lang': 'en',
                'mode': 'od',
                'appId': 'clic',
                'streamName': '',
            }, note='Downloading player config JSON')

        m3u8_url = self._download_json(
            'https://player-api.s4c-cdn.co.uk/streaming-urls/prod', video_id, query={
                'mode': 'od',
                'application': 'clic',
                'region': 'WW',
                'extra': 'false',
                'thirdParty': 'false',
                'filename': player_config['filename'],
            }, note='Downloading streaming urls JSON')['hls']
        formats = self._extract_m3u8_formats(m3u8_url, video_id, 'mp4', m3u8_id='hls', entry_protocol='m3u8_native')
        self._sort_formats(formats)

        subtitles = {}
        for sub in traverse_obj(player_config, ('subtitles', lambda _, v: url_or_none(v['0']))):
            subtitles.setdefault(sub.get('3', 'en'), []).append({
                'url': sub['0'],
                'name': sub.get('1'),
            })

        return merge_dicts({
            'id': video_id,
            'formats': formats,
            'subtitles': subtitles,
            'thumbnail': url_or_none(player_config.get('poster')),
        }, traverse_obj(details, ('full_prog_details', 0, {
            'title': (('programme_title', 'series_title'), T(txt_or_none)),
            'description': ('full_billing', T(txt_or_none)),
            'duration': ('duration', T(partial_f(float_or_none, invscale=60))),
        }), get_all=False),
            rev=True)


class S4CSeriesIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?s4c\.cymru/clic/series/(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://www.s4c.cymru/clic/series/864982911',
        'playlist_mincount': 6,
        'info_dict': {
            'id': '864982911',
            'title': 'Iaith ar Daith',
        },
    }, {
        'url': 'https://www.s4c.cymru/clic/series/866852587',
        'playlist_mincount': 8,
        'info_dict': {
            'id': '866852587',
            'title': 'FFIT Cymru',
        },
    }]

    def _real_extract(self, url):
        series_id = self._match_id(url)
        series_details = self._download_json(
            'https://www.s4c.cymru/df/series_details', series_id, query={
                'lang': 'e',
                'series_id': series_id,
                'show_prog_in_series': 'Y'
            }, note='Downloading series details JSON')

        return self.playlist_result(
            (self.url_result('https://www.s4c.cymru/clic/programme/' + episode_id, S4CIE, episode_id)
             for episode_id in traverse_obj(series_details, ('other_progs_in_series', Ellipsis, 'id'))),
            playlist_id=series_id, playlist_title=traverse_obj(
                series_details, ('full_prog_details', 0, 'series_title', T(txt_or_none))))
@@ -1,55 +0,0 @@
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    float_or_none,
    merge_dicts,
    str_or_none,
    T,
    traverse_obj,
    url_or_none,
)


class WhypIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?whyp\.it/tracks/(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://www.whyp.it/tracks/18337/home-page-example-track-b4kq7',
        'md5': 'c1187b42ebf8605284e3dc92aeb33d16',
        'info_dict': {
            'url': 'https://cdn.whyp.it/50eb17cc-e9ff-4e18-b89b-dc9206a95cb1.mp3',
            'id': '18337',
            'title': 'Home Page Example Track',
            'description': r're:(?s).+\bexample track\b',
            'ext': 'mp3',
            'duration': 52.82,
            'uploader': 'Brad',
            'uploader_id': '1',
            'thumbnail': 'https://cdn.whyp.it/a537bb36-3373-4c61-96c8-27fc1b2f427a.jpg',
        },
    }, {
        'url': 'https://www.whyp.it/tracks/18337',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        unique_id = self._match_id(url)
        webpage = self._download_webpage(url, unique_id)
        data = self._search_nuxt_data(webpage, unique_id)['rawTrack']

        return merge_dicts({
            'url': data['audio_url'],
            'id': unique_id,
        }, traverse_obj(data, {
            'title': 'title',
            'description': 'description',
            'duration': ('duration', T(float_or_none)),
            'uploader': ('user', 'username'),
            'uploader_id': ('user', 'id', T(str_or_none)),
            'thumbnail': ('artwork_url', T(url_or_none)),
        }), {
            'ext': 'mp3',
            'vcodec': 'none',
            'http_headers': {'Referer': 'https://whyp.it/'},
        }, rev=True)
@@ -19,7 +19,6 @@ from ..compat import (
    compat_urllib_parse_parse_qs as compat_parse_qs,
    compat_urllib_parse_unquote_plus,
    compat_urllib_parse_urlparse,
    compat_zip as zip,
)
from ..jsinterp import JSInterpreter
from ..utils import (
@@ -31,9 +30,7 @@ from ..utils import (
    extract_attributes,
    get_element_by_attribute,
    int_or_none,
    join_nonempty,
    js_to_json,
    LazyList,
    merge_dicts,
    mimetype2ext,
    parse_codecs,
@@ -46,7 +43,6 @@ from ..utils import (
    str_to_int,
    traverse_obj,
    try_get,
    txt_or_none,
    unescapeHTML,
    unified_strdate,
    unsmuggle_url,
@@ -448,7 +444,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
            extract_attributes(self._search_regex(
                r'''(?s)(<link\b[^>]+\bitemprop\s*=\s*("|')%s\2[^>]*>)'''
                % re.escape(var_name),
                get_element_by_attribute('itemprop', 'author', webpage or '') or '',
                get_element_by_attribute('itemprop', 'author', webpage) or '',
                'author link', default='')),
            paths[var_name][0])

@@ -1558,21 +1554,22 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
            r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
            r'\bm=(?P<sig>[a-zA-Z0-9$]{2,})\(decodeURIComponent\(h\.s\)\)',
            r'\bc&&\(c=(?P<sig>[a-zA-Z0-9$]{2,})\(decodeURIComponent\(c\)\)',
            r'(?:\b|[^a-zA-Z0-9$])(?P<sig>[a-zA-Z0-9$]{2,})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)(?:;[a-zA-Z0-9$]{2}\.[a-zA-Z0-9$]{2}\(a,\d+\))?',
            r'(?:\b|[^a-zA-Z0-9$])(?P<sig>[a-zA-Z0-9$]{2,})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\);[a-zA-Z0-9$]{2}\.[a-zA-Z0-9$]{2}\(a,\d+\)',
            r'(?:\b|[^a-zA-Z0-9$])(?P<sig>[a-zA-Z0-9$]{2,})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)',
            r'(?P<sig>[a-zA-Z0-9$]+)\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)',
            # Obsolete patterns
            r'("|\')signature\1\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
            r'(["\'])signature\1\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
            r'\.sig\|\|(?P<sig>[a-zA-Z0-9$]+)\(',
            r'yt\.akamaized\.net/\)\s*\|\|\s*.*?\s*[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?:encodeURIComponent\s*\()?\s*(?P<sig>[a-zA-Z0-9$]+)\(',
            r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
            r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
            r'\bc\s*&&\s*a\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
            r'\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
            r'\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\('),
            jscode, 'Initial JS player signature function name', group='sig')

        jsi = JSInterpreter(jscode)

        initial_function = jsi.extract_function(funcname)

        return lambda s: initial_function([s])

    def _decrypt_signature(self, s, video_id, player_url):
@@ -1623,22 +1620,15 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
        nfunc, idx = re.match(target, nfunc_and_idx).group('nfunc', 'idx')
        if not idx:
            return nfunc

        VAR_RE_TMPL = r'var\s+%s\s*=\s*(?P<name>\[(?P<alias>%s)\])[;,]'
        note = 'Initial JS player n function {0} (%s[%s])' % (nfunc, idx)

        def search_function_code(needle, group):
            return self._search_regex(
                VAR_RE_TMPL % (re.escape(nfunc), needle), jscode,
                note.format(group), group=group)

        if int_or_none(idx) == 0:
            real_nfunc = search_function_code(r'[a-zA-Z_$][\w$]*', group='alias')
            real_nfunc = self._search_regex(
                r'var %s\s*=\s*\[([a-zA-Z_$][\w$]*)\];' % (re.escape(nfunc), ), jscode,
                'Initial JS player n function alias ({nfunc}[{idx}])'.format(**locals()))
            if real_nfunc:
                return real_nfunc
        return self._parse_json(
            search_function_code('.+?', group='name'),
            nfunc, transform_source=js_to_json)[int(idx)]
        return self._parse_json(self._search_regex(
            r'var %s\s*=\s*(\[.+?\]);' % (re.escape(nfunc), ), jscode,
            'Initial JS player n function name ({nfunc}[{idx}])'.format(**locals())), nfunc, transform_source=js_to_json)[int(idx)]

    def _extract_n_function(self, video_id, player_url):
        player_id = self._extract_player_info(player_url)
@@ -1996,19 +1986,9 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
        itags = []
        itag_qualities = {}
        q = qualities(['tiny', 'small', 'medium', 'large', 'hd720', 'hd1080', 'hd1440', 'hd2160', 'hd2880', 'highres'])
        CHUNK_SIZE = 10 << 20

        streaming_data = player_response.get('streamingData') or {}
        streaming_formats = streaming_data.get('formats') or []
        streaming_formats.extend(streaming_data.get('adaptiveFormats') or [])

        def build_fragments(f):
            return LazyList({
                'url': update_url_query(f['url'], {
                    'range': '{0}-{1}'.format(range_start, min(range_start + CHUNK_SIZE - 1, f['filesize']))
                })
            } for range_start in range(0, f['filesize'], CHUNK_SIZE))

        for fmt in streaming_formats:
            if fmt.get('targetDurationSec') or fmt.get('drmFamilies'):
                continue
@@ -2061,18 +2041,28 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
            if mobj:
                dct['ext'] = mimetype2ext(mobj.group(1))
                dct.update(parse_codecs(mobj.group(2)))
            single_stream = 'none' in (dct.get(c) for c in ('acodec', 'vcodec'))
            if single_stream and dct.get('ext'):
                dct['container'] = dct['ext'] + '_dash'
            if single_stream or itag == '17':
            no_audio = dct.get('acodec') == 'none'
            no_video = dct.get('vcodec') == 'none'
            if no_audio:
                dct['vbr'] = tbr
            if no_video:
                dct['abr'] = tbr
            if no_audio or no_video:
                CHUNK_SIZE = 10 << 20
                # avoid Youtube throttling
                dct.update({
                    'protocol': 'http_dash_segments',
                    'fragments': build_fragments(dct),
                    'fragments': [{
                        'url': update_url_query(dct['url'], {
                            'range': '{0}-{1}'.format(range_start, min(range_start + CHUNK_SIZE - 1, dct['filesize']))
                        })
                    } for range_start in range(0, dct['filesize'], CHUNK_SIZE)]
                } if dct['filesize'] else {
                    'downloader_options': {'http_chunk_size': CHUNK_SIZE} # No longer useful?
                })

                if dct.get('ext'):
                    dct['container'] = dct['ext'] + '_dash'
            formats.append(dct)

        hls_manifest_url = streaming_data.get('hlsManifestUrl')
@@ -2619,17 +2609,6 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
            'uploader_id': '@lexwill718',
        },
        'playlist_mincount': 75,
    }, {
        # Releases tab
        'url': 'https://www.youtube.com/@daftpunk/releases',
        'info_dict': {
            'id': 'UC_kRDKYrUlrbtrSiyu5Tflg',
            'title': 'Daft Punk - Releases',
            'description': 'Daft Punk (1993 - 2021) - Official YouTube Channel',
            'uploader_id': '@daftpunk',
            'uploader': 'Daft Punk',
        },
        'playlist_mincount': 36,
    }, {
        'url': 'https://invidio.us/channel/UCmlqkdCBesrv2Lak1mF_MxA',
        'only_matching': True,
@@ -2844,12 +2823,6 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
                continue
            return renderer

    @staticmethod
    def _get_text(r, k):
        return traverse_obj(
            r, (k, 'runs', 0, 'text'), (k, 'simpleText'),
            expected_type=txt_or_none)

    def _grid_entries(self, grid_renderer):
        for item in grid_renderer['items']:
            if not isinstance(item, dict):
@@ -2857,7 +2830,9 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
            renderer = self._extract_grid_item_renderer(item)
            if not isinstance(renderer, dict):
                continue
            title = self._get_text(renderer, 'title')
            title = try_get(
                renderer, (lambda x: x['title']['runs'][0]['text'],
                           lambda x: x['title']['simpleText']), compat_str)
            # playlist
            playlist_id = renderer.get('playlistId')
            if playlist_id:
@@ -2874,7 +2849,8 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
            # channel
            channel_id = renderer.get('channelId')
            if channel_id:
                title = self._get_text(renderer, 'title')
                title = try_get(
                    renderer, lambda x: x['title']['simpleText'], compat_str)
                yield self.url_result(
                    'https://www.youtube.com/channel/%s' % channel_id,
                    ie=YoutubeTabIE.ie_key(), video_title=title)
@@ -2983,26 +2959,15 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):

    def _rich_grid_entries(self, contents):
        for content in contents:
            content = traverse_obj(
                content, ('richItemRenderer', 'content'),
                expected_type=dict) or {}
            video_renderer = traverse_obj(
                content, 'videoRenderer', 'reelItemRenderer',
                expected_type=dict)
            video_renderer = try_get(
                content,
                (lambda x: x['richItemRenderer']['content']['videoRenderer'],
                 lambda x: x['richItemRenderer']['content']['reelItemRenderer']),
                dict)
            if video_renderer:
                entry = self._video_entry(video_renderer)
                if entry:
                    yield entry
            # playlist
            renderer = traverse_obj(
                content, 'playlistRenderer', expected_type=dict) or {}
            title = self._get_text(renderer, 'title')
            playlist_id = renderer.get('playlistId')
            if playlist_id:
                yield self.url_result(
                    'https://www.youtube.com/playlist?list=%s' % playlist_id,
                    ie=YoutubeTabIE.ie_key(), video_id=playlist_id,
                    video_title=title)

    @staticmethod
    def _build_continuation_query(continuation, ctp=None):
@@ -3107,7 +3072,6 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
                return
            for entry in self._rich_grid_entries(rich_grid_renderer.get('contents') or []):
                yield entry

            continuation = self._extract_continuation(rich_grid_renderer)

        ytcfg = self._extract_ytcfg(item_id, webpage)
@@ -3250,41 +3214,50 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
            uploader['channel'] = uploader['uploader']
        return uploader

    @classmethod
    def _extract_alert(cls, data):
    @staticmethod
    def _extract_alert(data):
        alerts = []
        for alert in traverse_obj(data, ('alerts', Ellipsis), expected_type=dict):
            alert_text = traverse_obj(
                alert, (None, lambda x: x['alertRenderer']['text']), get_all=False)
        for alert in try_get(data, lambda x: x['alerts'], list) or []:
            if not isinstance(alert, dict):
                continue
            alert_text = try_get(
                alert, lambda x: x['alertRenderer']['text'], dict)
            if not alert_text:
                continue
            text = cls._get_text(alert_text, 'text')
            text = try_get(
                alert_text,
                (lambda x: x['simpleText'], lambda x: x['runs'][0]['text']),
                compat_str)
            if text:
                alerts.append(text)
        return '\n'.join(alerts)

    def _extract_from_tabs(self, item_id, webpage, data, tabs):
        selected_tab = self._extract_selected_tab(tabs)
        renderer = traverse_obj(data, ('metadata', 'channelMetadataRenderer'),
                                expected_type=dict) or {}
        renderer = try_get(
            data, lambda x: x['metadata']['channelMetadataRenderer'], dict)
        playlist_id = item_id
        title = description = None
        if renderer:
            channel_title = txt_or_none(renderer.get('title')) or item_id
            tab_title = txt_or_none(selected_tab.get('title'))
            title = join_nonempty(
                channel_title or item_id, tab_title,
                txt_or_none(selected_tab.get('expandedText')),
                delim=' - ')
            description = txt_or_none(renderer.get('description'))
            playlist_id = txt_or_none(renderer.get('externalId')) or playlist_id
            channel_title = renderer.get('title') or item_id
            tab_title = selected_tab.get('title')
            title = channel_title or item_id
            if tab_title:
                title += ' - %s' % tab_title
            if selected_tab.get('expandedText'):
                title += ' - %s' % selected_tab['expandedText']
            description = renderer.get('description')
            playlist_id = renderer.get('externalId')
        else:
            renderer = traverse_obj(data,
                                    ('metadata', 'playlistMetadataRenderer'),
                                    ('header', 'hashtagHeaderRenderer'),
                                    expected_type=dict) or {}
            title = traverse_obj(renderer, 'title', ('hashtag', 'simpleText'),
                                 expected_type=txt_or_none)
            renderer = try_get(
                data, lambda x: x['metadata']['playlistMetadataRenderer'], dict)
            if renderer:
                title = renderer.get('title')
            else:
                renderer = try_get(
                    data, lambda x: x['header']['hashtagHeaderRenderer'], dict)
                if renderer:
                    title = try_get(renderer, lambda x: x['hashtag']['simpleText'])
        playlist = self.playlist_result(
            self._entries(selected_tab, item_id, webpage),
            playlist_id=playlist_id, playlist_title=title,
@@ -3292,16 +3265,15 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
        return merge_dicts(playlist, self._extract_uploader(renderer, data))

    def _extract_from_playlist(self, item_id, url, data, playlist):
        title = traverse_obj((playlist, data),
                             (0, 'title'), (1, 'titleText', 'simpleText'),
                             expected_type=txt_or_none)
        playlist_id = txt_or_none(playlist.get('playlistId')) or item_id
        title = playlist.get('title') or try_get(
            data, lambda x: x['titleText']['simpleText'], compat_str)
        playlist_id = playlist.get('playlistId') or item_id
        # Inline playlist rendition continuation does not always work
        # at Youtube side, so delegating regular tab-based playlist URL
        # processing whenever possible.
        playlist_url = urljoin(url, traverse_obj(
            playlist, ('endpoint', 'commandMetadata', 'webCommandMetadata', 'url'),
            expected_type=url_or_none))
        playlist_url = urljoin(url, try_get(
            playlist, lambda x: x['endpoint']['commandMetadata']['webCommandMetadata']['url'],
            compat_str))
        if playlist_url and playlist_url != url:
            return self.url_result(
                playlist_url, ie=YoutubeTabIE.ie_key(), video_id=playlist_id,
@@ -2,11 +2,10 @@ from __future__ import unicode_literals

import itertools
import json
import math
import operator
import re

from functools import update_wrapper

from .utils import (
    error_to_compat_str,
    ExtractorError,
@@ -24,46 +23,11 @@ from .compat import (
)


# name JS functions
class function_with_repr(object):
    # from yt_dlp/utils.py, but in this module
    # repr_ is always set
    def __init__(self, func, repr_):
        update_wrapper(self, func)
        self.func, self.__repr = func, repr_

    def __call__(self, *args, **kwargs):
        return self.func(*args, **kwargs)

    def __repr__(self):
        return self.__repr


# name JS operators
def wraps_op(op):

    def update_and_rename_wrapper(w):
        f = update_wrapper(w, op)
        # fn names are str in both Py 2/3
        f.__name__ = str('JS_') + f.__name__
        return f

    return update_and_rename_wrapper


# NB In principle NaN cannot be checked by membership.
# Here all NaN values are actually this one, so _NaN is _NaN,
# although _NaN != _NaN.

_NaN = float('nan')


def _js_bit_op(op):

    def zeroise(x):
        return 0 if x in (None, JS_Undefined, _NaN) else x
        return 0 if x in (None, JS_Undefined) else x

    @wraps_op(op)
    def wrapped(a, b):
        return op(zeroise(a), zeroise(b)) & 0xffffffff

@@ -72,24 +36,23 @@ def _js_bit_op(op):

def _js_arith_op(op):

    @wraps_op(op)
    def wrapped(a, b):
        if JS_Undefined in (a, b):
            return _NaN
            return float('nan')
        return op(a or 0, b or 0)

    return wrapped


def _js_div(a, b):
    if JS_Undefined in (a, b) or not (a or b):
        return _NaN
    if JS_Undefined in (a, b) or not (a and b):
        return float('nan')
    return operator.truediv(a or 0, b) if b else float('inf')


def _js_mod(a, b):
    if JS_Undefined in (a, b) or not b:
        return _NaN
        return float('nan')
    return (a or 0) % b


@@ -97,13 +60,12 @@ def _js_exp(a, b):
    if not b:
        return 1 # even 0 ** 0 !!
    elif JS_Undefined in (a, b):
        return _NaN
        return float('nan')
    return (a or 0) ** b


def _js_eq_op(op):

    @wraps_op(op)
    def wrapped(a, b):
        if set((a, b)) <= set((None, JS_Undefined)):
            return op(a, a)
@@ -114,7 +76,6 @@ def _js_eq_op(op):

def _js_comp_op(op):

    @wraps_op(op)
    def wrapped(a, b):
        if JS_Undefined in (a, b):
            return False
@@ -129,8 +90,13 @@ def _js_comp_op(op):

def _js_ternary(cndn, if_true=True, if_false=False):
    """Simulate JS's ternary operator (cndn?if_true:if_false)"""
    if cndn in (False, None, 0, '', JS_Undefined, _NaN):
    if cndn in (False, None, 0, '', JS_Undefined):
        return if_false
    try:
        if math.isnan(cndn): # NB: NaN cannot be checked by membership
            return if_false
    except TypeError:
        pass
    return if_true


@@ -277,20 +243,9 @@ class JSInterpreter(object):

        def __getattr__(self, name):
            self.__instantiate()
            # make Py 2.6 conform to its lying documentation
            if name == 'flags':
                self.flags = self.__flags
                return self.flags
            elif name == 'pattern':
                self.pattern = self.__pattern_txt
                return self.pattern
            elif hasattr(self.__self, name):
                v = getattr(self.__self, name)
                setattr(self, name, v)
                return v
            elif name in ('groupindex', 'groups'):
                return 0 if name == 'groupindex' else {}
            raise AttributeError('{0} has no attribute named {1}'.format(self, name))
            if hasattr(self, name):
                return getattr(self, name)
            return super(JSInterpreter.JS_RegExp, self).__getattr__(name)

    @classmethod
    def regex_flags(cls, expr):
@@ -314,8 +269,6 @@ class JSInterpreter(object):
    def _named_object(self, namespace, obj):
        self.__named_object_counter += 1
        name = '%s%d' % (self._OBJ_NAME, self.__named_object_counter)
        if callable(obj) and not isinstance(obj, function_with_repr):
            obj = function_with_repr(obj, 'F<%s>' % (self.__named_object_counter, ))
        namespace[name] = obj
        return name

@@ -403,7 +356,6 @@ class JSInterpreter(object):
                return right_val

        try:
            # print('Eval:', opfunc.__name__, left_val, right_val)
            return opfunc(left_val, right_val)
        except Exception as e:
            raise self.Exception('Failed to evaluate {left_val!r:.50} {op} {right_val!r:.50}'.format(**locals()), expr, cause=e)
@@ -443,7 +395,6 @@ class JSInterpreter(object):
            raise self.Exception('Recursion limit reached')
        allow_recursion -= 1

        # print('At: ' + stmt[:60])
        should_return = False
        # fails on (eg) if (...) stmt1; else stmt2;
        sub_statements = list(self._separate(stmt, ';')) or ['']
@@ -511,15 +462,8 @@ class JSInterpreter(object):
            expr = self._dump(inner, local_vars) + outer

        if expr.startswith('('):

            m = re.match(r'\((?P<d>[a-z])%(?P<e>[a-z])\.length\+(?P=e)\.length\)%(?P=e)\.length', expr)
            if m:
                # short-cut eval of frequently used `(d%e.length+e.length)%e.length`, worth ~6% on `pytest -k test_nsig`
                outer = None
                inner, should_abort = self._offset_e_by_d(m.group('d'), m.group('e'), local_vars)
            else:
                inner, outer = self._separate_at_paren(expr)
                inner, should_abort = self.interpret_statement(inner, local_vars, allow_recursion)
            inner, outer = self._separate_at_paren(expr)
            inner, should_abort = self.interpret_statement(inner, local_vars, allow_recursion)
            if not outer or should_abort:
                return inner, should_abort or should_return
        else:
@@ -731,7 +675,7 @@ class JSInterpreter(object):
        elif expr == 'undefined':
            return JS_Undefined, should_return
        elif expr == 'NaN':
            return _NaN, should_return
            return float('NaN'), should_return

        elif md.get('return'):
            return local_vars[m.group('name')], should_return
@@ -758,24 +702,9 @@ class JSInterpreter(object):
                continue

            right_expr = separated.pop()
            # handle operators that are both unary and binary, minimal BODMAS
            if op in ('+', '-'):
                undone = 0
                while len(separated) > 1 and not separated[-1].strip():
                    undone += 1
                    separated.pop()
                if op == '-' and undone % 2 != 0:
                    right_expr = op + right_expr
                left_val = separated[-1]
                for dm_op in ('*', '%', '/', '**'):
                    bodmas = tuple(self._separate(left_val, dm_op, skip_delims=skip_delim))
                    if len(bodmas) > 1 and not bodmas[-1].strip():
                        expr = op.join(separated) + op + right_expr
                        right_expr = None
                        break
                if right_expr is None:
                    continue

            while op == '-' and len(separated) > 1 and not separated[-1].strip():
                right_expr = '-' + right_expr
                separated.pop()
            left_val = self.interpret_expression(op.join(separated), local_vars, allow_recursion)
            return self._operator(op, left_val, right_expr, expr, local_vars, allow_recursion), should_return

@@ -951,19 +880,16 @@ class JSInterpreter(object):
    def extract_object(self, objname):
        _FUNC_NAME_RE = r'''(?:[a-zA-Z$0-9]+|"[a-zA-Z$0-9]+"|'[a-zA-Z$0-9]+')'''
        obj = {}
        fields = None
        for obj_m in re.finditer(
                r'''(?xs)
                    {0}\s*\.\s*{1}|{1}\s*=\s*\{{\s*
                    (?P<fields>({2}\s*:\s*function\s*\(.*?\)\s*\{{.*?}}(?:,\s*)?)*)
                    }}\s*;
                '''.format(_NAME_RE, re.escape(objname), _FUNC_NAME_RE),
                self.code):
            fields = obj_m.group('fields')
            if fields:
                break
        else:
            obj_m = re.search(
                r'''(?x)
                    (?<!this\.)%s\s*=\s*{\s*
                    (?P<fields>(%s\s*:\s*function\s*\(.*?\)\s*{.*?}(?:,\s*)?)*)
                    }\s*;
                ''' % (re.escape(objname), _FUNC_NAME_RE),
                self.code)
            if not obj_m:
                raise self.Exception('Could not find object ' + objname)
            fields = obj_m.group('fields')
        # Currently, it only supports function definitions
        fields_m = re.finditer(
            r'''(?x)
@@ -976,17 +902,6 @@ class JSInterpreter(object):

        return obj

    @staticmethod
    def _offset_e_by_d(d, e, local_vars):
        """ Short-cut eval: (d%e.length+e.length)%e.length """
        try:
            d = local_vars[d]
            e = local_vars[e]
            e = len(e)
            return _js_mod(_js_mod(d, e) + e, e), False
        except Exception:
            return None, True

    def extract_function_code(self, funcname):
        """ @returns argnames, code """
        func_m = re.search(
@@ -999,15 +914,13 @@ class JSInterpreter(object):
            \((?P<args>[^)]*)\)\s*
            (?P<code>{.+})''' % {'name': re.escape(funcname)},
            self.code)
        code, _ = self._separate_at_paren(func_m.group('code')) # refine the match
        if func_m is None:
            raise self.Exception('Could not find JS function "{funcname}"'.format(**locals()))
        code, _ = self._separate_at_paren(func_m.group('code')) # refine the match
        return self.build_arglist(func_m.group('args')), code

    def extract_function(self, funcname):
        return function_with_repr(
            self.extract_function_from_code(*self.extract_function_code(funcname)),
            'F<%s>' % (funcname, ))
        return self.extract_function_from_code(*self.extract_function_code(funcname))

    def extract_function_from_code(self, argnames, code, *global_stack):
        local_vars = {}
@@ -11,7 +11,6 @@ from .compat import (
    compat_get_terminal_size,
    compat_getenv,
    compat_kwargs,
    compat_open as open,
    compat_shlex_split,
)
from .utils import (
@@ -42,11 +41,14 @@ def _hide_login_info(opts):
def parseOpts(overrideArguments=None):
    def _readOptions(filename_bytes, default=[]):
        try:
            optionf = open(filename_bytes, encoding=preferredencoding())
            optionf = open(filename_bytes)
        except IOError:
            return default # silently skip if file is not present
        try:
            # FIXME: https://github.com/ytdl-org/youtube-dl/commit/dfe5fa49aed02cf36ba9f743b11b0903554b5e56
            contents = optionf.read()
            if sys.version_info < (3,):
                contents = contents.decode(preferredencoding())
            res = compat_shlex_split(contents, comments=True)
        finally:
            optionf.close()
@@ -544,14 +546,12 @@ def parseOpts(overrideArguments=None):
    workarounds.add_option(
        '--referer',
        metavar='URL', dest='referer', default=None,
        help='Specify a custom Referer: use if the video access is restricted to one domain',
        help='Specify a custom referer, use if the video access is restricted to one domain',
    )
    workarounds.add_option(
        '--add-header',
        metavar='FIELD:VALUE', dest='headers', action='append',
        help=('Specify a custom HTTP header and its value, separated by a colon \':\'. You can use this option multiple times. '
              'NB Use --cookies rather than adding a Cookie header if its contents may be sensitive; '
              'data from a Cookie header will be sent to all domains, not just the one intended')
        help='Specify a custom HTTP header and its value, separated by a colon \':\'. You can use this option multiple times',
    )
    workarounds.add_option(
        '--bidi-workaround',
@@ -733,13 +733,9 @@ def parseOpts(overrideArguments=None):
        '--no-part',
        action='store_true', dest='nopart', default=False,
        help='Do not use .part files - write directly into output file')
    filesystem.add_option(
        '--mtime',
        action='store_true', dest='updatetime', default=True,
        help='Use the Last-modified header to set the file modification time (default)')
    filesystem.add_option(
        '--no-mtime',
        action='store_false', dest='updatetime',
        action='store_false', dest='updatetime', default=True,
        help='Do not use the Last-modified header to set the file modification time')
    filesystem.add_option(
        '--write-description',
@@ -18,8 +18,6 @@ from ..utils import (
    shell_quote,
)

from ..compat import compat_open as open


class EmbedThumbnailPPError(PostProcessingError):
    pass
@@ -1,5 +1,6 @@
from __future__ import unicode_literals

import io
import os
import subprocess
import time
@@ -8,7 +9,6 @@ import re

from .common import AudioConversionError, PostProcessor

from ..compat import compat_open as open
from ..utils import (
    encodeArgument,
    encodeFilename,
@@ -493,7 +493,7 @@ class FFmpegMetadataPP(FFmpegPostProcessor):
        chapters = info.get('chapters', [])
        if chapters:
            metadata_filename = replace_extension(filename, 'meta')
            with open(metadata_filename, 'w', encoding='utf-8') as f:
            with io.open(metadata_filename, 'wt', encoding='utf-8') as f:
                def ffmpeg_escape(text):
                    return re.sub(r'(=|;|#|\\|\n)', r'\\\1', text)

@@ -636,7 +636,7 @@ class FFmpegSubtitlesConvertorPP(FFmpegPostProcessor):
                with open(dfxp_file, 'rb') as f:
                    srt_data = dfxp2srt(f.read())

                with open(srt_file, 'w', encoding='utf-8') as f:
                with io.open(srt_file, 'wt', encoding='utf-8') as f:
                    f.write(srt_data)
                old_file = srt_file

@@ -652,7 +652,7 @@ class FFmpegSubtitlesConvertorPP(FFmpegPostProcessor):

            self.run_ffmpeg(old_file, new_file, ['-f', new_format])

            with open(new_file, 'r', encoding='utf-8') as f:
            with io.open(new_file, 'rt', encoding='utf-8') as f:
                subs[lang] = {
                    'ext': new_ext,
                    'data': f.read(),
@@ -727,7 +727,7 @@ class SWFInterpreter(object):
                stack.append(res)
                continue

            assert isinstance(obj, (dict, _ScopeDict)), \
            assert isinstance(obj, (dict, _ScopeDict)),\
                'Accessing member %r on %r' % (pname, obj)
            res = obj.get(pname, undefined)
            stack.append(res)
@@ -1,5 +1,6 @@
from __future__ import unicode_literals

import io
import json
import traceback
import hashlib
@@ -8,10 +9,7 @@ import subprocess
import sys
from zipimport import zipimporter

from .compat import (
    compat_open as open,
    compat_realpath,
)
from .compat import compat_realpath
from .utils import encode_compat_str

from .version import __version__
@@ -129,7 +127,7 @@ def update_self(to_screen, verbose, opener):

    try:
        bat = os.path.join(directory, 'youtube-dl-updater.bat')
        with open(bat, 'w') as batfile:
        with io.open(bat, 'w') as batfile:
            batfile.write('''
@echo off
echo Waiting for file handle to be closed ...
File diff suppressed because it is too large