Mirror of https://gitea.invidious.io/iv-org/invidious-copy-2022-03-16.git (synced 2024-08-15 00:53:18 +00:00)
commit e8c9641548 (parent b9e2fee2c9)

    Update info extractor

4 changed files with 21 additions and 9 deletions
@@ -58,7 +58,7 @@ end
 
 def fetch_youtube_comments(id, continuation, proxies, format)
   client = make_client(YT_URL)
-  html = client.get("/watch?v=#{id}&bpctr=#{Time.new.to_unix + 2000}&gl=US&hl=en&disable_polymer=1")
+  html = client.get("/watch?v=#{id}&gl=US&hl=en&disable_polymer=1&has_verified=1&bpctr=9999999999")
   headers = HTTP::Headers.new
   headers["cookie"] = html.cookies.add_request_headers(headers)["cookie"]
   body = html.body
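The same one-line substitution recurs in every fetch path in this commit (fetch_youtube_comments, fetch_mix, fetch_playlist_videos, fetch_video): the dynamic bypass timestamp bpctr=#{Time.new.to_unix + 2000} becomes the fixed far-future value bpctr=9999999999, and has_verified=1 is added, so the watch page is served without the content-warning interstitial on age-restricted or flagged videos. A minimal sketch of the resulting query string, assuming a hypothetical watch_url_params helper that does not exist in the codebase:

    require "http/params"

    # Sketch only: watch_url_params is a hypothetical helper, not part of
    # Invidious. It gathers the parameters this commit settles on.
    def watch_url_params(id : String) : String
      params = HTTP::Params.build do |form|
        form.add "v", id
        form.add "gl", "US"              # pin region
        form.add "hl", "en"              # pin language
        form.add "disable_polymer", "1"  # request the legacy, scrape-friendly page
        form.add "has_verified", "1"     # present the session as age-verified
        form.add "bpctr", "9999999999"   # far-future "bypass content rating" timestamp
      end
      "/watch?#{params}"
    end

    # Usage would mirror the calls in the diff, e.g. client.get(watch_url_params(id))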
@@ -83,7 +83,7 @@ def fetch_youtube_comments(id, continuation, proxies, format)
         proxy = HTTPProxy.new(proxy_host: proxy[:ip], proxy_port: proxy[:port])
         proxy_client.set_proxy(proxy)
 
-        response = proxy_client.get("/watch?v=#{id}&bpctr=#{Time.new.to_unix + 2000}&gl=US&hl=en&disable_polymer=1")
+        response = proxy_client.get("/watch?v=#{id}&gl=US&hl=en&disable_polymer=1&has_verified=1&bpctr=9999999999")
         proxy_headers = HTTP::Headers.new
         proxy_headers["cookie"] = response.cookies.add_request_headers(headers)["cookie"]
         proxy_html = response.body
@@ -140,8 +140,8 @@ def fetch_youtube_comments(id, continuation, proxies, format)
   headers["content-type"] = "application/x-www-form-urlencoded"
 
   headers["x-client-data"] = "CIi2yQEIpbbJAQipncoBCNedygEIqKPKAQ=="
-  headers["x-spf-previous"] = "https://www.youtube.com/watch?v=#{id}&bpctr=#{Time.new.to_unix + 2000}&gl=US&hl=en&disable_polymer=1"
-  headers["x-spf-referer"] = "https://www.youtube.com/watch?v=#{id}&bpctr=#{Time.new.to_unix + 2000}&gl=US&hl=en&disable_polymer=1"
+  headers["x-spf-previous"] = "https://www.youtube.com/watch?v=#{id}&gl=US&hl=en&disable_polymer=1&has_verified=1&bpctr=9999999999"
+  headers["x-spf-referer"] = "https://www.youtube.com/watch?v=#{id}&gl=US&hl=en&disable_polymer=1&has_verified=1&bpctr=9999999999"
 
   headers["x-youtube-client-name"] = "1"
   headers["x-youtube-client-version"] = "2.20180719"
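The x-spf-previous and x-spf-referer headers carry the URL of the watch page the client claims to have loaded, so this hunk updates them in lockstep with the GET requests above; presumably the comment endpoint expects a consistent navigation state. Continuing the sketch, with the same hypothetical helper:

    # Keep the SPF headers consistent with the watch page actually requested
    # (sketch; watch_url_params is the hypothetical helper from above).
    url = "https://www.youtube.com#{watch_url_params(id)}"
    headers["x-spf-previous"] = url
    headers["x-spf-referer"] = url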
@@ -26,7 +26,7 @@ def fetch_mix(rdid, video_id, cookies = nil)
   if cookies
     headers = cookies.add_request_headers(headers)
   end
-  response = client.get("/watch?v=#{video_id}&list=#{rdid}&bpctr=#{Time.new.to_unix + 2000}&gl=US&hl=en", headers)
+  response = client.get("/watch?v=#{video_id}&list=#{rdid}&gl=US&hl=en&has_verified=1&bpctr=9999999999", headers)
 
   yt_data = response.body.match(/window\["ytInitialData"\] = (?<data>.*);/)
   if yt_data
@@ -30,7 +30,7 @@ def fetch_playlist_videos(plid, page, video_count, continuation = nil)
   client = make_client(YT_URL)
 
   if continuation
-    html = client.get("/watch?v=#{continuation}&list=#{plid}&bpctr=#{Time.new.to_unix + 2000}&gl=US&hl=en&disable_polymer=1")
+    html = client.get("/watch?v=#{continuation}&list=#{plid}&gl=US&hl=en&disable_polymer=1&has_verified=1&bpctr=9999999999")
     html = XML.parse_html(html.body)
 
     index = html.xpath_node(%q(//span[@id="playlist-current-index"])).try &.content.to_i?
@@ -546,7 +546,7 @@ def fetch_video(id, proxies)
 
   spawn do
     client = make_client(YT_URL)
-    html = client.get("/watch?v=#{id}&bpctr=#{Time.new.to_unix + 2000}&gl=US&hl=en&disable_polymer=1")
+    html = client.get("/watch?v=#{id}&gl=US&hl=en&disable_polymer=1&has_verified=1&bpctr=9999999999")
 
     if md = html.headers["location"]?.try &.match(/v=(?<id>[a-zA-Z0-9_-]{11})/)
       next html_channel.send(md["id"])
@@ -620,7 +620,7 @@ def fetch_video(id, proxies)
         client.connect_timeout = 10.seconds
         client.set_proxy(proxy)
 
-        html = XML.parse_html(client.get("/watch?v=#{id}&bpctr=#{Time.new.to_unix + 2000}&gl=US&hl=en&disable_polymer=1").body)
+        html = XML.parse_html(client.get("/watch?v=#{id}&gl=US&hl=en&disable_polymer=1&has_verified=1&bpctr=9999999999").body)
         info = HTTP::Params.parse(client.get("/get_video_info?video_id=#{id}&el=detailpage&ps=default&eurl=&gl=US&hl=en&disable_polymer=1").body)
 
         if info["reason"]?
@@ -641,7 +641,19 @@ def fetch_video(id, proxies)
   end
 
   if info["reason"]?
-    raise info["reason"]
+    html_info = html.to_s.match(/ytplayer\.config = (?<info>.*?);ytplayer\.load/).try &.["info"]
+    if html_info
+      html_info = JSON.parse(html_info)["args"].as_h
+      info.delete("reason")
+
+      html_info.each do |k, v|
+        info[k] = v.to_s
+      end
+    end
+
+    if info["reason"]?
+      raise info["reason"]
+    end
   end
 
   title = info["title"]
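The last hunk is the substantive change: when get_video_info comes back with a reason (an age gate or a region block), the new code falls back to the ytplayer.config JSON embedded in the watch-page HTML, merges its args into info, and only raises if the reason survives the merge. A self-contained sketch of that fallback, with a stand-in html_body and a made-up reason string in place of the real responses:

    require "http/params"
    require "json"

    # Stand-ins for the real watch-page HTML and get_video_info response.
    html_body = %q(<script>ytplayer.config = {"args":{"title":"Example"}};ytplayer.load</script>)
    info = HTTP::Params.parse("reason=Sign+in+to+confirm+your+age")

    # Pull the JSON blob between `ytplayer.config = ` and `;ytplayer.load`.
    if html_info = html_body.match(/ytplayer\.config = (?<info>.*?);ytplayer\.load/).try &.["info"]
      args = JSON.parse(html_info)["args"].as_h
      info.delete("reason")                  # prefer the page data over the API error
      args.each { |k, v| info[k] = v.to_s }  # merge page args into the params
    end

    raise info["reason"] if info["reason"]?  # still gated: surface the reason
    puts info["title"]                       # => Example

The delete-then-recheck shape means a reason the page data cannot cure (for example, no ytplayer.config in the HTML at all) is raised exactly as before.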