Merge branch 'iv-org:master' into CICD

Commit 48fdb6fadc by John Wong, 2023-05-26 20:12:38 +08:00 (committed via GitHub)
40 changed files with 931 additions and 784 deletions

View file

@ -46,6 +46,7 @@ body a.channel-owner {
}
.creator-heart {
display: inline-block;
position: relative;
width: 16px;
height: 16px;
@ -66,6 +67,7 @@ body a.channel-owner {
}
.creator-heart-small-container {
display: block;
position: relative;
width: 13px;
height: 13px;

View file

@ -547,5 +547,6 @@
"Song: ": "أغنية: ",
"Channel Sponsor": "راعي القناة",
"Standard YouTube license": "ترخيص YouTube القياسي",
"Download is disabled": "تم تعطيل التحميلات"
"Download is disabled": "تم تعطيل التحميلات",
"Import YouTube playlist (.csv)": "استيراد قائمة تشغيل YouTube (.csv)"
}

View file

@ -499,5 +499,6 @@
"Channel Sponsor": "Sponzor kanálu",
"Song: ": "Skladba: ",
"Standard YouTube license": "Standardní licence YouTube",
"Download is disabled": "Stahování je zakázáno"
"Download is disabled": "Stahování je zakázáno",
"Import YouTube playlist (.csv)": "Importovat YouTube playlist (.csv)"
}

View file

@ -433,7 +433,7 @@
"comments_points_count_plural": "{{count}} Punkte",
"crash_page_you_found_a_bug": "Anscheinend haben Sie einen Fehler in Invidious gefunden!",
"generic_count_months": "{{count}} Monat",
"generic_count_months_plural": "{{count}} Monate",
"generic_count_months_plural": "{{count}} Monaten",
"Cantonese (Hong Kong)": "Kantonesisch (Hong Kong)",
"Chinese (Hong Kong)": "Chinesisch (Hong Kong)",
"generic_playlists_count": "{{count}} Wiedergabeliste",
@ -482,5 +482,6 @@
"channel_tab_channels_label": "Kanäle",
"Channel Sponsor": "Kanalsponsor",
"Standard YouTube license": "Standard YouTube-Lizenz",
"Song: ": "Musik: "
"Song: ": "Musik: ",
"Download is disabled": "Herunterladen ist deaktiviert"
}

View file

@ -33,6 +33,7 @@
"Import": "Import",
"Import Invidious data": "Import Invidious JSON data",
"Import YouTube subscriptions": "Import YouTube/OPML subscriptions",
"Import YouTube playlist (.csv)": "Import YouTube playlist (.csv)",
"Import FreeTube subscriptions (.db)": "Import FreeTube subscriptions (.db)",
"Import NewPipe subscriptions (.json)": "Import NewPipe subscriptions (.json)",
"Import NewPipe data (.zip)": "Import NewPipe data (.zip)",

View file

@ -483,5 +483,6 @@
"Channel Sponsor": "Kanala sponsoro",
"Song: ": "Muzikaĵo: ",
"Standard YouTube license": "Implicita YouTube-licenco",
"Download is disabled": "Elŝuto estas malebligita"
"Download is disabled": "Elŝuto estas malebligita",
"Import YouTube playlist (.csv)": "Importi YouTube-ludliston (.csv)"
}

View file

@ -404,16 +404,16 @@
"generic_subscribers_count_plural": "{{count}} suscriptores",
"generic_subscriptions_count": "{{count}} suscripción",
"generic_subscriptions_count_plural": "{{count}} suscripciones",
"subscriptions_unseen_notifs_count": "{{count}} notificación sin ver",
"subscriptions_unseen_notifs_count_plural": "{{count}} notificaciones sin ver",
"subscriptions_unseen_notifs_count": "{{count}} notificación no vista",
"subscriptions_unseen_notifs_count_plural": "{{count}} notificaciones no vistas",
"generic_count_days": "{{count}} día",
"generic_count_days_plural": "{{count}} días",
"comments_view_x_replies": "Ver {{count}} respuesta",
"comments_view_x_replies_plural": "Ver {{count}} respuestas",
"generic_count_weeks": "{{count}} semana",
"generic_count_weeks_plural": "{{count}} semanas",
"generic_playlists_count": "{{count}} reproducción",
"generic_playlists_count_plural": "{{count}} reproducciones",
"generic_playlists_count": "{{count}} lista de reproducción",
"generic_playlists_count_plural": "{{count}} listas de reproducciones",
"generic_videos_count": "{{count}} video",
"generic_videos_count_plural": "{{count}} videos",
"generic_count_months": "{{count}} mes",
@ -483,5 +483,6 @@
"Song: ": "Canción: ",
"Channel Sponsor": "Patrocinador del canal",
"Standard YouTube license": "Licencia de YouTube estándar",
"Download is disabled": "La descarga está deshabilitada"
"Download is disabled": "La descarga está deshabilitada",
"Import YouTube playlist (.csv)": "Importar lista de reproducción de YouTube (.csv)"
}

View file

@ -499,5 +499,6 @@
"Channel Sponsor": "Sponzor kanala",
"Song: ": "Pjesma: ",
"Standard YouTube license": "Standardna YouTube licenca",
"Download is disabled": "Preuzimanje je deaktivirano"
"Download is disabled": "Preuzimanje je deaktivirano",
"Import YouTube playlist (.csv)": "Uvezi YouTube zbirku (.csv)"
}

View file

@ -483,5 +483,6 @@
"Download is disabled": "Il download è disabilitato",
"Song: ": "Canzone: ",
"Standard YouTube license": "Licenza standard di YouTube",
"Channel Sponsor": "Sponsor del canale"
"Channel Sponsor": "Sponsor del canale",
"Import YouTube playlist (.csv)": "Importa playlist di YouTube (.csv)"
}

View file

@ -8,8 +8,8 @@
"Shared `x` ago": "`x`前に公開",
"Unsubscribe": "登録解除",
"Subscribe": "登録",
"View channel on YouTube": "YouTube でチャンネルを見る",
"View playlist on YouTube": "YouTube で再生リストを見る",
"View channel on YouTube": "YouTube でチャンネルを表示",
"View playlist on YouTube": "YouTube で再生リストを表示",
"newest": "新しい順",
"oldest": "古い順",
"popular": "人気順",
@ -69,7 +69,7 @@
"preferences_captions_label": "優先する字幕: ",
"Fallback captions: ": "フォールバック時の字幕: ",
"preferences_related_videos_label": "関連動画を表示: ",
"preferences_annotations_label": "デフォルトでアノテーションを表示: ",
"preferences_annotations_label": "最初からアノテーションを表示: ",
"preferences_extend_desc_label": "動画の説明文を自動的に拡張: ",
"preferences_vr_mode_label": "対話的な360°動画 (WebGL が必要): ",
"preferences_category_visual": "外観設定",
@ -82,7 +82,7 @@
"preferences_category_misc": "ほかの設定",
"preferences_automatic_instance_redirect_label": "インスタンスの自動転送 (redirect.invidious.ioにフォールバック): ",
"preferences_category_subscription": "登録チャンネル設定",
"preferences_annotations_subscribed_label": "デフォルトで登録チャンネルのアノテーションを表示しますか? ",
"preferences_annotations_subscribed_label": "最初から登録チャンネルのアノテーションを表示 ",
"Redirect homepage to feed: ": "ホームからフィードにリダイレクト: ",
"preferences_max_results_label": "フィードに表示する動画の量: ",
"preferences_sort_label": "動画を並び替え: ",
@ -110,7 +110,7 @@
"preferences_category_admin": "管理者設定",
"preferences_default_home_label": "ホームに表示するページ: ",
"preferences_feed_menu_label": "フィードメニュー: ",
"preferences_show_nick_label": "ニックネームを一番上に表示する: ",
"preferences_show_nick_label": "ログイン名を上部に表示: ",
"Top enabled: ": "トップページを有効化: ",
"CAPTCHA enabled: ": "CAPTCHA を有効化: ",
"Login enabled: ": "ログインを有効化: ",
@ -131,7 +131,7 @@
"Released under the AGPLv3 on Github.": "GitHub 上で AGPLv3 の元で公開",
"Source available here.": "ソースはここで閲覧可能です。",
"View JavaScript license information.": "JavaScript ライセンス情報",
"View privacy policy.": "プライバシーポリシー",
"View privacy policy.": "個人情報保護方針",
"Trending": "急上昇",
"Public": "公開",
"Unlisted": "限定公開",
@ -142,11 +142,11 @@
"Delete playlist": "再生リストを削除",
"Create playlist": "再生リストを作成",
"Title": "タイトル",
"Playlist privacy": "再生リストの公開設定",
"Playlist privacy": "再生リストの公開状態",
"Editing playlist `x`": "再生リスト `x` を編集中",
"Show more": "もっと見る",
"Show less": "表示を少なく",
"Watch on YouTube": "YouTube で視聴",
"Watch on YouTube": "YouTubeで視聴",
"Switch Invidious Instance": "Invidious インスタンスの変更",
"Hide annotations": "アノテーションを隠す",
"Show annotations": "アノテーションを表示",
@ -161,13 +161,13 @@
"Premieres in `x`": "`x`後にプレミア公開",
"Premieres `x`": "`x`にプレミア公開",
"Hi! Looks like you have JavaScript turned off. Click here to view comments, keep in mind they may take a bit longer to load.": "やあ!君は JavaScript を無効にしているのかな?ここをクリックしてコメントを見れるけど、読み込みには少し時間がかかることがあるのを覚えておいてね。",
"View YouTube comments": "YouTube のコメントを見る",
"View YouTube comments": "YouTube のコメントを表示",
"View more comments on Reddit": "Reddit でコメントをもっと見る",
"View `x` comments": {
"([^.,0-9]|^)1([^.,0-9]|$)": "`x` 件のコメントを見る",
"": "`x` 件のコメントを見る"
"([^.,0-9]|^)1([^.,0-9]|$)": "`x` 件のコメントを表示",
"": "`x` 件のコメントを表示"
},
"View Reddit comments": "Reddit のコメントを見る",
"View Reddit comments": "Reddit のコメントを表示",
"Hide replies": "返信を非表示",
"Show replies": "返信を表示",
"Incorrect password": "パスワードが間違っています",
@ -314,7 +314,7 @@
"Zulu": "ズール語",
"generic_count_years_0": "{{count}}年",
"generic_count_months_0": "{{count}}か月",
"generic_count_weeks_0": "{{count}}週",
"generic_count_weeks_0": "{{count}}週",
"generic_count_days_0": "{{count}}日",
"generic_count_hours_0": "{{count}}時間",
"generic_count_minutes_0": "{{count}}分",
@ -326,8 +326,8 @@
"About": "このサービスについて",
"Rating: ": "評価: ",
"preferences_locale_label": "言語: ",
"View as playlist": "再生リストで見る",
"Default": "デフォルト",
"View as playlist": "再生リストとして閲覧",
"Default": "標準",
"Music": "音楽",
"Gaming": "ゲーム",
"News": "ニュース",
@ -375,7 +375,7 @@
"next_steps_error_message_refresh": "再読込",
"next_steps_error_message_go_to_youtube": "YouTubeへ",
"search_filters_duration_option_short": "4 分未満",
"footer_documentation": "書",
"footer_documentation": "説明書",
"footer_source_code": "ソースコード",
"footer_original_source_code": "元のソースコード",
"footer_modfied_source_code": "改変して使用",
@ -407,7 +407,7 @@
"preferences_quality_dash_option_worst": "最悪",
"preferences_quality_dash_option_best": "最高",
"videoinfo_started_streaming_x_ago": "`x`前に配信を開始",
"videoinfo_watch_on_youTube": "YouTube上で見る",
"videoinfo_watch_on_youTube": "YouTubeで視聴",
"user_created_playlists": "`x`個の作成した再生リスト",
"Video unavailable": "動画は利用できません",
"Chinese": "中国語",
@ -442,7 +442,7 @@
"crash_page_switch_instance": "<a href=\"`x`\">別のインスタンスを使用</a>を試す",
"crash_page_read_the_faq": "<a href=\"`x`\">よくある質問 (FAQ)</a> を読む",
"Popular enabled: ": "人気動画を有効化 ",
"search_message_use_another_instance": " <a href=\"`x`\">別のインスタンス上でも検索</a>できます。",
"search_message_use_another_instance": " <a href=\"`x`\">別のインスタンス上での検索</a>も可能です。",
"search_filters_apply_button": "選択したフィルターを適用",
"user_saved_playlists": "`x` 個の保存した再生リスト",
"crash_page_you_found_a_bug": "Invidious のバグのようです!",
@ -466,5 +466,7 @@
"Album: ": "アルバム: ",
"Song: ": "曲: ",
"Channel Sponsor": "チャンネルのスポンサー",
"Standard YouTube license": "標準 Youtube ライセンス"
"Standard YouTube license": "標準 Youtube ライセンス",
"Download is disabled": "ダウンロード: このインスタンスでは未対応",
"Import YouTube playlist (.csv)": "YouTube 再生リストをインポート (.csv)"
}

View file

@ -499,5 +499,6 @@
"Album: ": "Album: ",
"Song: ": "Piosenka: ",
"Channel Sponsor": "Sponsor kanału",
"Standard YouTube license": "Standardowa licencja YouTube"
"Standard YouTube license": "Standardowa licencja YouTube",
"Import YouTube playlist (.csv)": "Importuj playlistę YouTube (.csv)"
}

View file

@ -479,5 +479,9 @@
"channel_tab_streams_label": "Ao Vivo",
"Music in this video": "Música neste vídeo",
"Artist: ": "Artista: ",
"Album: ": "Álbum: "
"Album: ": "Álbum: ",
"Standard YouTube license": "Licença padrão do YouTube",
"Song: ": "Música: ",
"Channel Sponsor": "Patrocinador do Canal",
"Download is disabled": "Download está desativado"
}

View file

@ -483,5 +483,6 @@
"Channel Sponsor": "Kanal Sponsoru",
"Song: ": "Şarkı: ",
"Standard YouTube license": "Standart YouTube lisansı",
"Download is disabled": "İndirme devre dışı"
"Download is disabled": "İndirme devre dışı",
"Import YouTube playlist (.csv)": "YouTube Oynatma Listesini İçe Aktar (.csv)"
}

View file

@ -499,5 +499,6 @@
"Song: ": "Пісня: ",
"Channel Sponsor": "Спонсор каналу",
"Standard YouTube license": "Стандартна ліцензія YouTube",
"Download is disabled": "Завантаження вимкнено"
"Download is disabled": "Завантаження вимкнено",
"Import YouTube playlist (.csv)": "Імпорт списку відтворення YouTube (.csv)"
}

View file

@ -467,5 +467,6 @@
"Song: ": "歌曲: ",
"Channel Sponsor": "频道赞助者",
"Standard YouTube license": "标准 YouTube 许可证",
"Download is disabled": "已禁用下载"
"Download is disabled": "已禁用下载",
"Import YouTube playlist (.csv)": "导入 YouTube 播放列表(.csv"
}

View file

@ -467,5 +467,6 @@
"Channel Sponsor": "頻道贊助者",
"Song: ": "歌曲: ",
"Standard YouTube license": "標準 YouTube 授權條款",
"Download is disabled": "已停用下載"
"Download is disabled": "已停用下載",
"Import YouTube playlist (.csv)": "匯入 YouTube 播放清單 (.csv)"
}

View file

@ -23,18 +23,6 @@ Spectator.describe "Helper" do
end
end
describe "#produce_comment_continuation" do
it "correctly produces a continuation token for comments" do
expect(produce_comment_continuation("_cE8xSu6swE", "ADSJ_i2qvJeFtL0htmS5_K5Ctj3eGFVBMWL9Wd42o3kmUL6_mAzdLp85-liQZL0mYr_16BhaggUqX652Sv9JqV6VXinShSP-ZT6rL4NolPBaPXVtJsO5_rA_qE3GubAuLFw9uzIIXU2-HnpXbdgPLWTFavfX206hqWmmpHwUOrmxQV_OX6tYkM3ux3rPAKCDrT8eWL7MU3bLiNcnbgkW8o0h8KYLL_8BPa8LcHbTv8pAoNkjerlX1x7K4pqxaXPoyz89qNlnh6rRx6AXgAzzoHH1dmcyQ8CIBeOHg-m4i8ZxdX4dP88XWrIFg-jJGhpGP8JUMDgZgavxVx225hUEYZMyrLGler5em4FgbG62YWC51moLDLeYEA")).to eq("EkMSC19jRTh4U3U2c3dFyAEA4AEBogINKP___________wFAAMICHQgEGhdodHRwczovL3d3dy55b3V0dWJlLmNvbSIAGAYyjAMK9gJBRFNKX2kycXZKZUZ0TDBodG1TNV9LNUN0ajNlR0ZWQk1XTDlXZDQybzNrbVVMNl9tQXpkTHA4NS1saVFaTDBtWXJfMTZCaGFnZ1VxWDY1MlN2OUpxVjZWWGluU2hTUC1aVDZyTDROb2xQQmFQWFZ0SnNPNV9yQV9xRTNHdWJBdUxGdzl1eklJWFUyLUhucFhiZGdQTFdURmF2ZlgyMDZocVdtbXBId1VPcm14UVZfT1g2dFlrTTN1eDNyUEFLQ0RyVDhlV0w3TVUzYkxpTmNuYmdrVzhvMGg4S1lMTF84QlBhOExjSGJUdjhwQW9Oa2plcmxYMXg3SzRwcXhhWFBveXo4OXFObG5oNnJSeDZBWGdBenpvSEgxZG1jeVE4Q0lCZU9IZy1tNGk4WnhkWDRkUDg4WFdySUZnLWpKR2hwR1A4SlVNRGdaZ2F2eFZ4MjI1aFVFWVpNeXJMR2xlcjVlbTRGZ2JHNjJZV0M1MW1vTERMZVlFQSIPIgtfY0U4eFN1NnN3RTAAKBQ%3D")
expect(produce_comment_continuation("_cE8xSu6swE", "ADSJ_i1yz21HI4xrtsYXVC-2_kfZ6kx1yjYQumXAAxqH3CAd7ZxKxfLdZS1__fqhCtOASRbbpSBGH_tH1J96Dxux-Qfjk-lUbupMqv08Q3aHzGu7p70VoUMHhI2-GoJpnbpmcOxkGzeIuenRS_ym2Y8fkDowhqLPFgsS0n4djnZ2UmC17F3Ch3N1S1UYf1ZVOc991qOC1iW9kJDzyvRQTWCPsJUPneSaAKW-Rr97pdesOkR4i8cNvHZRnQKe2HEfsvlJOb2C3lF1dJBfJeNfnQYeh5hv6_fZN7bt3-JL1Xk3Qc9NXNxmmbDpwAC_yFR8dthFfUJdyIO9Nu1D79MLYeR-H5HxqUJokkJiGIz4lTE_CXXbhAI")).to eq("EkMSC19jRTh4U3U2c3dFyAEA4AEBogINKP___________wFAAMICHQgEGhdodHRwczovL3d3dy55b3V0dWJlLmNvbSIAGAYyiQMK8wJBRFNKX2kxeXoyMUhJNHhydHNZWFZDLTJfa2ZaNmt4MXlqWVF1bVhBQXhxSDNDQWQ3WnhLeGZMZFpTMV9fZnFoQ3RPQVNSYmJwU0JHSF90SDFKOTZEeHV4LVFmamstbFVidXBNcXYwOFEzYUh6R3U3cDcwVm9VTUhoSTItR29KcG5icG1jT3hrR3plSXVlblJTX3ltMlk4ZmtEb3docUxQRmdzUzBuNGRqbloyVW1DMTdGM0NoM04xUzFVWWYxWlZPYzk5MXFPQzFpVzlrSkR6eXZSUVRXQ1BzSlVQbmVTYUFLVy1Scjk3cGRlc09rUjRpOGNOdkhaUm5RS2UySEVmc3ZsSk9iMkMzbEYxZEpCZkplTmZuUVllaDVodjZfZlpON2J0My1KTDFYazNRYzlOWE54bW1iRHB3QUNfeUZSOGR0aEZmVUpkeUlPOU51MUQ3OU1MWWVSLUg1SHhxVUpva2tKaUdJejRsVEVfQ1hYYmhBSSIPIgtfY0U4eFN1NnN3RTAAKBQ%3D")
expect(produce_comment_continuation("29-q7YnyUmY", "")).to eq("EkMSCzI5LXE3WW55VW1ZyAEA4AEBogINKP___________wFAAMICHQgEGhdodHRwczovL3d3dy55b3V0dWJlLmNvbSIAGAYyFQoAIg8iCzI5LXE3WW55VW1ZMAAoFA%3D%3D")
expect(produce_comment_continuation("CvFH_6DNRCY", "")).to eq("EkMSC0N2RkhfNkROUkNZyAEA4AEBogINKP___________wFAAMICHQgEGhdodHRwczovL3d3dy55b3V0dWJlLmNvbSIAGAYyFQoAIg8iC0N2RkhfNkROUkNZMAAoFA%3D%3D")
end
end
describe "#produce_channel_community_continuation" do
it "correctly produces a continuation token for a channel community" do
expect(produce_channel_community_continuation("UCCj956IF62FbT7Gouszaj9w", "Egljb21tdW5pdHm4")).to eq("4qmFsgIsEhhVQ0NqOTU2SUY2MkZiVDdHb3VzemFqOXcaEEVnbGpiMjF0ZFc1cGRIbTQ%3D")

View file

@ -13,7 +13,7 @@ require "../src/invidious/helpers/utils"
require "../src/invidious/videos"
require "../src/invidious/videos/*"
require "../src/invidious/comments"
require "../src/invidious/comments/content"
require "../src/invidious/helpers/serialized_yt_data"
require "../src/invidious/yt_backend/extractors"

View file

@ -7,7 +7,6 @@ require "../src/invidious/helpers/*"
require "../src/invidious/channels/*"
require "../src/invidious/videos/caption"
require "../src/invidious/videos"
require "../src/invidious/comments"
require "../src/invidious/playlists"
require "../src/invidious/search/ctoken"
require "../src/invidious/trending"

View file

@ -43,6 +43,7 @@ require "./invidious/videos/*"
require "./invidious/jsonify/**"
require "./invidious/*"
require "./invidious/comments/*"
require "./invidious/channels/*"
require "./invidious/user/*"
require "./invidious/search/*"

View file

@ -250,7 +250,7 @@ def fetch_channel_community(ucid, continuation, locale, format, thin_mode)
if format == "html"
response = JSON.parse(response)
content_html = template_youtube_comments(response, locale, thin_mode)
content_html = IV::Frontend::Comments.template_youtube(response, locale, thin_mode)
response = JSON.build do |json|
json.object do

View file

@ -1,709 +0,0 @@
class RedditThing
include JSON::Serializable
property kind : String
property data : RedditComment | RedditLink | RedditMore | RedditListing
end
class RedditComment
include JSON::Serializable
property author : String
property body_html : String
property replies : RedditThing | String
property score : Int32
property depth : Int32
property permalink : String
@[JSON::Field(converter: RedditComment::TimeConverter)]
property created_utc : Time
module TimeConverter
def self.from_json(value : JSON::PullParser) : Time
Time.unix(value.read_float.to_i)
end
def self.to_json(value : Time, json : JSON::Builder)
json.number(value.to_unix)
end
end
end
struct RedditLink
include JSON::Serializable
property author : String
property score : Int32
property subreddit : String
property num_comments : Int32
property id : String
property permalink : String
property title : String
end
struct RedditMore
include JSON::Serializable
property children : Array(String)
property count : Int32
property depth : Int32
end
class RedditListing
include JSON::Serializable
property children : Array(RedditThing)
property modhash : String
end
def fetch_youtube_comments(id, cursor, format, locale, thin_mode, region, sort_by = "top")
case cursor
when nil, ""
ctoken = produce_comment_continuation(id, cursor: "", sort_by: sort_by)
when .starts_with? "ADSJ"
ctoken = produce_comment_continuation(id, cursor: cursor, sort_by: sort_by)
else
ctoken = cursor
end
client_config = YoutubeAPI::ClientConfig.new(region: region)
response = YoutubeAPI.next(continuation: ctoken, client_config: client_config)
contents = nil
if on_response_received_endpoints = response["onResponseReceivedEndpoints"]?
header = nil
on_response_received_endpoints.as_a.each do |item|
if item["reloadContinuationItemsCommand"]?
case item["reloadContinuationItemsCommand"]["slot"]
when "RELOAD_CONTINUATION_SLOT_HEADER"
header = item["reloadContinuationItemsCommand"]["continuationItems"][0]
when "RELOAD_CONTINUATION_SLOT_BODY"
# continuationItems is nil when video has no comments
contents = item["reloadContinuationItemsCommand"]["continuationItems"]?
end
elsif item["appendContinuationItemsAction"]?
contents = item["appendContinuationItemsAction"]["continuationItems"]
end
end
elsif response["continuationContents"]?
response = response["continuationContents"]
if response["commentRepliesContinuation"]?
body = response["commentRepliesContinuation"]
else
body = response["itemSectionContinuation"]
end
contents = body["contents"]?
header = body["header"]?
else
raise NotFoundException.new("Comments not found.")
end
if !contents
if format == "json"
return {"comments" => [] of String}.to_json
else
return {"contentHtml" => "", "commentCount" => 0}.to_json
end
end
continuation_item_renderer = nil
contents.as_a.reject! do |item|
if item["continuationItemRenderer"]?
continuation_item_renderer = item["continuationItemRenderer"]
true
end
end
response = JSON.build do |json|
json.object do
if header
count_text = header["commentsHeaderRenderer"]["countText"]
comment_count = (count_text["simpleText"]? || count_text["runs"]?.try &.[0]?.try &.["text"]?)
.try &.as_s.gsub(/\D/, "").to_i? || 0
json.field "commentCount", comment_count
end
json.field "videoId", id
json.field "comments" do
json.array do
contents.as_a.each do |node|
json.object do
if node["commentThreadRenderer"]?
node = node["commentThreadRenderer"]
end
if node["replies"]?
node_replies = node["replies"]["commentRepliesRenderer"]
end
if node["comment"]?
node_comment = node["comment"]["commentRenderer"]
else
node_comment = node["commentRenderer"]
end
content_html = node_comment["contentText"]?.try { |t| parse_content(t, id) } || ""
author = node_comment["authorText"]?.try &.["simpleText"]? || ""
json.field "verified", (node_comment["authorCommentBadge"]? != nil)
json.field "author", author
json.field "authorThumbnails" do
json.array do
node_comment["authorThumbnail"]["thumbnails"].as_a.each do |thumbnail|
json.object do
json.field "url", thumbnail["url"]
json.field "width", thumbnail["width"]
json.field "height", thumbnail["height"]
end
end
end
end
if node_comment["authorEndpoint"]?
json.field "authorId", node_comment["authorEndpoint"]["browseEndpoint"]["browseId"]
json.field "authorUrl", node_comment["authorEndpoint"]["browseEndpoint"]["canonicalBaseUrl"]
else
json.field "authorId", ""
json.field "authorUrl", ""
end
published_text = node_comment["publishedTimeText"]["runs"][0]["text"].as_s
published = decode_date(published_text.rchop(" (edited)"))
if published_text.includes?(" (edited)")
json.field "isEdited", true
else
json.field "isEdited", false
end
json.field "content", html_to_content(content_html)
json.field "contentHtml", content_html
json.field "isPinned", (node_comment["pinnedCommentBadge"]? != nil)
json.field "isSponsor", (node_comment["sponsorCommentBadge"]? != nil)
if node_comment["sponsorCommentBadge"]?
# Sponsor icon thumbnails always have one object and there's only ever the url property in it
json.field "sponsorIconUrl", node_comment.dig("sponsorCommentBadge", "sponsorCommentBadgeRenderer", "customBadge", "thumbnails", 0, "url").to_s
end
json.field "published", published.to_unix
json.field "publishedText", translate(locale, "`x` ago", recode_date(published, locale))
comment_action_buttons_renderer = node_comment["actionButtons"]["commentActionButtonsRenderer"]
json.field "likeCount", comment_action_buttons_renderer["likeButton"]["toggleButtonRenderer"]["accessibilityData"]["accessibilityData"]["label"].as_s.scan(/\d/).map(&.[0]).join.to_i
json.field "commentId", node_comment["commentId"]
json.field "authorIsChannelOwner", node_comment["authorIsChannelOwner"]
if comment_action_buttons_renderer["creatorHeart"]?
hearth_data = comment_action_buttons_renderer["creatorHeart"]["creatorHeartRenderer"]["creatorThumbnail"]
json.field "creatorHeart" do
json.object do
json.field "creatorThumbnail", hearth_data["thumbnails"][-1]["url"]
json.field "creatorName", hearth_data["accessibility"]["accessibilityData"]["label"]
end
end
end
if node_replies && !response["commentRepliesContinuation"]?
if node_replies["continuations"]?
continuation = node_replies["continuations"]?.try &.as_a[0]["nextContinuationData"]["continuation"].as_s
elsif node_replies["contents"]?
continuation = node_replies["contents"]?.try &.as_a[0]["continuationItemRenderer"]["continuationEndpoint"]["continuationCommand"]["token"].as_s
end
continuation ||= ""
json.field "replies" do
json.object do
json.field "replyCount", node_comment["replyCount"]? || 1
json.field "continuation", continuation
end
end
end
end
end
end
end
if continuation_item_renderer
if continuation_item_renderer["continuationEndpoint"]?
continuation_endpoint = continuation_item_renderer["continuationEndpoint"]
elsif continuation_item_renderer["button"]?
continuation_endpoint = continuation_item_renderer["button"]["buttonRenderer"]["command"]
end
if continuation_endpoint
json.field "continuation", continuation_endpoint["continuationCommand"]["token"].as_s
end
end
end
end
if format == "html"
response = JSON.parse(response)
content_html = template_youtube_comments(response, locale, thin_mode)
response = JSON.build do |json|
json.object do
json.field "contentHtml", content_html
if response["commentCount"]?
json.field "commentCount", response["commentCount"]
else
json.field "commentCount", 0
end
end
end
end
return response
end
def fetch_reddit_comments(id, sort_by = "confidence")
client = make_client(REDDIT_URL)
headers = HTTP::Headers{"User-Agent" => "web:invidious:v#{CURRENT_VERSION} (by github.com/iv-org/invidious)"}
# TODO: Use something like #479 for a static list of instances to use here
query = URI::Params.encode({q: "(url:3D#{id} OR url:#{id}) AND (site:invidio.us OR site:youtube.com OR site:youtu.be)"})
search_results = client.get("/search.json?#{query}", headers)
if search_results.status_code == 200
search_results = RedditThing.from_json(search_results.body)
# For videos that have more than one thread, choose the one with the highest score
threads = search_results.data.as(RedditListing).children
thread = threads.max_by?(&.data.as(RedditLink).score).try(&.data.as(RedditLink))
result = thread.try do |t|
body = client.get("/r/#{t.subreddit}/comments/#{t.id}.json?limit=100&sort=#{sort_by}", headers).body
Array(RedditThing).from_json(body)
end
result ||= [] of RedditThing
elsif search_results.status_code == 302
# Previously, if there was only one result then the API would redirect to that result.
# Now, it appears it will still return a listing so this section is likely unnecessary.
result = client.get(search_results.headers["Location"], headers).body
result = Array(RedditThing).from_json(result)
thread = result[0].data.as(RedditListing).children[0].data.as(RedditLink)
else
raise NotFoundException.new("Comments not found.")
end
client.close
comments = result[1]?.try(&.data.as(RedditListing).children)
comments ||= [] of RedditThing
return comments, thread
end
def template_youtube_comments(comments, locale, thin_mode, is_replies = false)
String.build do |html|
root = comments["comments"].as_a
root.each do |child|
if child["replies"]?
replies_count_text = translate_count(locale,
"comments_view_x_replies",
child["replies"]["replyCount"].as_i64 || 0,
NumberFormatting::Separator
)
replies_html = <<-END_HTML
<div id="replies" class="pure-g">
<div class="pure-u-1-24"></div>
<div class="pure-u-23-24">
<p>
<a href="javascript:void(0)" data-continuation="#{child["replies"]["continuation"]}"
data-onclick="get_youtube_replies" data-load-replies>#{replies_count_text}</a>
</p>
</div>
</div>
END_HTML
end
if !thin_mode
author_thumbnail = "/ggpht#{URI.parse(child["authorThumbnails"][-1]["url"].as_s).request_target}"
else
author_thumbnail = ""
end
author_name = HTML.escape(child["author"].as_s)
sponsor_icon = ""
if child["verified"]?.try &.as_bool && child["authorIsChannelOwner"]?.try &.as_bool
author_name += "&nbsp;<i class=\"icon ion ion-md-checkmark-circle\"></i>"
elsif child["verified"]?.try &.as_bool
author_name += "&nbsp;<i class=\"icon ion ion-md-checkmark\"></i>"
end
if child["isSponsor"]?.try &.as_bool
sponsor_icon = String.build do |str|
str << %(<img alt="" )
str << %(src="/ggpht) << URI.parse(child["sponsorIconUrl"].as_s).request_target << "\" "
str << %(title=") << translate(locale, "Channel Sponsor") << "\" "
str << %(width="16" height="16" />)
end
end
html << <<-END_HTML
<div class="pure-g" style="width:100%">
<div class="channel-profile pure-u-4-24 pure-u-md-2-24">
<img loading="lazy" style="margin-right:1em;margin-top:1em;width:90%" src="#{author_thumbnail}" alt="" />
</div>
<div class="pure-u-20-24 pure-u-md-22-24">
<p>
<b>
<a class="#{child["authorIsChannelOwner"] == true ? "channel-owner" : ""}" href="#{child["authorUrl"]}">#{author_name}</a>
</b>
#{sponsor_icon}
<p style="white-space:pre-wrap">#{child["contentHtml"]}</p>
END_HTML
if child["attachment"]?
attachment = child["attachment"]
case attachment["type"]
when "image"
attachment = attachment["imageThumbnails"][1]
html << <<-END_HTML
<div class="pure-g">
<div class="pure-u-1 pure-u-md-1-2">
<img loading="lazy" style="width:100%" src="/ggpht#{URI.parse(attachment["url"].as_s).request_target}" alt="" />
</div>
</div>
END_HTML
when "video"
if attachment["error"]?
html << <<-END_HTML
<div class="pure-g video-iframe-wrapper">
<p>#{attachment["error"]}</p>
</div>
END_HTML
else
html << <<-END_HTML
<div class="pure-g video-iframe-wrapper">
<iframe class="video-iframe" src='/embed/#{attachment["videoId"]?}?autoplay=0'></iframe>
</div>
END_HTML
end
else nil # Ignore
end
end
html << <<-END_HTML
<p>
<span title="#{Time.unix(child["published"].as_i64).to_s(translate(locale, "%A %B %-d, %Y"))}">#{translate(locale, "`x` ago", recode_date(Time.unix(child["published"].as_i64), locale))} #{child["isEdited"] == true ? translate(locale, "(edited)") : ""}</span>
|
END_HTML
if comments["videoId"]?
html << <<-END_HTML
<a href="https://www.youtube.com/watch?v=#{comments["videoId"]}&lc=#{child["commentId"]}" title="#{translate(locale, "YouTube comment permalink")}">[YT]</a>
|
END_HTML
elsif comments["authorId"]?
html << <<-END_HTML
<a href="https://www.youtube.com/channel/#{comments["authorId"]}/community?lb=#{child["commentId"]}" title="#{translate(locale, "YouTube comment permalink")}">[YT]</a>
|
END_HTML
end
html << <<-END_HTML
<i class="icon ion-ios-thumbs-up"></i> #{number_with_separator(child["likeCount"])}
</p>
END_HTML
if child["creatorHeart"]?
if !thin_mode
creator_thumbnail = "/ggpht#{URI.parse(child["creatorHeart"]["creatorThumbnail"].as_s).request_target}"
else
creator_thumbnail = ""
end
html << <<-END_HTML
<span class="creator-heart-container" title="#{translate(locale, "`x` marked it with a ❤", child["creatorHeart"]["creatorName"].as_s)}">
<div class="creator-heart">
<img loading="lazy" class="creator-heart-background-hearted" src="#{creator_thumbnail}" alt="" />
<div class="creator-heart-small-hearted">
<div class="icon ion-ios-heart creator-heart-small-container"></div>
</div>
</div>
</span>
END_HTML
end
html << <<-END_HTML
</p>
#{replies_html}
</div>
</div>
END_HTML
end
if comments["continuation"]?
html << <<-END_HTML
<div class="pure-g">
<div class="pure-u-1">
<p>
<a href="javascript:void(0)" data-continuation="#{comments["continuation"]}"
data-onclick="get_youtube_replies" data-load-more #{"data-load-replies" if is_replies}>#{translate(locale, "Load more")}</a>
</p>
</div>
</div>
END_HTML
end
end
end
def template_reddit_comments(root, locale)
String.build do |html|
root.each do |child|
if child.data.is_a?(RedditComment)
child = child.data.as(RedditComment)
body_html = HTML.unescape(child.body_html)
replies_html = ""
if child.replies.is_a?(RedditThing)
replies = child.replies.as(RedditThing)
replies_html = template_reddit_comments(replies.data.as(RedditListing).children, locale)
end
if child.depth > 0
html << <<-END_HTML
<div class="pure-g">
<div class="pure-u-1-24">
</div>
<div class="pure-u-23-24">
END_HTML
else
html << <<-END_HTML
<div class="pure-g">
<div class="pure-u-1">
END_HTML
end
html << <<-END_HTML
<p>
<a href="javascript:void(0)" data-onclick="toggle_parent">[ ]</a>
<b><a href="https://www.reddit.com/user/#{child.author}">#{child.author}</a></b>
#{translate_count(locale, "comments_points_count", child.score, NumberFormatting::Separator)}
<span title="#{child.created_utc.to_s(translate(locale, "%a %B %-d %T %Y UTC"))}">#{translate(locale, "`x` ago", recode_date(child.created_utc, locale))}</span>
<a href="https://www.reddit.com#{child.permalink}" title="#{translate(locale, "permalink")}">#{translate(locale, "permalink")}</a>
</p>
<div>
#{body_html}
#{replies_html}
</div>
</div>
</div>
END_HTML
end
end
end
end
def replace_links(html)
# Check if the document is empty
# Prevents edge-case bug with Reddit comments, see issue #3115
if html.nil? || html.empty?
return html
end
html = XML.parse_html(html)
html.xpath_nodes(%q(//a)).each do |anchor|
url = URI.parse(anchor["href"])
if url.host.nil? || url.host.not_nil!.ends_with?("youtube.com") || url.host.not_nil!.ends_with?("youtu.be")
if url.host.try &.ends_with? "youtu.be"
url = "/watch?v=#{url.path.lstrip('/')}#{url.query_params}"
else
if url.path == "/redirect"
params = HTTP::Params.parse(url.query.not_nil!)
anchor["href"] = params["q"]?
else
anchor["href"] = url.request_target
end
end
elsif url.to_s == "#"
begin
length_seconds = decode_length_seconds(anchor.content)
rescue ex
length_seconds = decode_time(anchor.content)
end
if length_seconds > 0
anchor["href"] = "javascript:void(0)"
anchor["onclick"] = "player.currentTime(#{length_seconds})"
else
anchor["href"] = url.request_target
end
end
end
html = html.xpath_node(%q(//body)).not_nil!
if node = html.xpath_node(%q(./p))
html = node
end
return html.to_xml(options: XML::SaveOptions::NO_DECL)
end
def fill_links(html, scheme, host)
# Check if the document is empty
# Prevents edge-case bug with Reddit comments, see issue #3115
if html.nil? || html.empty?
return html
end
html = XML.parse_html(html)
html.xpath_nodes("//a").each do |match|
url = URI.parse(match["href"])
# Reddit links don't have host
if !url.host && !match["href"].starts_with?("javascript") && !url.to_s.ends_with? "#"
url.scheme = scheme
url.host = host
match["href"] = url
end
end
if host == "www.youtube.com"
html = html.xpath_node(%q(//body/p)).not_nil!
end
return html.to_xml(options: XML::SaveOptions::NO_DECL)
end
def text_to_parsed_content(text : String) : JSON::Any
nodes = [] of JSON::Any
# For each line convert line to array of nodes
text.split('\n').each do |line|
# In first case line is just a simple node before
# check patterns inside line
# { 'text': line }
currentNodes = [] of JSON::Any
initialNode = {"text" => line}
currentNodes << (JSON.parse(initialNode.to_json))
# For each match with url pattern, get last node and preserve
# last node before create new node with url information
# { 'text': match, 'navigationEndpoint': { 'urlEndpoint' : 'url': match } }
line.scan(/https?:\/\/[^ ]*/).each do |urlMatch|
# Retrieve last node and update node without match
lastNode = currentNodes[currentNodes.size - 1].as_h
splittedLastNode = lastNode["text"].as_s.split(urlMatch[0])
lastNode["text"] = JSON.parse(splittedLastNode[0].to_json)
currentNodes[currentNodes.size - 1] = JSON.parse(lastNode.to_json)
# Create new node with match and navigation infos
currentNode = {"text" => urlMatch[0], "navigationEndpoint" => {"urlEndpoint" => {"url" => urlMatch[0]}}}
currentNodes << (JSON.parse(currentNode.to_json))
# If text remain after match create new simple node with text after match
afterNode = {"text" => splittedLastNode.size > 1 ? splittedLastNode[1] : ""}
currentNodes << (JSON.parse(afterNode.to_json))
end
# After processing of matches inside line
# Add \n at end of last node for preserve carriage return
lastNode = currentNodes[currentNodes.size - 1].as_h
lastNode["text"] = JSON.parse("#{currentNodes[currentNodes.size - 1]["text"]}\n".to_json)
currentNodes[currentNodes.size - 1] = JSON.parse(lastNode.to_json)
# Finally add final nodes to nodes returned
currentNodes.each do |node|
nodes << (node)
end
end
return JSON.parse({"runs" => nodes}.to_json)
end
def parse_content(content : JSON::Any, video_id : String? = "") : String
content["simpleText"]?.try &.as_s.rchop('\ufeff').try { |b| HTML.escape(b) }.to_s ||
content["runs"]?.try &.as_a.try { |r| content_to_comment_html(r, video_id).try &.to_s.gsub("\n", "<br>") } || ""
end
def content_to_comment_html(content, video_id : String? = "")
html_array = content.map do |run|
# Sometimes, there is an empty element.
# See: https://github.com/iv-org/invidious/issues/3096
next if run.as_h.empty?
text = HTML.escape(run["text"].as_s)
if navigationEndpoint = run.dig?("navigationEndpoint")
text = parse_link_endpoint(navigationEndpoint, text, video_id)
end
text = "<b>#{text}</b>" if run["bold"]?
text = "<s>#{text}</s>" if run["strikethrough"]?
text = "<i>#{text}</i>" if run["italics"]?
# check for custom emojis
if run["emoji"]?
if run["emoji"]["isCustomEmoji"]?.try &.as_bool
if emojiImage = run.dig?("emoji", "image")
emojiAlt = emojiImage.dig?("accessibility", "accessibilityData", "label").try &.as_s || text
emojiThumb = emojiImage["thumbnails"][0]
text = String.build do |str|
str << %(<img alt=") << emojiAlt << "\" "
str << %(src="/ggpht) << URI.parse(emojiThumb["url"].as_s).request_target << "\" "
str << %(title=") << emojiAlt << "\" "
str << %(width=") << emojiThumb["width"] << "\" "
str << %(height=") << emojiThumb["height"] << "\" "
str << %(class="channel-emoji" />)
end
else
# Hide deleted channel emoji
text = ""
end
end
end
text
end
return html_array.join("").delete('\ufeff')
end
def produce_comment_continuation(video_id, cursor = "", sort_by = "top")
object = {
"2:embedded" => {
"2:string" => video_id,
"25:varint" => 0_i64,
"28:varint" => 1_i64,
"36:embedded" => {
"5:varint" => -1_i64,
"8:varint" => 0_i64,
},
"40:embedded" => {
"1:varint" => 4_i64,
"3:string" => "https://www.youtube.com",
"4:string" => "",
},
},
"3:varint" => 6_i64,
"6:embedded" => {
"1:string" => cursor,
"4:embedded" => {
"4:string" => video_id,
"6:varint" => 0_i64,
},
"5:varint" => 20_i64,
},
}
case sort_by
when "top"
object["6:embedded"].as(Hash)["4:embedded"].as(Hash)["6:varint"] = 0_i64
when "new", "newest"
object["6:embedded"].as(Hash)["4:embedded"].as(Hash)["6:varint"] = 1_i64
else # top
object["6:embedded"].as(Hash)["4:embedded"].as(Hash)["6:varint"] = 0_i64
end
continuation = object.try { |i| Protodec::Any.cast_json(i) }
.try { |i| Protodec::Any.from_json(i) }
.try { |i| Base64.urlsafe_encode(i) }
.try { |i| URI.encode_www_form(i) }
return continuation
end

View file

@ -0,0 +1,89 @@
def text_to_parsed_content(text : String) : JSON::Any
nodes = [] of JSON::Any
# For each line convert line to array of nodes
text.split('\n').each do |line|
# In first case line is just a simple node before
# check patterns inside line
# { 'text': line }
currentNodes = [] of JSON::Any
initialNode = {"text" => line}
currentNodes << (JSON.parse(initialNode.to_json))
# For each match with url pattern, get last node and preserve
# last node before create new node with url information
# { 'text': match, 'navigationEndpoint': { 'urlEndpoint' : 'url': match } }
line.scan(/https?:\/\/[^ ]*/).each do |urlMatch|
# Retrieve last node and update node without match
lastNode = currentNodes[currentNodes.size - 1].as_h
splittedLastNode = lastNode["text"].as_s.split(urlMatch[0])
lastNode["text"] = JSON.parse(splittedLastNode[0].to_json)
currentNodes[currentNodes.size - 1] = JSON.parse(lastNode.to_json)
# Create new node with match and navigation infos
currentNode = {"text" => urlMatch[0], "navigationEndpoint" => {"urlEndpoint" => {"url" => urlMatch[0]}}}
currentNodes << (JSON.parse(currentNode.to_json))
# If text remain after match create new simple node with text after match
afterNode = {"text" => splittedLastNode.size > 1 ? splittedLastNode[1] : ""}
currentNodes << (JSON.parse(afterNode.to_json))
end
# After processing of matches inside line
# Add \n at end of last node for preserve carriage return
lastNode = currentNodes[currentNodes.size - 1].as_h
lastNode["text"] = JSON.parse("#{currentNodes[currentNodes.size - 1]["text"]}\n".to_json)
currentNodes[currentNodes.size - 1] = JSON.parse(lastNode.to_json)
# Finally add final nodes to nodes returned
currentNodes.each do |node|
nodes << (node)
end
end
return JSON.parse({"runs" => nodes}.to_json)
end
def parse_content(content : JSON::Any, video_id : String? = "") : String
content["simpleText"]?.try &.as_s.rchop('\ufeff').try { |b| HTML.escape(b) }.to_s ||
content["runs"]?.try &.as_a.try { |r| content_to_comment_html(r, video_id).try &.to_s.gsub("\n", "<br>") } || ""
end
def content_to_comment_html(content, video_id : String? = "")
html_array = content.map do |run|
# Sometimes, there is an empty element.
# See: https://github.com/iv-org/invidious/issues/3096
next if run.as_h.empty?
text = HTML.escape(run["text"].as_s)
if navigationEndpoint = run.dig?("navigationEndpoint")
text = parse_link_endpoint(navigationEndpoint, text, video_id)
end
text = "<b>#{text}</b>" if run["bold"]?
text = "<s>#{text}</s>" if run["strikethrough"]?
text = "<i>#{text}</i>" if run["italics"]?
# check for custom emojis
if run["emoji"]?
if run["emoji"]["isCustomEmoji"]?.try &.as_bool
if emojiImage = run.dig?("emoji", "image")
emojiAlt = emojiImage.dig?("accessibility", "accessibilityData", "label").try &.as_s || text
emojiThumb = emojiImage["thumbnails"][0]
text = String.build do |str|
str << %(<img alt=") << emojiAlt << "\" "
str << %(src="/ggpht) << URI.parse(emojiThumb["url"].as_s).request_target << "\" "
str << %(title=") << emojiAlt << "\" "
str << %(width=") << emojiThumb["width"] << "\" "
str << %(height=") << emojiThumb["height"] << "\" "
str << %(class="channel-emoji" />)
end
else
# Hide deleted channel emoji
text = ""
end
end
end
text
end
return html_array.join("").delete('\ufeff')
end

View file

@ -0,0 +1,76 @@
module Invidious::Comments
extend self
def replace_links(html)
# Check if the document is empty
# Prevents edge-case bug with Reddit comments, see issue #3115
if html.nil? || html.empty?
return html
end
html = XML.parse_html(html)
html.xpath_nodes(%q(//a)).each do |anchor|
url = URI.parse(anchor["href"])
if url.host.nil? || url.host.not_nil!.ends_with?("youtube.com") || url.host.not_nil!.ends_with?("youtu.be")
if url.host.try &.ends_with? "youtu.be"
url = "/watch?v=#{url.path.lstrip('/')}#{url.query_params}"
else
if url.path == "/redirect"
params = HTTP::Params.parse(url.query.not_nil!)
anchor["href"] = params["q"]?
else
anchor["href"] = url.request_target
end
end
elsif url.to_s == "#"
begin
length_seconds = decode_length_seconds(anchor.content)
rescue ex
length_seconds = decode_time(anchor.content)
end
if length_seconds > 0
anchor["href"] = "javascript:void(0)"
anchor["onclick"] = "player.currentTime(#{length_seconds})"
else
anchor["href"] = url.request_target
end
end
end
html = html.xpath_node(%q(//body)).not_nil!
if node = html.xpath_node(%q(./p))
html = node
end
return html.to_xml(options: XML::SaveOptions::NO_DECL)
end
def fill_links(html, scheme, host)
# Check if the document is empty
# Prevents edge-case bug with Reddit comments, see issue #3115
if html.nil? || html.empty?
return html
end
html = XML.parse_html(html)
html.xpath_nodes("//a").each do |match|
url = URI.parse(match["href"])
# Reddit links don't have host
if !url.host && !match["href"].starts_with?("javascript") && !url.to_s.ends_with? "#"
url.scheme = scheme
url.host = host
match["href"] = url
end
end
if host == "www.youtube.com"
html = html.xpath_node(%q(//body/p)).not_nil!
end
return html.to_xml(options: XML::SaveOptions::NO_DECL)
end
end

View file

@ -0,0 +1,41 @@
module Invidious::Comments
extend self
def fetch_reddit(id, sort_by = "confidence")
client = make_client(REDDIT_URL)
headers = HTTP::Headers{"User-Agent" => "web:invidious:v#{CURRENT_VERSION} (by github.com/iv-org/invidious)"}
# TODO: Use something like #479 for a static list of instances to use here
query = URI::Params.encode({q: "(url:3D#{id} OR url:#{id}) AND (site:invidio.us OR site:youtube.com OR site:youtu.be)"})
search_results = client.get("/search.json?#{query}", headers)
if search_results.status_code == 200
search_results = RedditThing.from_json(search_results.body)
# For videos that have more than one thread, choose the one with the highest score
threads = search_results.data.as(RedditListing).children
thread = threads.max_by?(&.data.as(RedditLink).score).try(&.data.as(RedditLink))
result = thread.try do |t|
body = client.get("/r/#{t.subreddit}/comments/#{t.id}.json?limit=100&sort=#{sort_by}", headers).body
Array(RedditThing).from_json(body)
end
result ||= [] of RedditThing
elsif search_results.status_code == 302
# Previously, if there was only one result then the API would redirect to that result.
# Now, it appears it will still return a listing so this section is likely unnecessary.
result = client.get(search_results.headers["Location"], headers).body
result = Array(RedditThing).from_json(result)
thread = result[0].data.as(RedditListing).children[0].data.as(RedditLink)
else
raise NotFoundException.new("Comments not found.")
end
client.close
comments = result[1]?.try(&.data.as(RedditListing).children)
comments ||= [] of RedditThing
return comments, thread
end
end

View file

@ -0,0 +1,57 @@
class RedditThing
include JSON::Serializable
property kind : String
property data : RedditComment | RedditLink | RedditMore | RedditListing
end
class RedditComment
include JSON::Serializable
property author : String
property body_html : String
property replies : RedditThing | String
property score : Int32
property depth : Int32
property permalink : String
@[JSON::Field(converter: RedditComment::TimeConverter)]
property created_utc : Time
module TimeConverter
def self.from_json(value : JSON::PullParser) : Time
Time.unix(value.read_float.to_i)
end
def self.to_json(value : Time, json : JSON::Builder)
json.number(value.to_unix)
end
end
end
struct RedditLink
include JSON::Serializable
property author : String
property score : Int32
property subreddit : String
property num_comments : Int32
property id : String
property permalink : String
property title : String
end
struct RedditMore
include JSON::Serializable
property children : Array(String)
property count : Int32
property depth : Int32
end
class RedditListing
include JSON::Serializable
property children : Array(RedditThing)
property modhash : String
end

View file

@ -0,0 +1,250 @@
module Invidious::Comments
extend self
def fetch_youtube(id, cursor, format, locale, thin_mode, region, sort_by = "top")
case cursor
when nil, ""
ctoken = Comments.produce_continuation(id, cursor: "", sort_by: sort_by)
when .starts_with? "ADSJ"
ctoken = Comments.produce_continuation(id, cursor: cursor, sort_by: sort_by)
else
ctoken = cursor
end
client_config = YoutubeAPI::ClientConfig.new(region: region)
response = YoutubeAPI.next(continuation: ctoken, client_config: client_config)
contents = nil
if on_response_received_endpoints = response["onResponseReceivedEndpoints"]?
header = nil
on_response_received_endpoints.as_a.each do |item|
if item["reloadContinuationItemsCommand"]?
case item["reloadContinuationItemsCommand"]["slot"]
when "RELOAD_CONTINUATION_SLOT_HEADER"
header = item["reloadContinuationItemsCommand"]["continuationItems"][0]
when "RELOAD_CONTINUATION_SLOT_BODY"
# continuationItems is nil when video has no comments
contents = item["reloadContinuationItemsCommand"]["continuationItems"]?
end
elsif item["appendContinuationItemsAction"]?
contents = item["appendContinuationItemsAction"]["continuationItems"]
end
end
elsif response["continuationContents"]?
response = response["continuationContents"]
if response["commentRepliesContinuation"]?
body = response["commentRepliesContinuation"]
else
body = response["itemSectionContinuation"]
end
contents = body["contents"]?
header = body["header"]?
else
raise NotFoundException.new("Comments not found.")
end
if !contents
if format == "json"
return {"comments" => [] of String}.to_json
else
return {"contentHtml" => "", "commentCount" => 0}.to_json
end
end
continuation_item_renderer = nil
contents.as_a.reject! do |item|
if item["continuationItemRenderer"]?
continuation_item_renderer = item["continuationItemRenderer"]
true
end
end
response = JSON.build do |json|
json.object do
if header
count_text = header["commentsHeaderRenderer"]["countText"]
comment_count = (count_text["simpleText"]? || count_text["runs"]?.try &.[0]?.try &.["text"]?)
.try &.as_s.gsub(/\D/, "").to_i? || 0
json.field "commentCount", comment_count
end
json.field "videoId", id
json.field "comments" do
json.array do
contents.as_a.each do |node|
json.object do
if node["commentThreadRenderer"]?
node = node["commentThreadRenderer"]
end
if node["replies"]?
node_replies = node["replies"]["commentRepliesRenderer"]
end
if node["comment"]?
node_comment = node["comment"]["commentRenderer"]
else
node_comment = node["commentRenderer"]
end
content_html = node_comment["contentText"]?.try { |t| parse_content(t, id) } || ""
author = node_comment["authorText"]?.try &.["simpleText"]? || ""
json.field "verified", (node_comment["authorCommentBadge"]? != nil)
json.field "author", author
json.field "authorThumbnails" do
json.array do
node_comment["authorThumbnail"]["thumbnails"].as_a.each do |thumbnail|
json.object do
json.field "url", thumbnail["url"]
json.field "width", thumbnail["width"]
json.field "height", thumbnail["height"]
end
end
end
end
if node_comment["authorEndpoint"]?
json.field "authorId", node_comment["authorEndpoint"]["browseEndpoint"]["browseId"]
json.field "authorUrl", node_comment["authorEndpoint"]["browseEndpoint"]["canonicalBaseUrl"]
else
json.field "authorId", ""
json.field "authorUrl", ""
end
published_text = node_comment["publishedTimeText"]["runs"][0]["text"].as_s
published = decode_date(published_text.rchop(" (edited)"))
if published_text.includes?(" (edited)")
json.field "isEdited", true
else
json.field "isEdited", false
end
json.field "content", html_to_content(content_html)
json.field "contentHtml", content_html
json.field "isPinned", (node_comment["pinnedCommentBadge"]? != nil)
json.field "isSponsor", (node_comment["sponsorCommentBadge"]? != nil)
if node_comment["sponsorCommentBadge"]?
# Sponsor icon thumbnails always have one object and there's only ever the url property in it
json.field "sponsorIconUrl", node_comment.dig("sponsorCommentBadge", "sponsorCommentBadgeRenderer", "customBadge", "thumbnails", 0, "url").to_s
end
json.field "published", published.to_unix
json.field "publishedText", translate(locale, "`x` ago", recode_date(published, locale))
comment_action_buttons_renderer = node_comment["actionButtons"]["commentActionButtonsRenderer"]
json.field "likeCount", comment_action_buttons_renderer["likeButton"]["toggleButtonRenderer"]["accessibilityData"]["accessibilityData"]["label"].as_s.scan(/\d/).map(&.[0]).join.to_i
json.field "commentId", node_comment["commentId"]
json.field "authorIsChannelOwner", node_comment["authorIsChannelOwner"]
if comment_action_buttons_renderer["creatorHeart"]?
hearth_data = comment_action_buttons_renderer["creatorHeart"]["creatorHeartRenderer"]["creatorThumbnail"]
json.field "creatorHeart" do
json.object do
json.field "creatorThumbnail", hearth_data["thumbnails"][-1]["url"]
json.field "creatorName", hearth_data["accessibility"]["accessibilityData"]["label"]
end
end
end
if node_replies && !response["commentRepliesContinuation"]?
if node_replies["continuations"]?
continuation = node_replies["continuations"]?.try &.as_a[0]["nextContinuationData"]["continuation"].as_s
elsif node_replies["contents"]?
continuation = node_replies["contents"]?.try &.as_a[0]["continuationItemRenderer"]["continuationEndpoint"]["continuationCommand"]["token"].as_s
end
continuation ||= ""
json.field "replies" do
json.object do
json.field "replyCount", node_comment["replyCount"]? || 1
json.field "continuation", continuation
end
end
end
end
end
end
end
if continuation_item_renderer
if continuation_item_renderer["continuationEndpoint"]?
continuation_endpoint = continuation_item_renderer["continuationEndpoint"]
elsif continuation_item_renderer["button"]?
continuation_endpoint = continuation_item_renderer["button"]["buttonRenderer"]["command"]
end
if continuation_endpoint
json.field "continuation", continuation_endpoint["continuationCommand"]["token"].as_s
end
end
end
end
if format == "html"
response = JSON.parse(response)
content_html = Frontend::Comments.template_youtube(response, locale, thin_mode)
response = JSON.build do |json|
json.object do
json.field "contentHtml", content_html
if response["commentCount"]?
json.field "commentCount", response["commentCount"]
else
json.field "commentCount", 0
end
end
end
end
return response
end
def produce_continuation(video_id, cursor = "", sort_by = "top")
object = {
"2:embedded" => {
"2:string" => video_id,
"25:varint" => 0_i64,
"28:varint" => 1_i64,
"36:embedded" => {
"5:varint" => -1_i64,
"8:varint" => 0_i64,
},
"40:embedded" => {
"1:varint" => 4_i64,
"3:string" => "https://www.youtube.com",
"4:string" => "",
},
},
"3:varint" => 6_i64,
"6:embedded" => {
"1:string" => cursor,
"4:embedded" => {
"4:string" => video_id,
"6:varint" => 0_i64,
},
"5:varint" => 20_i64,
},
}
case sort_by
when "top"
object["6:embedded"].as(Hash)["4:embedded"].as(Hash)["6:varint"] = 0_i64
when "new", "newest"
object["6:embedded"].as(Hash)["4:embedded"].as(Hash)["6:varint"] = 1_i64
else # top
object["6:embedded"].as(Hash)["4:embedded"].as(Hash)["6:varint"] = 0_i64
end
continuation = object.try { |i| Protodec::Any.cast_json(i) }
.try { |i| Protodec::Any.from_json(i) }
.try { |i| Base64.urlsafe_encode(i) }
.try { |i| URI.encode_www_form(i) }
return continuation
end
end

View file

@ -0,0 +1,50 @@
module Invidious::Frontend::Comments
extend self
def template_reddit(root, locale)
String.build do |html|
root.each do |child|
if child.data.is_a?(RedditComment)
child = child.data.as(RedditComment)
body_html = HTML.unescape(child.body_html)
replies_html = ""
if child.replies.is_a?(RedditThing)
replies = child.replies.as(RedditThing)
replies_html = self.template_reddit(replies.data.as(RedditListing).children, locale)
end
if child.depth > 0
html << <<-END_HTML
<div class="pure-g">
<div class="pure-u-1-24">
</div>
<div class="pure-u-23-24">
END_HTML
else
html << <<-END_HTML
<div class="pure-g">
<div class="pure-u-1">
END_HTML
end
html << <<-END_HTML
<p>
<a href="javascript:void(0)" data-onclick="toggle_parent">[ ]</a>
<b><a href="https://www.reddit.com/user/#{child.author}">#{child.author}</a></b>
#{translate_count(locale, "comments_points_count", child.score, NumberFormatting::Separator)}
<span title="#{child.created_utc.to_s(translate(locale, "%a %B %-d %T %Y UTC"))}">#{translate(locale, "`x` ago", recode_date(child.created_utc, locale))}</span>
<a href="https://www.reddit.com#{child.permalink}" title="#{translate(locale, "permalink")}">#{translate(locale, "permalink")}</a>
</p>
<div>
#{body_html}
#{replies_html}
</div>
</div>
</div>
END_HTML
end
end
end
end
end

View file

@ -0,0 +1,160 @@
module Invidious::Frontend::Comments
extend self
def template_youtube(comments, locale, thin_mode, is_replies = false)
String.build do |html|
root = comments["comments"].as_a
root.each do |child|
if child["replies"]?
replies_count_text = translate_count(locale,
"comments_view_x_replies",
child["replies"]["replyCount"].as_i64 || 0,
NumberFormatting::Separator
)
replies_html = <<-END_HTML
<div id="replies" class="pure-g">
<div class="pure-u-1-24"></div>
<div class="pure-u-23-24">
<p>
<a href="javascript:void(0)" data-continuation="#{child["replies"]["continuation"]}"
data-onclick="get_youtube_replies" data-load-replies>#{replies_count_text}</a>
</p>
</div>
</div>
END_HTML
end
if !thin_mode
author_thumbnail = "/ggpht#{URI.parse(child["authorThumbnails"][-1]["url"].as_s).request_target}"
else
author_thumbnail = ""
end
author_name = HTML.escape(child["author"].as_s)
sponsor_icon = ""
if child["verified"]?.try &.as_bool && child["authorIsChannelOwner"]?.try &.as_bool
author_name += "&nbsp;<i class=\"icon ion ion-md-checkmark-circle\"></i>"
elsif child["verified"]?.try &.as_bool
author_name += "&nbsp;<i class=\"icon ion ion-md-checkmark\"></i>"
end
if child["isSponsor"]?.try &.as_bool
sponsor_icon = String.build do |str|
str << %(<img alt="" )
str << %(src="/ggpht) << URI.parse(child["sponsorIconUrl"].as_s).request_target << "\" "
str << %(title=") << translate(locale, "Channel Sponsor") << "\" "
str << %(width="16" height="16" />)
end
end
html << <<-END_HTML
<div class="pure-g" style="width:100%">
<div class="channel-profile pure-u-4-24 pure-u-md-2-24">
<img loading="lazy" style="margin-right:1em;margin-top:1em;width:90%" src="#{author_thumbnail}" alt="" />
</div>
<div class="pure-u-20-24 pure-u-md-22-24">
<p>
<b>
<a class="#{child["authorIsChannelOwner"] == true ? "channel-owner" : ""}" href="#{child["authorUrl"]}">#{author_name}</a>
</b>
#{sponsor_icon}
<p style="white-space:pre-wrap">#{child["contentHtml"]}</p>
END_HTML
if child["attachment"]?
attachment = child["attachment"]
case attachment["type"]
when "image"
attachment = attachment["imageThumbnails"][1]
html << <<-END_HTML
<div class="pure-g">
<div class="pure-u-1 pure-u-md-1-2">
<img loading="lazy" style="width:100%" src="/ggpht#{URI.parse(attachment["url"].as_s).request_target}" alt="" />
</div>
</div>
END_HTML
when "video"
if attachment["error"]?
html << <<-END_HTML
<div class="pure-g video-iframe-wrapper">
<p>#{attachment["error"]}</p>
</div>
END_HTML
else
html << <<-END_HTML
<div class="pure-g video-iframe-wrapper">
<iframe class="video-iframe" src='/embed/#{attachment["videoId"]?}?autoplay=0'></iframe>
</div>
END_HTML
end
else nil # Ignore
end
end
html << <<-END_HTML
<p>
<span title="#{Time.unix(child["published"].as_i64).to_s(translate(locale, "%A %B %-d, %Y"))}">#{translate(locale, "`x` ago", recode_date(Time.unix(child["published"].as_i64), locale))} #{child["isEdited"] == true ? translate(locale, "(edited)") : ""}</span>
|
END_HTML
if comments["videoId"]?
html << <<-END_HTML
<a href="https://www.youtube.com/watch?v=#{comments["videoId"]}&lc=#{child["commentId"]}" title="#{translate(locale, "YouTube comment permalink")}">[YT]</a>
|
END_HTML
elsif comments["authorId"]?
html << <<-END_HTML
<a href="https://www.youtube.com/channel/#{comments["authorId"]}/community?lb=#{child["commentId"]}" title="#{translate(locale, "YouTube comment permalink")}">[YT]</a>
|
END_HTML
end
html << <<-END_HTML
<i class="icon ion-ios-thumbs-up"></i> #{number_with_separator(child["likeCount"])}
END_HTML
if child["creatorHeart"]?
if !thin_mode
creator_thumbnail = "/ggpht#{URI.parse(child["creatorHeart"]["creatorThumbnail"].as_s).request_target}"
else
creator_thumbnail = ""
end
html << <<-END_HTML
&nbsp;
<span class="creator-heart-container" title="#{translate(locale, "`x` marked it with a ❤", child["creatorHeart"]["creatorName"].as_s)}">
<span class="creator-heart">
<img loading="lazy" class="creator-heart-background-hearted" src="#{creator_thumbnail}" alt="" />
<span class="creator-heart-small-hearted">
<span class="icon ion-ios-heart creator-heart-small-container"></span>
</span>
</span>
</span>
END_HTML
end
html << <<-END_HTML
</p>
#{replies_html}
</div>
</div>
END_HTML
end
if comments["continuation"]?
html << <<-END_HTML
<div class="pure-g">
<div class="pure-u-1">
<p>
<a href="javascript:void(0)" data-continuation="#{comments["continuation"]}"
data-onclick="get_youtube_replies" data-load-more #{"data-load-replies" if is_replies}>#{translate(locale, "Load more")}</a>
</p>
</div>
</div>
END_HTML
end
end
end
end

View file

@ -55,4 +55,32 @@ module Invidious::Routes::API::V1::Search
return error_json(500, ex)
end
end
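# GET /api/v1/hashtag/:hashtag
# Optional query parameters: "page" (defaults to 1) and "region".
# Illustrative request: GET /api/v1/hashtag/music?page=2
# Responds with a JSON object of the form {"results": [...]}, where each
# result is serialized with the item's own to_json(locale, json).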
def self.hashtag(env)
hashtag = env.params.url["hashtag"]
page = env.params.query["page"]?.try &.to_i? || 1
locale = env.get("preferences").as(Preferences).locale
region = env.params.query["region"]?
env.response.content_type = "application/json"
begin
results = Invidious::Hashtag.fetch(hashtag, page, region)
rescue ex
return error_json(400, ex)
end
JSON.build do |json|
json.object do
json.field "results" do
json.array do
results.each do |item|
item.to_json(locale, json)
end
end
end
end
end
end
end

View file

@ -333,7 +333,7 @@ module Invidious::Routes::API::V1::Videos
sort_by ||= "top"
begin
comments = fetch_youtube_comments(id, continuation, format, locale, thin_mode, region, sort_by: sort_by)
comments = Comments.fetch_youtube(id, continuation, format, locale, thin_mode, region, sort_by: sort_by)
rescue ex : NotFoundException
return error_json(404, ex)
rescue ex
@ -345,7 +345,7 @@ module Invidious::Routes::API::V1::Videos
sort_by ||= "confidence"
begin
comments, reddit_thread = fetch_reddit_comments(id, sort_by: sort_by)
comments, reddit_thread = Comments.fetch_reddit(id, sort_by: sort_by)
rescue ex
comments = nil
reddit_thread = nil
@ -361,9 +361,9 @@ module Invidious::Routes::API::V1::Videos
return reddit_thread.to_json
else
content_html = template_reddit_comments(comments, locale)
content_html = fill_links(content_html, "https", "www.reddit.com")
content_html = replace_links(content_html)
content_html = Frontend::Comments.template_reddit(comments, locale)
content_html = Comments.fill_links(content_html, "https", "www.reddit.com")
content_html = Comments.replace_links(content_html)
response = {
"title" => reddit_thread.title,
"permalink" => reddit_thread.permalink,

View file

@ -278,6 +278,7 @@ module Invidious::Routes::Channels
return error_template(500, ex)
end
env.set "search", "channel:#{ucid} "
return {locale, user, subscriptions, continuation, ucid, channel}
end
end

View file

@ -310,6 +310,15 @@ module Invidious::Routes::PreferencesRoute
response: error_template(415, "Invalid subscription file uploaded")
)
end
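# Playlist uploads from the "import_youtube_pl" file input on the data
# control page (a YouTube playlist .csv export, e.g. from Google Takeout).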
when "import_youtube_pl"
filename = part.filename || ""
success = Invidious::User::Import.from_youtube_pl(user, body, filename, type)
if !success
haltf(env, status_code: 415,
response: error_template(415, "Invalid playlist file uploaded")
)
end
when "import_freetube"
Invidious::User::Import.from_freetube(user, body)
when "import_newpipe_subscriptions"

View file

@ -65,7 +65,11 @@ module Invidious::Routes::Search
redirect_url = Invidious::Frontend::Misc.redirect_url(env)
env.set "search", query.text
if query.type == Invidious::Search::Query::Type::Channel
env.set "search", "channel:#{query.channel} #{query.text}"
else
env.set "search", query.text
end
templated "search"
end
end

View file

@ -95,31 +95,31 @@ module Invidious::Routes::Watch
if source == "youtube"
begin
comment_html = JSON.parse(fetch_youtube_comments(id, nil, "html", locale, preferences.thin_mode, region))["contentHtml"]
comment_html = JSON.parse(Comments.fetch_youtube(id, nil, "html", locale, preferences.thin_mode, region))["contentHtml"]
rescue ex
if preferences.comments[1] == "reddit"
comments, reddit_thread = fetch_reddit_comments(id)
comment_html = template_reddit_comments(comments, locale)
comments, reddit_thread = Comments.fetch_reddit(id)
comment_html = Frontend::Comments.template_reddit(comments, locale)
comment_html = fill_links(comment_html, "https", "www.reddit.com")
comment_html = replace_links(comment_html)
comment_html = Comments.fill_links(comment_html, "https", "www.reddit.com")
comment_html = Comments.replace_links(comment_html)
end
end
elsif source == "reddit"
begin
comments, reddit_thread = fetch_reddit_comments(id)
comment_html = template_reddit_comments(comments, locale)
comments, reddit_thread = Comments.fetch_reddit(id)
comment_html = Frontend::Comments.template_reddit(comments, locale)
comment_html = fill_links(comment_html, "https", "www.reddit.com")
comment_html = replace_links(comment_html)
comment_html = Comments.fill_links(comment_html, "https", "www.reddit.com")
comment_html = Comments.replace_links(comment_html)
rescue ex
if preferences.comments[1] == "youtube"
comment_html = JSON.parse(fetch_youtube_comments(id, nil, "html", locale, preferences.thin_mode, region))["contentHtml"]
comment_html = JSON.parse(Comments.fetch_youtube(id, nil, "html", locale, preferences.thin_mode, region))["contentHtml"]
end
end
end
else
comment_html = JSON.parse(fetch_youtube_comments(id, nil, "html", locale, preferences.thin_mode, region))["contentHtml"]
comment_html = JSON.parse(Comments.fetch_youtube(id, nil, "html", locale, preferences.thin_mode, region))["contentHtml"]
end
comment_html ||= ""

View file

@ -243,6 +243,7 @@ module Invidious::Routing
# Search
get "/api/v1/search", {{namespace}}::Search, :search
get "/api/v1/search/suggestions", {{namespace}}::Search, :search_suggestions
get "/api/v1/hashtag/:hashtag", {{namespace}}::Search, :hashtag
# Authenticated

View file

@ -30,6 +30,60 @@ struct Invidious::User
return subscriptions
end
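# Creates an Invidious playlist from a YouTube playlist .csv export (as
# produced by Google Takeout). The layout assumed by the column indices used
# below is two CSV blocks separated by a blank line: a metadata row whose
# 5th-7th columns are the playlist title, description and visibility, then a
# block listing one video ID per row. The header names in this sketch are
# illustrative; only the column positions matter to the parser:
#
#   Playlist Id,Channel Id,Time Created,Time Updated,Title,Description,Visibility
#   PLxxxxxxxxxx,UCxxxxxxxxxx,2023-01-01T00:00:00+00:00,2023-01-02T00:00:00+00:00,My mix,Some description,Public
#
#   Video Id,Time Added
#   dQw4w9WgXcQ,2023-01-03T00:00:00+00:00
#
# Any video that can no longer be fetched is skipped.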
def parse_playlist_export_csv(user : User, raw_input : String)
# Split the input into head and body content
raw_head, raw_body = raw_input.split("\n\n", limit: 2, remove_empty: true)
# Create the playlist from the head content
csv_head = CSV.new(raw_head, headers: true)
csv_head.next
title = csv_head[4]
description = csv_head[5]
visibility = csv_head[6]
if visibility.compare("Public", case_insensitive: true) == 0
privacy = PlaylistPrivacy::Public
else
privacy = PlaylistPrivacy::Private
end
playlist = create_playlist(title, privacy, user)
Invidious::Database::Playlists.update_description(playlist.id, description)
# Add each video to the playlist from the body content
csv_body = CSV.new(raw_body, headers: true)
csv_body.each do |row|
video_id = row[0]
if playlist
next if !video_id
next if video_id == "Video Id"
begin
video = get_video(video_id)
rescue ex
next
end
playlist_video = PlaylistVideo.new({
title: video.title,
id: video.id,
author: video.author,
ucid: video.ucid,
length_seconds: video.length_seconds,
published: video.published,
plid: playlist.id,
live_now: video.live_now,
index: Random::Secure.rand(0_i64..Int64::MAX),
})
Invidious::Database::PlaylistVideos.insert(playlist_video)
Invidious::Database::Playlists.update_video_added(playlist.id, playlist_video.index)
end
end
return playlist
end
# -------------------
# Invidious
# -------------------
@ -149,6 +203,21 @@ struct Invidious::User
return true
end
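# Entry point for the "import_youtube_pl" upload: the file is accepted when
# its extension is .csv or its MIME type is text/csv, and the return value
# reports whether a playlist was created.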
def from_youtube_pl(user : User, body : String, filename : String, type : String) : Bool
extension = filename.split(".").last
if extension == "csv" || type == "text/csv"
playlist = parse_playlist_export_csv(user, body)
if playlist
return true
else
return false
end
else
return false
end
end
# -------------------
# Freetube
# -------------------

View file

@ -27,7 +27,7 @@
</div>
<% else %>
<div class="h-box pure-g" id="comments">
<%= template_youtube_comments(items.not_nil!, locale, thin_mode) %>
<%= IV::Frontend::Comments.template_youtube(items.not_nil!, locale, thin_mode) %>
</div>
<% end %>

View file

@ -5,12 +5,19 @@
<%= rendered "components/feed_menu" %>
<div class="pure-g h-box">
<div class="pure-u-2-3">
<div class="pure-u-1-3">
<h3><%= translate(locale, "user_created_playlists", %(<span id="count">#{items_created.size}</span>)) %></h3>
</div>
<div class="pure-u-1-3" style="text-align:right">
<h3>
<a href="/create_playlist?referer=<%= URI.encode_www_form(referer) %>"><%= translate(locale, "Create playlist") %></a>
<div class="pure-u-1-3">
<h3 style="text-align:center">
<a href="/create_playlist?referer=<%= URI.encode_www_form("/feed/playlists") %>"><%= translate(locale, "Create playlist") %></a>
</h3>
</div>
<div class="pure-u-1-3">
<h3 style="text-align:right">
<a href="/data_control?referer=<%= URI.encode_www_form("/feed/playlists") %>">
<%= translate(locale, "Import/export") %>
</a>
</h3>
</div>
</div>

View file

@ -8,7 +8,7 @@
<legend><%= translate(locale, "Import") %></legend>
<div class="pure-control-group">
<label for="import_youtube"><%= translate(locale, "Import Invidious data") %></label>
<label for="import_invidious"><%= translate(locale, "Import Invidious data") %></label>
<input type="file" id="import_invidious" name="import_invidious">
</div>
@ -21,6 +21,11 @@
<input type="file" id="import_youtube" name="import_youtube">
</div>
<div class="pure-control-group">
<label for="import_youtube_pl"><%= translate(locale, "Import YouTube playlist (.csv)") %></label>
<input type="file" id="import_youtube_pl" name="import_youtube_pl">
</div>
<div class="pure-control-group">
<label for="import_freetube"><%= translate(locale, "Import FreeTube subscriptions (.db)") %></label>
<input type="file" id="import_freetube" name="import_freetube">