Fix bug and some re-structure

Author: Mauricio Colli
Date: 2018-02-28 21:02:43 -03:00
parent 5e34556ac3
commit 11216f361f
18 changed files with 134 additions and 134 deletions

ListExtractor.java

@@ -11,52 +11,46 @@ import java.util.List;
  */
 public abstract class ListExtractor extends Extractor {
-    /**
-     * Get a new ListExtractor with the given nextPageUrl set.
-     */
     public ListExtractor(StreamingService service, String url) {
         super(service, url);
     }

     @Nonnull
-    public abstract InfoItemsCollector getInfoItems() throws IOException, ExtractionException;
+    public abstract InfoItemsCollector<? extends InfoItem, ?> getInfoItems() throws IOException, ExtractionException;

     public abstract String getNextPageUrl() throws IOException, ExtractionException;

-    public abstract InfoItemPage getPage(final String nextPageUrl) throws IOException, ExtractionException;
+    public abstract InfoItemPage<? extends InfoItem> getPage(final String nextPageUrl) throws IOException, ExtractionException;

     public boolean hasNextPage() throws IOException, ExtractionException {
-        return getNextPageUrl() != null && !getNextPageUrl().isEmpty();
+        final String nextPageUrl = getNextPageUrl();
+        return nextPageUrl != null && !nextPageUrl.isEmpty();
     }

     /*//////////////////////////////////////////////////////////////////////////
     // Inner
     //////////////////////////////////////////////////////////////////////////*/

-    public static class InfoItemPage {
+    public static class InfoItemPage<T extends InfoItem> {
         /**
          * The current list of items to this result
          */
-        public final List<InfoItem> infoItemList;
+        private final List<T> itemsList;

         /**
          * Next url to fetch more items
          */
-        public final String nextPageUrl;
+        private final String nextPageUrl;

         /**
          * Errors that happened during the extraction
          */
-        public final List<Throwable> errors;
+        private final List<Throwable> errors;

-        public InfoItemPage(InfoItemsCollector collector, String nextPageUrl) {
+        public InfoItemPage(InfoItemsCollector<T, ?> collector, String nextPageUrl) {
             this(collector.getItemList(), nextPageUrl, collector.getErrors());
         }

-        public InfoItemPage(List<InfoItem> infoItemList, String nextPageUrl, List<Throwable> errors) {
-            this.infoItemList = infoItemList;
+        public InfoItemPage(List<T> itemsList, String nextPageUrl, List<Throwable> errors) {
+            this.itemsList = itemsList;
             this.nextPageUrl = nextPageUrl;
             this.errors = errors;
         }
@@ -65,8 +59,8 @@ public abstract class ListExtractor extends Extractor {
             return nextPageUrl != null && !nextPageUrl.isEmpty();
         }

-        public List<InfoItem> getItemsList() {
-            return infoItemList;
+        public List<T> getItemsList() {
+            return itemsList;
         }

         public String getNextPageUrl() {
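For orientation, here is a minimal consumer-side sketch of the reworked paging API (not part of this commit; the PagingSketch class and collectPages method are illustrative names, and it assumes NewPipe.init(...) was already called with a working Downloader). With InfoItemPage now generic over the item type, a caller can walk a channel's pages and collect typed StreamInfoItems without casting:

import org.schabi.newpipe.extractor.ListExtractor.InfoItemPage;
import org.schabi.newpipe.extractor.channel.ChannelExtractor;
import org.schabi.newpipe.extractor.stream.StreamInfoItem;

import java.util.ArrayList;
import java.util.List;

public class PagingSketch {
    // Collects up to maxPages pages of items from an already constructed extractor.
    static List<StreamInfoItem> collectPages(ChannelExtractor extractor, int maxPages) throws Exception {
        extractor.fetchPage();  // download and parse the channel's first page
        List<StreamInfoItem> items = new ArrayList<>(extractor.getInfoItems().getItemList());

        String nextPageUrl = extractor.getNextPageUrl();
        for (int i = 1; i < maxPages && nextPageUrl != null && !nextPageUrl.isEmpty(); i++) {
            InfoItemPage<StreamInfoItem> page = extractor.getPage(nextPageUrl);  // typed page, no casts
            items.addAll(page.getItemsList());
            nextPageUrl = page.getNextPageUrl();  // null or empty once the channel is exhausted
        }
        return items;
    }
}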

ChannelExtractor.java

@@ -1,9 +1,12 @@
 package org.schabi.newpipe.extractor.channel;

 import edu.umd.cs.findbugs.annotations.NonNull;
-import org.schabi.newpipe.extractor.*;
+import org.schabi.newpipe.extractor.ListExtractor;
+import org.schabi.newpipe.extractor.StreamingService;
+import org.schabi.newpipe.extractor.UrlIdHandler;
 import org.schabi.newpipe.extractor.exceptions.ExtractionException;
 import org.schabi.newpipe.extractor.exceptions.ParsingException;
+import org.schabi.newpipe.extractor.stream.StreamInfoItem;
 import org.schabi.newpipe.extractor.stream.StreamInfoItemsCollector;

 import javax.annotation.Nonnull;
@@ -43,12 +46,10 @@ public abstract class ChannelExtractor extends ListExtractor {
     @NonNull
     @Override
-    public InfoItemsCollector getInfoItems()
-            throws IOException, ExtractionException {
-        return getStreams();
-    }
-
-    public abstract StreamInfoItemsCollector getStreams() throws IOException, ExtractionException;
+    public abstract StreamInfoItemsCollector getInfoItems() throws IOException, ExtractionException;
+
+    @Override
+    public abstract InfoItemPage<StreamInfoItem> getPage(String nextPageUrl) throws IOException, ExtractionException;

     public abstract String getAvatarUrl() throws ParsingException;
     public abstract String getBannerUrl() throws ParsingException;
     public abstract String getFeedUrl() throws ParsingException;

ChannelInfo.java

@@ -5,7 +5,7 @@ import org.schabi.newpipe.extractor.ListInfo;
 import org.schabi.newpipe.extractor.NewPipe;
 import org.schabi.newpipe.extractor.StreamingService;
 import org.schabi.newpipe.extractor.exceptions.ExtractionException;
-import org.schabi.newpipe.extractor.exceptions.ParsingException;
+import org.schabi.newpipe.extractor.stream.StreamInfoItem;
 import org.schabi.newpipe.extractor.utils.ExtractorHelper;

 import java.io.IOException;
@@ -37,7 +37,7 @@ public class ChannelInfo extends ListInfo {
     }

-    public static InfoItemPage getMoreItems(StreamingService service, String url, String pageUrl)
+    public static InfoItemPage<StreamInfoItem> getMoreItems(StreamingService service, String url, String pageUrl)
             throws IOException, ExtractionException {
         return service.getChannelExtractor(url).getPage(pageUrl);
     }

KioskExtractor.java

@@ -20,10 +20,13 @@ package org.schabi.newpipe.extractor.kiosk;
  * along with NewPipe. If not, see <http://www.gnu.org/licenses/>.
  */

+import edu.umd.cs.findbugs.annotations.NonNull;
 import org.schabi.newpipe.extractor.ListExtractor;
 import org.schabi.newpipe.extractor.StreamingService;
 import org.schabi.newpipe.extractor.exceptions.ExtractionException;
 import org.schabi.newpipe.extractor.exceptions.ParsingException;
+import org.schabi.newpipe.extractor.stream.StreamInfoItem;
+import org.schabi.newpipe.extractor.stream.StreamInfoItemsCollector;

 import javax.annotation.Nonnull;
 import java.io.IOException;
@@ -40,6 +43,12 @@ public abstract class KioskExtractor extends ListExtractor {
         this.id = kioskId;
     }

+    @NonNull
+    @Override
+    public abstract StreamInfoItemsCollector getInfoItems() throws IOException, ExtractionException;
+
+    @Override
+    public abstract InfoItemPage<StreamInfoItem> getPage(String nextPageUrl) throws IOException, ExtractionException;
+
     /**
      * For certain Websites the content of a kiosk will be different depending
      * on the country you want to poen the website in. Therefore you should

KioskInfo.java

@@ -25,6 +25,7 @@ import org.schabi.newpipe.extractor.ListInfo;
 import org.schabi.newpipe.extractor.NewPipe;
 import org.schabi.newpipe.extractor.StreamingService;
 import org.schabi.newpipe.extractor.exceptions.ExtractionException;
+import org.schabi.newpipe.extractor.stream.StreamInfoItem;
 import org.schabi.newpipe.extractor.utils.ExtractorHelper;

 import java.io.IOException;
@@ -35,7 +36,7 @@ public class KioskInfo extends ListInfo {
         super(serviceId, id, url, name);
     }

-    public static ListExtractor.InfoItemPage getMoreItems(StreamingService service,
+    public static ListExtractor.InfoItemPage<StreamInfoItem> getMoreItems(StreamingService service,
                                                            String url,
                                                            String pageUrl,
                                                            String contentCountry) throws IOException, ExtractionException {

PlaylistExtractor.java

@@ -1,12 +1,12 @@
 package org.schabi.newpipe.extractor.playlist;

 import edu.umd.cs.findbugs.annotations.NonNull;
-import org.schabi.newpipe.extractor.InfoItemsCollector;
 import org.schabi.newpipe.extractor.ListExtractor;
 import org.schabi.newpipe.extractor.StreamingService;
 import org.schabi.newpipe.extractor.UrlIdHandler;
 import org.schabi.newpipe.extractor.exceptions.ExtractionException;
 import org.schabi.newpipe.extractor.exceptions.ParsingException;
+import org.schabi.newpipe.extractor.stream.StreamInfoItem;
 import org.schabi.newpipe.extractor.stream.StreamInfoItemsCollector;

 import javax.annotation.Nonnull;
@@ -26,12 +26,10 @@ public abstract class PlaylistExtractor extends ListExtractor {
     @NonNull
     @Override
-    public InfoItemsCollector getInfoItems()
-            throws IOException, ExtractionException {
-        return getStreams();
-    }
-
-    public abstract StreamInfoItemsCollector getStreams() throws IOException, ExtractionException;
+    public abstract StreamInfoItemsCollector getInfoItems() throws IOException, ExtractionException;
+
+    @Override
+    public abstract InfoItemPage<StreamInfoItem> getPage(String nextPageUrl) throws IOException, ExtractionException;

     public abstract String getThumbnailUrl() throws ParsingException;
     public abstract String getBannerUrl() throws ParsingException;

PlaylistInfo.java

@@ -5,6 +5,7 @@ import org.schabi.newpipe.extractor.ListInfo;
 import org.schabi.newpipe.extractor.NewPipe;
 import org.schabi.newpipe.extractor.StreamingService;
 import org.schabi.newpipe.extractor.exceptions.ExtractionException;
+import org.schabi.newpipe.extractor.stream.StreamInfoItem;

 import java.io.IOException;
@@ -16,7 +17,7 @@ public class PlaylistInfo extends ListInfo {
         super(serviceId, id, url, name);
     }

-    public static InfoItemPage getMoreItems(StreamingService service, String url, String pageUrl) throws IOException, ExtractionException {
+    public static InfoItemPage<StreamInfoItem> getMoreItems(StreamingService service, String url, String pageUrl) throws IOException, ExtractionException {
         return service.getPlaylistExtractor(url).getPage(pageUrl);
     }

SoundcloudChannelExtractor.java

@@ -8,6 +8,7 @@ import org.schabi.newpipe.extractor.StreamingService;
 import org.schabi.newpipe.extractor.channel.ChannelExtractor;
 import org.schabi.newpipe.extractor.exceptions.ExtractionException;
 import org.schabi.newpipe.extractor.exceptions.ParsingException;
+import org.schabi.newpipe.extractor.stream.StreamInfoItem;
 import org.schabi.newpipe.extractor.stream.StreamInfoItemsCollector;

 import javax.annotation.Nonnull;
@@ -89,7 +90,7 @@ public class SoundcloudChannelExtractor extends ChannelExtractor {
     @Nonnull
     @Override
-    public StreamInfoItemsCollector getStreams() throws ExtractionException {
+    public StreamInfoItemsCollector getInfoItems() throws ExtractionException {
         if(streamInfoItemsCollector == null) {
             computeNextPageAndGetStreams();
         }
@@ -120,14 +121,14 @@ public class SoundcloudChannelExtractor extends ChannelExtractor {
     }

     @Override
-    public InfoItemPage getPage(final String pageUrl) throws IOException, ExtractionException {
-        if (!hasNextPage()) {
-            throw new ExtractionException("Channel doesn't have more streams");
+    public InfoItemPage<StreamInfoItem> getPage(final String pageUrl) throws IOException, ExtractionException {
+        if (pageUrl == null || pageUrl.isEmpty()) {
+            throw new ExtractionException(new IllegalArgumentException("Page url is empty or null"));
         }

         StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId());
         String nextPageUrl = SoundcloudParsingHelper.getStreamsFromApiMinItems(15, collector, pageUrl);

-        return new InfoItemPage(collector, nextPageUrl);
+        return new InfoItemPage<>(collector, nextPageUrl);
     }
 }

SoundcloudChartsExtractor.java

@@ -1,19 +1,17 @@
 package org.schabi.newpipe.extractor.services.soundcloud;

-import java.io.IOException;
-import java.util.Arrays;
-import java.util.List;
-import org.schabi.newpipe.extractor.Collector;
 import org.schabi.newpipe.extractor.Downloader;
 import org.schabi.newpipe.extractor.StreamingService;
 import org.schabi.newpipe.extractor.UrlIdHandler;
 import org.schabi.newpipe.extractor.exceptions.ExtractionException;
-import org.schabi.newpipe.extractor.exceptions.ParsingException;
 import org.schabi.newpipe.extractor.kiosk.KioskExtractor;
+import org.schabi.newpipe.extractor.stream.StreamInfoItem;
 import org.schabi.newpipe.extractor.stream.StreamInfoItemsCollector;

 import javax.annotation.Nonnull;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;

 public class SoundcloudChartsExtractor extends KioskExtractor {
     private String url;
@@ -44,15 +42,15 @@ public class SoundcloudChartsExtractor extends KioskExtractor {
     }

     @Override
-    public InfoItemPage getPage(String pageUrl) throws IOException, ExtractionException {
-        if (!hasNextPage()) {
-            throw new ExtractionException("Chart doesn't have more streams");
+    public InfoItemPage<StreamInfoItem> getPage(String pageUrl) throws IOException, ExtractionException {
+        if (pageUrl == null || pageUrl.isEmpty()) {
+            throw new ExtractionException(new IllegalArgumentException("Page url is empty or null"));
         }

         StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId());
         String nextPageUrl = SoundcloudParsingHelper.getStreamsFromApi(collector, pageUrl, true);

-        return new InfoItemPage(collector, nextPageUrl);
+        return new InfoItemPage<>(collector, nextPageUrl);
     }

SoundcloudPlaylistExtractor.java

@@ -8,6 +8,7 @@ import org.schabi.newpipe.extractor.StreamingService;
 import org.schabi.newpipe.extractor.exceptions.ExtractionException;
 import org.schabi.newpipe.extractor.exceptions.ParsingException;
 import org.schabi.newpipe.extractor.playlist.PlaylistExtractor;
+import org.schabi.newpipe.extractor.stream.StreamInfoItem;
 import org.schabi.newpipe.extractor.stream.StreamInfoItemsCollector;

 import javax.annotation.Nonnull;
@@ -91,7 +92,7 @@ public class SoundcloudPlaylistExtractor extends PlaylistExtractor {
     @Nonnull
     @Override
-    public StreamInfoItemsCollector getStreams() throws IOException, ExtractionException {
+    public StreamInfoItemsCollector getInfoItems() throws IOException, ExtractionException {
         if(streamInfoItemsCollector == null) {
             computeStreamsAndNextPageUrl();
         }
@@ -119,14 +120,14 @@ public class SoundcloudPlaylistExtractor extends PlaylistExtractor {
     }

     @Override
-    public InfoItemPage getPage(String pageUrl) throws IOException, ExtractionException {
-        if (!hasNextPage()) {
-            throw new ExtractionException("Playlist doesn't have more streams");
+    public InfoItemPage<StreamInfoItem> getPage(String pageUrl) throws IOException, ExtractionException {
+        if (pageUrl == null || pageUrl.isEmpty()) {
+            throw new ExtractionException(new IllegalArgumentException("Page url is empty or null"));
         }

         StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId());
         String nextPageUrl = SoundcloudParsingHelper.getStreamsFromApiMinItems(15, collector, pageUrl);

-        return new InfoItemPage(collector, nextPageUrl);
+        return new InfoItemPage<>(collector, nextPageUrl);
     }
 }

YoutubeChannelExtractor.java

@@ -13,7 +13,7 @@ import org.schabi.newpipe.extractor.StreamingService;
 import org.schabi.newpipe.extractor.channel.ChannelExtractor;
 import org.schabi.newpipe.extractor.exceptions.ExtractionException;
 import org.schabi.newpipe.extractor.exceptions.ParsingException;
-import org.schabi.newpipe.extractor.exceptions.ReCaptchaException;
+import org.schabi.newpipe.extractor.stream.StreamInfoItem;
 import org.schabi.newpipe.extractor.stream.StreamInfoItemsCollector;
 import org.schabi.newpipe.extractor.utils.Parser;
 import org.schabi.newpipe.extractor.utils.Utils;
@@ -150,7 +150,7 @@ public class YoutubeChannelExtractor extends ChannelExtractor {
     @Nonnull
     @Override
-    public StreamInfoItemsCollector getStreams() throws ExtractionException {
+    public StreamInfoItemsCollector getInfoItems() throws ExtractionException {
         StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId());
         Element ul = doc.select("ul[id=\"browse-items-primary\"]").first();
         collectStreamsFrom(collector, ul);
@@ -158,29 +158,27 @@ public class YoutubeChannelExtractor extends ChannelExtractor {
     }

     @Override
-    public InfoItemPage getPage(String pageUrl) throws IOException, ExtractionException {
-        try {
-            if (!hasNextPage()) {
-                throw new ExtractionException("Channel doesn't have more streams");
+    public InfoItemPage<StreamInfoItem> getPage(String pageUrl) throws IOException, ExtractionException {
+        if (pageUrl == null || pageUrl.isEmpty()) {
+            throw new ExtractionException(new IllegalArgumentException("Page url is empty or null"));
         }

         // Unfortunately, we have to fetch the page even if we are only getting next streams,
         // as they don't deliver enough information on their own (the channel name, for example).
         fetchPage();

         StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId());
-        final JsonObject ajaxJson = JsonParser.object().from(
-                NewPipe.getDownloader()
-                        .download(pageUrl));
-        final Document ajaxHtml = Jsoup.parse(ajaxJson.getString("content_html"));
-        collectStreamsFrom(collector, ajaxHtml.select("body").first());
-        return new InfoItemPage(collector, getNextPageUrlFromAjaxPage(ajaxJson, pageUrl));
+        JsonObject ajaxJson;
+        try {
+            ajaxJson = JsonParser.object().from(NewPipe.getDownloader().download(pageUrl));
         } catch (JsonParserException pe) {
             throw new ParsingException("Could not parse json data for next streams", pe);
         }
+
+        final Document ajaxHtml = Jsoup.parse(ajaxJson.getString("content_html"));
+        collectStreamsFrom(collector, ajaxHtml.select("body").first());
+        return new InfoItemPage<>(collector, getNextPageUrlFromAjaxPage(ajaxJson, pageUrl));
     }

     private String getNextPageUrlFromAjaxPage(final JsonObject ajaxJson, final String pageUrl)

YoutubePlaylistExtractor.java

@@ -12,8 +12,8 @@ import org.schabi.newpipe.extractor.StreamingService;
 import org.schabi.newpipe.extractor.UrlIdHandler;
 import org.schabi.newpipe.extractor.exceptions.ExtractionException;
 import org.schabi.newpipe.extractor.exceptions.ParsingException;
-import org.schabi.newpipe.extractor.exceptions.ReCaptchaException;
 import org.schabi.newpipe.extractor.playlist.PlaylistExtractor;
+import org.schabi.newpipe.extractor.stream.StreamInfoItem;
 import org.schabi.newpipe.extractor.stream.StreamInfoItemsCollector;
 import org.schabi.newpipe.extractor.stream.StreamType;
 import org.schabi.newpipe.extractor.utils.Parser;
@@ -26,10 +26,6 @@ import java.io.IOException;
 public class YoutubePlaylistExtractor extends PlaylistExtractor {
     private Document doc;
-    /**
-     * It's lazily initialized (when getInfoItemPage is called)
-     */
-    private Document nextPageAjax;

     public YoutubePlaylistExtractor(StreamingService service, String url) {
         super(service, url);
@@ -39,8 +35,6 @@ public class YoutubePlaylistExtractor extends PlaylistExtractor {
     public void onFetchPage(@Nonnull Downloader downloader) throws IOException, ExtractionException {
         String pageContent = downloader.download(getCleanUrl());
         doc = Jsoup.parse(pageContent, getCleanUrl());
-        nextPageAjax = null;
     }

     @Override
@@ -135,7 +129,7 @@ public class YoutubePlaylistExtractor extends PlaylistExtractor {
     @Nonnull
     @Override
-    public StreamInfoItemsCollector getStreams() throws IOException, ExtractionException {
+    public StreamInfoItemsCollector getInfoItems() throws IOException, ExtractionException {
         StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId());
         Element tbody = doc.select("tbody[id=\"pl-load-more-destination\"]").first();
         collectStreamsFrom(collector, tbody);
@@ -143,28 +137,26 @@ public class YoutubePlaylistExtractor extends PlaylistExtractor {
     }

     @Override
-    public InfoItemPage getPage(final String pageUrl) throws IOException, ExtractionException {
-        try {
-            if (!hasNextPage()) {
-                throw new ExtractionException("Playlist doesn't have more streams");
+    public InfoItemPage<StreamInfoItem> getPage(final String pageUrl) throws IOException, ExtractionException {
+        if (pageUrl == null || pageUrl.isEmpty()) {
+            throw new ExtractionException(new IllegalArgumentException("Page url is empty or null"));
         }

         StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId());
-        // setupNextStreamsAjax(NewPipe.getDownloader());
-        final JsonObject pageJson = JsonParser.object().from(NewPipe.getDownloader()
-                .download(pageUrl));
+        JsonObject pageJson;
+        try {
+            pageJson = JsonParser.object().from(NewPipe.getDownloader().download(pageUrl));
+        } catch (JsonParserException pe) {
+            throw new ParsingException("Could not parse ajax json", pe);
+        }

         final Document pageHtml = Jsoup.parse("<table><tbody id=\"pl-load-more-destination\">"
                 + pageJson.getString("content_html")
                 + "</tbody></table>", pageUrl);
         collectStreamsFrom(collector, pageHtml.select("tbody[id=\"pl-load-more-destination\"]").first());

-        return new InfoItemPage(collector, getNextPageUrlFromAjax(pageJson, pageUrl));
-        } catch (JsonParserException pe) {
-            throw new ParsingException("Could not parse ajax json", pe);
-        }
+        return new InfoItemPage<>(collector, getNextPageUrlFromAjax(pageJson, pageUrl));
     }

     private String getNextPageUrlFromAjax(final JsonObject pageJson, final String pageUrl)
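Note that the same null/empty page-url guard now appears verbatim in the four getPage implementations above (both SoundCloud extractors and both YouTube extractors). A hypothetical helper hoisted into ListExtractor could centralize it; this is only a sketch, not part of the commit:

// Hypothetical helper (not in this commit), placed in ListExtractor where
// ExtractionException is already imported; subclasses would call
// requirePageUrl(pageUrl) at the top of getPage(...) instead of repeating the check.
protected static void requirePageUrl(final String pageUrl) throws ExtractionException {
    if (pageUrl == null || pageUrl.isEmpty()) {
        throw new ExtractionException(new IllegalArgumentException("Page url is empty or null"));
    }
}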

YoutubeTrendingExtractor.java

@@ -24,10 +24,14 @@ import org.jsoup.Jsoup;
 import org.jsoup.nodes.Document;
 import org.jsoup.nodes.Element;
 import org.jsoup.select.Elements;
-import org.schabi.newpipe.extractor.*;
+import org.schabi.newpipe.extractor.Downloader;
+import org.schabi.newpipe.extractor.ListExtractor;
+import org.schabi.newpipe.extractor.StreamingService;
+import org.schabi.newpipe.extractor.UrlIdHandler;
 import org.schabi.newpipe.extractor.exceptions.ExtractionException;
 import org.schabi.newpipe.extractor.exceptions.ParsingException;
 import org.schabi.newpipe.extractor.kiosk.KioskExtractor;
+import org.schabi.newpipe.extractor.stream.StreamInfoItem;
 import org.schabi.newpipe.extractor.stream.StreamInfoItemsCollector;

 import javax.annotation.Nonnull;
@@ -66,7 +70,7 @@ public class YoutubeTrendingExtractor extends KioskExtractor {
     }

     @Override
-    public ListExtractor.InfoItemPage getPage(String pageUrl) {
+    public ListExtractor.InfoItemPage<StreamInfoItem> getPage(String pageUrl) {
         return null;
     }

SoundcloudChannelExtractorTest.java

@@ -6,6 +6,7 @@ import org.schabi.newpipe.Downloader;
 import org.schabi.newpipe.extractor.ListExtractor;
 import org.schabi.newpipe.extractor.NewPipe;
 import org.schabi.newpipe.extractor.channel.ChannelExtractor;
+import org.schabi.newpipe.extractor.stream.StreamInfoItem;

 import static org.junit.Assert.*;
 import static org.schabi.newpipe.extractor.ExtractorAsserts.assertIsSecureUrl;
@@ -49,18 +50,18 @@ public class SoundcloudChannelExtractorTest {
     @Test
     public void testGetStreams() throws Exception {
-        assertFalse("no streams are received", extractor.getStreams().getItemList().isEmpty());
+        assertFalse("no streams are received", extractor.getInfoItems().getItemList().isEmpty());
     }

     @Test
     public void testGetStreamsErrors() throws Exception {
-        assertTrue("errors during stream list extraction", extractor.getStreams().getErrors().isEmpty());
+        assertTrue("errors during stream list extraction", extractor.getInfoItems().getErrors().isEmpty());
     }

     @Test
     public void testHasMoreStreams() throws Exception {
         // Setup the streams
-        extractor.getStreams();
+        extractor.getInfoItems();
         assertTrue("don't have more streams", extractor.hasNextPage());
     }
@@ -77,10 +78,10 @@ public class SoundcloudChannelExtractorTest {
     @Test
     public void testGetPage() throws Exception {
         // Setup the streams
-        extractor.getStreams();
-        ListExtractor.InfoItemPage nextItemsResult = extractor.getPage(extractor.getNextPageUrl());
-        assertTrue("extractor didn't have next streams", !nextItemsResult.infoItemList.isEmpty());
-        assertTrue("errors occurred during extraction of the next streams", nextItemsResult.errors.isEmpty());
+        extractor.getInfoItems();
+        ListExtractor.InfoItemPage<StreamInfoItem> nextItemsResult = extractor.getPage(extractor.getNextPageUrl());
+        assertTrue("extractor didn't have next streams", !nextItemsResult.getItemsList().isEmpty());
+        assertTrue("errors occurred during extraction of the next streams", nextItemsResult.getErrors().isEmpty());
         assertTrue("extractor didn't have more streams after getInfoItemPage", extractor.hasNextPage());
     }

SoundcloudChartsExtractorTest.java

@@ -80,9 +80,9 @@ public class SoundcloudChartsExtractorTest {
     @Test
     public void testGetNextPage() throws Exception {
-        extractor.getInfoItems();
+        extractor.getInfoItems().getItemList();
         assertFalse("extractor has next streams", extractor.getPage(extractor.getNextPageUrl()) == null
-                || extractor.getPage(extractor.getNextPageUrl()).infoItemList.isEmpty());
+                || extractor.getPage(extractor.getNextPageUrl()).getItemsList().isEmpty());
     }

     @Test

SoundcloudPlaylistExtractorTest.java

@@ -69,25 +69,25 @@ public class SoundcloudPlaylistExtractorTest {
     @Test
     public void testGetStreams() throws Exception {
-        assertTrue("no streams are received", !extractor.getStreams().getItemList().isEmpty());
+        assertTrue("no streams are received", !extractor.getInfoItems().getItemList().isEmpty());
     }

     @Test
     public void testGetStreamsErrors() throws Exception {
-        assertTrue("errors during stream list extraction", extractor.getStreams().getErrors().isEmpty());
+        assertTrue("errors during stream list extraction", extractor.getInfoItems().getErrors().isEmpty());
     }

     @Test
     public void testHasMoreStreams() throws Exception {
         // Setup the streams
-        extractor.getStreams();
+        extractor.getInfoItems();
         assertTrue("extractor didn't have more streams", !extractor.hasNextPage());
     }

     @Test(expected = ExtractionException.class)
     public void testGetNextPageNonExistent() throws Exception {
         // Setup the streams
-        extractor.getStreams();
+        extractor.getInfoItems();

         // This playlist don't have more streams, it should throw an error
         extractor.getPage(extractor.getNextPageUrl());

YoutubeChannelExtractorTest.java

@@ -6,6 +6,7 @@ import org.schabi.newpipe.Downloader;
 import org.schabi.newpipe.extractor.ListExtractor;
 import org.schabi.newpipe.extractor.NewPipe;
 import org.schabi.newpipe.extractor.channel.ChannelExtractor;
+import org.schabi.newpipe.extractor.stream.StreamInfoItem;

 import static org.junit.Assert.*;
 import static org.schabi.newpipe.extractor.ExtractorAsserts.assertEmptyErrors;
@@ -88,18 +89,18 @@ public class YoutubeChannelExtractorTest {
     @Test
     public void testGetStreams() throws Exception {
-        assertTrue("no streams are received", !extractor.getStreams().getItemList().isEmpty());
+        assertTrue("no streams are received", !extractor.getInfoItems().getItemList().isEmpty());
     }

     @Test
     public void testGetStreamsErrors() throws Exception {
-        assertEmptyErrors("errors during stream list extraction", extractor.getStreams().getErrors());
+        assertEmptyErrors("errors during stream list extraction", extractor.getInfoItems().getErrors());
     }

     @Test
     public void testHasMoreStreams() throws Exception {
         // Setup the streams
-        extractor.getStreams();
+        extractor.getInfoItems();
         assertTrue("don't have more streams", extractor.hasNextPage());
     }
@@ -116,10 +117,10 @@ public class YoutubeChannelExtractorTest {
     @Test
     public void testGetPage() throws Exception {
         // Setup the streams
-        extractor.getStreams();
-        ListExtractor.InfoItemPage nextItemsResult = extractor.getPage(extractor.getNextPageUrl());
-        assertTrue("extractor didn't have next streams", !nextItemsResult.infoItemList.isEmpty());
-        assertEmptyErrors("errors occurred during extraction of the next streams", nextItemsResult.errors);
+        extractor.getInfoItems();
+        ListExtractor.InfoItemPage<StreamInfoItem> nextItemsResult = extractor.getPage(extractor.getNextPageUrl());
+        assertTrue("extractor didn't have next streams", !nextItemsResult.getItemsList().isEmpty());
+        assertEmptyErrors("errors occurred during extraction of the next streams", nextItemsResult.getErrors());
         assertTrue("extractor didn't have more streams after getInfoItemPage", extractor.hasNextPage());
     }
 }

YoutubePlaylistExtractorTest.java

@@ -81,7 +81,7 @@ public class YoutubePlaylistExtractorTest {
     @Test
     public void testGetStreams() throws Exception {
-        List<StreamInfoItem> streams = extractor.getStreams().getItemList();
+        List<StreamInfoItem> streams = extractor.getInfoItems().getItemList();
         assertFalse("no streams are received", streams.isEmpty());
         assertTrue(streams.size() > 60);
         assertFalse(streams.contains(null));
@@ -96,13 +96,13 @@ public class YoutubePlaylistExtractorTest {
     @Test
     public void testGetStreamsErrors() throws Exception {
-        assertEmptyErrors("errors during stream list extraction", extractor.getStreams().getErrors());
+        assertEmptyErrors("errors during stream list extraction", extractor.getInfoItems().getErrors());
     }

     @Test
     public void testHasMoreStreams() throws Exception {
         // Setup the streams
-        extractor.getStreams();
+        extractor.getInfoItems();
         assertTrue("extractor didn't have more streams", extractor.hasNextPage());
     }
@@ -110,10 +110,10 @@ public class YoutubePlaylistExtractorTest {
     @Test @Ignore
     public void testGetNextPage() throws Exception {
         // Setup the streams
-        extractor.getStreams();
-        ListExtractor.InfoItemPage infoItemPage = extractor.getPage(extractor.getNextPageUrl());
-        assertTrue("extractor didn't have next streams", !infoItemPage.infoItemList.isEmpty());
-        assertEmptyErrors("errors occurred during extraction of the next streams", infoItemPage.errors);
+        extractor.getInfoItems();
+        ListExtractor.InfoItemPage<StreamInfoItem> infoItemPage = extractor.getPage(extractor.getNextPageUrl());
+        assertTrue("extractor didn't have next streams", !infoItemPage.getItemsList().isEmpty());
+        assertEmptyErrors("errors occurred during extraction of the next streams", infoItemPage.getErrors());
         assertTrue("extractor didn't have more streams after getInfoItemPage", extractor.hasNextPage());
     }