architecture refactoring

Christian Schabesberger 2018-02-24 22:20:50 +01:00
parent 9dfcb3be06
commit 86db415b18
40 changed files with 298 additions and 279 deletions

View file

@ -11,7 +11,7 @@ import java.util.List;
* Created by Christian Schabesberger on 12.02.17.
*
* Copyright (C) Christian Schabesberger 2017 <chris.schabesberger@mailbox.org>
* InfoItemCollector.java is part of NewPipe.
* InfoItemsCollector.java is part of NewPipe.
*
* NewPipe is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@ -27,7 +27,7 @@ import java.util.List;
* along with NewPipe. If not, see <http://www.gnu.org/licenses/>.
*/
public abstract class InfoItemCollector<I extends InfoItem, E> implements Collector<I,E> {
public abstract class InfoItemsCollector<I extends InfoItem, E> implements Collector<I,E> {
private final List<I> itemList = new ArrayList<>();
private final List<Throwable> errors = new ArrayList<>();
@ -37,7 +37,7 @@ public abstract class InfoItemCollector<I extends InfoItem, E> implements Collec
* Create a new collector
* @param serviceId the service id
*/
public InfoItemCollector(int serviceId) {
public InfoItemsCollector(int serviceId) {
this.serviceId = serviceId;
}

View file

@ -1,7 +1,6 @@
package org.schabi.newpipe.extractor;
import org.schabi.newpipe.extractor.exceptions.ExtractionException;
import org.schabi.newpipe.extractor.stream.StreamInfoItemCollector;
import javax.annotation.Nonnull;
import java.io.IOException;
@ -11,73 +10,73 @@ import java.util.List;
* Base class to extractors that have a list (e.g. playlists, users).
*/
public abstract class ListExtractor extends Extractor {
protected String nextStreamsUrl;
protected String nextPageUrl;
/**
* Get a new ListExtractor with the given nextStreamsUrl set.
* Get a new ListExtractor with the given nextPageUrl set.
*/
public ListExtractor(StreamingService service, String url, String nextStreamsUrl) throws ExtractionException {
public ListExtractor(StreamingService service, String url, String nextPageUrl) throws ExtractionException {
super(service, url);
setNextStreamsUrl(nextStreamsUrl);
setNextPageUrl(nextPageUrl);
}
@Nonnull
public abstract StreamInfoItemCollector getStreams() throws IOException, ExtractionException;
public abstract InfoItemsCollector getInfoItems() throws IOException, ExtractionException;
public abstract NextItemsResult getNextStreams() throws IOException, ExtractionException;
public abstract InfoItemPage getInfoItemPage() throws IOException, ExtractionException;
public boolean hasMoreStreams() {
return nextStreamsUrl != null && !nextStreamsUrl.isEmpty();
public boolean hasNextPage() {
return nextPageUrl != null && !nextPageUrl.isEmpty();
}
public String getNextStreamsUrl() {
return nextStreamsUrl;
public String getNextPageUrl() {
return nextPageUrl;
}
public void setNextStreamsUrl(String nextStreamsUrl) {
this.nextStreamsUrl = nextStreamsUrl;
public void setNextPageUrl(String nextPageUrl) {
this.nextPageUrl = nextPageUrl;
}
/*//////////////////////////////////////////////////////////////////////////
// Inner
//////////////////////////////////////////////////////////////////////////*/
public static class NextItemsResult {
public static class InfoItemPage {
/**
* The current list of items to this result
*/
public final List<InfoItem> nextItemsList;
public final List<InfoItem> infoItemList;
/**
* Next url to fetch more items
*/
public final String nextItemsUrl;
public final String nextPageUrl;
/**
* Errors that happened during the extraction
*/
public final List<Throwable> errors;
public NextItemsResult(InfoItemCollector collector, String nextItemsUrl) {
this(collector.getItemList(), nextItemsUrl, collector.getErrors());
public InfoItemPage(InfoItemsCollector collector, String nextPageUrl) {
this(collector.getItemList(), nextPageUrl, collector.getErrors());
}
public NextItemsResult(List<InfoItem> nextItemsList, String nextItemsUrl, List<Throwable> errors) {
this.nextItemsList = nextItemsList;
this.nextItemsUrl = nextItemsUrl;
public InfoItemPage(List<InfoItem> infoItemList, String nextPageUrl, List<Throwable> errors) {
this.infoItemList = infoItemList;
this.nextPageUrl = nextPageUrl;
this.errors = errors;
}
public boolean hasMoreStreams() {
return nextItemsUrl != null && !nextItemsUrl.isEmpty();
public boolean hasNextPage() {
return nextPageUrl != null && !nextPageUrl.isEmpty();
}
public List<InfoItem> getNextItemsList() {
return nextItemsList;
return infoItemList;
}
public String getNextItemsUrl() {
return nextItemsUrl;
public String getNextPageUrl() {
return nextPageUrl;
}
public List<Throwable> getErrors() {

View file

@ -19,7 +19,7 @@ public abstract class ListInfo extends Info {
this.related_streams = related_streams;
}
public boolean hasMoreStreams() {
public boolean hasNextPage() {
return has_more_streams;
}
@ -27,7 +27,7 @@ public abstract class ListInfo extends Info {
this.has_more_streams = has_more_streams;
}
public String getNextStreamsUrl() {
public String getNextPageUrl() {
return next_streams_url;
}

View file

@ -69,8 +69,8 @@ public abstract class StreamingService {
public abstract SearchEngine getSearchEngine();
public abstract SuggestionExtractor getSuggestionExtractor();
public abstract StreamExtractor getStreamExtractor(String url) throws IOException, ExtractionException;
public abstract ChannelExtractor getChannelExtractor(String url, String nextStreamsUrl) throws IOException, ExtractionException;
public abstract PlaylistExtractor getPlaylistExtractor(String url, String nextStreamsUrl) throws IOException, ExtractionException;
public abstract ChannelExtractor getChannelExtractor(String url, String nextPageUrl) throws IOException, ExtractionException;
public abstract PlaylistExtractor getPlaylistExtractor(String url, String nextPageUrl) throws IOException, ExtractionException;
public abstract KioskList getKioskList() throws ExtractionException;
public abstract SubscriptionExtractor getSubscriptionExtractor();

View file

@ -1,10 +1,10 @@
package org.schabi.newpipe.extractor.channel;
import org.schabi.newpipe.extractor.ListExtractor;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.UrlIdHandler;
import edu.umd.cs.findbugs.annotations.NonNull;
import org.schabi.newpipe.extractor.*;
import org.schabi.newpipe.extractor.exceptions.ExtractionException;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.stream.StreamInfoItemsCollector;
import javax.annotation.Nonnull;
import java.io.IOException;
@ -31,16 +31,25 @@ import java.io.IOException;
public abstract class ChannelExtractor extends ListExtractor {
public ChannelExtractor(StreamingService service, String url, String nextStreamsUrl) throws IOException, ExtractionException {
super(service, url, nextStreamsUrl);
public ChannelExtractor(StreamingService service, String url, String nextPageUrl)
throws ExtractionException {
super(service, url, nextPageUrl);
}
@Nonnull
@Override
protected UrlIdHandler getUrlIdHandler() throws ParsingException {
protected UrlIdHandler getUrlIdHandler() {
return getService().getChannelUrlIdHandler();
}
@NonNull
@Override
public InfoItemsCollector getInfoItems()
throws IOException, ExtractionException {
return getStreams();
}
public abstract StreamInfoItemsCollector getStreams() throws IOException, ExtractionException;
public abstract String getAvatarUrl() throws ParsingException;
public abstract String getBannerUrl() throws ParsingException;
public abstract String getFeedUrl() throws ParsingException;

View file

@ -1,6 +1,6 @@
package org.schabi.newpipe.extractor.channel;
import org.schabi.newpipe.extractor.ListExtractor.NextItemsResult;
import org.schabi.newpipe.extractor.ListExtractor.InfoItemPage;
import org.schabi.newpipe.extractor.ListInfo;
import org.schabi.newpipe.extractor.NewPipe;
import org.schabi.newpipe.extractor.StreamingService;
@ -37,8 +37,9 @@ public class ChannelInfo extends ListInfo {
}
public static NextItemsResult getMoreItems(StreamingService service, String url, String nextStreamsUrl) throws IOException, ExtractionException {
return service.getChannelExtractor(url, nextStreamsUrl).getNextStreams();
public static InfoItemPage getMoreItems(StreamingService service, String url, String nextPageUrl)
throws IOException, ExtractionException {
return service.getChannelExtractor(url, nextPageUrl).getInfoItemPage();
}
public static ChannelInfo getInfo(String url) throws IOException, ExtractionException {
@ -78,7 +79,7 @@ public class ChannelInfo extends ListInfo {
info.addError(e);
}
info.setRelatedStreams(ExtractorHelper.getStreamsOrLogError(info, extractor));
info.setRelatedStreams(ExtractorHelper.getInfoItemsOrLogError(info, extractor));
try {
info.setSubscriberCount(extractor.getSubscriberCount());
@ -91,8 +92,8 @@ public class ChannelInfo extends ListInfo {
info.addError(e);
}
info.setHasMoreStreams(extractor.hasMoreStreams());
info.setNextStreamsUrl(extractor.getNextStreamsUrl());
info.setHasMoreStreams(extractor.hasNextPage());
info.setNextStreamsUrl(extractor.getNextPageUrl());
return info;
}
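
At the Info layer the same rename shows up in the static helper: getMoreItems() now takes a nextPageUrl and returns an InfoItemPage instead of a NextItemsResult. A hypothetical caller (wrapper class and parameter names invented for illustration, with values the frontend would already hold from an earlier ChannelInfo.getInfo() call) might use it roughly like this:

    import java.io.IOException;
    import java.util.List;

    import org.schabi.newpipe.extractor.InfoItem;
    import org.schabi.newpipe.extractor.ListExtractor.InfoItemPage;
    import org.schabi.newpipe.extractor.NewPipe;
    import org.schabi.newpipe.extractor.StreamingService;
    import org.schabi.newpipe.extractor.channel.ChannelInfo;
    import org.schabi.newpipe.extractor.exceptions.ExtractionException;

    // Hypothetical helper, only to illustrate the renamed Info-level API.
    public final class MoreItemsSketch {
        static List<InfoItem> loadNextChannelPage(int serviceId, String channelUrl, String nextPageUrl)
                throws IOException, ExtractionException {
            StreamingService service = NewPipe.getService(serviceId);
            // getMoreItems() now returns an InfoItemPage for the given nextPageUrl
            InfoItemPage page = ChannelInfo.getMoreItems(service, channelUrl, nextPageUrl);
            // page.getNextPageUrl() is what the caller stores for the following request
            return page.getNextItemsList();
        }
    }
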

View file

@ -1,13 +1,13 @@
package org.schabi.newpipe.extractor.channel;
import org.schabi.newpipe.extractor.InfoItemCollector;
import org.schabi.newpipe.extractor.InfoItemsCollector;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
/*
* Created by Christian Schabesberger on 12.02.17.
*
* Copyright (C) Christian Schabesberger 2017 <chris.schabesberger@mailbox.org>
* ChannelInfoItemCollector.java is part of NewPipe.
* ChannelInfoItemsCollector.java is part of NewPipe.
*
* NewPipe is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@ -23,8 +23,8 @@ import org.schabi.newpipe.extractor.exceptions.ParsingException;
* along with NewPipe. If not, see <http://www.gnu.org/licenses/>.
*/
public class ChannelInfoItemCollector extends InfoItemCollector<ChannelInfoItem, ChannelInfoItemExtractor> {
public ChannelInfoItemCollector(int serviceId) {
public class ChannelInfoItemsCollector extends InfoItemsCollector<ChannelInfoItem, ChannelInfoItemExtractor> {
public ChannelInfoItemsCollector(int serviceId) {
super(serviceId);
}

View file

@ -34,10 +34,10 @@ public abstract class KioskExtractor extends ListExtractor {
public KioskExtractor(StreamingService streamingService,
String url,
String nextStreamsUrl,
String nextPageUrl,
String kioskId)
throws IOException, ExtractionException {
super(streamingService, url, nextStreamsUrl);
throws ExtractionException {
super(streamingService, url, nextPageUrl);
this.id = kioskId;
}
@ -54,12 +54,12 @@ public abstract class KioskExtractor extends ListExtractor {
@Nonnull
@Override
public String getId() throws ParsingException {
public String getId() {
return id;
}
/**
* Id should be the name of the kiosk, tho Id is used for identifing it in the programm,
* Id should be the name of the kiosk, tho Id is used for identifing it in the frontend,
* so id should be kept in english.
* In order to get the name of the kiosk in the desired language we have to
* crawl if from the website.

View file

@ -35,14 +35,14 @@ public class KioskInfo extends ListInfo {
super(serviceId, id, url, name);
}
public static ListExtractor.NextItemsResult getMoreItems(StreamingService service,
public static ListExtractor.InfoItemPage getMoreItems(StreamingService service,
String url,
String nextStreamsUrl,
String nextPageUrl,
String contentCountry) throws IOException, ExtractionException {
KioskList kl = service.getKioskList();
KioskExtractor extractor = kl.getExtractorByUrl(url, nextStreamsUrl);
KioskExtractor extractor = kl.getExtractorByUrl(url, nextPageUrl);
extractor.setContentCountry(contentCountry);
return extractor.getNextStreams();
return extractor.getInfoItemPage();
}
public static KioskInfo getInfo(String url,
@ -74,7 +74,7 @@ public class KioskInfo extends ListInfo {
KioskInfo info = new KioskInfo(serviceId, id, name, url);
info.related_streams = ExtractorHelper.getStreamsOrLogError(info, extractor);
info.related_streams = ExtractorHelper.getInfoItemsOrLogError(info, extractor);
return info;
}

View file

@ -14,7 +14,7 @@ public class KioskList {
public interface KioskExtractorFactory {
KioskExtractor createNewKiosk(final StreamingService streamingService,
final String url,
final String nextStreamUrl,
final String nextPageUrl,
final String kioskId)
throws ExtractionException, IOException;
}
@ -48,15 +48,15 @@ public class KioskList {
defaultKiosk = kioskType;
}
public KioskExtractor getDefaultKioskExtractor(String nextStreamUrl)
public KioskExtractor getDefaultKioskExtractor(String nextPageUrl)
throws ExtractionException, IOException {
if(defaultKiosk != null && !defaultKiosk.equals("")) {
return getExtractorById(defaultKiosk, nextStreamUrl);
return getExtractorById(defaultKiosk, nextPageUrl);
} else {
if(!kioskList.isEmpty()) {
// if not set get any entry
Object[] keySet = kioskList.keySet().toArray();
return getExtractorById(keySet[0].toString(), nextStreamUrl);
return getExtractorById(keySet[0].toString(), nextPageUrl);
} else {
return null;
}
@ -67,7 +67,7 @@ public class KioskList {
return defaultKiosk;
}
public KioskExtractor getExtractorById(String kioskId, String nextStreamsUrl)
public KioskExtractor getExtractorById(String kioskId, String nextPageUrl)
throws ExtractionException, IOException {
KioskEntry ke = kioskList.get(kioskId);
if(ke == null) {
@ -75,7 +75,7 @@ public class KioskList {
} else {
return ke.extractorFactory.createNewKiosk(NewPipe.getService(service_id),
ke.handler.getUrl(kioskId),
nextStreamsUrl, kioskId);
nextPageUrl, kioskId);
}
}
@ -83,12 +83,12 @@ public class KioskList {
return kioskList.keySet();
}
public KioskExtractor getExtractorByUrl(String url, String nextStreamsUrl)
public KioskExtractor getExtractorByUrl(String url, String nextPageUrl)
throws ExtractionException, IOException {
for(Map.Entry<String, KioskEntry> e : kioskList.entrySet()) {
KioskEntry ke = e.getValue();
if(ke.handler.acceptUrl(url)) {
return getExtractorById(e.getKey(), nextStreamsUrl);
return getExtractorById(e.getKey(), nextPageUrl);
}
}
throw new ExtractionException("Could not find a kiosk that fits to the url: " + url);
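
For kiosks only the parameter name changes; obtaining and preparing a kiosk extractor stays the same. A rough sketch follows, with an invented wrapper class and placeholder arguments, assuming fetchPage() and setContentCountry() behave as in the extractors shown elsewhere in this commit:

    import java.io.IOException;

    import org.schabi.newpipe.extractor.NewPipe;
    import org.schabi.newpipe.extractor.exceptions.ExtractionException;
    import org.schabi.newpipe.extractor.kiosk.KioskExtractor;
    import org.schabi.newpipe.extractor.kiosk.KioskList;

    // Hypothetical helper, only to illustrate the renamed kiosk API.
    public final class KioskSketch {
        static KioskExtractor openDefaultKiosk(int serviceId, String contentCountry)
                throws IOException, ExtractionException {
            KioskList kiosks = NewPipe.getService(serviceId).getKioskList();
            // null nextPageUrl means "start at the first page" of the default kiosk
            KioskExtractor extractor = kiosks.getDefaultKioskExtractor(null);
            extractor.setContentCountry(contentCountry);
            extractor.fetchPage();
            return extractor;
        }
    }
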

View file

@ -1,26 +1,37 @@
package org.schabi.newpipe.extractor.playlist;
import edu.umd.cs.findbugs.annotations.NonNull;
import org.schabi.newpipe.extractor.InfoItemsCollector;
import org.schabi.newpipe.extractor.ListExtractor;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.UrlIdHandler;
import org.schabi.newpipe.extractor.exceptions.ExtractionException;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.stream.StreamInfoItemsCollector;
import javax.annotation.Nonnull;
import java.io.IOException;
public abstract class PlaylistExtractor extends ListExtractor {
public PlaylistExtractor(StreamingService service, String url, String nextStreamsUrl) throws IOException, ExtractionException {
super(service, url, nextStreamsUrl);
public PlaylistExtractor(StreamingService service, String url, String nextPageUrl) throws IOException, ExtractionException {
super(service, url, nextPageUrl);
}
@Nonnull
@Override
protected UrlIdHandler getUrlIdHandler() throws ParsingException {
protected UrlIdHandler getUrlIdHandler() {
return getService().getPlaylistUrlIdHandler();
}
@NonNull
@Override
public InfoItemsCollector getInfoItems()
throws IOException, ExtractionException {
return getStreams();
}
public abstract StreamInfoItemsCollector getStreams() throws IOException, ExtractionException;
public abstract String getThumbnailUrl() throws ParsingException;
public abstract String getBannerUrl() throws ParsingException;

View file

@ -1,6 +1,6 @@
package org.schabi.newpipe.extractor.playlist;
import org.schabi.newpipe.extractor.ListExtractor.NextItemsResult;
import org.schabi.newpipe.extractor.ListExtractor.InfoItemPage;
import org.schabi.newpipe.extractor.ListInfo;
import org.schabi.newpipe.extractor.NewPipe;
import org.schabi.newpipe.extractor.StreamingService;
@ -9,7 +9,7 @@ import org.schabi.newpipe.extractor.exceptions.ParsingException;
import java.io.IOException;
import static org.schabi.newpipe.extractor.utils.ExtractorHelper.getStreamsOrLogError;
import static org.schabi.newpipe.extractor.utils.ExtractorHelper.getInfoItemsOrLogError;
public class PlaylistInfo extends ListInfo {
@ -17,8 +17,8 @@ public class PlaylistInfo extends ListInfo {
super(serviceId, id, url, name);
}
public static NextItemsResult getMoreItems(StreamingService service, String url, String nextStreamsUrl) throws IOException, ExtractionException {
return service.getPlaylistExtractor(url, nextStreamsUrl).getNextStreams();
public static InfoItemPage getMoreItems(StreamingService service, String url, String nextPageUrl) throws IOException, ExtractionException {
return service.getPlaylistExtractor(url, nextPageUrl).getInfoItemPage();
}
public static PlaylistInfo getInfo(String url) throws IOException, ExtractionException {
@ -75,9 +75,9 @@ public class PlaylistInfo extends ListInfo {
info.addError(e);
}
info.setRelatedStreams(getStreamsOrLogError(info, extractor));
info.setHasMoreStreams(extractor.hasMoreStreams());
info.setNextStreamsUrl(extractor.getNextStreamsUrl());
info.setRelatedStreams(getInfoItemsOrLogError(info, extractor));
info.setHasMoreStreams(extractor.hasNextPage());
info.setNextStreamsUrl(extractor.getNextPageUrl());
return info;
}

View file

@ -1,11 +1,11 @@
package org.schabi.newpipe.extractor.playlist;
import org.schabi.newpipe.extractor.InfoItemCollector;
import org.schabi.newpipe.extractor.InfoItemsCollector;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
public class PlaylistInfoItemCollector extends InfoItemCollector<PlaylistInfoItem, PlaylistInfoItemExtractor> {
public class PlaylistInfoItemsCollector extends InfoItemsCollector<PlaylistInfoItem, PlaylistInfoItemExtractor> {
public PlaylistInfoItemCollector(int serviceId) {
public PlaylistInfoItemsCollector(int serviceId) {
super(serviceId);
}

View file

@ -1,20 +1,20 @@
package org.schabi.newpipe.extractor.search;
import org.schabi.newpipe.extractor.*;
import org.schabi.newpipe.extractor.channel.ChannelInfoItemCollector;
import org.schabi.newpipe.extractor.channel.ChannelInfoItemsCollector;
import org.schabi.newpipe.extractor.channel.ChannelInfoItemExtractor;
import org.schabi.newpipe.extractor.exceptions.ExtractionException;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.playlist.PlaylistInfoItemCollector;
import org.schabi.newpipe.extractor.playlist.PlaylistInfoItemsCollector;
import org.schabi.newpipe.extractor.playlist.PlaylistInfoItemExtractor;
import org.schabi.newpipe.extractor.stream.StreamInfoItemCollector;
import org.schabi.newpipe.extractor.stream.StreamInfoItemsCollector;
import org.schabi.newpipe.extractor.stream.StreamInfoItemExtractor;
/*
* Created by Christian Schabesberger on 12.02.17.
*
* Copyright (C) Christian Schabesberger 2017 <chris.schabesberger@mailbox.org>
* InfoItemSearchCollector.java is part of NewPipe.
* InfoItemsSearchCollector.java is part of NewPipe.
*
* NewPipe is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@ -42,17 +42,17 @@ import org.schabi.newpipe.extractor.stream.StreamInfoItemExtractor;
* Calling {@link #extract(InfoItemExtractor)} or {@link #commit(Object)} with any
* other extractor type will raise an exception.
*/
public class InfoItemSearchCollector extends InfoItemCollector<InfoItem, InfoItemExtractor> {
public class InfoItemsSearchCollector extends InfoItemsCollector<InfoItem, InfoItemExtractor> {
private String suggestion = "";
private final StreamInfoItemCollector streamCollector;
private final ChannelInfoItemCollector userCollector;
private final PlaylistInfoItemCollector playlistCollector;
private final StreamInfoItemsCollector streamCollector;
private final ChannelInfoItemsCollector userCollector;
private final PlaylistInfoItemsCollector playlistCollector;
InfoItemSearchCollector(int serviceId) {
InfoItemsSearchCollector(int serviceId) {
super(serviceId);
streamCollector = new StreamInfoItemCollector(serviceId);
userCollector = new ChannelInfoItemCollector(serviceId);
playlistCollector = new PlaylistInfoItemCollector(serviceId);
streamCollector = new StreamInfoItemsCollector(serviceId);
userCollector = new ChannelInfoItemsCollector(serviceId);
playlistCollector = new PlaylistInfoItemsCollector(serviceId);
}
public void setSuggestion(String suggestion) {

View file

@ -35,16 +35,16 @@ public abstract class SearchEngine {
}
}
private final InfoItemSearchCollector collector;
private final InfoItemsSearchCollector collector;
public SearchEngine(int serviceId) {
collector = new InfoItemSearchCollector(serviceId);
collector = new InfoItemsSearchCollector(serviceId);
}
protected InfoItemSearchCollector getInfoItemSearchCollector() {
protected InfoItemsSearchCollector getInfoItemSearchCollector() {
return collector;
}
public abstract InfoItemSearchCollector search(String query, int page, String contentCountry, Filter filter)
public abstract InfoItemsSearchCollector search(String query, int page, String contentCountry, Filter filter)
throws IOException, ExtractionException;
}

View file

@ -4,12 +4,11 @@ import com.grack.nanojson.JsonObject;
import com.grack.nanojson.JsonParser;
import com.grack.nanojson.JsonParserException;
import org.schabi.newpipe.extractor.Downloader;
import org.schabi.newpipe.extractor.NewPipe;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.channel.ChannelExtractor;
import org.schabi.newpipe.extractor.exceptions.ExtractionException;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.stream.StreamInfoItemCollector;
import org.schabi.newpipe.extractor.stream.StreamInfoItemsCollector;
import javax.annotation.Nonnull;
import java.io.IOException;
@ -19,8 +18,8 @@ public class SoundcloudChannelExtractor extends ChannelExtractor {
private String userId;
private JsonObject user;
public SoundcloudChannelExtractor(StreamingService service, String url, String nextStreamsUrl) throws IOException, ExtractionException {
super(service, url, nextStreamsUrl);
public SoundcloudChannelExtractor(StreamingService service, String url, String nextPageUrl) throws IOException, ExtractionException {
super(service, url, nextPageUrl);
}
@Override
@ -87,27 +86,27 @@ public class SoundcloudChannelExtractor extends ChannelExtractor {
@Nonnull
@Override
public StreamInfoItemCollector getStreams() throws IOException, ExtractionException {
StreamInfoItemCollector collector = new StreamInfoItemCollector(getServiceId());
public StreamInfoItemsCollector getStreams() throws IOException, ExtractionException {
StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId());
String apiUrl = "https://api-v2.soundcloud.com/users/" + getId() + "/tracks"
+ "?client_id=" + SoundcloudParsingHelper.clientId()
+ "&limit=20"
+ "&linked_partitioning=1";
nextStreamsUrl = SoundcloudParsingHelper.getStreamsFromApiMinItems(15, collector, apiUrl);
nextPageUrl = SoundcloudParsingHelper.getStreamsFromApiMinItems(15, collector, apiUrl);
return collector;
}
@Override
public NextItemsResult getNextStreams() throws IOException, ExtractionException {
if (!hasMoreStreams()) {
public InfoItemPage getInfoItemPage() throws IOException, ExtractionException {
if (!hasNextPage()) {
throw new ExtractionException("Channel doesn't have more streams");
}
StreamInfoItemCollector collector = new StreamInfoItemCollector(getServiceId());
nextStreamsUrl = SoundcloudParsingHelper.getStreamsFromApiMinItems(15, collector, nextStreamsUrl);
StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId());
nextPageUrl = SoundcloudParsingHelper.getStreamsFromApiMinItems(15, collector, nextPageUrl);
return new NextItemsResult(collector, nextStreamsUrl);
return new InfoItemPage(collector, nextPageUrl);
}
}

View file

@ -8,18 +8,17 @@ import org.schabi.newpipe.extractor.Downloader;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.UrlIdHandler;
import org.schabi.newpipe.extractor.exceptions.ExtractionException;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.kiosk.KioskExtractor;
import org.schabi.newpipe.extractor.stream.StreamInfoItemCollector;
import org.schabi.newpipe.extractor.stream.StreamInfoItemsCollector;
import javax.annotation.Nonnull;
public class SoundcloudChartsExtractor extends KioskExtractor {
private String url;
public SoundcloudChartsExtractor(StreamingService service, String url, String nextStreamsUrl, String kioskId)
throws IOException, ExtractionException {
super(service, url, nextStreamsUrl, kioskId);
public SoundcloudChartsExtractor(StreamingService service, String url, String nextPageUrl, String kioskId)
throws ExtractionException {
super(service, url, nextPageUrl, kioskId);
this.url = url;
}
@ -29,7 +28,7 @@ public class SoundcloudChartsExtractor extends KioskExtractor {
@Nonnull
@Override
public String getName() throws ParsingException {
public String getName() {
return "< Implement me (♥_♥) >";
}
@ -40,21 +39,21 @@ public class SoundcloudChartsExtractor extends KioskExtractor {
}
@Override
public NextItemsResult getNextStreams() throws IOException, ExtractionException {
if (!hasMoreStreams()) {
public InfoItemPage getInfoItemPage() throws IOException, ExtractionException {
if (!hasNextPage()) {
throw new ExtractionException("Chart doesn't have more streams");
}
StreamInfoItemCollector collector = new StreamInfoItemCollector(getServiceId());
nextStreamsUrl = SoundcloudParsingHelper.getStreamsFromApi(collector, nextStreamsUrl, true);
StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId());
nextPageUrl = SoundcloudParsingHelper.getStreamsFromApi(collector, nextPageUrl, true);
return new NextItemsResult(collector, nextStreamsUrl);
return new InfoItemPage(collector, nextPageUrl);
}
@Nonnull
@Override
public StreamInfoItemCollector getStreams() throws IOException, ExtractionException {
StreamInfoItemCollector collector = new StreamInfoItemCollector(getServiceId());
public StreamInfoItemsCollector getInfoItems() throws IOException, ExtractionException {
StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId());
String apiUrl = "https://api-v2.soundcloud.com/charts" +
"?genre=soundcloud:genres:all-music" +
@ -72,7 +71,7 @@ public class SoundcloudChartsExtractor extends KioskExtractor {
apiUrl += "&region=soundcloud:regions:" + contentCountry;
}
nextStreamsUrl = SoundcloudParsingHelper.getStreamsFromApi(collector, apiUrl, true);
nextPageUrl = SoundcloudParsingHelper.getStreamsFromApi(collector, apiUrl, true);
return collector;
}
}

View file

@ -9,10 +9,10 @@ import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.schabi.newpipe.extractor.Downloader;
import org.schabi.newpipe.extractor.NewPipe;
import org.schabi.newpipe.extractor.channel.ChannelInfoItemCollector;
import org.schabi.newpipe.extractor.channel.ChannelInfoItemsCollector;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.exceptions.ReCaptchaException;
import org.schabi.newpipe.extractor.stream.StreamInfoItemCollector;
import org.schabi.newpipe.extractor.stream.StreamInfoItemsCollector;
import org.schabi.newpipe.extractor.utils.Parser;
import org.schabi.newpipe.extractor.utils.Parser.RegexException;
@ -112,19 +112,19 @@ public class SoundcloudParsingHelper {
/**
* Fetch the users from the given api and commit each of them to the collector.
* <p>
* This differ from {@link #getUsersFromApi(ChannelInfoItemCollector, String)} in the sense that they will always
* This differ from {@link #getUsersFromApi(ChannelInfoItemsCollector, String)} in the sense that they will always
* get MIN_ITEMS or more.
*
* @param minItems the method will return only when it have extracted that many items (equal or more)
*/
public static String getUsersFromApiMinItems(int minItems, ChannelInfoItemCollector collector, String apiUrl) throws IOException, ReCaptchaException, ParsingException {
String nextStreamsUrl = SoundcloudParsingHelper.getUsersFromApi(collector, apiUrl);
public static String getUsersFromApiMinItems(int minItems, ChannelInfoItemsCollector collector, String apiUrl) throws IOException, ReCaptchaException, ParsingException {
String nextPageUrl = SoundcloudParsingHelper.getUsersFromApi(collector, apiUrl);
while (!nextStreamsUrl.isEmpty() && collector.getItemList().size() < minItems) {
nextStreamsUrl = SoundcloudParsingHelper.getUsersFromApi(collector, nextStreamsUrl);
while (!nextPageUrl.isEmpty() && collector.getItemList().size() < minItems) {
nextPageUrl = SoundcloudParsingHelper.getUsersFromApi(collector, nextPageUrl);
}
return nextStreamsUrl;
return nextPageUrl;
}
/**
@ -132,7 +132,7 @@ public class SoundcloudParsingHelper {
*
* @return the next streams url, empty if don't have
*/
public static String getUsersFromApi(ChannelInfoItemCollector collector, String apiUrl) throws IOException, ReCaptchaException, ParsingException {
public static String getUsersFromApi(ChannelInfoItemsCollector collector, String apiUrl) throws IOException, ReCaptchaException, ParsingException {
String response = NewPipe.getDownloader().download(apiUrl);
JsonObject responseObject;
try {
@ -149,33 +149,33 @@ public class SoundcloudParsingHelper {
}
}
String nextStreamsUrl;
String nextPageUrl;
try {
nextStreamsUrl = responseObject.getString("next_href");
if (!nextStreamsUrl.contains("client_id=")) nextStreamsUrl += "&client_id=" + SoundcloudParsingHelper.clientId();
nextPageUrl = responseObject.getString("next_href");
if (!nextPageUrl.contains("client_id=")) nextPageUrl += "&client_id=" + SoundcloudParsingHelper.clientId();
} catch (Exception ignored) {
nextStreamsUrl = "";
nextPageUrl = "";
}
return nextStreamsUrl;
return nextPageUrl;
}
/**
* Fetch the streams from the given api and commit each of them to the collector.
* <p>
* This differ from {@link #getStreamsFromApi(StreamInfoItemCollector, String)} in the sense that they will always
* This differ from {@link #getStreamsFromApi(StreamInfoItemsCollector, String)} in the sense that they will always
* get MIN_ITEMS or more items.
*
* @param minItems the method will return only when it have extracted that many items (equal or more)
*/
public static String getStreamsFromApiMinItems(int minItems, StreamInfoItemCollector collector, String apiUrl) throws IOException, ReCaptchaException, ParsingException {
String nextStreamsUrl = SoundcloudParsingHelper.getStreamsFromApi(collector, apiUrl);
public static String getStreamsFromApiMinItems(int minItems, StreamInfoItemsCollector collector, String apiUrl) throws IOException, ReCaptchaException, ParsingException {
String nextPageUrl = SoundcloudParsingHelper.getStreamsFromApi(collector, apiUrl);
while (!nextStreamsUrl.isEmpty() && collector.getItemList().size() < minItems) {
nextStreamsUrl = SoundcloudParsingHelper.getStreamsFromApi(collector, nextStreamsUrl);
while (!nextPageUrl.isEmpty() && collector.getItemList().size() < minItems) {
nextPageUrl = SoundcloudParsingHelper.getStreamsFromApi(collector, nextPageUrl);
}
return nextStreamsUrl;
return nextPageUrl;
}
/**
@ -183,7 +183,7 @@ public class SoundcloudParsingHelper {
*
* @return the next streams url, empty if don't have
*/
public static String getStreamsFromApi(StreamInfoItemCollector collector, String apiUrl, boolean charts) throws IOException, ReCaptchaException, ParsingException {
public static String getStreamsFromApi(StreamInfoItemsCollector collector, String apiUrl, boolean charts) throws IOException, ReCaptchaException, ParsingException {
String response = NewPipe.getDownloader().download(apiUrl);
JsonObject responseObject;
try {
@ -200,18 +200,18 @@ public class SoundcloudParsingHelper {
}
}
String nextStreamsUrl;
String nextPageUrl;
try {
nextStreamsUrl = responseObject.getString("next_href");
if (!nextStreamsUrl.contains("client_id=")) nextStreamsUrl += "&client_id=" + SoundcloudParsingHelper.clientId();
nextPageUrl = responseObject.getString("next_href");
if (!nextPageUrl.contains("client_id=")) nextPageUrl += "&client_id=" + SoundcloudParsingHelper.clientId();
} catch (Exception ignored) {
nextStreamsUrl = "";
nextPageUrl = "";
}
return nextStreamsUrl;
return nextPageUrl;
}
public static String getStreamsFromApi(StreamInfoItemCollector collector, String apiUrl) throws ReCaptchaException, ParsingException, IOException {
public static String getStreamsFromApi(StreamInfoItemsCollector collector, String apiUrl) throws ReCaptchaException, ParsingException, IOException {
return getStreamsFromApi(collector, apiUrl, false);
}

View file

@ -8,7 +8,7 @@ import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.exceptions.ExtractionException;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.playlist.PlaylistExtractor;
import org.schabi.newpipe.extractor.stream.StreamInfoItemCollector;
import org.schabi.newpipe.extractor.stream.StreamInfoItemsCollector;
import javax.annotation.Nonnull;
import java.io.IOException;
@ -18,8 +18,8 @@ public class SoundcloudPlaylistExtractor extends PlaylistExtractor {
private String playlistId;
private JsonObject playlist;
public SoundcloudPlaylistExtractor(StreamingService service, String url, String nextStreamsUrl) throws IOException, ExtractionException {
super(service, url, nextStreamsUrl);
public SoundcloudPlaylistExtractor(StreamingService service, String url, String nextPageUrl) throws IOException, ExtractionException {
super(service, url, nextPageUrl);
}
@Override
@ -88,8 +88,8 @@ public class SoundcloudPlaylistExtractor extends PlaylistExtractor {
@Nonnull
@Override
public StreamInfoItemCollector getStreams() throws IOException, ExtractionException {
StreamInfoItemCollector collector = new StreamInfoItemCollector(getServiceId());
public StreamInfoItemsCollector getStreams() throws IOException, ExtractionException {
StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId());
// Note the "api", NOT "api-v2"
String apiUrl = "https://api.soundcloud.com/playlists/" + getId() + "/tracks"
@ -97,19 +97,19 @@ public class SoundcloudPlaylistExtractor extends PlaylistExtractor {
+ "&limit=20"
+ "&linked_partitioning=1";
nextStreamsUrl = SoundcloudParsingHelper.getStreamsFromApiMinItems(15, collector, apiUrl);
nextPageUrl = SoundcloudParsingHelper.getStreamsFromApiMinItems(15, collector, apiUrl);
return collector;
}
@Override
public NextItemsResult getNextStreams() throws IOException, ExtractionException {
if (!hasMoreStreams()) {
public InfoItemPage getInfoItemPage() throws IOException, ExtractionException {
if (!hasNextPage()) {
throw new ExtractionException("Playlist doesn't have more streams");
}
StreamInfoItemCollector collector = new StreamInfoItemCollector(getServiceId());
nextStreamsUrl = SoundcloudParsingHelper.getStreamsFromApiMinItems(15, collector, nextStreamsUrl);
StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId());
nextPageUrl = SoundcloudParsingHelper.getStreamsFromApiMinItems(15, collector, nextPageUrl);
return new NextItemsResult(collector, nextStreamsUrl);
return new InfoItemPage(collector, nextPageUrl);
}
}

View file

@ -8,7 +8,7 @@ import org.schabi.newpipe.extractor.Downloader;
import org.schabi.newpipe.extractor.NewPipe;
import org.schabi.newpipe.extractor.exceptions.ExtractionException;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.search.InfoItemSearchCollector;
import org.schabi.newpipe.extractor.search.InfoItemsSearchCollector;
import org.schabi.newpipe.extractor.search.SearchEngine;
import java.io.IOException;
@ -22,8 +22,8 @@ public class SoundcloudSearchEngine extends SearchEngine {
}
@Override
public InfoItemSearchCollector search(String query, int page, String languageCode, Filter filter) throws IOException, ExtractionException {
InfoItemSearchCollector collector = getInfoItemSearchCollector();
public InfoItemsSearchCollector search(String query, int page, String languageCode, Filter filter) throws IOException, ExtractionException {
InfoItemsSearchCollector collector = getInfoItemSearchCollector();
Downloader dl = NewPipe.getDownloader();

View file

@ -50,13 +50,13 @@ public class SoundcloudService extends StreamingService {
}
@Override
public ChannelExtractor getChannelExtractor(String url, String nextStreamsUrl) throws IOException, ExtractionException {
return new SoundcloudChannelExtractor(this, url, nextStreamsUrl);
public ChannelExtractor getChannelExtractor(String url, String nextPageUrl) throws IOException, ExtractionException {
return new SoundcloudChannelExtractor(this, url, nextPageUrl);
}
@Override
public PlaylistExtractor getPlaylistExtractor(String url, String nextStreamsUrl) throws IOException, ExtractionException {
return new SoundcloudPlaylistExtractor(this, url, nextStreamsUrl);
public PlaylistExtractor getPlaylistExtractor(String url, String nextPageUrl) throws IOException, ExtractionException {
return new SoundcloudPlaylistExtractor(this, url, nextPageUrl);
}
@Override
@ -70,12 +70,12 @@ public class SoundcloudService extends StreamingService {
@Override
public KioskExtractor createNewKiosk(StreamingService streamingService,
String url,
String nextStreamUrl,
String nextPageUrl,
String id)
throws ExtractionException, IOException {
return new SoundcloudChartsExtractor(SoundcloudService.this,
url,
nextStreamUrl,
nextPageUrl,
id);
}
};

View file

@ -10,7 +10,6 @@ import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.stream.*;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
@ -189,8 +188,8 @@ public class SoundcloudStreamExtractor extends StreamExtractor {
}
@Override
public StreamInfoItemCollector getRelatedVideos() throws IOException, ExtractionException {
StreamInfoItemCollector collector = new StreamInfoItemCollector(getServiceId());
public StreamInfoItemsCollector getRelatedVideos() throws IOException, ExtractionException {
StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId());
String apiUrl = "https://api-v2.soundcloud.com/tracks/" + urlEncode(getId()) + "/related"
+ "?client_id=" + urlEncode(SoundcloudParsingHelper.clientId());

View file

@ -1,7 +1,7 @@
package org.schabi.newpipe.extractor.services.soundcloud;
import org.schabi.newpipe.extractor.channel.ChannelInfoItem;
import org.schabi.newpipe.extractor.channel.ChannelInfoItemCollector;
import org.schabi.newpipe.extractor.channel.ChannelInfoItemsCollector;
import org.schabi.newpipe.extractor.exceptions.ExtractionException;
import org.schabi.newpipe.extractor.subscription.SubscriptionExtractor;
import org.schabi.newpipe.extractor.subscription.SubscriptionItem;
@ -39,7 +39,7 @@ public class SoundcloudSubscriptionExtractor extends SubscriptionExtractor {
String apiUrl = "https://api.soundcloud.com/users/" + id + "/followings"
+ "?client_id=" + SoundcloudParsingHelper.clientId()
+ "&limit=200";
ChannelInfoItemCollector collector = new ChannelInfoItemCollector(service.getServiceId());
ChannelInfoItemsCollector collector = new ChannelInfoItemsCollector(service.getServiceId());
// ± 2000 is the limit of followings on SoundCloud, so this minimum should be enough
SoundcloudParsingHelper.getUsersFromApiMinItems(2500, collector, apiUrl);

View file

@ -14,7 +14,7 @@ import org.schabi.newpipe.extractor.channel.ChannelExtractor;
import org.schabi.newpipe.extractor.exceptions.ExtractionException;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.exceptions.ReCaptchaException;
import org.schabi.newpipe.extractor.stream.StreamInfoItemCollector;
import org.schabi.newpipe.extractor.stream.StreamInfoItemsCollector;
import org.schabi.newpipe.extractor.utils.Parser;
import org.schabi.newpipe.extractor.utils.Utils;
@ -48,7 +48,7 @@ public class YoutubeChannelExtractor extends ChannelExtractor {
private Document doc;
/**
* It's lazily initialized (when getNextStreams is called)
* It's lazily initialized (when getInfoItemPage is called)
*/
private Document nextStreamsAjax;
@ -60,10 +60,10 @@ public class YoutubeChannelExtractor extends ChannelExtractor {
*/
private boolean fetchingNextStreams;
public YoutubeChannelExtractor(StreamingService service, String url, String nextStreamsUrl) throws IOException, ExtractionException {
super(service, url, nextStreamsUrl);
public YoutubeChannelExtractor(StreamingService service, String url, String nextPageUrl) throws IOException, ExtractionException {
super(service, url, nextPageUrl);
fetchingNextStreams = nextStreamsUrl != null && !nextStreamsUrl.isEmpty();
fetchingNextStreams = nextPageUrl != null && !nextPageUrl.isEmpty();
}
@Override
@ -73,7 +73,7 @@ public class YoutubeChannelExtractor extends ChannelExtractor {
doc = Jsoup.parse(pageContent, channelUrl);
if (!fetchingNextStreams) {
nextStreamsUrl = getNextStreamsUrlFrom(doc);
nextPageUrl = getNextPageUrlFrom(doc);
}
nextStreamsAjax = null;
}
@ -163,49 +163,49 @@ public class YoutubeChannelExtractor extends ChannelExtractor {
@Nonnull
@Override
public StreamInfoItemCollector getStreams() throws IOException, ExtractionException {
StreamInfoItemCollector collector = new StreamInfoItemCollector(getServiceId());
public StreamInfoItemsCollector getStreams() throws ExtractionException {
StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId());
Element ul = doc.select("ul[id=\"browse-items-primary\"]").first();
collectStreamsFrom(collector, ul);
return collector;
}
@Override
public NextItemsResult getNextStreams() throws IOException, ExtractionException {
if (!hasMoreStreams()) {
public InfoItemPage getInfoItemPage() throws IOException, ExtractionException {
if (!hasNextPage()) {
throw new ExtractionException("Channel doesn't have more streams");
}
fetchPage();
StreamInfoItemCollector collector = new StreamInfoItemCollector(getServiceId());
StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId());
setupNextStreamsAjax(NewPipe.getDownloader());
setupNextPageAjax(NewPipe.getDownloader());
collectStreamsFrom(collector, nextStreamsAjax.select("body").first());
return new NextItemsResult(collector, nextStreamsUrl);
return new InfoItemPage(collector, nextPageUrl);
}
private void setupNextStreamsAjax(Downloader downloader) throws IOException, ReCaptchaException, ParsingException {
String ajaxDataRaw = downloader.download(nextStreamsUrl);
private void setupNextPageAjax(Downloader downloader) throws IOException, ReCaptchaException, ParsingException {
String ajaxDataRaw = downloader.download(nextPageUrl);
try {
JsonObject ajaxData = JsonParser.object().from(ajaxDataRaw);
String htmlDataRaw = ajaxData.getString("content_html");
nextStreamsAjax = Jsoup.parse(htmlDataRaw, nextStreamsUrl);
nextStreamsAjax = Jsoup.parse(htmlDataRaw, nextPageUrl);
String nextStreamsHtmlDataRaw = ajaxData.getString("load_more_widget_html");
if (!nextStreamsHtmlDataRaw.isEmpty()) {
nextStreamsUrl = getNextStreamsUrlFrom(Jsoup.parse(nextStreamsHtmlDataRaw, nextStreamsUrl));
nextPageUrl = getNextPageUrlFrom(Jsoup.parse(nextStreamsHtmlDataRaw, nextPageUrl));
} else {
nextStreamsUrl = "";
nextPageUrl = "";
}
} catch (JsonParserException e) {
throw new ParsingException("Could not parse json data for next streams", e);
}
}
private String getNextStreamsUrlFrom(Document d) throws ParsingException {
private String getNextPageUrlFrom(Document d) throws ParsingException {
try {
Element button = d.select("button[class*=\"yt-uix-load-more\"]").first();
if (button != null) {
@ -219,7 +219,7 @@ public class YoutubeChannelExtractor extends ChannelExtractor {
}
}
private void collectStreamsFrom(StreamInfoItemCollector collector, Element element) throws ParsingException {
private void collectStreamsFrom(StreamInfoItemsCollector collector, Element element) throws ParsingException {
collector.reset();
final String uploaderName = getName();

View file

@ -14,7 +14,7 @@ import org.schabi.newpipe.extractor.exceptions.ExtractionException;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.exceptions.ReCaptchaException;
import org.schabi.newpipe.extractor.playlist.PlaylistExtractor;
import org.schabi.newpipe.extractor.stream.StreamInfoItemCollector;
import org.schabi.newpipe.extractor.stream.StreamInfoItemsCollector;
import org.schabi.newpipe.extractor.stream.StreamType;
import org.schabi.newpipe.extractor.utils.Parser;
import org.schabi.newpipe.extractor.utils.Utils;
@ -27,12 +27,12 @@ public class YoutubePlaylistExtractor extends PlaylistExtractor {
private Document doc;
/**
* It's lazily initialized (when getNextStreams is called)
* It's lazily initialized (when getInfoItemPage is called)
*/
private Document nextStreamsAjax;
public YoutubePlaylistExtractor(StreamingService service, String url, String nextStreamsUrl) throws IOException, ExtractionException {
super(service, url, nextStreamsUrl);
public YoutubePlaylistExtractor(StreamingService service, String url, String nextPageUrl) throws IOException, ExtractionException {
super(service, url, nextPageUrl);
}
@Override
@ -40,7 +40,7 @@ public class YoutubePlaylistExtractor extends PlaylistExtractor {
String pageContent = downloader.download(getCleanUrl());
doc = Jsoup.parse(pageContent, getCleanUrl());
nextStreamsUrl = getNextStreamsUrlFrom(doc);
nextPageUrl = getNextPageUrlFrom(doc);
nextStreamsAjax = null;
}
@ -143,46 +143,46 @@ public class YoutubePlaylistExtractor extends PlaylistExtractor {
@Nonnull
@Override
public StreamInfoItemCollector getStreams() throws IOException, ExtractionException {
StreamInfoItemCollector collector = new StreamInfoItemCollector(getServiceId());
public StreamInfoItemsCollector getStreams() throws IOException, ExtractionException {
StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId());
Element tbody = doc.select("tbody[id=\"pl-load-more-destination\"]").first();
collectStreamsFrom(collector, tbody);
return collector;
}
@Override
public NextItemsResult getNextStreams() throws IOException, ExtractionException {
if (!hasMoreStreams()) {
public InfoItemPage getInfoItemPage() throws IOException, ExtractionException {
if (!hasNextPage()) {
throw new ExtractionException("Playlist doesn't have more streams");
}
StreamInfoItemCollector collector = new StreamInfoItemCollector(getServiceId());
StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId());
setupNextStreamsAjax(NewPipe.getDownloader());
collectStreamsFrom(collector, nextStreamsAjax.select("tbody[id=\"pl-load-more-destination\"]").first());
return new NextItemsResult(collector, nextStreamsUrl);
return new InfoItemPage(collector, nextPageUrl);
}
private void setupNextStreamsAjax(Downloader downloader) throws IOException, ReCaptchaException, ParsingException {
String ajaxDataRaw = downloader.download(nextStreamsUrl);
String ajaxDataRaw = downloader.download(nextPageUrl);
try {
JsonObject ajaxData = JsonParser.object().from(ajaxDataRaw);
String htmlDataRaw = "<table><tbody id=\"pl-load-more-destination\">" + ajaxData.getString("content_html") + "</tbody></table>";
nextStreamsAjax = Jsoup.parse(htmlDataRaw, nextStreamsUrl);
nextStreamsAjax = Jsoup.parse(htmlDataRaw, nextPageUrl);
String nextStreamsHtmlDataRaw = ajaxData.getString("load_more_widget_html");
if (!nextStreamsHtmlDataRaw.isEmpty()) {
nextStreamsUrl = getNextStreamsUrlFrom(Jsoup.parse(nextStreamsHtmlDataRaw, nextStreamsUrl));
nextPageUrl = getNextPageUrlFrom(Jsoup.parse(nextStreamsHtmlDataRaw, nextPageUrl));
} else {
nextStreamsUrl = "";
nextPageUrl = "";
}
} catch (JsonParserException e) {
throw new ParsingException("Could not parse json data for next streams", e);
}
}
private String getNextStreamsUrlFrom(Document d) throws ParsingException {
private String getNextPageUrlFrom(Document d) throws ParsingException {
try {
Element button = d.select("button[class*=\"yt-uix-load-more\"]").first();
if (button != null) {
@ -196,7 +196,7 @@ public class YoutubePlaylistExtractor extends PlaylistExtractor {
}
}
private void collectStreamsFrom(StreamInfoItemCollector collector, Element element) throws ParsingException {
private void collectStreamsFrom(StreamInfoItemsCollector collector, Element element) throws ParsingException {
collector.reset();
final UrlIdHandler streamUrlIdHandler = getService().getStreamUrlIdHandler();

View file

@ -6,7 +6,7 @@ import org.jsoup.nodes.Element;
import org.schabi.newpipe.extractor.Downloader;
import org.schabi.newpipe.extractor.NewPipe;
import org.schabi.newpipe.extractor.exceptions.ExtractionException;
import org.schabi.newpipe.extractor.search.InfoItemSearchCollector;
import org.schabi.newpipe.extractor.search.InfoItemsSearchCollector;
import org.schabi.newpipe.extractor.search.SearchEngine;
import java.io.IOException;
@ -43,9 +43,9 @@ public class YoutubeSearchEngine extends SearchEngine {
}
@Override
public InfoItemSearchCollector search(String query, int page, String languageCode, Filter filter)
public InfoItemsSearchCollector search(String query, int page, String languageCode, Filter filter)
throws IOException, ExtractionException {
InfoItemSearchCollector collector = getInfoItemSearchCollector();
InfoItemsSearchCollector collector = getInfoItemSearchCollector();
Downloader downloader = NewPipe.getDownloader();
String url = "https://www.youtube.com/results"

View file

@ -70,13 +70,13 @@ public class YoutubeService extends StreamingService {
}
@Override
public ChannelExtractor getChannelExtractor(String url, String nextStreamsUrl) throws IOException, ExtractionException {
return new YoutubeChannelExtractor(this, url, nextStreamsUrl);
public ChannelExtractor getChannelExtractor(String url, String nextPageUrl) throws IOException, ExtractionException {
return new YoutubeChannelExtractor(this, url, nextPageUrl);
}
@Override
public PlaylistExtractor getPlaylistExtractor(String url, String nextStreamsUrl) throws IOException, ExtractionException {
return new YoutubePlaylistExtractor(this, url, nextStreamsUrl);
public PlaylistExtractor getPlaylistExtractor(String url, String nextPageUrl) throws IOException, ExtractionException {
return new YoutubePlaylistExtractor(this, url, nextPageUrl);
}
@Override
@ -92,9 +92,9 @@ public class YoutubeService extends StreamingService {
try {
list.addKioskEntry(new KioskList.KioskExtractorFactory() {
@Override
public KioskExtractor createNewKiosk(StreamingService streamingService, String url, String nextStreamUrl, String id)
public KioskExtractor createNewKiosk(StreamingService streamingService, String url, String nextPageUrl, String id)
throws ExtractionException, IOException {
return new YoutubeTrendingExtractor(YoutubeService.this, url, nextStreamUrl, id);
return new YoutubeTrendingExtractor(YoutubeService.this, url, nextPageUrl, id);
}
}, new YoutubeTrendingUrlIdHandler(), "Trending");
list.setDefaultKiosk("Trending");

View file

@ -452,7 +452,7 @@ public class YoutubeStreamExtractor extends StreamExtractor {
public StreamInfoItem getNextVideo() throws IOException, ExtractionException {
assertPageFetched();
try {
StreamInfoItemCollector collector = new StreamInfoItemCollector(getServiceId());
StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId());
collector.commit(extractVideoPreviewInfo(doc.select("div[class=\"watch-sidebar-section\"]")
.first().select("li").first()));
@ -463,10 +463,10 @@ public class YoutubeStreamExtractor extends StreamExtractor {
}
@Override
public StreamInfoItemCollector getRelatedVideos() throws IOException, ExtractionException {
public StreamInfoItemsCollector getRelatedVideos() throws IOException, ExtractionException {
assertPageFetched();
try {
StreamInfoItemCollector collector = new StreamInfoItemCollector(getServiceId());
StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId());
Element ul = doc.select("ul[id=\"watch-related\"]").first();
if (ul != null) {
for (Element li : ul.children()) {

View file

@ -28,7 +28,7 @@ import org.schabi.newpipe.extractor.*;
import org.schabi.newpipe.extractor.exceptions.ExtractionException;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.kiosk.KioskExtractor;
import org.schabi.newpipe.extractor.stream.StreamInfoItemCollector;
import org.schabi.newpipe.extractor.stream.StreamInfoItemsCollector;
import javax.annotation.Nonnull;
import java.io.IOException;
@ -37,9 +37,9 @@ public class YoutubeTrendingExtractor extends KioskExtractor {
private Document doc;
public YoutubeTrendingExtractor(StreamingService service, String url, String nextStreamsUrl, String kioskId)
throws IOException, ExtractionException {
super(service, url, nextStreamsUrl, kioskId);
public YoutubeTrendingExtractor(StreamingService service, String url, String nextPageUrl, String kioskId)
throws ExtractionException {
super(service, url, nextPageUrl, kioskId);
}
@Override
@ -61,7 +61,7 @@ public class YoutubeTrendingExtractor extends KioskExtractor {
}
@Override
public ListExtractor.NextItemsResult getNextStreams() {
public ListExtractor.InfoItemPage getInfoItemPage() {
return null;
}
@ -80,8 +80,8 @@ public class YoutubeTrendingExtractor extends KioskExtractor {
@Nonnull
@Override
public StreamInfoItemCollector getStreams() throws ParsingException {
StreamInfoItemCollector collector = new StreamInfoItemCollector(getServiceId());
public StreamInfoItemsCollector getInfoItems() throws ParsingException {
StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId());
Elements uls = doc.select("ul[class*=\"expanded-shelf-content-list\"]");
for(Element ul : uls) {
for(final Element li : ul.children()) {

View file

@ -29,7 +29,6 @@ import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.utils.Parser;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import java.io.IOException;
import java.util.List;
@ -139,7 +138,7 @@ public abstract class StreamExtractor extends Extractor {
public abstract StreamType getStreamType() throws ParsingException;
public abstract StreamInfoItem getNextVideo() throws IOException, ExtractionException;
public abstract StreamInfoItemCollector getRelatedVideos() throws IOException, ExtractionException;
public abstract StreamInfoItemsCollector getRelatedVideos() throws IOException, ExtractionException;
/**
* Analyses the webpage's document and extracts any error message there might be.

View file

@ -1,7 +1,7 @@
package org.schabi.newpipe.extractor.stream;
import org.schabi.newpipe.extractor.InfoItem;
import org.schabi.newpipe.extractor.InfoItemCollector;
import org.schabi.newpipe.extractor.InfoItemsCollector;
import org.schabi.newpipe.extractor.exceptions.FoundAdException;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
@ -12,7 +12,7 @@ import java.util.Vector;
* Created by Christian Schabesberger on 28.02.16.
*
* Copyright (C) Christian Schabesberger 2016 <chris.schabesberger@mailbox.org>
* StreamInfoItemCollector.java is part of NewPipe.
* StreamInfoItemsCollector.java is part of NewPipe.
*
* NewPipe is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@ -28,9 +28,9 @@ import java.util.Vector;
* along with NewPipe. If not, see <http://www.gnu.org/licenses/>.
*/
public class StreamInfoItemCollector extends InfoItemCollector<StreamInfoItem, StreamInfoItemExtractor> {
public class StreamInfoItemsCollector extends InfoItemsCollector<StreamInfoItem, StreamInfoItemExtractor> {
public StreamInfoItemCollector(int serviceId) {
public StreamInfoItemsCollector(int serviceId) {
super(serviceId);
}

View file

@ -2,11 +2,11 @@ package org.schabi.newpipe.extractor.utils;
import org.schabi.newpipe.extractor.Info;
import org.schabi.newpipe.extractor.InfoItem;
import org.schabi.newpipe.extractor.InfoItemCollector;
import org.schabi.newpipe.extractor.InfoItemsCollector;
import org.schabi.newpipe.extractor.ListExtractor;
import org.schabi.newpipe.extractor.stream.StreamExtractor;
import org.schabi.newpipe.extractor.stream.StreamInfo;
import org.schabi.newpipe.extractor.stream.StreamInfoItemCollector;
import org.schabi.newpipe.extractor.stream.StreamInfoItemsCollector;
import java.util.Collections;
import java.util.List;
@ -14,10 +14,10 @@ import java.util.List;
public class ExtractorHelper {
private ExtractorHelper() {}
public static List<InfoItem> getStreamsOrLogError(Info info, ListExtractor extractor) {
StreamInfoItemCollector collector;
public static List<InfoItem> getInfoItemsOrLogError(Info info, ListExtractor extractor) {
InfoItemsCollector collector;
try {
collector = extractor.getStreams();
collector = extractor.getInfoItems();
} catch (Exception e) {
info.addError(e);
return Collections.emptyList();
@@ -28,7 +28,7 @@ public class ExtractorHelper {
public static List<InfoItem> getRelatedVideosOrLogError(StreamInfo info, StreamExtractor extractor) {
StreamInfoItemCollector collector;
StreamInfoItemsCollector collector;
try {
collector = extractor.getRelatedVideos();
} catch (Exception e) {
@@ -39,7 +39,7 @@ public class ExtractorHelper {
return getInfoItems(info, collector);
}
private static List<InfoItem> getInfoItems(Info info, InfoItemCollector collector) {
private static List<InfoItem> getInfoItems(Info info, InfoItemsCollector collector) {
List<InfoItem> result;
try {
result = collector.getItemList();
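
A sketch of how the renamed helpers read at a call site. The Info objects and extractors are placeholders assumed to have been created elsewhere; only the ExtractorHelper method names and their swallow-and-record error behaviour (catch, info.addError(e), return an empty list) come from the hunk above.

import java.util.List;

import org.schabi.newpipe.extractor.Info;
import org.schabi.newpipe.extractor.InfoItem;
import org.schabi.newpipe.extractor.ListExtractor;
import org.schabi.newpipe.extractor.stream.StreamExtractor;
import org.schabi.newpipe.extractor.stream.StreamInfo;
import org.schabi.newpipe.extractor.utils.ExtractorHelper;

// Hypothetical call sites: extraction failures never propagate, they end up on the Info's error list.
final class ExtractorHelperUsageSketch {
    static void printCounts(Info listInfo, ListExtractor listExtractor,
                            StreamInfo streamInfo, StreamExtractor streamExtractor) {
        List<InfoItem> items = ExtractorHelper.getInfoItemsOrLogError(listInfo, listExtractor);
        List<InfoItem> related = ExtractorHelper.getRelatedVideosOrLogError(streamInfo, streamExtractor);
        System.out.println(items.size() + " list items, " + related.size() + " related streams");
    }
}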

View file

@@ -61,7 +61,7 @@ public class SoundcloudChannelExtractorTest {
public void testHasMoreStreams() throws Exception {
// Setup the streams
extractor.getStreams();
assertTrue("don't have more streams", extractor.hasMoreStreams());
assertTrue("don't have more streams", extractor.hasNextPage());
}
@Test
@@ -73,10 +73,10 @@ public class SoundcloudChannelExtractorTest {
public void testGetNextStreams() throws Exception {
// Setup the streams
extractor.getStreams();
ListExtractor.NextItemsResult nextItemsResult = extractor.getNextStreams();
assertTrue("extractor didn't have next streams", !nextItemsResult.nextItemsList.isEmpty());
ListExtractor.InfoItemPage nextItemsResult = extractor.getInfoItemPage();
assertTrue("extractor didn't have next streams", !nextItemsResult.infoItemList.isEmpty());
assertTrue("errors occurred during extraction of the next streams", nextItemsResult.errors.isEmpty());
assertTrue("extractor didn't have more streams after getNextStreams", extractor.hasMoreStreams());
assertTrue("extractor didn't have more streams after getInfoItemPage", extractor.hasNextPage());
}
}

View file

@@ -4,9 +4,11 @@ import org.junit.BeforeClass;
import org.junit.Ignore;
import org.junit.Test;
import org.schabi.newpipe.Downloader;
import org.schabi.newpipe.extractor.InfoItemsCollector;
import org.schabi.newpipe.extractor.NewPipe;
import org.schabi.newpipe.extractor.kiosk.KioskExtractor;
import org.schabi.newpipe.extractor.stream.StreamInfoItemCollector;
import java.util.List;
import static org.junit.Assert.*;
import static org.schabi.newpipe.extractor.ServiceList.SoundCloud;
@@ -39,16 +41,17 @@ public class SoundcloudChartsExtractorTest {
}
@Test
public void testId() throws Exception {
public void testId() {
assertEquals(extractor.getId(), "Top 50");
}
@Test
public void testGetStreams() throws Exception {
StreamInfoItemCollector collector = extractor.getStreams();
InfoItemsCollector collector = extractor.getInfoItems();
if(!collector.getErrors().isEmpty()) {
System.err.println("----------");
for(Throwable e: collector.getErrors()) {
List<Throwable> errors = collector.getErrors();
for(Throwable e: errors) {
e.printStackTrace();
System.err.println("----------");
}
@@ -60,21 +63,21 @@ public class SoundcloudChartsExtractorTest {
@Test
public void testGetStreamsErrors() throws Exception {
assertTrue("errors during stream list extraction", extractor.getStreams().getErrors().isEmpty());
assertTrue("errors during stream list extraction", extractor.getInfoItems().getErrors().isEmpty());
}
@Test
public void testHasMoreStreams() throws Exception {
// Setup the streams
extractor.getStreams();
assertTrue("has more streams", extractor.hasMoreStreams());
extractor.getInfoItems();
assertTrue("has more streams", extractor.hasNextPage());
}
@Test
public void testGetNextStreams() throws Exception {
extractor.getStreams();
assertFalse("extractor has next streams", extractor.getNextStreams() == null
|| extractor.getNextStreams().nextItemsList.isEmpty());
extractor.getInfoItems();
assertFalse("extractor has next streams", extractor.getInfoItemPage() == null
|| extractor.getInfoItemPage().infoItemList.isEmpty());
}
@Test

View file

@@ -81,7 +81,7 @@ public class SoundcloudPlaylistExtractorTest {
public void testHasMoreStreams() throws Exception {
// Setup the streams
extractor.getStreams();
assertTrue("extractor didn't have more streams", !extractor.hasMoreStreams());
assertTrue("extractor didn't have more streams", !extractor.hasNextPage());
}
@Test(expected = ExtractionException.class)
@@ -90,7 +90,7 @@ public class SoundcloudPlaylistExtractorTest {
extractor.getStreams();
// This playlist don't have more streams, it should throw an error
extractor.getNextStreams();
extractor.getInfoItemPage();
fail("Expected exception wasn't thrown");
}

View file

@@ -7,7 +7,7 @@ import org.schabi.newpipe.extractor.NewPipe;
import org.schabi.newpipe.extractor.exceptions.ExtractionException;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.stream.StreamExtractor;
import org.schabi.newpipe.extractor.stream.StreamInfoItemCollector;
import org.schabi.newpipe.extractor.stream.StreamInfoItemsCollector;
import org.schabi.newpipe.extractor.stream.StreamType;
import java.io.IOException;
@@ -100,7 +100,7 @@ public class SoundcloudStreamExtractorDefaultTest {
@Test
public void testGetRelatedVideos() throws ExtractionException, IOException {
StreamInfoItemCollector relatedVideos = extractor.getRelatedVideos();
StreamInfoItemsCollector relatedVideos = extractor.getRelatedVideos();
assertFalse(relatedVideos.getItemList().isEmpty());
assertTrue(relatedVideos.getErrors().isEmpty());
}

View file

@@ -100,7 +100,7 @@ public class YoutubeChannelExtractorTest {
public void testHasMoreStreams() throws Exception {
// Setup the streams
extractor.getStreams();
assertTrue("don't have more streams", extractor.hasMoreStreams());
assertTrue("don't have more streams", extractor.hasNextPage());
}
@Test
@@ -112,9 +112,9 @@ public class YoutubeChannelExtractorTest {
public void testGetNextStreams() throws Exception {
// Setup the streams
extractor.getStreams();
ListExtractor.NextItemsResult nextItemsResult = extractor.getNextStreams();
assertTrue("extractor didn't have next streams", !nextItemsResult.nextItemsList.isEmpty());
ListExtractor.InfoItemPage nextItemsResult = extractor.getInfoItemPage();
assertTrue("extractor didn't have next streams", !nextItemsResult.infoItemList.isEmpty());
assertEmptyErrors("errors occurred during extraction of the next streams", nextItemsResult.errors);
assertTrue("extractor didn't have more streams after getNextStreams", extractor.hasMoreStreams());
assertTrue("extractor didn't have more streams after getInfoItemPage", extractor.hasNextPage());
}
}

View file

@@ -102,18 +102,18 @@ public class YoutubePlaylistExtractorTest {
public void testHasMoreStreams() throws Exception {
// Setup the streams
extractor.getStreams();
assertTrue("extractor didn't have more streams", extractor.hasMoreStreams());
assertTrue("extractor didn't have more streams", extractor.hasNextPage());
}
@Test @Ignore
public void testGetNextStreams() throws Exception {
public void testGetNextPage() throws Exception {
// Setup the streams
extractor.getStreams();
ListExtractor.NextItemsResult nextItemsResult = extractor.getNextStreams();
assertTrue("extractor didn't have next streams", !nextItemsResult.nextItemsList.isEmpty());
assertEmptyErrors("errors occurred during extraction of the next streams", nextItemsResult.errors);
assertTrue("extractor didn't have more streams after getNextStreams", extractor.hasMoreStreams());
ListExtractor.InfoItemPage infoItemPage = extractor.getInfoItemPage();
assertTrue("extractor didn't have next streams", !infoItemPage.infoItemList.isEmpty());
assertEmptyErrors("errors occurred during extraction of the next streams", infoItemPage.errors);
assertTrue("extractor didn't have more streams after getInfoItemPage", extractor.hasNextPage());
}
}

View file

@@ -138,7 +138,7 @@ public class YoutubeStreamExtractorDefaultTest {
@Test
public void testGetRelatedVideos() throws ExtractionException, IOException {
StreamInfoItemCollector relatedVideos = extractor.getRelatedVideos();
StreamInfoItemsCollector relatedVideos = extractor.getRelatedVideos();
Utils.printErrors(relatedVideos);
assertFalse(relatedVideos.getItemList().isEmpty());
assertTrue(relatedVideos.getErrors().isEmpty());

View file

@@ -23,8 +23,8 @@ package org.schabi.newpipe.extractor.services.youtube;
import org.junit.BeforeClass;
import org.junit.Test;
import org.schabi.newpipe.Downloader;
import org.schabi.newpipe.extractor.InfoItemsCollector;
import org.schabi.newpipe.extractor.NewPipe;
import org.schabi.newpipe.extractor.stream.StreamInfoItemCollector;
import org.schabi.newpipe.extractor.utils.Utils;
import static junit.framework.TestCase.assertFalse;
@@ -66,27 +66,27 @@ public class YoutubeTrendingExtractorTest {
@Test
public void testGetStreamsQuantity() throws Exception {
StreamInfoItemCollector collector = extractor.getStreams();
InfoItemsCollector collector = extractor.getInfoItems();
Utils.printErrors(collector);
assertTrue("no streams are received", collector.getItemList().size() >= 20);
}
@Test
public void testGetStreamsErrors() throws Exception {
assertEmptyErrors("errors during stream list extraction", extractor.getStreams().getErrors());
assertEmptyErrors("errors during stream list extraction", extractor.getInfoItems().getErrors());
}
@Test
public void testHasMoreStreams() throws Exception {
// Setup the streams
extractor.getStreams();
assertFalse("has more streams", extractor.hasMoreStreams());
extractor.getInfoItems();
assertFalse("has more streams", extractor.hasNextPage());
}
@Test
public void testGetNextStreams() throws Exception {
assertTrue("extractor has next streams", extractor.getNextStreams() == null
|| extractor.getNextStreams().getNextItemsList().isEmpty());
assertTrue("extractor has next streams", extractor.getInfoItemPage() == null
|| extractor.getInfoItemPage().getNextItemsList().isEmpty());
}
@Test