Skip to content

Commit

Permalink
Fix modernizer and refactor protected terms (#9427)
Browse files Browse the repository at this point in the history
* Fix modernizer in templateexporter

* fix doi cleanup

* use unmodifiable map

* use standard charset

* standard charsets

* Simplify protected terms
Fix protected terms test operating on dir instead of file

* checkstyle

* remove vector

* remove obsolete classes

* Use collectors only element

* fix unit test npe

* checkstyle

* Fixed indentation

* Fixed indentation

Co-authored-by: Carl Christian Snethlage <50491877+calixtus@users.noreply.github.com>
  • Loading branch information
Siedlerchr and calixtus committed Dec 5, 2022
1 parent bfd3446 commit 4c82d3e
Show file tree
Hide file tree
Showing 13 changed files with 178 additions and 366 deletions.
11 changes: 5 additions & 6 deletions src/main/java/org/jabref/cli/ArgumentProcessor.java
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,6 @@
import java.util.Locale;
import java.util.Optional;
import java.util.Set;
import java.util.Vector;
import java.util.prefs.BackingStoreException;

import org.jabref.gui.Globals;
Expand Down Expand Up @@ -288,8 +287,8 @@ private void writeMetadatatoPdf(List<ParserResult> loaded, String filesAndCiteke
return;
}

Vector<String> citeKeys = new Vector<>();
Vector<String> pdfs = new Vector<>();
List<String> citeKeys = new ArrayList<>();
List<String> pdfs = new ArrayList<>();
for (String fileOrCiteKey : filesAndCitekeys.split(",")) {
if (fileOrCiteKey.toLowerCase(Locale.ROOT).endsWith(".pdf")) {
pdfs.add(fileOrCiteKey);
Expand Down Expand Up @@ -323,7 +322,7 @@ private void writeMetadatatoPDFsOfEntry(BibDatabaseContext databaseContext, Stri
}
}

private void writeMetadatatoPdfByCitekey(BibDatabaseContext databaseContext, BibDatabase dataBase, Vector<String> citeKeys, FilePreferences filePreferences, XmpPdfExporter xmpPdfExporter, EmbeddedBibFilePdfExporter embeddedBibFilePdfExporter, boolean writeXMP, boolean embeddBibfile) {
private void writeMetadatatoPdfByCitekey(BibDatabaseContext databaseContext, BibDatabase dataBase, List<String> citeKeys, FilePreferences filePreferences, XmpPdfExporter xmpPdfExporter, EmbeddedBibFilePdfExporter embeddedBibFilePdfExporter, boolean writeXMP, boolean embeddBibfile) {
for (String citeKey : citeKeys) {
List<BibEntry> bibEntryList = dataBase.getEntriesByCitationKey(citeKey);
if (bibEntryList.isEmpty()) {
Expand All @@ -336,8 +335,8 @@ private void writeMetadatatoPdfByCitekey(BibDatabaseContext databaseContext, Bib
}
}

private void writeMetadatatoPdfByFileNames(BibDatabaseContext databaseContext, BibDatabase dataBase, Vector<String> fileNames, FilePreferences filePreferences, XmpPdfExporter xmpPdfExporter, EmbeddedBibFilePdfExporter embeddedBibFilePdfExporter, boolean writeXMP, boolean embeddBibfile) {
for (String fileName : fileNames) {
private void writeMetadatatoPdfByFileNames(BibDatabaseContext databaseContext, BibDatabase dataBase, List<String> pdfs, FilePreferences filePreferences, XmpPdfExporter xmpPdfExporter, EmbeddedBibFilePdfExporter embeddedBibFilePdfExporter, boolean writeXMP, boolean embeddBibfile) {
for (String fileName : pdfs) {
Path filePath = Path.of(fileName);
if (!filePath.isAbsolute()) {
filePath = FileHelper.find(fileName, databaseContext.getFileDirectories(filePreferences)).orElse(FileHelper.find(fileName, List.of(Path.of("").toAbsolutePath())).orElse(filePath));
Expand Down
100 changes: 0 additions & 100 deletions src/main/java/org/jabref/cli/CrossrefFetcherEvaluator.java

This file was deleted.

50 changes: 0 additions & 50 deletions src/main/java/org/jabref/cli/GenerateCharacterTable.java

This file was deleted.

Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@
import org.jabref.model.entry.field.OrFields;
import org.jabref.model.entry.field.UnknownField;

import com.google.common.collect.Iterables;
import com.google.common.collect.MoreCollectors;

/**
* A column that displays the text-value of the field
Expand All @@ -33,9 +33,9 @@ public FieldColumn(MainTableColumnModel model) {

if (fields.size() == 1) {
// comparator can't parse more than one value
Field field = Iterables.getOnlyElement(fields);
Field field = fields.stream().collect(MoreCollectors.onlyElement());

if (field instanceof UnknownField || field.isNumeric()) {
if ((field instanceof UnknownField) || field.isNumeric()) {
this.setComparator(new NumericFieldComparator());
}
}
Expand Down
Original file line number Diff line number Diff line change
@@ -1,8 +1,7 @@
package org.jabref.gui.preferences.protectedterms;

import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
Expand Down Expand Up @@ -57,6 +56,7 @@ public void setValues() {
termsFilesProperty.addAll(termsLoader.getProtectedTermsLists().stream().map(ProtectedTermsListItemModel::new).toList());
}

@Override
public void storeSettings() {
List<String> enabledExternalList = new ArrayList<>();
List<String> disabledExternalList = new ArrayList<>();
Expand Down Expand Up @@ -97,12 +97,8 @@ public void addFile() {

dialogService.showFileOpenDialog(fileDialogConfiguration)
.ifPresent(file -> {
String fileName = file.toAbsolutePath().toString();
try {
termsFilesProperty.add(new ProtectedTermsListItemModel(ProtectedTermsLoader.readProtectedTermsListFromFile(new File(fileName), true)));
} catch (FileNotFoundException e) {
LOGGER.warn("Cannot find protected terms file " + fileName, e);
}
Path fileName = file.toAbsolutePath();
termsFilesProperty.add(new ProtectedTermsListItemModel(ProtectedTermsLoader.readProtectedTermsListFromFile(fileName, true)));
});
}

Expand Down Expand Up @@ -155,16 +151,12 @@ public void displayContent(ProtectedTermsListItemModel itemModel) {

public void reloadList(ProtectedTermsListItemModel oldItemModel) {
ProtectedTermsList oldList = oldItemModel.getTermsList();
try {
ProtectedTermsList newList = ProtectedTermsLoader.readProtectedTermsListFromFile(new File(oldList.getLocation()), oldList.isEnabled());
int index = termsFilesProperty.indexOf(oldItemModel);
if (index >= 0) {
termsFilesProperty.set(index, new ProtectedTermsListItemModel(newList));
} else {
LOGGER.warn("Problem reloading protected terms file {}.", oldList.getLocation());
}
} catch (IOException e) {
LOGGER.warn("Problem reloading protected terms file {}.", oldList.getLocation(), e);
ProtectedTermsList newList = ProtectedTermsLoader.readProtectedTermsListFromFile(Path.of(oldList.getLocation()), oldList.isEnabled());
int index = termsFilesProperty.indexOf(oldItemModel);
if (index >= 0) {
termsFilesProperty.set(index, new ProtectedTermsListItemModel(newList));
} else {
LOGGER.warn("Problem reloading protected terms file {}.", oldList.getLocation());
}
}

Expand Down
8 changes: 2 additions & 6 deletions src/main/java/org/jabref/logic/cleanup/DoiCleanup.java
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
package org.jabref.logic.cleanup;

import java.io.UnsupportedEncodingException;
import java.net.URLDecoder;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
Expand Down Expand Up @@ -34,11 +34,7 @@ public List<FieldChange> cleanup(BibEntry entry) {
String doiFieldValue = entry.getField(StandardField.DOI).orElse(null);

String decodeDoiFieldValue = "";
try {
decodeDoiFieldValue = URLDecoder.decode(doiFieldValue, "UTF-8");
} catch (UnsupportedEncodingException e) {
decodeDoiFieldValue = doiFieldValue;
}
decodeDoiFieldValue = URLDecoder.decode(doiFieldValue, StandardCharsets.UTF_8);
doiFieldValue = decodeDoiFieldValue;

Optional<DOI> doi = DOI.parse(doiFieldValue);
Expand Down
30 changes: 12 additions & 18 deletions src/main/java/org/jabref/logic/exporter/TemplateExporter.java
Original file line number Diff line number Diff line change
@@ -1,14 +1,13 @@
package org.jabref.logic.exporter;

import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.Reader;
import java.net.URISyntaxException;
import java.net.URL;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.HashMap;
Expand Down Expand Up @@ -156,28 +155,24 @@ private Reader getReader(String filename) throws IOException {
} else {
dir = LAYOUT_PREFIX + (directory == null ? "" : directory + '/');
}

// Attempt to get a Reader for the file path given, either by
// loading it as a resource (from within JAR), or as a normal file. If
// unsuccessful (e.g. file not found), an IOException is thrown.
String name = dir + filename;
Reader reader;
// Try loading as a resource first. This works for files inside the JAR:
URL reso = TemplateExporter.class.getResource(name);

// If that did not work, try loading as a normal file URL:
try {
if (reso == null) {
File f = new File(name);
reader = new FileReader(f);
URL res = TemplateExporter.class.getResource(name);
Path reso;
if (res == null) {
reso = Path.of(name);
} else {
reader = new InputStreamReader(reso.openStream());
reso = Path.of(res.toURI());
}
} catch (FileNotFoundException ex) {
return Files.newBufferedReader(reso, StandardCharsets.UTF_8);
} catch (FileNotFoundException | URISyntaxException ex) {
throw new IOException("Cannot find layout file: '" + name + "'.");
}

return reader;
}

@Override
Expand Down Expand Up @@ -280,7 +275,6 @@ public void export(final BibDatabaseContext databaseContext, final Path file,
}

// Print footer

// changed section - begin (arudert)
Layout endLayout = null;
try (Reader reader = getReader(lfFileName + END_INFIX + LAYOUT_EXTENSION)) {
Expand Down Expand Up @@ -313,9 +307,9 @@ public void export(final BibDatabaseContext databaseContext, final Path file,
* If so, read all the name formatters so they can be used by the filter layouts.
*/
private void readFormatterFile() {
File formatterFile = new File(lfFileName + FORMATTERS_EXTENSION);
if (formatterFile.exists()) {
try (Reader in = new FileReader(formatterFile)) {
Path formatterFile = Path.of(lfFileName + FORMATTERS_EXTENSION);
if (Files.exists(formatterFile)) {
try (Reader in = Files.newBufferedReader(formatterFile, StandardCharsets.UTF_8)) {
// Ok, we found and opened the file. Read all contents:
StringBuilder sb = new StringBuilder();
int c;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,6 @@
import org.jabref.model.strings.StringUtil;
import org.jabref.model.util.OptionalUtil;

import com.google.common.collect.ImmutableMap;
import org.apache.http.client.utils.URIBuilder;
import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode;
import org.slf4j.Logger;
Expand Down Expand Up @@ -93,9 +92,9 @@ public class ArXivFetcher implements FulltextFetcher, PagedSearchBasedFetcher, I
*/
private static final Set<Field> CHOSEN_MANUAL_DOI_FIELDS = Set.of(StandardField.DOI, StandardField.PUBLISHER, InternalField.KEY_FIELD);

private static final Map<String, String> ARXIV_KEYWORDS_WITH_COMMA_REPLACEMENTS = ImmutableMap.of(
private static final Map<String, String> ARXIV_KEYWORDS_WITH_COMMA_REPLACEMENTS = Collections.unmodifiableMap(Map.of(
"Computational Engineering, Finance, and Science", "Computational Engineering / Finance / Science",
"Distributed, Parallel, and Cluster Computing", "Distributed / Parallel / Cluster Computing");
"Distributed, Parallel, and Cluster Computing", "Distributed / Parallel / Cluster Computing"));

private final ArXiv arXiv;
private final DoiFetcher doiFetcher;
Expand Down Expand Up @@ -607,8 +606,8 @@ private List<BibEntry> filterYears(List<BibEntry> searchResult, ArXivQueryTransf
return searchResult.stream()
.filter(entry -> entry.getField(StandardField.DATE).isPresent())
// Filter the date field for year only
.filter(entry -> transformer.getEndYear().isEmpty() || Integer.parseInt(entry.getField(StandardField.DATE).get().substring(0, 4)) <= transformer.getEndYear().get())
.filter(entry -> transformer.getStartYear().isEmpty() || Integer.parseInt(entry.getField(StandardField.DATE).get().substring(0, 4)) >= transformer.getStartYear().get())
.filter(entry -> transformer.getEndYear().isEmpty() || (Integer.parseInt(entry.getField(StandardField.DATE).get().substring(0, 4)) <= transformer.getEndYear().get()))
.filter(entry -> transformer.getStartYear().isEmpty() || (Integer.parseInt(entry.getField(StandardField.DATE).get().substring(0, 4)) >= transformer.getStartYear().get()))
.collect(Collectors.toList());
}

Expand Down
Loading

0 comments on commit 4c82d3e

Please sign in to comment.