Compare commits


No commits in common. "main" and "v0.2.3" have entirely different histories.
main ... v0.2.3

113 changed files with 2665 additions and 13286 deletions

@@ -1,14 +0,0 @@
{
  "name": "Rust",
  "image": "mcr.microsoft.com/devcontainers/rust:0-1-bullseye",
  "features": {
    "ghcr.io/devcontainers/features/docker-in-docker:2": {}
  },
  "portsAttributes": {
    "8080": {
      "label": "redlib",
      "onAutoForward": "notify"
    }
  },
  "postCreateCommand": "cargo build"
}

@@ -1 +0,0 @@
target

@@ -1,58 +0,0 @@
# Redlib configuration
# See the Configuration section of the README for a more detailed explanation of these settings.
# Instance-specific settings
# Enable SFW-only mode for the instance
REDLIB_SFW_ONLY=off
# Set a banner message for the instance
REDLIB_BANNER=
# Disable search engine indexing
REDLIB_ROBOTS_DISABLE_INDEXING=off
# Set the Pushshift frontend for "removed" links
REDLIB_PUSHSHIFT_FRONTEND=undelete.pullpush.io
# Default user settings
# Set the default theme (options: system, light, dark, black, dracula, nord, laserwave, violet, gold, rosebox, gruvboxdark, gruvboxlight)
REDLIB_DEFAULT_THEME=system
# Set the default mascot
REDLIB_DEFAULT_MASCOT=none
# Enable showing redsunlib colorway by default
REDLIB_DEFAULT_REDSUNLIB_COLORWAY=off
# Set the default front page (options: default, popular, all)
REDLIB_DEFAULT_FRONT_PAGE=default
# Set the default layout (options: card, clean, compact)
REDLIB_DEFAULT_LAYOUT=card
# Enable wide mode by default
REDLIB_DEFAULT_WIDE=off
# Set the default post sort method (options: hot, new, top, rising, controversial)
REDLIB_DEFAULT_POST_SORT=hot
# Set the default comment sort method (options: confidence, top, new, controversial, old)
REDLIB_DEFAULT_COMMENT_SORT=confidence
# Enable blurring Spoiler content by default
REDLIB_DEFAULT_BLUR_SPOILER=off
# Enable showing NSFW content by default
REDLIB_DEFAULT_SHOW_NSFW=off
# Enable blurring NSFW content by default
REDLIB_DEFAULT_BLUR_NSFW=off
# Enable HLS video format by default
REDLIB_DEFAULT_USE_HLS=off
# Enable audio+video downloads with ffmpeg.wasm
REDLIB_DEFAULT_FFMPEG_VIDEO_DOWNLOADS=off
# Hide HLS notification by default
REDLIB_DEFAULT_HIDE_HLS_NOTIFICATION=off
# Disable autoplay videos by default
REDLIB_DEFAULT_AUTOPLAY_VIDEOS=off
# Define a default list of subreddit subscriptions (format: sub1+sub2+sub3)
REDLIB_DEFAULT_SUBSCRIPTIONS=
# Define a default list of subreddit filters (format: sub1+sub2+sub3)
REDLIB_DEFAULT_FILTERS=
# Hide awards by default
REDLIB_DEFAULT_HIDE_AWARDS=off
# Hide sidebar and summary
REDLIB_DEFAULT_HIDE_SIDEBAR_AND_SUMMARY=off
# Disable the confirmation before visiting Reddit
REDLIB_DEFAULT_DISABLE_VISIT_REDDIT_CONFIRMATION=off
# Hide score by default
REDLIB_DEFAULT_HIDE_SCORE=off
# Enable fixed navbar by default
REDLIB_DEFAULT_FIXED_NAVBAR=on

.envrc

@@ -1 +0,0 @@
use flake

.gitattributes

@@ -1 +0,0 @@
Dockerfile.* linguist-language=Dockerfile

.github/FUNDING.yml

@@ -1,3 +0,0 @@
liberapay: sigaloid
buy_me_a_coffee: sigaloid
github: sigaloid

@@ -1,41 +0,0 @@
---
name: 🐛 Bug report
about: Create a report to help us improve
title: '🐛 Bug Report: '
labels: bug
assignees: ''
---
<!--
BEFORE FILING A BUG REPORT: Ensure that you are running the latest git commit. Visit /info on your instance, and ensure the git commit listed is the same commit listed on the home page.
-->
## Describe the bug
<!--
A clear and concise description of what the bug is.
-->
## Steps to reproduce the bug
<!--
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error
-->
## What's the expected behavior?
<!--
A clear and concise description of what you expected to happen.
-->
## Additional context / screenshot
<!--
Add any other context about the problem here.
-->
<!-- Mandatory -->
- [ ] I checked that the instance that this was reported on is running the latest git commit, or I can reproduce it locally on the latest git commit

@@ -1,28 +0,0 @@
---
name: ✨ Feature parity
about: Suggest implementing a feature into Redlib that is found in Reddit.com
title: '✨ Feature parity: '
labels: feature parity
assignees: ''
---
## How does this feature work on Reddit?
<!--
A clear and concise description of what the feature is.
-->
## Describe how this could be implemented into Redlib
<!--
A clear and concise description of what you want to happen.
-->
## Describe alternatives you've considered
<!--
A clear and concise description of any alternative solutions or features you've considered.
-->
## Additional context / screenshot
<!--
Add any other context or screenshots about the feature parity request here.
-->

@@ -1,28 +0,0 @@
---
name: 💡 Feature request
about: Suggest a feature for Redlib that is not found in Reddit
title: '💡 Feature request: '
labels: enhancement
assignees: ''
---
## Is your feature request related to a problem? Please describe.
<!--
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
-->
## Describe the feature you would like to be implemented
<!--
A clear and concise description of what you want to happen.
-->
## Describe alternatives you've considered
<!--
A clear and concise description of any alternative solutions or features you've considered.
-->
## Additional context / screenshot
<!--
Add any other context or screenshots about the feature request here.
-->

@@ -1,76 +0,0 @@
name: Release Build

on:
  push:
    paths-ignore:
      - "*.md"
      - "compose.*"
    branches:
      - "main"
  release:
    types: [published]

env:
  CARGO_TERM_COLOR: always
  CARGO_TARGET_AARCH64_UNKNOWN_LINUX_MUSL_LINKER: aarch64-linux-gnu-gcc
  CC_aarch64_unknown_linux_musl: aarch64-linux-gnu-gcc
  CARGO_TARGET_ARMV7_UNKNOWN_LINUX_MUSLEABIHF_LINKER: arm-linux-gnueabihf-gcc
  CC_armv7_unknown_linux_musleabihf: arm-linux-gnueabihf-gcc

jobs:
  build:
    name: Rust project - latest
    runs-on: ubuntu-latest
    strategy:
      matrix:
        target:
          - x86_64-unknown-linux-musl
          - aarch64-unknown-linux-musl
          - armv7-unknown-linux-musleabihf
    steps:
      - uses: actions/checkout@v4
      - uses: actions-rust-lang/setup-rust-toolchain@v1
        with:
          target: ${{ matrix.target }}
      - if: matrix.target == 'x86_64-unknown-linux-musl'
        run: |
          sudo apt-get update
          sudo apt-get install -y --no-install-recommends musl-tools
      - if: matrix.target == 'armv7-unknown-linux-musleabihf'
        run: |
          sudo apt update
          sudo apt install -y gcc-arm-linux-gnueabihf musl-tools
      - if: matrix.target == 'aarch64-unknown-linux-musl'
        run: |
          sudo apt update
          sudo apt install -y gcc-aarch64-linux-gnu musl-tools
      - name: Versions
        id: version
        run: echo "VERSION=$(cargo metadata --format-version 1 --no-deps | jq .packages[0].version -r | sed 's/^/v/')" >> "$GITHUB_OUTPUT"
      - name: Build
        run: cargo build --release --target ${{ matrix.target }}
      - name: Package release
        run: tar czf redlib-${{ matrix.target }}.tar.gz -C target/${{ matrix.target }}/release/ redlib
      - name: Upload release
        uses: softprops/action-gh-release@v1
        with:
          tag_name: ${{ steps.version.outputs.VERSION }}
          name: ${{ steps.version.outputs.VERSION }} - ${{ github.event.head_commit.message }}
          draft: true
          files: |
            redlib-${{ matrix.target }}.tar.gz
          body: |
            - ${{ github.event.head_commit.message }} ${{ github.sha }}
          generate_release_notes: true

@@ -1,109 +0,0 @@
name: Container build

on:
  workflow_run:
    workflows: ["Release Build"]
    types:
      - completed

env:
  REGISTRY_IMAGE: quay.io/redlib/redlib

jobs:
  build:
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        include:
          - { platform: linux/amd64, target: x86_64-unknown-linux-musl }
          - { platform: linux/arm64, target: aarch64-unknown-linux-musl }
          - { platform: linux/arm/v7, target: armv7-unknown-linux-musleabihf }
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Docker meta
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: ${{ env.REGISTRY_IMAGE }}
          tags: |
            type=sha
            type=raw,value=latest,enable={{is_default_branch}}
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Login to Quay.io Container Registry
        uses: docker/login-action@v3
        with:
          registry: quay.io
          username: ${{ secrets.QUAY_USERNAME }}
          password: ${{ secrets.QUAY_ROBOT_TOKEN }}
      - name: Build and push
        id: build
        uses: docker/build-push-action@v5
        with:
          context: .
          platforms: ${{ matrix.platform }}
          labels: ${{ steps.meta.outputs.labels }}
          outputs: type=image,name=${{ env.REGISTRY_IMAGE }},push-by-digest=true,name-canonical=true,push=true
          file: Dockerfile
          build-args: TARGET=${{ matrix.target }}
      - name: Export digest
        run: |
          mkdir -p /tmp/digests
          digest="${{ steps.build.outputs.digest }}"
          touch "/tmp/digests/${digest#sha256:}"
      - name: Upload digest
        uses: actions/upload-artifact@v4
        with:
          name: digests-${{ matrix.target }}
          path: /tmp/digests/*
          if-no-files-found: error
          retention-days: 1
  merge:
    runs-on: ubuntu-latest
    needs:
      - build
    steps:
      - name: Download digests
        uses: actions/download-artifact@v4.1.7
        with:
          path: /tmp/digests
          pattern: digests-*
          merge-multiple: true
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Docker meta
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: ${{ env.REGISTRY_IMAGE }}
          tags: |
            type=sha
            type=raw,value=latest,enable={{is_default_branch}}
      - name: Login to Quay.io Container Registry
        uses: docker/login-action@v3
        with:
          registry: quay.io
          username: ${{ secrets.QUAY_USERNAME }}
          password: ${{ secrets.QUAY_ROBOT_TOKEN }}
      - name: Create manifest list and push
        working-directory: /tmp/digests
        run: |
          docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \
            $(printf '${{ env.REGISTRY_IMAGE }}@sha256:%s ' *)
      # - name: Push README to Quay.io
      #   uses: christian-korneck/update-container-description-action@v1
      #   env:
      #     DOCKER_APIKEY: ${{ secrets.APIKEY__QUAY_IO }}
      #   with:
      #     destination_container_repo: quay.io/redlib/redlib
      #     provider: quay
      #     readme_file: 'README.md'
      - name: Inspect image
        run: |
          docker buildx imagetools inspect ${{ env.REGISTRY_IMAGE }}:${{ steps.meta.outputs.version }}

@@ -1,84 +0,0 @@
name: Rust Build & Publish

on:
  push:
    paths-ignore:
      - "**.md"
    branches:
      - 'main'
  release:
    types: [published]

env:
  CARGO_TERM_COLOR: always

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout sources
        uses: actions/checkout@v3
      - name: Cache Packages
        uses: Swatinem/rust-cache@v2
      - name: Install stable toolchain
        uses: dtolnay/rust-toolchain@stable
        with:
          toolchain: stable
      - name: Install musl-gcc
        run: sudo apt-get install musl-tools
      - name: Install cargo musl target
        run: rustup target add x86_64-unknown-linux-musl
      # Building actions
      - name: Build
        run: RUSTFLAGS='-C target-feature=+crt-static' cargo build --release --target x86_64-unknown-linux-musl
      - name: Versions
        id: version
        run: echo "VERSION=$(cargo metadata --format-version 1 --no-deps | jq .packages[0].version -r | sed 's/^/v/')" >> "$GITHUB_OUTPUT"
      # Publishing actions
      - name: Publish to crates.io
        if: github.event_name == 'release'
        run: cargo publish --no-verify --token ${{ secrets.CARGO_REGISTRY_TOKEN }}
      - name: Calculate SHA512 checksum
        run: sha512sum target/x86_64-unknown-linux-musl/release/redlib > redlib.sha512
      - name: Calculate SHA256 checksum
        run: sha256sum target/x86_64-unknown-linux-musl/release/redlib > redlib.sha256
      - uses: actions/upload-artifact@v4
        name: Upload a Build Artifact
        with:
          name: redlib
          path: |
            target/x86_64-unknown-linux-musl/release/redlib
            redlib.sha512
            redlib.sha256
      - name: Release
        uses: softprops/action-gh-release@v1
        if: github.base_ref != 'main' && github.event_name == 'release'
        with:
          tag_name: ${{ steps.version.outputs.VERSION }}
          name: ${{ steps.version.outputs.VERSION }} - ${{ github.event.head_commit.message }}
          draft: true
          files: |
            target/x86_64-unknown-linux-musl/release/redlib
            redlib.sha512
            redlib.sha256
          body: |
            - ${{ github.event.head_commit.message }} ${{ github.sha }}
          generate_release_notes: true
        env:
          GITHUB_TOKEN: ${{ secrets.RELEASE_TOKEN }}

@@ -1,67 +0,0 @@
name: Pull Request

env:
  CARGO_TERM_COLOR: always
  NEXTEST_RETRIES: 10

on:
  push:
    branches:
      - 'main'
  pull_request:
    branches:
      - 'main'

jobs:
  test:
    name: cargo test
    runs-on: ubuntu-latest
    steps:
      - name: Checkout sources
        uses: actions/checkout@v3
      - name: Install stable toolchain
        uses: dtolnay/rust-toolchain@stable
        with:
          toolchain: stable
      - name: Install cargo-nextest
        uses: taiki-e/install-action@nextest
      - name: Run cargo nextest
        run: cargo nextest run
  format:
    name: cargo fmt --all -- --check
    runs-on: ubuntu-latest
    steps:
      - name: Checkout sources
        uses: actions/checkout@v3
      - name: Install stable toolchain with rustfmt component
        uses: dtolnay/rust-toolchain@stable
        with:
          toolchain: stable
          components: rustfmt
      - name: Run cargo fmt
        run: cargo fmt --all -- --check
  clippy:
    name: cargo clippy -- -D warnings
    runs-on: ubuntu-latest
    steps:
      - name: Checkout sources
        uses: actions/checkout@v3
      - name: Install stable toolchain with clippy component
        uses: dtolnay/rust-toolchain@stable
        with:
          toolchain: stable
          components: clippy
      - name: Run cargo clippy
        run: cargo clippy -- -D warnings

.github/workflows/rust.yml

@@ -0,0 +1,29 @@
name: Rust

on:
  push:
    branches: [master]
  pull_request:
    branches: [master]

env:
  CARGO_TERM_COLOR: always

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Cache Packages
        uses: Swatinem/rust-cache@v1.0.1
      - name: Build
        run: cargo build --release
      - uses: actions/upload-artifact@v2.2.1
        name: Upload a Build Artifact
        with:
          name: libreddit
          path: target/release/libreddit

.gitignore

@@ -1,10 +1 @@
/target
.env
redlib.toml
# Idea Files
.idea/
# nix files
.direnv/
result

CREDITS

@@ -1,133 +0,0 @@
5trongthany <65565784+5trongthany@users.noreply.github.com>
674Y3r <87250374+674Y3r@users.noreply.github.com>
accountForIssues <52367365+accountForIssues@users.noreply.github.com>
Adrian Lebioda <adrianlebioda@gmail.com>
Akanksh Chitimalla <55909985+Akanksh12@users.noreply.github.com>
alefvanoon <53198048+alefvanoon@users.noreply.github.com>
Ales Lerch <13370338+axeII@users.noreply.github.com>
Alexandre Iooss <erdnaxe@crans.org>
alyaeanyx <alexandra.hollmeier@mailbox.org>
AndreVuillemot160 <84594011+AndreVuillemot160@users.noreply.github.com>
Andrew Kaufman <57281817+andrew-kaufman@users.noreply.github.com>
Artemis <51862164+artemislena@users.noreply.github.com>
arthomnix <35371030+arthomnix@users.noreply.github.com>
Arya K <73596856+gi-yt@users.noreply.github.com>
Austin Huang <im@austinhuang.me>
Ayaka <ayaka@kitty.community>
backfire-monism-net <development.0extl@simplelogin.com>
Basti <pred2k@users.noreply.github.com>
Ben Sherman <bennettmsherman@gmail.com>
Ben Smith <37027883+smithbm2316@users.noreply.github.com>
beucismis <beucismis@tutamail.com>
BobIsMyManager <ahoumatt@yahoo.com>
Butter Cat <butteredcats@protonmail.com>
Butter Cat <ButteredCats@protonmail.com>
Carbrex <95964955+Carbrex@users.noreply.github.com>
ccuser44 <68124053+ccuser44@users.noreply.github.com>
Connor Holloway <c.holloway314@outlook.com>
curlpipe <11898833+curlpipe@users.noreply.github.com>
dacousb <53299044+dacousb@users.noreply.github.com>
Daniel Nathan Gray <dng@disroot.org>
Daniel Valentine <Daniel-Valentine@users.noreply.github.com>
Daniel Valentine <daniel@vielle.ws>
dbrennand <52419383+dbrennand@users.noreply.github.com>
Dean Sallinen <deza604@gmail.com>
dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Diego Magdaleno <38844659+DiegoMagdaleno@users.noreply.github.com>
domve <domve@posteo.net>
Dyras <jevwmguf@duck.com>
Edward <101938856+EdwardLangdon@users.noreply.github.com>
Éli Marshal <835958+EMarshal@users.noreply.github.com>
elliot <75391956+ellieeet123@users.noreply.github.com>
erdnaxe <erdnaxe@users.noreply.github.com>
Esmail EL BoB <github.defilable@simplelogin.co>
fawn <fawn@envs.net>
FireMasterK <20838718+FireMasterK@users.noreply.github.com>
George Roubos <cowkingdom@hotmail.com>
git-bruh <e817509a-8ee9-4332-b0ad-3a6bdf9ab63f@aleeas.com>
gmnsii <95436780+gmnsii@users.noreply.github.com>
gmnsii <github.gmnsii@pm.me>
gmnsii <gmnsii@void.noreply>
Gonçalo Valério <dethos@users.noreply.github.com>
guaddy <67671414+guaddy@users.noreply.github.com>
Harsh Mishra <erbeusgriffincasper@gmail.com>
hinto.janai <hinto.janai@protonmail.com>
igna <igna@intent.cool>
imabritishcow <bcow@protonmail.com>
invakid404 <invakid404@riseup.net>
İsmail Karslı <ismail@karsli.net>
Johannes Schleifenbaum <johannes@js-webcoding.de>
Jonathan Dahan <git@jonathan.is>
Josiah <70736638+fres7h@users.noreply.github.com>
JPyke3 <pyke.jacob1@gmail.com>
Kavin <20838718+FireMasterK@users.noreply.github.com>
Kazi <kzshantonu@users.noreply.github.com>
Kieran <42723993+EnderDev@users.noreply.github.com>
Kieran <kieran@dothq.co>
Kirk1984 <christoph-m@posteo.de>
kuanhulio <66286575+kuanhulio@users.noreply.github.com>
Kyle Roth <kylrth@gmail.com>
laazyCmd <laazy.pr00gramming@protonmail.com>
Laurențiu Nicola <lnicola@users.noreply.github.com>
Lena <102762572+MarshDeer@users.noreply.github.com>
Leopardus <leopardus3@pm.me>
Macic <46872282+Macic-Dev@users.noreply.github.com>
Mario A <10923513+Midblyte@users.noreply.github.com>
Márton <marton2@gmail.com>
Mathew Davies <ThePixelDeveloper@users.noreply.github.com>
Matthew Crossman <matt@crossman.page>
Matthew E <matt@matthew.science>
Matthew Esposito <matt@matthew.science>
Mennaruuk <52135169+Mennaruuk@users.noreply.github.com>
Midou36O <midou@midou.dev>
mikupls <93015331+mikupls@users.noreply.github.com>
Myzel394 <50424412+Myzel394@users.noreply.github.com>
Nainar <nainar.mb@gmail.com>
Nathan Moos <moosingin3space@gmail.com>
Nazar <63452145+Tokarak@users.noreply.github.com>
Nicholas Christopher <nchristopher@tuta.io>
Nick Lowery <ClockVapor@users.noreply.github.com>
Nico <github@dr460nf1r3.org>
NKIPSC <15067635+NKIPSC@users.noreply.github.com>
nohoster <136514837+nohoster@users.noreply.github.com>
o69mar <119129086+o69mar@users.noreply.github.com>
obeho <71698631+obeho@users.noreply.github.com>
obscurity <z@x4.pm>
Om G <34579088+OxyMagnesium@users.noreply.github.com>
Ondřej Pešek <iTzBoboCz@users.noreply.github.com>
perennial <mail@perennialte.ch>
Peter Sawyer <petersawyer314@gmail.com>
pin <90570748+0323pin@users.noreply.github.com>
potatoesAreGod <118043038+potatoesAreGod@users.noreply.github.com>
RiversideRocks <59586759+RiversideRocks@users.noreply.github.com>
robin <8597693+robrobinbin@users.noreply.github.com>
Robin <8597693+robrobinbin@users.noreply.github.com>
robrobinbin <>
robrobinbin <8597693+robrobinbin@users.noreply.github.com>
robrobinbin <robindepril@gmail.com>
Ruben Elshof <15641671+rubenelshof@users.noreply.github.com>
Rupert Angermeier <rangermeier@users.noreply.github.com>
Scoder12 <34356756+Scoder12@users.noreply.github.com>
Slayer <51095261+GhostSlayer@users.noreply.github.com>
Soheb <somoso@users.noreply.github.com>
somini <somini@users.noreply.github.com>
somoso <github@soheb.anonaddy.com>
Spenser Black <spenserblack01@gmail.com>
Spike <19519553+spikecodes@users.noreply.github.com>
spikecodes <19519553+spikecodes@users.noreply.github.com>
sybenx <syb@duck.com>
TheCultLeader666 <65368815+TheCultLeader666@users.noreply.github.com>
TheFrenchGhosty <47571719+TheFrenchGhosty@users.noreply.github.com>
The TwilightBlood <hwengerstickel@protonmail.com>
tirz <36501933+tirz@users.noreply.github.com>
tmak2002 <torben@tmak2002.dev>
Tokarak <63452145+Tokarak@users.noreply.github.com>
Tsvetomir Bonev <invakid404@riseup.net>
Vivek <vivek@revankar.net>
Vladislav Nepogodin <nepogodin.vlad@gmail.com>
Walkx <walkxnl@gmail.com>
Wichai <1482605+Chengings@users.noreply.github.com>
wsy2220 <wsy@dogben.com>
xatier <xatierlike@gmail.com>
Yaroslav Chvanov <yaroslav.chvanov@gmail.com>
Zach <72994911+zachjmurphy@users.noreply.github.com>

Cargo.lock

File diff suppressed because it is too large.

@@ -1,69 +1,22 @@
[package]
name = "redsunlib"
name = "libreddit"
description = " Alternative private front-end to Reddit"
license = "AGPL-3.0-only"
repository = "https://git.stardust.wtf/iridium/redsunlib"
version = "0.35.4"
authors = [
"Matthew Esposito <matt+cargo@matthew.science>",
"spikecodes <19519553+spikecodes@users.noreply.github.com>",
]
edition = "2021"
default-run = "redsunlib"
license = "AGPL-3.0"
repository = "https://github.com/spikecodes/libreddit"
version = "0.2.3"
authors = ["spikecodes <19519553+spikecodes@users.noreply.github.com>"]
edition = "2018"
[features]
default = ["proxy"]
proxy = ["actix-web/rustls", "base64"]
[dependencies]
rinja = { version = "0.3.4", default-features = false }
cached = { version = "0.54.0", features = ["async"] }
clap = { version = "4.4.11", default-features = false, features = [
"std",
"env",
"derive",
] }
regex = "1.10.2"
serde = { version = "1.0.193", features = ["derive"] }
cookie = "0.18.0"
futures-lite = "2.2.0"
hyper = { version = "0.14.31", features = ["full"] }
percent-encoding = "2.3.1"
route-recognizer = "0.3.1"
serde_json = "1.0.133"
tokio = { version = "1.35.1", features = ["full"] }
time = { version = "0.3.31", features = ["local-offset"] }
url = "2.5.0"
rust-embed = { version = "8.1.0", features = ["include-exclude"] }
libflate = "2.0.0"
brotli = { version = "7.0.0", features = ["std"] }
toml = "0.8.8"
once_cell = "1.19.0"
serde_yaml = "0.9.29"
build_html = "2.4.0"
uuid = { version = "1.6.1", features = ["v4"] }
base64 = "0.22.1"
fastrand = "2.0.1"
log = "0.4.20"
pretty_env_logger = "0.5.0"
dotenvy = "0.15.7"
rss = "2.0.7"
arc-swap = "1.7.1"
serde_json_path = "0.7.1"
async-recursion = "1.1.1"
common-words-all = { version = "0.0.2", default-features = false, features = ["english", "one"] }
hyper-rustls = { version = "0.24.2", features = [ "http2" ] }
[dev-dependencies]
lipsum = "0.9.0"
sealed_test = "1.0.0"
[profile.release]
codegen-units = 1
lto = true
strip = "symbols"
[[bin]]
name = "redsunlib"
path = "src/main.rs"
[[bin]]
name = "scraper"
path = "src/scraper/main.rs"
base64 = { version = "0.13.0", optional = true }
actix-web = "3.2.0"
reqwest = { version = "0.10", default_features = false, features = ["rustls-tls"] }
askama = "0.8.0"
serde = "1.0.117"
serde_json = "1.0"
chrono = "0.4.19"
async-recursion = "0.3.1"

@@ -1,34 +1,9 @@
## Builder
FROM rust:alpine AS builder
RUN apk add --no-cache musl-dev git
WORKDIR /redsunlib
FROM rust:alpine as builder
WORKDIR /usr/src/libreddit
COPY . .
RUN cargo build --target x86_64-unknown-linux-musl --release
## Final image
RUN apk add --no-cache g++ openssl-dev
RUN cargo install --path .
FROM alpine:latest
# Import ca-certificates from builder
COPY --from=builder /usr/share/ca-certificates /usr/share/ca-certificates
COPY --from=builder /etc/ssl/certs /etc/ssl/certs
# Copy our build
COPY --from=builder /redsunlib/target/x86_64-unknown-linux-musl/release/redsunlib /usr/local/bin/redsunlib
# Use an unprivileged user.
RUN adduser --home /nonexistent --no-create-home --disabled-password redsunlib
USER redsunlib
# Tell Docker to expose port 8080
EXPOSE 8080
# Run a healthcheck every minute to make sure redsunlib is functional
HEALTHCHECK --interval=1m --timeout=3s CMD wget --spider -q http://localhost:8080/settings || exit 1
CMD ["redsunlib"]
COPY --from=builder /usr/local/cargo/bin/libreddit /usr/local/bin/libreddit
CMD ["libreddit"]

README.md

@@ -1,123 +1,102 @@
<img align="left" width="128" height="128" src="https://git.stardust.wtf/attachments/842086e3-b718-4379-b718-c3a542842152" alt="logo">
# Libreddit
# Redsunlib
> An alternative private front-end to Reddit, a fork of [Redlib](https://github.com/redlib-org/redlib) with some <sup><sub>(minor)</sub></sup> functional and cosmetic changes.
> An alternative private front-end to Reddit
<br>
Libre + Reddit = [Libreddit](https://libredd.it)
![screenshot](https://git.stardust.wtf/attachments/ccf81f52-e653-4722-94b9-b370c58d6359)
- 🚀 Fast: written in Rust for blazing fast speeds and safety
- ☁️ Light: no JavaScript, no ads, no tracking
- 🕵 Private: all requests are proxied through the server, including media
- 🦺 Safe: does not rely on Reddit OAuth or require a Reddit API Key
- 🔒 Secure: strong [Content Security Policy](https://developer.mozilla.org/en-US/docs/Web/HTTP/CSP) prevents browser requests to Reddit
### Disclaimer
Like [Invidious](https://github.com/iv-org/invidious) but for Reddit. Browse the coldest takes of [r/unpopularopinion](https://libredd.it/r/unpopularopinion) without being [tracked](#reddit).
There are rapid changes/features in this fork that can<sup>(will)</sup> change without notice. If you want to host this version, be aware that it's likely to break at some point. I still wouldn't recommend it in a production environment unless you know what you're doing. Or like living on the edge.......
## Contents
- [Screenshot](#screenshot)
- [Instances](#instances)
- [About](#about)
- [Elsewhere](#elsewhere)
- [Info](#info)
- [In Progress](#in-progress)
- [Teddit Comparison](#how-does-it-compare-to-teddit)
- [Comparison](#comparison)
- [Speed](#speed)
- [Privacy](#privacy)
- [Installation](#installation)
- [Cargo](#a-cargo)
- [Docker](#b-docker)
- [AUR](#c-aur)
- [GitHub Releases](#d-github-releases)
- [Repl.it](#e-replit)
- Developing
- [Deployment](#deployment)
- [Building](#building)
> I would also like to thank the maintainers and contributors of both [Redlib](https://github.com/redlib-org/redlib) and [Libreddit](https://github.com/libreddit/libreddit) for all the work they did while I just added some low quality tacky features. ❤️
## Screenshot
---
![](https://i.ibb.co/1RyKrBz/libreddit-rust.png)
## Table of Contents
1. [Redsunlib](#redsunlib)
- [Disclaimer](#disclaimer)
2. [Table of Contents](#table-of-contents)
3. [Instances](#instances)
4. [About](#about)
- [The Name](#the-name)
- [Built with](#built-with)
- [How is it different from other Reddit front ends?](#how-is-it-different-from-other-reddit-front-ends)
- [Teddit](#teddit)
- [Libreddit](#libreddit)
5. [Comparison](#comparison)
- [Speed](#speed)
- [Privacy](#privacy)
- [Reddit](#reddit)
- [Redlib](#redlib-1)
- [Server](#server)
6. [Deployment](#deployment)
- [Docker](#docker)
- [Docker Compose](#docker-compose)
- [Docker CLI](#docker-cli)
- [Binary](#binary)
- [Running as a systemd service](#running-as-a-systemd-service)
- [Building from source](#building-from-source)
7. [Configuration](#configuration)
- [Instance settings](#instance-settings)
- [Default user settings](#default-user-settings)
## Instances
---
Feel free to [open an issue](https://github.com/spikecodes/libreddit/issues/new) to have your [selfhosted instance](#deployment) listed here!
# Instances
| Website | Country | Cloudflare |
|-|-|-|
| [libredd.it](https://libredd.it) (official) | 🇺🇸 US | |
| [libreddit.spike.codes](https://libreddit.spike.codes) (official) | 🇺🇸 US | |
| [libreddit.dothq.co](https://libreddit.dothq.co) | 🇺🇸 US | ✅ |
| [libreddit.insanity.wtf](https://libreddit.insanity.wtf) | 🇺🇸 US | ✅ |
> [!WARNING]
> 🔗 **Currently, public Redsunlib instances are not available; consider using a [redlib](https://github.com/redlib-org/redlib-instances/blob/main/instances.md) instance if you are not comfortable running your own.**
A checkmark in the "Cloudflare" category here refers to the use of the reverse proxy, [Cloudflare](https://cloudflare.com). The checkmark will not be listed for a site that merely uses Cloudflare DNS, but only for the proxying service, which grants Cloudflare the ability to monitor traffic to the website.
You are more than welcome to host an instance and submit an issue if you want it added. That is, if you've read the [Disclaimer](#disclaimer) and it's within your "personal risk tolerance." ;)
## About
---
### Elsewhere
Find Libreddit on...
- 💬 Matrix: [#libreddit:matrix.org](https://matrix.to/#/#libreddit:matrix.org)
- 🐋 Docker: [spikecodes/libreddit](https://hub.docker.com/r/spikecodes/libreddit)
- :octocat: GitHub: [spikecodes/libreddit](https://github.com/spikecodes/libreddit)
- 🦊 GitLab: [spikecodes/libreddit](https://gitlab.com/spikecodes/libreddit)
# About
### Info
Libreddit hopes to provide an easier way to browse Reddit, without the ads, trackers, and bloat. Libreddit was inspired by other alternative front-ends to popular services such as [Invidious](https://github.com/iv-org/invidious) for YouTube, [Nitter](https://github.com/zedeus/nitter) for Twitter, and [Bibliogram](https://sr.ht/~cadence/bibliogram/) for Instagram.
Redlib hopes to provide an easier way to browse Reddit, without the ads, trackers, and bloat. Redlib was inspired by other alternative front-ends to popular services such as [Invidious](https://github.com/iv-org/invidious) for YouTube, [Nitter](https://github.com/zedeus/nitter) for Twitter, and [Bibliogram](https://sr.ht/~cadence/bibliogram/) for Instagram.
Libreddit currently implements most of Reddit's functionalities but still lacks a few features that are being worked on below.
Redlib currently implements most of Reddit's (signed-out) functionalities but still lacks [a few features](https://github.com/redlib-org/redlib/issues).
### In Progress
- Searching
## The Name
### How does it compare to Teddit?
**Red sun** in the sky + Red**lib** = Redsunlib
<sup>I do self criticism constantly, because I'm trapped in a Maoist *cult* where comrades (white terrorists) criticize me mercilessly for having a fascist credit card (VISA Silver Signature Rewards). They won't let me order vegan pizza anymore because the phone is fascist and "summoning my pizza slave with bourgeois app" is "bad vibes"</sup>
## Built with
- [Rust](https://www.rust-lang.org/) - Programming language
- [Hyper](https://github.com/hyperium/hyper) - HTTP server and client
- [Rinja](https://github.com/rinja-rs/rinja) - Templating engine
- [Rustls](https://github.com/rustls/rustls) - TLS library
## How is it different from other Reddit front ends?
### Teddit
Teddit is another awesome open source project designed to provide an alternative frontend to Reddit. There is no connection between the two, and you're welcome to use whichever one you favor. Competition fosters innovation and Teddit's release has motivated me to build Redlib into an even more polished product.
Teddit is another awesome open source project designed to provide an alternative frontend to Reddit. There is no connection between the two and you're welcome to use whichever one you favor. Competition fosters innovation and Teddit's release has motivated me to build Libreddit into an even more polished product.
If you are looking to compare, the biggest differences I have noticed are:
- Libreddit is themed around Reddit's redesign whereas Teddit appears to stick much closer to Reddit's old design. This may suit some users better as design is always subjective.
- Libreddit is written in [Rust](https://www.rust-lang.org) for speed and memory safety. It uses [Actix Web](https://actix.rs), which was [benchmarked as the fastest web server for single queries](https://www.techempower.com/benchmarks/#hw=ph&test=db).
- Unlike Teddit (at the time of writing this), Libreddit does not require a Reddit API key to host.
- Redlib is themed around Reddit's redesign whereas Teddit appears to stick much closer to Reddit's old design. This may suit some users better as design is always subjective.
- Redlib is written in [Rust](https://www.rust-lang.org) for speed and memory safety. It uses [Hyper](https://hyper.rs), a speedy and lightweight HTTP server/client implementation.
## Comparison
### Libreddit
This section outlines how Libreddit compares to Reddit.
While Redlib originated as a fork of Libreddit, the name "Redlib" was adopted to avoid legal issues, as Reddit only allows the use of their name if structured as "XYZ For Reddit".
### Speed
Several technical improvements have also been made, including:
Last tested December 21, 2020.
- **OAuth token spoofing**: To circumvent rate limits imposed by Reddit, OAuth token spoofing is used to mimic the most common iOS and Android clients. While spoofing both iOS and Android clients was explored, only the Android client was chosen due to content restrictions when using an anonymous iOS client.
- **Token refreshing**: The authentication token is refreshed every 24 hours, emulating the behavior of the official Android app.
- **HTTP header mimicking**: Efforts are made to send along as many of the official app's headers as possible to reduce the likelihood of Reddit's crackdown on Redlib's requests.
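
For illustration, a spoofed request might look roughly like the following. This is a hypothetical sketch: the version/build string is made up here, and the real values are scraped into `src/oauth_resources.rs` by `scripts/update_oauth_resources.sh`.

```bash
# Hypothetical sketch only; Redlib constructs these headers internally.
curl -s 'https://oauth.reddit.com/r/rust/hot.json' \
  -H 'User-Agent: Reddit/Version 2024.10.0/Build 1447699; Android 14' \
  -H "Authorization: Bearer $SPOOFED_OAUTH_TOKEN"
```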
Results from Google Lighthouse ([Libreddit Report](https://lighthouse-dot-webdotdevsite.appspot.com/lh/html?url=https%3A%2F%2Flibredd.it), [Reddit Report](https://lighthouse-dot-webdotdevsite.appspot.com/lh/html?url=https%3A%2F%2Fwww.reddit.com%2F)).
---
| | Libreddit | Reddit |
|---------------------|---------------|-----------|
| Requests | 22 | 70 |
| Resource Size | 135 KiB | 2,222 KiB |
| Time to Interactive | **1.7 s** | **11.5 s**|
# Comparison
### Privacy
This section outlines how Redlib compares to Reddit in terms of speed and privacy.
## Speed
Last tested on January 12, 2024.
Results from Google PageSpeed Insights ([Redlib Report](https://pagespeed.web.dev/report?url=https%3A%2F%2Fredlib.matthew.science%2F), [Reddit Report](https://pagespeed.web.dev/report?url=https://www.reddit.com)).
| Performance metric | Redlib | Reddit |
| ------------------- | -------- | --------- |
| Speed Index | 0.6s | 1.9s |
| Performance Score | 100% | 64% |
| Time to Interactive | **2.8s** | **12.4s** |
## Privacy
### Reddit
#### Reddit
**Logging:** According to Reddit's [privacy policy](https://www.redditinc.com/policies/privacy-policy), they "may [automatically] log information" including:
- IP address
- User-agent string
- Browser type
@@ -130,15 +109,13 @@ Results from Google PageSpeed Insights ([Redlib Report](https://pagespeed.web.de
- The requested URL
- Search terms
**Location:** The same privacy policy goes on to describe that location data may be collected through the use of:
**Location:** The same privacy policy goes on to describe location data may be collected through the use of:
- GPS (consensual)
- Bluetooth (consensual)
- Content associated with a location (consensual)
- Your IP Address
**Cookies:** Reddit's [cookie notice](https://www.redditinc.com/policies/cookies) documents the array of cookies used by Reddit including/regarding:
- Authentication
- Functionality
- Analytics and Performance
@@ -146,197 +123,86 @@ Results from Google PageSpeed Insights ([Redlib Report](https://pagespeed.web.de
- Third-Party Cookies
- Third-Party Site
### Redlib
#### Libreddit
For transparency, I hope to describe all the ways Redlib handles user privacy.
For transparency, I hope to describe all the ways Libreddit handles user privacy.
#### Server
**Logging:** In production (when running the binary, hosting with docker, or using the official instances), Libreddit logs nothing. When debugging (running from source without `--release`), Libreddit logs post IDs fetched to aid troubleshooting but nothing else.
- **Logging:** In production (when running the binary, hosting with docker, or using the official instances), Redlib logs nothing. When debugging (running from source without `--release`), Redlib logs post IDs fetched to aid with troubleshooting.
**DNS:** Both official domains (`libredd.it` and `libreddit.spike.codes`) use Cloudflare as the DNS resolver. However, the sites are not proxied through Cloudflare, meaning Cloudflare doesn't have access to user traffic.
- **Cookies:** Redlib uses optional cookies to store any configured settings in [the settings menu](https://redlib.matthew.science/settings). These are not cross-site cookies and the cookies hold no personal data.
**Cookies:** Libreddit uses no cookies currently but eventually, I plan to add a configuration page where users can store an optional cookie to save their preferred theme, default sorting algorithm, or default layout.
---
**Hosting:** The official instances (`libredd.it` and `libreddit.spike.codes`) are hosted on [Repl.it](https://repl.it/) which monitors usage to prevent abuse. I can understand if this invalidates certain users' threat models and therefore, selfhosting and browsing through Tor are welcomed.
# Deployment
## Installation
This section covers multiple ways of deploying Redlib. Using [Docker](#docker) is recommended for production.
### A) Cargo
For configuration options, see the [Configuration section](#Configuration).
Make sure Rust stable is installed along with `cargo`, Rust's package manager.
## Docker
[Docker](https://www.docker.com) lets you run containerized applications. Containers are loosely isolated environments that are lightweight and contain everything needed to run the application, so there's no need to rely on what's installed on the host.
Docker images for Redsunlib are available at our [Gitea container registry](https://git.stardust.wtf/iridium/-/packages/container/redsunlib/latest), currently only with support for `amd64`. If you need the `arm64` or `armv7` platforms, you can either build Redsunlib yourself or open an [issue](https://git.stardust.wtf/iridium/redsunlib/issues) :)
### Docker Compose
> [!IMPORTANT]
> These instructions assume the [Compose plugin](https://docs.docker.com/compose/migrate/#what-are-the-differences-between-compose-v1-and-compose-v2) has already been installed. If not, follow these [instructions on the Docker Docs](https://docs.docker.com/compose/install) for how to do so.
Copy `compose.yaml` and modify any relevant values (for example, the ports Redlib should listen on).
Start Redlib in detached mode (running in the background):
```bash
docker compose up -d
```
cargo install libreddit
```
Stream logs from the Redlib container:
### B) Docker
```bash
docker logs -f redlib
Deploy the Docker image of Libreddit:
```
docker run -d --name libreddit -p 8080:8080 spikecodes/libreddit
```
### Docker CLI
Deploy Redlib:
```bash
docker pull git.stardust.wtf/iridium/redsunlib:latest
docker run -d --name redlib -p 8080:8080 git.stardust.wtf/iridium/redsunlib:latest
Deploy using a different port (in this case, port 80):
```
docker run -d --name libreddit -p 80:8080 spikecodes/libreddit
```
Deploy using a different port on the host (in this case, port 80):
### C) AUR
```bash
docker pull git.stardust.wtf/iridium/redsunlib:latest
docker run -d --name redlib -p 80:8080 git.stardust.wtf/iridium/redsunlib:latest
For ArchLinux users, Libreddit is available from the AUR as [`libreddit-git`](https://aur.archlinux.org/packages/libreddit-git).
Install:
```
yay -S libreddit-git
```
If you're using a reverse proxy in front of Redlib, prefix the port numbers with `127.0.0.1` so that Redlib only listens on the host port **locally**. For example, if the host port for Redlib is `8080`, specify `127.0.0.1:8080:8080`.
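
For example, a locally-bound deployment would look like:

```bash
docker run -d --name redlib -p 127.0.0.1:8080:8080 git.stardust.wtf/iridium/redsunlib:latest
```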
### D) GitHub Releases
Stream logs from the Redlib container:
If you're on Linux and none of these methods work for you, you can grab a Linux binary from [the newest release](https://github.com/spikecodes/libreddit/releases/latest).
Currently, Libreddit does not have Windows or macOS binaries but those will be available soon.
```bash
docker logs -f redlib
### E) Repl.it
**Note:** Repl.it is a free option, but they are *not* private and they monitor server usage to prevent abuse. If you really need a free and easy setup, this method may work best for you.
1. Create a Repl.it account (see note above)
2. Visit [the official Repl](https://repl.it/@spikethecoder/libreddit) and fork it
3. Hit the run button to download the latest Libreddit version and start it
In the web preview (defaults to top right), you should see your hosted instance, where you can assign a [custom domain](https://docs.repl.it/repls/web-hosting#custom-domains).
## Deployment
Once installed, deploy Libreddit (unless you're using Docker) by running:
```
libreddit
```
## Binary
Binaries are not supplied at this moment, but they will be at some point in the future. For now, Redsunlib can be [built from source](#building-from-source).
Copy the binary to `/usr/bin`:
```bash
sudo cp ./redlib /usr/bin/redlib
Specify a custom address for the server by passing the `-a` or `--address` argument:
```
libreddit --address=0.0.0.0:8111
```
Deploy Redlib to `0.0.0.0:8080`:
```bash
redlib
To disable the media proxy built into Libreddit, run:
```
libreddit --no-default-features
```
> [!IMPORTANT]
> If you're proxying Redlib through NGINX (see [issue #122](https://github.com/libreddit/libreddit/issues/122#issuecomment-782226853)), add
>
> ```nginx
> proxy_http_version 1.1;
> ```
>
> to your NGINX configuration file above your `proxy_pass` line.
## Building
### Running as a systemd service
You can use the systemd service available in `contrib/redlib.service`
(install it on `/etc/systemd/system/redlib.service`).
That service can optionally be configured via environment variables by creating a file at `/etc/redlib.conf`, using `contrib/redlib.conf` as a template. You can also add the `REDLIB_DEFAULT_{X}` settings explained in the [Configuration](#configuration) section.
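
For example, installing and enabling the service could look like this (assuming the repository root as the working directory and the binary already at `/usr/bin/redlib`):

```bash
sudo cp contrib/redlib.service /etc/systemd/system/redlib.service
sudo systemctl daemon-reload
sudo systemctl enable --now redlib.service
```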
When "Proxying using NGINX" where the proxy is on the same machine, you should
guarantee nginx waits for this service to start. Edit
`/etc/systemd/system/redlib.service.d/reverse-proxy.conf`:
```conf
[Unit]
Before=nginx.service
```
## Building from source
To deploy Redsunlib with changes not yet included in the latest release, you can build the application from source.
```bash
git clone https://git.stardust.wtf/iridium/redsunlib && cd redsunlib
git clone https://github.com/spikecodes/libreddit
cd libreddit
cargo run
```
---
# Configuration
You can configure Redlib further using environment variables. For example:
```bash
REDLIB_DEFAULT_SHOW_NSFW=on redlib
```
```bash
REDLIB_DEFAULT_WIDE=on REDLIB_DEFAULT_THEME=dark redlib -r
```
You can also configure Redlib with a configuration file named `redlib.toml`. For example:
```toml
REDLIB_DEFAULT_WIDE = "on"
REDLIB_DEFAULT_USE_HLS = "on"
```
> [!NOTE]
> If you're deploying Redlib using the **Docker CLI or Docker Compose**, environment variables can be defined in a [`.env` file](https://docs.docker.com/compose/environment-variables/set-environment-variables/), allowing you to centralize and manage configuration in one place.
>
> To configure Redlib using a `.env` file, copy the `.env.example` file to `.env` and edit it accordingly.
>
> If using the Docker CLI, add ` --env-file .env` to the command that runs Redlib. For example:
>
> ```bash
> docker run -d --name redlib -p 8080:8080 --env-file .env git.stardust.wtf/iridium/redsunlib:latest
> ```
>
> If using Docker Compose, no changes are needed as the `.env` file is already referenced in `compose.yaml` via the `env_file: .env` line.
## Instance settings
Assign a default value for each instance-specific setting by passing environment variables to Redlib in the format `REDLIB_{X}`. Replace `{X}` with the setting name (see list below) in capital letters.
| Name | Possible values | Default value | Description |
| ------------------------- | --------------- | ---------------- | --------------------------------------------------------------------------------------------------------- |
| `SFW_ONLY` | `["on", "off"]` | `off` | Enables SFW-only mode for the instance, i.e. all NSFW content is filtered. |
| `BANNER` | String | (empty) | Allows the server to set a banner to be displayed. Currently this is displayed on the instance info page. |
| `ROBOTS_DISABLE_INDEXING` | `["on", "off"]` | `off` | Disables indexing of the instance by search engines. |
| `PUSHSHIFT_FRONTEND` | String | `undelete.pullpush.io` | Allows the server to set the Pushshift frontend to be used with "removed" links. |
| `PORT` | Integer 0-65535 | `8080` | The **internal** port Redlib listens on. |
| `ENABLE_RSS` | `["on", "off"]` | `off` | Enables RSS feed generation. |
| `FULL_URL`                 | String          | (empty)                | Allows for proper URLs (for now, only needed by RSS).                                                      |
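
For example, to start an SFW-only instance with RSS enabled on a different internal port (values chosen arbitrarily):

```bash
REDLIB_SFW_ONLY=on REDLIB_ENABLE_RSS=on REDLIB_PORT=8081 redlib
```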
## Default user settings
Assign a default value for each user-modifiable setting by passing environment variables to Redlib in the format `REDLIB_DEFAULT_{Y}`. Replace `{Y}` with the setting name (see list below) in capital letters.
| Name | Possible values | Default value |
| ----------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | ------------- |
| `THEME` | `["system", "light", "dark", "black", "dracula", "nord", "laserwave", "violet", "gold", "rosebox", "gruvboxdark", "gruvboxlight", "tokyoNight", "catppuccin", "icebergDark", "doomone", "libredditBlack", "libredditDark", "libredditLight"]` | `system` |
| `MASCOT`                            | `["BoymoderBlahaj", "redsunlib", ...]` (add more at `./static/mascots`)                                                              | _(none)_      |
| `FRONT_PAGE` | `["default", "popular", "all"]` | `default` |
| `LAYOUT` | `["card", "clean", "compact", "old", "waterfall"]` | `card` |
| `WIDE` | `["on", "off"]` | `off` |
| `POST_SORT` | `["hot", "new", "top", "rising", "controversial"]` | `hot` |
| `COMMENT_SORT` | `["confidence", "top", "new", "controversial", "old"]` | `confidence` |
| `BLUR_SPOILER` | `["on", "off"]` | `off` |
| `SHOW_NSFW` | `["on", "off"]` | `off` |
| `BLUR_NSFW` | `["on", "off"]` | `off` |
| `USE_HLS` | `["on", "off"]` | `off` |
| `FFMPEG_VIDEO_DOWNLOADS` | `["on", "off"]` | `off` |
| `HIDE_HLS_NOTIFICATION` | `["on", "off"]` | `off` |
| `AUTOPLAY_VIDEOS` | `["on", "off"]` | `off` |
| `SUBSCRIPTIONS` | `+`-delimited list of subreddits (`sub1+sub2+sub3+...`) | _(none)_ |
| `HIDE_AWARDS` | `["on", "off"]` | `off` |
| `DISABLE_VISIT_REDDIT_CONFIRMATION` | `["on", "off"]` | `off` |
| `HIDE_SCORE` | `["on", "off"]` | `off` |
| `HIDE_SIDEBAR_AND_SUMMARY` | `["on", "off"]` | `off` |
| `HIDE_BANNER` | `["on", "off"]` | `off` |
| `FIXED_NAVBAR` | `["on", "off"]` | `on` |
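
For example, to set several user defaults at once (values picked arbitrarily from the table above):

```bash
REDLIB_DEFAULT_THEME=dracula \
REDLIB_DEFAULT_LAYOUT=compact \
REDLIB_DEFAULT_SUBSCRIPTIONS=rust+selfhosted \
redlib
```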

@@ -1,87 +0,0 @@
{
  "name": "Redsunlib",
  "description": "Private front-end for Reddit",
  "buildpacks": [
    {
      "url": "https://github.com/emk/heroku-buildpack-rust"
    },
    {
      "url": "emk/rust"
    }
  ],
  "stack": "container",
  "env": {
    "REDLIB_DEFAULT_THEME": {
      "required": false
    },
    "REDLIB_DEFAULT_MASCOT": {
      "required": false
    },
    "REDLIB_DEFAULT_FRONT_PAGE": {
      "required": false
    },
    "REDLIB_DEFAULT_LAYOUT": {
      "required": false
    },
    "REDLIB_DEFAULT_WIDE": {
      "required": false
    },
    "REDLIB_DEFAULT_COMMENT_SORT": {
      "required": false
    },
    "REDLIB_DEFAULT_POST_SORT": {
      "required": false
    },
    "REDLIB_DEFAULT_BLUR_SPOILER": {
      "required": false
    },
    "REDLIB_DEFAULT_SHOW_NSFW": {
      "required": false
    },
    "REDLIB_DEFAULT_BLUR_NSFW": {
      "required": false
    },
    "REDLIB_DEFAULT_USE_HLS": {
      "required": false
    },
    "REDLIB_DEFAULT_FFMPEG_VIDEO_DOWNLOADS": {
      "required": false
    },
    "REDLIB_HIDE_HLS_NOTIFICATION": {
      "required": false
    },
    "REDLIB_SFW_ONLY": {
      "required": false
    },
    "REDLIB_DEFAULT_HIDE_AWARDS": {
      "required": false
    },
    "REDLIB_DEFAULT_HIDE_SCORE": {
      "required": false
    },
    "REDLIB_BANNER": {
      "required": false
    },
    "REDLIB_ROBOTS_DISABLE_INDEXING": {
      "required": false
    },
    "REDLIB_DEFAULT_SUBSCRIPTIONS": {
      "required": false
    },
    "REDLIB_DEFAULT_FILTERS": {
      "required": false
    },
    "REDLIB_DEFAULT_DISABLE_VISIT_REDDIT_CONFIRMATION": {
      "required": false
    },
    "REDLIB_PUSHSHIFT_FRONTEND": {
      "required": false
    },
    "REDLIB_ENABLE_RSS": {
      "required": false
    },
    "REDLIB_FULL_URL": {
      "required": false
    }
  }
}

@@ -1,25 +0,0 @@
use std::process::{Command, ExitStatus, Output};
#[cfg(not(target_os = "windows"))]
use std::os::unix::process::ExitStatusExt;
#[cfg(target_os = "windows")]
use std::os::windows::process::ExitStatusExt;

fn main() {
	// Rebuild whenever the source tree changes so the embedded hash stays current.
	println!("cargo:rerun-if-changed=src/");
	// Ask git for the current commit hash; fall back to empty output if git is unavailable.
	let output = String::from_utf8(
		Command::new("git")
			.args(["rev-parse", "HEAD"])
			.output()
			.unwrap_or(Output {
				stdout: vec![],
				stderr: vec![],
				status: ExitStatus::from_raw(0),
			})
			.stdout,
	)
	.unwrap_or_default();
	// Embed the hash (or "dev" when it could not be determined) into the binary.
	let git_hash = if output == String::default() { "dev".into() } else { output };
	println!("cargo:rustc-env=GIT_HASH={git_hash}");
}

@@ -1,26 +0,0 @@
# docker-compose -f docker-compose.dev.yml up -d
version: "3.8"

services:
  redsunlib:
    build: .
    restart: always
    container_name: "redsunlib"
    ports:
      - 8080:8080 # Specify `127.0.0.1:8080:8080` instead if using a reverse proxy
    user: nobody
    read_only: true
    security_opt:
      - no-new-privileges:true
      # - seccomp=seccomp-redsunlib.json
    cap_drop:
      - ALL
    networks:
      - redsunlib
    healthcheck:
      test: ["CMD", "wget", "--spider", "-q", "--tries=1", "http://localhost:8080/settings"]
      interval: 5m
      timeout: 3s

networks:
  redsunlib:

@@ -1,24 +0,0 @@
services:
  redsunlib:
    image: git.stardust.wtf/iridium/redsunlib:latest
    restart: always
    container_name: "redsunlib"
    ports:
      - 8080:8080 # Specify `127.0.0.1:8080:8080` instead if using a reverse proxy
    user: nobody
    read_only: true
    security_opt:
      - no-new-privileges:true
      # - seccomp=seccomp-redsunlib.json
    cap_drop:
      - ALL
    env_file: .env
    networks:
      - redsunlib
    healthcheck:
      test: ["CMD", "wget", "--spider", "-q", "--tries=1", "http://localhost:8080/settings"]
      interval: 5m
      timeout: 3s

networks:
  redsunlib:

@@ -1,19 +0,0 @@
ADDRESS=0.0.0.0
PORT=12345
#REDLIB_DEFAULT_THEME=default
#REDLIB_DEFAULT_MASCOT=none
#REDLIB_DEFAULT_FRONT_PAGE=default
#REDLIB_DEFAULT_LAYOUT=card
#REDLIB_DEFAULT_WIDE=off
#REDLIB_DEFAULT_POST_SORT=hot
#REDLIB_DEFAULT_COMMENT_SORT=confidence
#REDLIB_DEFAULT_BLUR_SPOILER=off
#REDLIB_DEFAULT_SHOW_NSFW=off
#REDLIB_DEFAULT_BLUR_NSFW=off
#REDLIB_DEFAULT_USE_HLS=off
#REDLIB_DEFAULT_FFMPEG_VIDEO_DOWNLOADS=off
#REDLIB_DEFAULT_HIDE_HLS_NOTIFICATION=off
#REDLIB_DEFAULT_AUTOPLAY_VIDEOS=off
#REDLIB_DEFAULT_SUBSCRIPTIONS=(sub1+sub2+sub3)
#REDLIB_DEFAULT_HIDE_AWARDS=off
#REDLIB_DEFAULT_DISABLE_VISIT_REDDIT_CONFIRMATION=off

@@ -1,19 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
	<key>Label</key>
	<string>redlib</string>
	<key>Program</key>
	<string>redlib</string>
	<key>KeepAlive</key>
	<true/>
	<key>RunAtLoad</key>
	<true/>
</dict>
</plist>

@@ -1,38 +0,0 @@
[Unit]
Description=redlib daemon
After=network.service
[Service]
DynamicUser=yes
# Default Values
#Environment=ADDRESS=0.0.0.0
#Environment=PORT=8080
# Optional Override
EnvironmentFile=-/etc/redlib.conf
ExecStart=/usr/bin/redlib -a ${ADDRESS} -p ${PORT}
# Hardening
DeviceAllow=
LockPersonality=yes
MemoryDenyWriteExecute=yes
PrivateDevices=yes
ProcSubset=pid
ProtectClock=yes
ProtectControlGroups=yes
ProtectHome=yes
ProtectHostname=yes
ProtectKernelLogs=yes
ProtectKernelModules=yes
ProtectKernelTunables=yes
ProtectProc=invisible
RestrictAddressFamilies=AF_INET AF_INET6
RestrictNamespaces=yes
RestrictRealtime=yes
RestrictSUIDSGID=yes
SystemCallArchitectures=native
SystemCallFilter=@system-service
SystemCallFilter=~@privileged @resources
UMask=0077
[Install]
WantedBy=default.target

flake.lock

@@ -1,106 +0,0 @@
{
  "nodes": {
    "crane": {
      "inputs": {
        "nixpkgs": [
          "nixpkgs"
        ]
      },
      "locked": {
        "lastModified": 1717025063,
        "narHash": "sha256-dIubLa56W9sNNz0e8jGxrX3CAkPXsq7snuFA/Ie6dn8=",
        "owner": "ipetkov",
        "repo": "crane",
        "rev": "480dff0be03dac0e51a8dfc26e882b0d123a450e",
        "type": "github"
      },
      "original": {
        "owner": "ipetkov",
        "repo": "crane",
        "type": "github"
      }
    },
    "flake-utils": {
      "inputs": {
        "systems": "systems"
      },
      "locked": {
        "lastModified": 1710146030,
        "narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=",
        "owner": "numtide",
        "repo": "flake-utils",
        "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a",
        "type": "github"
      },
      "original": {
        "owner": "numtide",
        "repo": "flake-utils",
        "type": "github"
      }
    },
    "nixpkgs": {
      "locked": {
        "lastModified": 1717112898,
        "narHash": "sha256-7R2ZvOnvd9h8fDd65p0JnB7wXfUvreox3xFdYWd1BnY=",
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "6132b0f6e344ce2fe34fc051b72fb46e34f668e0",
        "type": "github"
      },
      "original": {
        "owner": "NixOS",
        "ref": "nixpkgs-unstable",
        "repo": "nixpkgs",
        "type": "github"
      }
    },
    "root": {
      "inputs": {
        "crane": "crane",
        "flake-utils": "flake-utils",
        "nixpkgs": "nixpkgs",
        "rust-overlay": "rust-overlay"
      }
    },
    "rust-overlay": {
      "inputs": {
        "flake-utils": [
          "flake-utils"
        ],
        "nixpkgs": [
          "nixpkgs"
        ]
      },
      "locked": {
        "lastModified": 1717121863,
        "narHash": "sha256-/3sxIe7MZqF/jw1RTQCSmgTjwVod43mmrk84m50MJQ4=",
        "owner": "oxalica",
        "repo": "rust-overlay",
        "rev": "2a7b53172ed08f856b8382d7dcfd36a4e0cbd866",
        "type": "github"
      },
      "original": {
        "owner": "oxalica",
        "repo": "rust-overlay",
        "type": "github"
      }
    },
    "systems": {
      "locked": {
        "lastModified": 1681028828,
        "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
        "owner": "nix-systems",
        "repo": "default",
        "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
        "type": "github"
      },
      "original": {
        "owner": "nix-systems",
        "repo": "default",
        "type": "github"
      }
    }
  },
  "root": "root",
  "version": 7
}

@@ -1,71 +0,0 @@
{
  description = "Redlib: Private front-end for Reddit";

  inputs = {
    nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable";

    crane = {
      url = "github:ipetkov/crane";
      inputs.nixpkgs.follows = "nixpkgs";
    };

    flake-utils.url = "github:numtide/flake-utils";

    rust-overlay = {
      url = "github:oxalica/rust-overlay";
      inputs = {
        nixpkgs.follows = "nixpkgs";
        flake-utils.follows = "flake-utils";
      };
    };
  };

  outputs = { nixpkgs, crane, flake-utils, rust-overlay, ... }:
    flake-utils.lib.eachSystem [ "x86_64-linux" ] (system:
      let
        pkgs = import nixpkgs {
          inherit system;
          overlays = [ (import rust-overlay) ];
        };

        inherit (pkgs) lib;

        rustToolchain = pkgs.rust-bin.stable.latest.default.override {
          targets = [ "x86_64-unknown-linux-musl" ];
        };

        craneLib = (crane.mkLib pkgs).overrideToolchain rustToolchain;

        src = lib.cleanSourceWith {
          src = craneLib.path ./.;
          filter = path: type:
            (lib.hasInfix "/templates/" path) ||
            (lib.hasInfix "/static/" path) ||
            (craneLib.filterCargoSources path type);
        };

        redlib = craneLib.buildPackage {
          inherit src;
          strictDeps = true;
          doCheck = false;

          CARGO_BUILD_TARGET = "x86_64-unknown-linux-musl";
          CARGO_BUILD_RUSTFLAGS = "-C target-feature=+crt-static";
        };
      in
      {
        checks = {
          my-crate = redlib;
        };

        packages.default = redlib;
        packages.docker = pkgs.dockerTools.buildImage {
          name = "quay.io/redlib/redlib";
          tag = "latest";
          created = "now";
          copyToRoot = with pkgs.dockerTools; [ caCertificates fakeNss ];
          config.Cmd = "${redlib}/bin/redlib";
        };
      });
}

@@ -1,3 +0,0 @@
build:
  docker:
    web: Dockerfile

@@ -1,16 +0,0 @@
[Install]
WantedBy=default.target
[Container]
AutoUpdate=registry
ContainerName=redlib
DropCapability=ALL
EnvironmentFile=.env
HealthCmd=["wget","--spider","-q","--tries=1","http://localhost:8080/settings"]
HealthInterval=5m
HealthTimeout=3s
Image=quay.io/redlib/redlib:latest
NoNewPrivileges=true
PublishPort=8080:8080
ReadOnly=true
User=nobody

@@ -1,15 +0,0 @@
#!/usr/bin/env bash
# This script generates the CREDITS file in the repository root, which
# contains a list of all contributors to the Redlib project.
#
# We use git-log to surface the names and emails of all authors and committers,
# and grep filters out automated commits made via GitHub.
set -o pipefail
cd "$(dirname "${BASH_SOURCE[0]}")/../" || exit 1
git --no-pager log --pretty='%an <%ae>%n%cn <%ce>' main \
	| sort -t'<' -u -k1,1 -k2,2 \
	| grep -Fv -- 'GitHub <noreply@github.com>' \
	> CREDITS

@@ -1,31 +0,0 @@
import requests
from bs4 import BeautifulSoup
from concurrent.futures import ThreadPoolExecutor

# Simple load-test script: page through r/politics on a local instance,
# fetching every comment page concurrently along the way.
base_url = "http://localhost:8080"
full_path = f"{base_url}/r/politics"

ctr = 0

def fetch_url(url):
    # Note: `ctr` is incremented from worker threads without a lock, so the
    # printed count is approximate; it is only used for progress output.
    global ctr
    response = requests.get(url)
    ctr += 1
    print(f"Request count: {ctr}")
    return response

while full_path:
    response = requests.get(full_path)
    ctr += 1
    print(f"Request count: {ctr}")

    soup = BeautifulSoup(response.text, 'html.parser')

    # Collect the comment-page links of every post on the listing page.
    comment_links = soup.find_all('a', class_='post_comments')
    comment_urls = [base_url + link['href'] for link in comment_links]

    # Fetch all comment pages for this listing page concurrently.
    with ThreadPoolExecutor(max_workers=10) as executor:
        executor.map(fetch_url, comment_urls)

    # Follow the "next page" link (accesskey N) until there are no more pages.
    next_link = soup.find('a', accesskey='N')
    if next_link:
        full_path = base_url + next_link['href']
    else:
        break

@@ -1,18 +0,0 @@
#!/bin/bash
cd "$(dirname "$0")"
LATEST_TAG=$(curl -s https://api.github.com/repos/video-dev/hls.js/releases/latest | jq -r '.tag_name')
if [[ -z "$LATEST_TAG" || "$LATEST_TAG" == "null" ]]; then
	echo "Failed to fetch the latest release tag from GitHub."
	exit 1
fi
LICENSE="// @license http://www.apache.org/licenses/LICENSE-2.0 Apache-2.0
// @source https://github.com/video-dev/hls.js/tree/$LATEST_TAG"
echo "$LICENSE" > ../static/hls.min.js
curl -s https://cdn.jsdelivr.net/npm/hls.js@${LATEST_TAG}/dist/hls.min.js >> ../static/hls.min.js
echo "Update complete. The latest hls.js (${LATEST_TAG}) has been saved to static/hls.min.js."

@@ -1,112 +0,0 @@
#!/bin/bash
# Requirements
# - curl
# - rg
# - jq
# Fetch iOS app versions
ios_version_list=$(curl -s "https://ipaarchive.com/app/usa/1064216828" | rg "(20\d{2}\.\d+.\d+) / (\d+)" --only-matching -r "Version \$1/Build \$2" | sort | uniq)
# Count the number of lines in the version list
ios_app_count=$(echo "$ios_version_list" | wc -l)
echo -e "Fetching \e[34m$ios_app_count iOS app versions...\e[0m"
# Specify the filename as a variable
filename="src/oauth_resources.rs"
# Add comment that it is user generated
echo "// This file was generated by scripts/update_oauth_resources.sh" > "$filename"
echo "// Rerun scripts/update_oauth_resources.sh to update this file" >> "$filename"
echo "// Please do not edit manually" >> "$filename"
echo "// Filled in with real app versions" >> "$filename"
# Open the array in the source file
echo "pub static _IOS_APP_VERSION_LIST: &[&str; $ios_app_count] = &[" >> "$filename"
num=0
# Append the version list to the source file
echo "$ios_version_list" | while IFS= read -r line; do
num=$((num+1))
echo " \"$line\"," >> "$filename"
echo -e "[$num/$ios_app_count] Fetched \e[34m$line\e[0m."
done
# Close the array in the source file
echo "];" >> "$filename"
# Fetch Android app versions
page_1=$(curl -s "https://apkcombo.com/reddit/com.reddit.frontpage/old-versions/" | rg "<a class=\"ver-item\" href=\"(/reddit/com\.reddit\.frontpage/download/phone-20\d{2}\.\d+\.\d+-apk)\" rel=\"nofollow\">" -r "https://apkcombo.com\$1" | sort | uniq)
# Append with pages
page_2=$(curl -s "https://apkcombo.com/reddit/com.reddit.frontpage/old-versions?page=2" | rg "<a class=\"ver-item\" href=\"(/reddit/com\.reddit\.frontpage/download/phone-20\d{2}\.\d+\.\d+-apk)\" rel=\"nofollow\">" -r "https://apkcombo.com\$1" | sort | uniq)
page_3=$(curl -s "https://apkcombo.com/reddit/com.reddit.frontpage/old-versions?page=3" | rg "<a class=\"ver-item\" href=\"(/reddit/com\.reddit\.frontpage/download/phone-20\d{2}\.\d+\.\d+-apk)\" rel=\"nofollow\">" -r "https://apkcombo.com\$1" | sort | uniq)
page_4=$(curl -s "https://apkcombo.com/reddit/com.reddit.frontpage/old-versions?page=4" | rg "<a class=\"ver-item\" href=\"(/reddit/com\.reddit\.frontpage/download/phone-20\d{2}\.\d+\.\d+-apk)\" rel=\"nofollow\">" -r "https://apkcombo.com\$1" | sort | uniq)
page_5=$(curl -s "https://apkcombo.com/reddit/com.reddit.frontpage/old-versions?page=5" | rg "<a class=\"ver-item\" href=\"(/reddit/com\.reddit\.frontpage/download/phone-20\d{2}\.\d+\.\d+-apk)\" rel=\"nofollow\">" -r "https://apkcombo.com\$1" | sort | uniq)
# Concatenate all pages
versions="${page_1}"
versions+=$'\n'
versions+="${page_2}"
versions+=$'\n'
versions+="${page_3}"
versions+=$'\n'
versions+="${page_4}"
versions+=$'\n'
versions+="${page_5}"
# Count the number of lines in the version list
android_count=$(echo "$versions" | wc -l)
echo -e "Fetching \e[32m$android_count Android app versions...\e[0m"
# Append to the source file
echo "pub static ANDROID_APP_VERSION_LIST: &[&str; $android_count] = &[" >> "$filename"
num=0
# For each in versions, curl the page and extract the build number
echo "$versions" | while IFS= read -r line; do
num=$((num+1))
fetch_page=$(curl -s "$line")
build=$(echo "$fetch_page" | rg "<span class=\"vercode\">\((\d+)\)</span>" --only-matching -r "\$1" | head -n1)
version=$(echo "$fetch_page" | rg "<span class=\"vername\">Reddit (20\d{2}\.\d+\.\d+)</span>" --only-matching -r "\$1" | head -n1)
echo " \"Version $version/Build $build\"," >> "$filename"
echo -e "[$num/$android_count] Fetched \e[32mVersion $version/Build $build\e[0m."
done
# Close the array in the source file
echo "];" >> "$filename"
# Retrieve iOS versions
table=$(curl -s "https://en.wikipedia.org/w/api.php?action=parse&page=IOS_17&prop=wikitext&section=31&format=json" | jq ".parse.wikitext.\"*\"" | rg "(17\.[\d\.]*)\\\n\|(\w*)\\\n\|" --only-matching -r "Version \$1 (Build \$2)")
# Count the number of lines in the version list
ios_count=$(echo "$table" | wc -l)
echo -e "Fetching \e[34m$ios_count iOS versions...\e[0m"
# Append to the source file
echo "pub static _IOS_OS_VERSION_LIST: &[&str; $ios_count] = &[" >> "$filename"
num=0
# For each in versions, curl the page and extract the build number
echo "$table" | while IFS= read -r line; do
num=$((num+1))
echo " \"$line\"," >> "$filename"
echo -e "\e[34m[$num/$ios_count] Fetched $line\e[0m."
done
# Close the array in the source file
echo "];" >> "$filename"
echo -e "\e[34mRetrieved $ios_app_count iOS app versions.\e[0m"
echo -e "\e[32mRetrieved $android_count Android app versions.\e[0m"
echo -e "\e[34mRetrieved $ios_count iOS versions.\e[0m"
echo -e "\e[34mTotal: $((ios_app_count + android_count + ios_count))\e[0m"
echo -e "\e[32mSuccess!\e[0m"

View File

@ -1,125 +0,0 @@
{
"defaultAction": "SCMP_ACT_ERRNO",
"archMap": [
{
"architecture": "SCMP_ARCH_X86_64",
"subArchitectures": [
"SCMP_ARCH_X86",
"SCMP_ARCH_X32"
]
},
{
"architecture": "SCMP_ARCH_AARCH64",
"subArchitectures": [
"SCMP_ARCH_ARM"
]
},
{
"architecture": "SCMP_ARCH_MIPS64",
"subArchitectures": [
"SCMP_ARCH_MIPS",
"SCMP_ARCH_MIPS64N32"
]
},
{
"architecture": "SCMP_ARCH_MIPS64N32",
"subArchitectures": [
"SCMP_ARCH_MIPS",
"SCMP_ARCH_MIPS64"
]
},
{
"architecture": "SCMP_ARCH_MIPSEL64",
"subArchitectures": [
"SCMP_ARCH_MIPSEL",
"SCMP_ARCH_MIPSEL64N32"
]
},
{
"architecture": "SCMP_ARCH_MIPSEL64N32",
"subArchitectures": [
"SCMP_ARCH_MIPSEL",
"SCMP_ARCH_MIPSEL64"
]
},
{
"architecture": "SCMP_ARCH_S390X",
"subArchitectures": [
"SCMP_ARCH_S390"
]
}
],
"syscalls": [
{
"names": [
"accept4",
"arch_prctl",
"bind",
"brk",
"clock_gettime",
"clone",
"close",
"connect",
"epoll_create1",
"epoll_ctl",
"epoll_pwait",
"eventfd2",
"execve",
"exit",
"exit_group",
"fcntl",
"flock",
"fork",
"fstat",
"futex",
"getcwd",
"getpeername",
"getpid",
"getrandom",
"getsockname",
"getsockopt",
"getgid",
"getppid",
"gettid",
"getuid",
"ioctl",
"listen",
"lseek",
"madvise",
"mmap",
"mprotect",
"mremap",
"munmap",
"newfstatat",
"open",
"openat",
"prctl",
"poll",
"read",
"recvfrom",
"rt_sigaction",
"rt_sigprocmask",
"rt_sigreturn",
"sched_getaffinity",
"sched_yield",
"sendto",
"setitimer",
"setsockopt",
"set_tid_address",
"shutdown",
"sigaltstack",
"socket",
"socketpair",
"stat",
"wait4",
"write",
"writev"
],
"action": "SCMP_ACT_ALLOW",
"args": [],
"comment": "",
"includes": {},
"excludes": {}
}
]
}
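
The profile above is an allowlist: any syscall not named returns an error instead of executing. A usage sketch, assuming the profile is saved as seccomp-redlib.json (a hypothetical name):

# Apply the seccomp allowlist to the container at run time.
docker run --security-opt seccomp="$(pwd)/seccomp-redlib.json" \
  -p 8080:8080 quay.io/redlib/redlib:latest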

View File

@ -1,530 +0,0 @@
use arc_swap::ArcSwap;
use cached::proc_macro::cached;
use futures_lite::future::block_on;
use futures_lite::{future::Boxed, FutureExt};
use hyper::client::HttpConnector;
use hyper::header::HeaderValue;
use hyper::{body, body::Buf, header, Body, Client, Method, Request, Response, Uri};
use hyper_rustls::HttpsConnector;
use libflate::gzip;
use log::{debug, error, warn};
use once_cell::sync::Lazy;
use percent_encoding::{percent_encode, CONTROLS};
use serde_json::Value;
use std::sync::atomic::Ordering;
use std::sync::atomic::{AtomicBool, AtomicU16};
use std::{io, result::Result};
use crate::dbg_msg;
use crate::oauth::{force_refresh_token, token_daemon, Oauth};
use crate::server::RequestExt;
use crate::utils::format_url;
const REDDIT_URL_BASE: &str = "https://oauth.reddit.com";
const REDDIT_URL_BASE_HOST: &str = "oauth.reddit.com";
const REDDIT_SHORT_URL_BASE: &str = "https://redd.it";
const REDDIT_SHORT_URL_BASE_HOST: &str = "redd.it";
const ALTERNATIVE_REDDIT_URL_BASE: &str = "https://www.reddit.com";
const ALTERNATIVE_REDDIT_URL_BASE_HOST: &str = "www.reddit.com";
pub static HTTPS_CONNECTOR: Lazy<HttpsConnector<HttpConnector>> =
Lazy::new(|| hyper_rustls::HttpsConnectorBuilder::new().with_native_roots().https_only().enable_http2().build());
pub static CLIENT: Lazy<Client<HttpsConnector<HttpConnector>>> = Lazy::new(|| Client::builder().build::<_, Body>(HTTPS_CONNECTOR.clone()));
pub static OAUTH_CLIENT: Lazy<ArcSwap<Oauth>> = Lazy::new(|| {
let client = block_on(Oauth::new());
tokio::spawn(token_daemon());
ArcSwap::new(client.into())
});
pub static OAUTH_RATELIMIT_REMAINING: AtomicU16 = AtomicU16::new(99);
pub static OAUTH_IS_ROLLING_OVER: AtomicBool = AtomicBool::new(false);
static URL_PAIRS: [(&str, &str); 2] = [
(ALTERNATIVE_REDDIT_URL_BASE, ALTERNATIVE_REDDIT_URL_BASE_HOST),
(REDDIT_SHORT_URL_BASE, REDDIT_SHORT_URL_BASE_HOST),
];
/// Gets the canonical path for a resource on Reddit. This is accomplished by
/// making a `HEAD` request to Reddit at the path given in `path`.
///
/// This function returns `Ok(Some(path))`, where `path`'s value is identical
/// to that of the value of the argument `path`, if Reddit responds to our
/// `HEAD` request with a 2xx-family HTTP code. It will also return an
/// `Ok(Some(String))` if Reddit responds to our `HEAD` request with a
/// `Location` header in the response, and the HTTP code is in the 3xx-family;
/// the `String` will contain the path as reported in `Location`. The return
/// value is `Ok(None)` if Reddit responded with a 3xx, but did not provide a
/// `Location` header. An `Err(String)` is returned if Reddit responds with a
/// 429, or if we were unable to decode the value in the `Location` header.
#[cached(size = 1024, time = 600, result = true)]
#[async_recursion::async_recursion]
pub async fn canonical_path(path: String, tries: i8) -> Result<Option<String>, String> {
if tries == 0 {
return Ok(None);
}
// for each URL pair, try the HEAD request
let res = {
// for url base and host in URL_PAIRS, try reddit_short_head(path.clone(), true, url_base, url_base_host) and if it succeeds, set res. else, res = None
let mut res = None;
for (url_base, url_base_host) in URL_PAIRS {
res = reddit_short_head(path.clone(), true, url_base, url_base_host).await.ok();
if let Some(res) = &res {
if !res.status().is_client_error() {
break;
}
}
}
res
};
let res = res.ok_or_else(|| "Unable to make HEAD request to Reddit.".to_string())?;
let status = res.status().as_u16();
let policy_error = res.headers().get(header::RETRY_AFTER).is_some();
match status {
// If Reddit responds with a 2xx, then the path is already canonical.
200..=299 => Ok(Some(path)),
// If Reddit responds with a 301, then the path is redirected.
301 => match res.headers().get(header::LOCATION) {
Some(val) => {
let Ok(original) = val.to_str() else {
return Err("Unable to decode Location header.".to_string());
};
// We need to strip the .json suffix from the original path.
// In addition, we want to remove share parameters.
// Cut it off here instead of letting it propagate all the way
// to main.rs
let stripped_uri = original.strip_suffix(".json").unwrap_or(original).split('?').next().unwrap_or_default();
// The reason why we now have to format_url, is because the new OAuth
// endpoints seem to return full paths, instead of relative paths.
// So we need to strip the .json suffix from the original path, and
// also remove all Reddit domain parts with format_url.
// Otherwise, it will literally redirect to Reddit.com.
let uri = format_url(stripped_uri);
// Decrement tries and try again
canonical_path(uri, tries - 1).await
}
None => Ok(None),
},
// If Reddit responds with anything other than 3xx (except for the 2xx and 301
// as above), return a None.
300..=399 => Ok(None),
// Rate limiting
429 => Err("Too many requests.".to_string()),
// Special condition rate limiting - https://github.com/redlib-org/redlib/issues/229
403 if policy_error => Err("Too many requests.".to_string()),
_ => Ok(
res
.headers()
.get(header::LOCATION)
.map(|val| percent_encode(val.as_bytes(), CONTROLS).to_string().trim_start_matches(REDDIT_URL_BASE).to_string()),
),
}
}
pub async fn proxy(req: Request<Body>, format: &str) -> Result<Response<Body>, String> {
let mut url = format!("{format}?{}", req.uri().query().unwrap_or_default());
// For each parameter in request
for (name, value) in &req.params() {
// Fill the parameter value in the url
url = url.replace(&format!("{{{name}}}"), value);
}
stream(&url, &req).await
}
async fn stream(url: &str, req: &Request<Body>) -> Result<Response<Body>, String> {
// First parameter is target URL (mandatory).
let parsed_uri = url.parse::<Uri>().map_err(|_| "Couldn't parse URL".to_string())?;
// Build the hyper client from the HTTPS connector.
let client: &Lazy<Client<_, Body>> = &CLIENT;
let mut builder = Request::get(parsed_uri);
// Copy useful headers from original request
for &key in &["Range", "If-Modified-Since", "Cache-Control"] {
if let Some(value) = req.headers().get(key) {
builder = builder.header(key, value);
}
}
let stream_request = builder.body(Body::empty()).map_err(|_| "Couldn't build empty body in stream".to_string())?;
client
.request(stream_request)
.await
.map(|mut res| {
let mut rm = |key: &str| res.headers_mut().remove(key);
rm("access-control-expose-headers");
rm("server");
rm("vary");
rm("etag");
rm("x-cdn");
rm("x-cdn-client-region");
rm("x-cdn-name");
rm("x-cdn-server-region");
rm("x-reddit-cdn");
rm("x-reddit-video-features");
rm("Nel");
rm("Report-To");
res
})
.map_err(|e| e.to_string())
}
/// Makes a GET request to Reddit at `path`. By default, this will honor HTTP
/// 3xx codes Reddit returns and will automatically redirect.
fn reddit_get(path: String, quarantine: bool) -> Boxed<Result<Response<Body>, String>> {
request(&Method::GET, path, true, quarantine, REDDIT_URL_BASE, REDDIT_URL_BASE_HOST)
}
/// Makes a HEAD request to Reddit at `path`, using the short URL base. This will not follow redirects.
fn reddit_short_head(path: String, quarantine: bool, base_path: &'static str, host: &'static str) -> Boxed<Result<Response<Body>, String>> {
request(&Method::HEAD, path, false, quarantine, base_path, host)
}
// /// Makes a HEAD request to Reddit at `path`. This will not follow redirects.
// fn reddit_head(path: String, quarantine: bool) -> Boxed<Result<Response<Body>, String>> {
// request(&Method::HEAD, path, false, quarantine, false)
// }
// Unused - reddit_head is only ever called in the context of a short URL
/// Makes a request to Reddit. If `redirect` is `true`, `request_with_redirect`
/// will recurse on the URL that Reddit provides in the Location HTTP header
/// in its response.
fn request(method: &'static Method, path: String, redirect: bool, quarantine: bool, base_path: &'static str, host: &'static str) -> Boxed<Result<Response<Body>, String>> {
// Build Reddit URL from path.
let url = format!("{base_path}{path}");
// Construct the hyper client from the HTTPS connector.
let client: &Lazy<Client<_, Body>> = &CLIENT;
let (token, vendor_id, device_id, user_agent, loid) = {
let client = OAUTH_CLIENT.load_full();
(
client.token.clone(),
client.headers_map.get("Client-Vendor-Id").cloned().unwrap_or_default(),
client.headers_map.get("X-Reddit-Device-Id").cloned().unwrap_or_default(),
client.headers_map.get("User-Agent").cloned().unwrap_or_default(),
client.headers_map.get("x-reddit-loid").cloned().unwrap_or_default(),
)
};
// Build request to Reddit. When making a GET, request gzip compression.
// (Reddit doesn't do brotli yet.)
let mut headers = vec![
("User-Agent", user_agent),
("Client-Vendor-Id", vendor_id),
("X-Reddit-Device-Id", device_id),
("x-reddit-loid", loid),
("Host", host.to_string()),
("Authorization", format!("Bearer {token}")),
("Accept-Encoding", if method == Method::GET { "gzip".into() } else { "identity".into() }),
(
"Cookie",
if quarantine {
"_options=%7B%22pref_quarantine_optin%22%3A%20true%2C%20%22pref_gated_sr_optin%22%3A%20true%7D".into()
} else {
"".into()
},
),
("X-Reddit-Width", fastrand::u32(300..500).to_string()),
("X-Reddit-DPR", "2".to_owned()),
("Device-Name", format!("Android {}", fastrand::u8(9..=14))),
];
// shuffle headers: https://github.com/redlib-org/redlib/issues/324
fastrand::shuffle(&mut headers);
let mut builder = Request::builder().method(method).uri(&url);
for (key, value) in headers {
builder = builder.header(key, value);
}
let builder = builder.body(Body::empty());
async move {
match builder {
Ok(req) => match client.request(req).await {
Ok(mut response) => {
// Reddit may respond with a 3xx. Decide whether or not to
// redirect based on caller params.
if response.status().is_redirection() {
if !redirect {
return Ok(response);
};
let location_header = response.headers().get(header::LOCATION);
if location_header == Some(&HeaderValue::from_static("https://www.reddit.com/")) {
return Err("Reddit response was invalid".to_string());
}
return request(
method,
location_header
.map(|val| {
// We need to make adjustments to the URI
// we get back from Reddit. Namely, we
// must:
//
// 1. Remove the authority (e.g.
// https://www.reddit.com) that may be
// present, so that we recurse on the
// path (and query parameters) as
// required.
//
// 2. Percent-encode the path.
let new_path = percent_encode(val.as_bytes(), CONTROLS)
.to_string()
.trim_start_matches(REDDIT_URL_BASE)
.trim_start_matches(ALTERNATIVE_REDDIT_URL_BASE)
.to_string();
format!("{new_path}{}raw_json=1", if new_path.contains('?') { "&" } else { "?" })
})
.unwrap_or_default()
.to_string(),
true,
quarantine,
base_path,
host,
)
.await;
};
match response.headers().get(header::CONTENT_ENCODING) {
// Content not compressed.
None => Ok(response),
// Content encoded (hopefully with gzip).
Some(hdr) => {
match hdr.to_str() {
Ok(val) => match val {
"gzip" => {}
"identity" => return Ok(response),
_ => return Err("Reddit response was encoded with an unsupported compressor".to_string()),
},
Err(_) => return Err("Reddit response was invalid".to_string()),
}
// We get here if the body is gzip-compressed.
// The body must be something that implements
// std::io::Read, hence the conversion to
// bytes::buf::Buf and then transformation into a
// Reader.
let mut decompressed: Vec<u8>;
{
let mut aggregated_body = match body::aggregate(response.body_mut()).await {
Ok(b) => b.reader(),
Err(e) => return Err(e.to_string()),
};
let mut decoder = match gzip::Decoder::new(&mut aggregated_body) {
Ok(decoder) => decoder,
Err(e) => return Err(e.to_string()),
};
decompressed = Vec::<u8>::new();
if let Err(e) = io::copy(&mut decoder, &mut decompressed) {
return Err(e.to_string());
};
}
response.headers_mut().remove(header::CONTENT_ENCODING);
response.headers_mut().insert(header::CONTENT_LENGTH, decompressed.len().into());
*(response.body_mut()) = Body::from(decompressed);
Ok(response)
}
}
}
Err(e) => {
dbg_msg!("{method} {REDDIT_URL_BASE}{path}: {}", e);
Err(e.to_string())
}
},
Err(_) => Err("Post url contains non-ASCII characters".to_string()),
}
}
.boxed()
}
// Make a request to a Reddit API and parse the JSON response
#[cached(size = 100, time = 30, result = true)]
pub async fn json(path: String, quarantine: bool) -> Result<Value, String> {
// Closure to quickly build errors
let err = |msg: &str, e: String, path: String| -> Result<Value, String> {
// eprintln!("{} - {}: {}", url, msg, e);
Err(format!("{msg}: {e} | {path}"))
};
// First, handle rolling over the OAUTH_CLIENT if need be.
let current_rate_limit = OAUTH_RATELIMIT_REMAINING.load(Ordering::SeqCst);
let is_rolling_over = OAUTH_IS_ROLLING_OVER.load(Ordering::SeqCst);
if current_rate_limit < 10 && !is_rolling_over {
warn!("Rate limit {current_rate_limit} is low. Spawning force_refresh_token()");
tokio::spawn(force_refresh_token());
}
OAUTH_RATELIMIT_REMAINING.fetch_sub(1, Ordering::SeqCst);
// Fetch the url...
match reddit_get(path.clone(), quarantine).await {
Ok(response) => {
let status = response.status();
let reset: Option<String> = if let (Some(remaining), Some(reset), Some(used)) = (
response.headers().get("x-ratelimit-remaining").and_then(|val| val.to_str().ok().map(|s| s.to_string())),
response.headers().get("x-ratelimit-reset").and_then(|val| val.to_str().ok().map(|s| s.to_string())),
response.headers().get("x-ratelimit-used").and_then(|val| val.to_str().ok().map(|s| s.to_string())),
) {
debug!(
"Ratelimit remaining: Header says {remaining}, we have {current_rate_limit}. Resets in {reset}. Rollover: {}. Ratelimit used: {used}",
if is_rolling_over { "yes" } else { "no" },
);
Some(reset)
} else {
None
};
// asynchronously aggregate the chunks of the body
match hyper::body::aggregate(response).await {
Ok(body) => {
let has_remaining = body.has_remaining();
if !has_remaining {
// Rate limited, so spawn a force_refresh_token()
tokio::spawn(force_refresh_token());
return match reset {
Some(val) => Err(format!(
"Reddit rate limit exceeded. Try refreshing in a few seconds.\
Rate limit will reset in: {val}"
)),
None => Err("Reddit rate limit exceeded".to_string()),
};
}
// Parse the response from Reddit as JSON
match serde_json::from_reader(body.reader()) {
Ok(value) => {
let json: Value = value;
// If user is suspended
if let Some(data) = json.get("data") {
if let Some(is_suspended) = data.get("is_suspended").and_then(Value::as_bool) {
if is_suspended {
return Err("suspended".into());
}
}
}
// If Reddit returned an error
if json["error"].is_i64() {
// OAuth token has expired; http status 401
if json["message"] == "Unauthorized" {
error!("Forcing a token refresh");
let () = force_refresh_token().await;
return Err("OAuth token has expired. Please refresh the page!".to_string());
}
// Handle quarantined
if json["reason"] == "quarantined" {
return Err("quarantined".into());
}
// Handle gated
if json["reason"] == "gated" {
return Err("gated".into());
}
// Handle private subs
if json["reason"] == "private" {
return Err("private".into());
}
// Handle banned subs
if json["reason"] == "banned" {
return Err("banned".into());
}
Err(format!("Reddit error {} \"{}\": {} | {path}", json["error"], json["reason"], json["message"]))
} else {
Ok(json)
}
}
Err(e) => {
error!("Got an invalid response from reddit {e}. Status code: {status}");
if status.is_server_error() {
Err("Reddit is having issues, check if there's an outage".to_string())
} else {
err("Failed to parse page JSON data", e.to_string(), path)
}
}
}
}
Err(e) => err("Failed receiving body from Reddit", e.to_string(), path),
}
}
Err(e) => err("Couldn't send request to Reddit", e, path),
}
}
#[cfg(test)]
static POPULAR_URL: &str = "/r/popular/hot.json?&raw_json=1&geo_filter=GLOBAL";
#[tokio::test(flavor = "multi_thread")]
async fn test_localization_popular() {
let val = json(POPULAR_URL.to_string(), false).await.unwrap();
assert_eq!("GLOBAL", val["data"]["geo_filter"].as_str().unwrap());
}
#[tokio::test(flavor = "multi_thread")]
async fn test_obfuscated_share_link() {
let share_link = "/r/rust/s/kPgq8WNHRK".into();
// Correct link without share parameters
let canonical_link = "/r/rust/comments/18t5968/why_use_tuple_struct_over_standard_struct/kfbqlbc/".into();
assert_eq!(canonical_path(share_link, 3).await, Ok(Some(canonical_link)));
}
#[tokio::test(flavor = "multi_thread")]
async fn test_share_link_strip_json() {
let link = "/17krzvz".into();
let canonical_link = "/comments/17krzvz".into();
assert_eq!(canonical_path(link, 3).await, Ok(Some(canonical_link)));
}
#[tokio::test(flavor = "multi_thread")]
async fn test_private_sub() {
let link = json("/r/suicide/about.json?raw_json=1".into(), true).await;
assert!(link.is_err());
assert_eq!(link, Err("private".into()));
}
#[tokio::test(flavor = "multi_thread")]
async fn test_banned_sub() {
let link = json("/r/aaa/about.json?raw_json=1".into(), true).await;
assert!(link.is_err());
assert_eq!(link, Err("banned".into()));
}
#[tokio::test(flavor = "multi_thread")]
async fn test_gated_sub() {
// quarantine is set to false here specifically to verify we still catch the "gated" response without opting in
let link = json("/r/drugs/about.json?raw_json=1".into(), false).await;
assert!(link.is_err());
assert_eq!(link, Err("gated".into()));
}
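
The behavior canonical_path tests above can be reproduced by hand; a sketch with curl (not part of the source, and Reddit may require a browser-like User-Agent):

# A HEAD request to a share link answers 301 with the canonical
# comments path in the Location header.
curl -sI -A "Mozilla/5.0" "https://www.reddit.com/r/rust/s/kPgq8WNHRK" | grep -i '^location:'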

View File

@ -1,299 +0,0 @@
use once_cell::sync::Lazy;
use serde::{Deserialize, Serialize};
use std::{env::var, fs::read_to_string};
// Waiting for https://github.com/rust-lang/rust/issues/74465 to land, so we
// can reduce reliance on once_cell.
//
// This is the local static that is initialized at runtime (technically at
// first request) and contains the instance settings.
pub static CONFIG: Lazy<Config> = Lazy::new(Config::load);
// This serves as the frontend for an archival API - on removed comments, this URL
// will be the base of a link, to display removed content (on another site).
pub const DEFAULT_PUSHSHIFT_FRONTEND: &str = "undelete.pullpush.io";
/// Stores the configuration parsed from the environment variables and the
/// config file. `Config::Default()` contains None for each setting.
/// When adding more config settings, add it to `Config::load`,
/// `get_setting_from_config`, both below, as well as
/// `instance_info::InstanceInfo.to_string`(), README.md and app.json.
#[derive(Default, Serialize, Deserialize, Clone, Debug)]
pub struct Config {
#[serde(rename = "REDLIB_SFW_ONLY")]
#[serde(alias = "LIBREDDIT_SFW_ONLY")]
pub(crate) sfw_only: Option<String>,
#[serde(rename = "REDLIB_DEFAULT_THEME")]
#[serde(alias = "LIBREDDIT_DEFAULT_THEME")]
pub(crate) default_theme: Option<String>,
#[serde(rename = "REDLIB_DEFAULT_MASCOT")]
#[serde(alias = "LIBREDDIT_DEFAULT_MASCOT")]
pub(crate) default_mascot: Option<String>,
#[serde(rename = "REDLIB_DEFAULT_FRONT_PAGE")]
#[serde(alias = "LIBREDDIT_DEFAULT_FRONT_PAGE")]
pub(crate) default_front_page: Option<String>,
#[serde(rename = "REDLIB_DEFAULT_LAYOUT")]
#[serde(alias = "LIBREDDIT_DEFAULT_LAYOUT")]
pub(crate) default_layout: Option<String>,
#[serde(rename = "REDLIB_DEFAULT_WIDE")]
#[serde(alias = "LIBREDDIT_DEFAULT_WIDE")]
pub(crate) default_wide: Option<String>,
#[serde(rename = "REDLIB_DEFAULT_COMMENT_SORT")]
#[serde(alias = "LIBREDDIT_DEFAULT_COMMENT_SORT")]
pub(crate) default_comment_sort: Option<String>,
#[serde(rename = "REDLIB_DEFAULT_POST_SORT")]
#[serde(alias = "LIBREDDIT_DEFAULT_POST_SORT")]
pub(crate) default_post_sort: Option<String>,
#[serde(rename = "REDLIB_DEFAULT_BLUR_SPOILER")]
#[serde(alias = "LIBREDDIT_DEFAULT_BLUR_SPOILER")]
pub(crate) default_blur_spoiler: Option<String>,
#[serde(rename = "REDLIB_DEFAULT_SHOW_NSFW")]
#[serde(alias = "LIBREDDIT_DEFAULT_SHOW_NSFW")]
pub(crate) default_show_nsfw: Option<String>,
#[serde(rename = "REDLIB_DEFAULT_BLUR_NSFW")]
#[serde(alias = "LIBREDDIT_DEFAULT_BLUR_NSFW")]
pub(crate) default_blur_nsfw: Option<String>,
#[serde(rename = "REDLIB_DEFAULT_USE_HLS")]
#[serde(alias = "LIBREDDIT_DEFAULT_USE_HLS")]
pub(crate) default_use_hls: Option<String>,
#[serde(rename = "REDLIB_DEFAULT_FFMPEG_VIDEO_DOWNLOADS")]
#[serde(alias = "LIBREDDIT_DEFAULT_FFMPEG_VIDEO_DOWNLOADS")]
pub(crate) default_ffmpeg_video_downloads: Option<String>,
#[serde(rename = "REDLIB_DEFAULT_HIDE_HLS_NOTIFICATION")]
#[serde(alias = "LIBREDDIT_DEFAULT_HIDE_HLS_NOTIFICATION")]
pub(crate) default_hide_hls_notification: Option<String>,
#[serde(rename = "REDLIB_DEFAULT_HIDE_AWARDS")]
#[serde(alias = "LIBREDDIT_DEFAULT_HIDE_AWARDS")]
pub(crate) default_hide_awards: Option<String>,
#[serde(rename = "REDLIB_DEFAULT_HIDE_SIDEBAR_AND_SUMMARY")]
#[serde(alias = "LIBREDDIT_DEFAULT_HIDE_SIDEBAR_AND_SUMMARY")]
pub(crate) default_hide_sidebar_and_summary: Option<String>,
#[serde(rename = "REDLIB_DEFAULT_HIDE_BANNER")]
#[serde(alias = "LIBREDDIT_DEFAULT_HIDE_BANNER")]
pub(crate) default_hide_banner: Option<String>,
#[serde(rename = "REDLIB_DEFAULT_HIDE_SCORE")]
#[serde(alias = "LIBREDDIT_DEFAULT_HIDE_SCORE")]
pub(crate) default_hide_score: Option<String>,
#[serde(rename = "REDLIB_DEFAULT_SUBSCRIPTIONS")]
#[serde(alias = "LIBREDDIT_DEFAULT_SUBSCRIPTIONS")]
pub(crate) default_subscriptions: Option<String>,
#[serde(rename = "REDLIB_DEFAULT_FILTERS")]
#[serde(alias = "LIBREDDIT_DEFAULT_FILTERS")]
pub(crate) default_filters: Option<String>,
#[serde(rename = "REDLIB_DEFAULT_QUICKLIST")]
#[serde(alias = "LIBREDDIT_DEFAULT_QUICKLIST")]
pub(crate) default_quicklist: Option<String>,
#[serde(rename = "REDLIB_DEFAULT_DISABLE_VISIT_REDDIT_CONFIRMATION")]
#[serde(alias = "LIBREDDIT_DEFAULT_DISABLE_VISIT_REDDIT_CONFIRMATION")]
pub(crate) default_disable_visit_reddit_confirmation: Option<String>,
#[serde(rename = "REDLIB_BANNER")]
#[serde(alias = "LIBREDDIT_BANNER")]
pub(crate) banner: Option<String>,
#[serde(rename = "REDLIB_ROBOTS_DISABLE_INDEXING")]
#[serde(alias = "LIBREDDIT_ROBOTS_DISABLE_INDEXING")]
pub(crate) robots_disable_indexing: Option<String>,
#[serde(rename = "REDLIB_PUSHSHIFT_FRONTEND")]
#[serde(alias = "LIBREDDIT_PUSHSHIFT_FRONTEND")]
pub(crate) pushshift: Option<String>,
#[serde(rename = "REDLIB_ENABLE_RSS")]
pub(crate) enable_rss: Option<String>,
#[serde(rename = "REDLIB_FULL_URL")]
pub(crate) full_url: Option<String>,
}
impl Config {
/// Load the configuration from the environment variables and the config file.
/// In the case that there are no environment variables set and there is no
/// config file, this function returns a Config that contains all None values.
pub fn load() -> Self {
let load_config = |name: &str| {
let new_file = read_to_string(name);
new_file.ok().and_then(|new_file| toml::from_str::<Self>(&new_file).ok())
};
let config = load_config("redlib.toml").or_else(|| load_config("libreddit.toml")).unwrap_or_default();
// This function defines the order of preference - first check for
// environment variables with "REDLIB", then check the legacy LIBREDDIT
// option, then check the config, then if all are `None`, return a `None`
let parse = |key: &str| -> Option<String> {
// Return the first non-`None` value
// If all are `None`, return `None`
let legacy_key = key.replace("REDLIB_", "LIBREDDIT_");
var(key).ok().or_else(|| var(legacy_key).ok()).or_else(|| get_setting_from_config(key, &config))
};
Self {
sfw_only: parse("REDLIB_SFW_ONLY"),
default_theme: parse("REDLIB_DEFAULT_THEME"),
default_mascot: parse("REDLIB_DEFAULT_MASCOT"),
default_front_page: parse("REDLIB_DEFAULT_FRONT_PAGE"),
default_layout: parse("REDLIB_DEFAULT_LAYOUT"),
default_post_sort: parse("REDLIB_DEFAULT_POST_SORT"),
default_wide: parse("REDLIB_DEFAULT_WIDE"),
default_comment_sort: parse("REDLIB_DEFAULT_COMMENT_SORT"),
default_blur_spoiler: parse("REDLIB_DEFAULT_BLUR_SPOILER"),
default_show_nsfw: parse("REDLIB_DEFAULT_SHOW_NSFW"),
default_blur_nsfw: parse("REDLIB_DEFAULT_BLUR_NSFW"),
default_use_hls: parse("REDLIB_DEFAULT_USE_HLS"),
default_ffmpeg_video_downloads: parse("REDLIB_DEFAULT_FFMPEG_VIDEO_DOWNLOADS"),
default_hide_hls_notification: parse("REDLIB_DEFAULT_HIDE_HLS_NOTIFICATION"),
default_hide_awards: parse("REDLIB_DEFAULT_HIDE_AWARDS"),
default_hide_sidebar_and_summary: parse("REDLIB_DEFAULT_HIDE_SIDEBAR_AND_SUMMARY"),
default_hide_banner: parse("REDLIB_DEFAULT_HIDE_BANNER"),
default_hide_score: parse("REDLIB_DEFAULT_HIDE_SCORE"),
default_subscriptions: parse("REDLIB_DEFAULT_SUBSCRIPTIONS"),
default_filters: parse("REDLIB_DEFAULT_FILTERS"),
default_quicklist: parse("REDLIB_DEFAULT_QUICKLIST"),
default_disable_visit_reddit_confirmation: parse("REDLIB_DEFAULT_DISABLE_VISIT_REDDIT_CONFIRMATION"),
banner: parse("REDLIB_BANNER"),
robots_disable_indexing: parse("REDLIB_ROBOTS_DISABLE_INDEXING"),
pushshift: parse("REDLIB_PUSHSHIFT_FRONTEND"),
enable_rss: parse("REDLIB_ENABLE_RSS"),
full_url: parse("REDLIB_FULL_URL"),
}
}
}
fn get_setting_from_config(name: &str, config: &Config) -> Option<String> {
match name {
"REDLIB_SFW_ONLY" => config.sfw_only.clone(),
"REDLIB_DEFAULT_THEME" => config.default_theme.clone(),
"REDLIB_DEFAULT_MASCOT" => config.default_mascot.clone(),
"REDLIB_DEFAULT_FRONT_PAGE" => config.default_front_page.clone(),
"REDLIB_DEFAULT_LAYOUT" => config.default_layout.clone(),
"REDLIB_DEFAULT_COMMENT_SORT" => config.default_comment_sort.clone(),
"REDLIB_DEFAULT_POST_SORT" => config.default_post_sort.clone(),
"REDLIB_DEFAULT_BLUR_SPOILER" => config.default_blur_spoiler.clone(),
"REDLIB_DEFAULT_SHOW_NSFW" => config.default_show_nsfw.clone(),
"REDLIB_DEFAULT_BLUR_NSFW" => config.default_blur_nsfw.clone(),
"REDLIB_DEFAULT_USE_HLS" => config.default_use_hls.clone(),
"REDLIB_DEFAULT_FFMPEG_VIDEO_DOWNLOADS" => config.default_ffmpeg_video_downloads.clone(),
"REDLIB_DEFAULT_HIDE_HLS_NOTIFICATION" => config.default_hide_hls_notification.clone(),
"REDLIB_DEFAULT_WIDE" => config.default_wide.clone(),
"REDLIB_DEFAULT_HIDE_AWARDS" => config.default_hide_awards.clone(),
"REDLIB_DEFAULT_HIDE_SIDEBAR_AND_SUMMARY" => config.default_hide_sidebar_and_summary.clone(),
"REDLIB_DEFAULT_HIDE_BANNER" => config.default_hide_banner.clone(),
"REDLIB_DEFAULT_HIDE_SCORE" => config.default_hide_score.clone(),
"REDLIB_DEFAULT_SUBSCRIPTIONS" => config.default_subscriptions.clone(),
"REDLIB_DEFAULT_FILTERS" => config.default_filters.clone(),
"REDLIB_DEFAULT_QUICKLIST" => config.default_quicklist.clone(),
"REDLIB_DEFAULT_DISABLE_VISIT_REDDIT_CONFIRMATION" => config.default_disable_visit_reddit_confirmation.clone(),
"REDLIB_BANNER" => config.banner.clone(),
"REDLIB_ROBOTS_DISABLE_INDEXING" => config.robots_disable_indexing.clone(),
"REDLIB_PUSHSHIFT_FRONTEND" => config.pushshift.clone(),
"REDLIB_ENABLE_RSS" => config.enable_rss.clone(),
"REDLIB_FULL_URL" => config.full_url.clone(),
_ => None,
}
}
/// Retrieves setting from environment variable or config file.
pub fn get_setting(name: &str) -> Option<String> {
get_setting_from_config(name, &CONFIG)
}
#[cfg(test)]
use {sealed_test::prelude::*, std::fs::write};
#[test]
fn test_deserialize() {
// Must handle empty input
let result = toml::from_str::<Config>("");
assert!(result.is_ok(), "Error: {}", result.unwrap_err());
}
#[test]
#[sealed_test(env = [("REDLIB_SFW_ONLY", "on")])]
fn test_env_var() {
assert!(crate::utils::sfw_only())
}
#[test]
#[sealed_test]
fn test_config() {
let config_to_write = r#"REDLIB_DEFAULT_COMMENT_SORT = "best""#;
write("redlib.toml", config_to_write).unwrap();
assert_eq!(get_setting("REDLIB_DEFAULT_COMMENT_SORT"), Some("best".into()));
}
#[test]
#[sealed_test]
fn test_config_legacy() {
let config_to_write = r#"LIBREDDIT_DEFAULT_COMMENT_SORT = "best""#;
write("libreddit.toml", config_to_write).unwrap();
assert_eq!(get_setting("REDLIB_DEFAULT_COMMENT_SORT"), Some("best".into()));
}
#[test]
#[sealed_test(env = [("LIBREDDIT_SFW_ONLY", "on")])]
fn test_env_var_legacy() {
assert!(crate::utils::sfw_only())
}
#[test]
#[sealed_test(env = [("REDLIB_DEFAULT_COMMENT_SORT", "top")])]
fn test_env_config_precedence() {
let config_to_write = r#"REDLIB_DEFAULT_COMMENT_SORT = "best""#;
write("redlib.toml", config_to_write).unwrap();
assert_eq!(get_setting("REDLIB_DEFAULT_COMMENT_SORT"), Some("top".into()))
}
#[test]
#[sealed_test(env = [("REDLIB_DEFAULT_COMMENT_SORT", "top")])]
fn test_alt_env_config_precedence() {
let config_to_write = r#"REDLIB_DEFAULT_COMMENT_SORT = "best""#;
write("redlib.toml", config_to_write).unwrap();
assert_eq!(get_setting("REDLIB_DEFAULT_COMMENT_SORT"), Some("top".into()))
}
#[test]
#[sealed_test(env = [("REDLIB_DEFAULT_SUBSCRIPTIONS", "news+bestof")])]
fn test_default_subscriptions() {
assert_eq!(get_setting("REDLIB_DEFAULT_SUBSCRIPTIONS"), Some("news+bestof".into()));
}
#[test]
#[sealed_test(env = [("REDLIB_DEFAULT_FILTERS", "news+bestof")])]
fn test_default_filters() {
assert_eq!(get_setting("REDLIB_DEFAULT_FILTERS"), Some("news+bestof".into()));
}
#[test]
#[sealed_test(env = [("REDLIB_DEFAULT_QUICKLIST", "news+popular")])]
fn test_default_quicklist() {
assert_eq!(get_setting("REDLIB_DEFAULT_QUICKLIST"), Some("news+popular".into()));
}
#[test]
#[sealed_test]
fn test_pushshift() {
let config_to_write = r#"REDLIB_PUSHSHIFT_FRONTEND = "https://api.pushshift.io""#;
write("redlib.toml", config_to_write).unwrap();
assert!(get_setting("REDLIB_PUSHSHIFT_FRONTEND").is_some());
assert_eq!(get_setting("REDLIB_PUSHSHIFT_FRONTEND"), Some("https://api.pushshift.io".into()));
}
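
The precedence the tests above exercise can also be checked from a shell. A sketch assuming the binary is named redlib, as in the flake's ${redlib}/bin/redlib:

# The config file sets a default comment sort...
echo 'REDLIB_DEFAULT_COMMENT_SORT = "best"' > redlib.toml
# ...but an environment variable with the same key takes precedence.
REDLIB_DEFAULT_COMMENT_SORT=top ./redlib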

View File

@ -1,236 +0,0 @@
// Handler for post duplicates.
use crate::client::json;
use crate::server::RequestExt;
use crate::subreddit::{can_access_quarantine, quarantine};
use crate::utils::{error, filter_posts, get_filters, nsfw_landing, parse_post, template, Post, Preferences};
use hyper::{Body, Request, Response};
use rinja::Template;
use serde_json::Value;
use std::borrow::ToOwned;
use std::collections::HashSet;
use std::vec::Vec;
/// `DuplicatesParams` contains the parameters in the URL.
struct DuplicatesParams {
before: String,
after: String,
sort: String,
}
/// `DuplicatesTemplate` defines an Askama template for rendering duplicate
/// posts.
#[derive(Template)]
#[template(path = "duplicates.html")]
struct DuplicatesTemplate {
/// params contains the relevant request parameters.
params: DuplicatesParams,
/// post is the post whose ID is specified in the request URL. Note that
/// this is not necessarily the "original" post.
post: Post,
/// duplicates is the list of posts that, per Reddit, are duplicates of
/// Post above.
duplicates: Vec<Post>,
/// prefs are the user preferences.
prefs: Preferences,
/// url is the request URL.
url: String,
/// num_posts_filtered counts how many posts were filtered from the
/// duplicates list.
num_posts_filtered: u64,
/// all_posts_filtered is true if every duplicate was filtered. This is an
/// edge case but can still happen.
all_posts_filtered: bool,
}
/// Make the GET request to Reddit. It assumes `req` is the appropriate Reddit
/// REST endpoint for enumerating post duplicates.
pub async fn item(req: Request<Body>) -> Result<Response<Body>, String> {
let path: String = format!("{}.json?{}&raw_json=1", req.uri().path(), req.uri().query().unwrap_or_default());
let sub = req.param("sub").unwrap_or_default();
let quarantined = can_access_quarantine(&req, &sub);
// Log the request in debugging mode
#[cfg(debug_assertions)]
req.param("id").unwrap_or_default();
// Send the GET, and await JSON.
match json(path, quarantined).await {
// Process response JSON.
Ok(response) => {
let post = parse_post(&response[0]["data"]["children"][0]).await;
let req_url = req.uri().to_string();
// Return the landing page if Reddit deems this post NSFW and the
// display of NSFW content has been disabled, or if the instance
// is SFW-only.
if post.nsfw && crate::utils::should_be_nsfw_gated(&req, &req_url) {
return Ok(nsfw_landing(req, req_url).await.unwrap_or_default());
}
let filters = get_filters(&req);
let (duplicates, num_posts_filtered, all_posts_filtered) = parse_duplicates(&response[1], &filters).await;
// These are the values for the "before=", "after=", and "sort="
// query params, respectively.
let mut before: String = String::new();
let mut after: String = String::new();
let mut sort: String = String::new();
// FIXME: We have to perform a kludge to work around a Reddit API
// bug.
//
// The JSON object in "data" will never contain a "before" value so
// it is impossible to use it to determine our position in a
// listing. We'll make do by getting the ID of the first post in
// the listing, setting that as our "before" value, and ask Reddit
// to give us a batch of duplicate posts up to that post.
//
// Likewise, if we provide a "before" request in the GET, the
// result won't have an "after" in the JSON, in addition to missing
// the "before." So we will have to use the final post in the list
// of duplicates.
//
// That being said, we'll also need to capture the value of the
// "sort=" parameter as well, so we will need to inspect the
// query key-value pairs anyway.
let l = duplicates.len();
if l > 0 {
// This gets set to true if "before=" is one of the GET params.
let mut have_before: bool = false;
// This gets set to true if "after=" is one of the GET params.
let mut have_after: bool = false;
// Inspect the query key-value pairs. We will need to record
// the value of "sort=", along with checking to see if either
// one of "before=" or "after=" are given.
//
// If we're in the middle of the batch (evidenced by the
// presence of a "before=" or "after=" parameter in the GET),
// then use the first post as the "before" reference.
//
// We'll do this iteratively. Better than with .map_or()
// since a closure will continue to operate on remaining
// elements even after we've determined one of "before=" or
// "after=" (or both) are in the GET request.
//
// In practice, there should only ever be one of "before=" or
// "after=", and never both.
let query_str = req.uri().query().unwrap_or_default().to_string();
if !query_str.is_empty() {
for param in query_str.split('&') {
let kv: Vec<&str> = param.split('=').collect();
if kv.len() < 2 {
// Reject invalid query parameter.
continue;
}
let key: &str = kv[0];
match key {
"before" => have_before = true,
"after" => have_after = true,
"sort" => {
let val: &str = kv[1];
match val {
"new" | "num_comments" => sort = val.to_string(),
_ => {}
}
}
_ => {}
}
}
}
if have_after {
"t3_".clone_into(&mut before);
before.push_str(&duplicates[0].id);
}
// Address potentially missing "after". If "before=" is in the
// GET, then "after" will be null in the JSON (see FIXME
// above).
if have_before {
// The next batch will need to start from one after the
// last post in the current batch.
"t3_".clone_into(&mut after);
after.push_str(&duplicates[l - 1].id);
// Here is where things get terrible. Notice that we
// haven't set `before`. In order to do so, we will
// need to know if there is a batch that exists before
// this one, and doing so requires actually fetching the
// previous batch. In other words, we have to do yet one
// more GET to Reddit. There is no other way to determine
// whether or not to define `before`.
//
// We'll mitigate that by requesting at most one duplicate.
let new_path: String = format!(
"{}.json?before=t3_{}&sort={}&limit=1&raw_json=1",
req.uri().path(),
&duplicates[0].id,
if sort.is_empty() { "num_comments".to_string() } else { sort.clone() }
);
match json(new_path, true).await {
Ok(response) => {
if !response[1]["data"]["children"].as_array().unwrap_or(&Vec::new()).is_empty() {
"t3_".clone_into(&mut before);
before.push_str(&duplicates[0].id);
}
}
Err(msg) => {
// Abort entirely if we couldn't get the previous
// batch.
return error(req, &msg).await;
}
}
} else {
after = response[1]["data"]["after"].as_str().unwrap_or_default().to_string();
}
}
Ok(template(&DuplicatesTemplate {
params: DuplicatesParams { before, after, sort },
post,
duplicates,
prefs: Preferences::new(&req),
url: req_url,
num_posts_filtered,
all_posts_filtered,
}))
}
// Process error.
Err(msg) => {
if msg == "quarantined" || msg == "gated" {
let sub = req.param("sub").unwrap_or_default();
Ok(quarantine(&req, sub, &msg))
} else {
error(req, &msg).await
}
}
}
}
// DUPLICATES
async fn parse_duplicates(json: &Value, filters: &HashSet<String>) -> (Vec<Post>, u64, bool) {
let post_duplicates: &Vec<Value> = &json["data"]["children"].as_array().map_or(Vec::new(), ToOwned::to_owned);
let mut duplicates: Vec<Post> = Vec::new();
// Process each post and place them in the Vec<Post>.
for val in post_duplicates {
let post: Post = parse_post(val).await;
duplicates.push(post);
}
let (num_posts_filtered, all_posts_filtered) = filter_posts(&mut duplicates, filters);
(duplicates, num_posts_filtered, all_posts_filtered)
}

View File

@ -1,241 +0,0 @@
use crate::{
config::{Config, CONFIG},
server::RequestExt,
utils::{ErrorTemplate, Preferences},
};
use build_html::{Container, Html, HtmlContainer, Table};
use hyper::{http::Error, Body, Request, Response};
use once_cell::sync::Lazy;
use rinja::Template;
use serde::{Deserialize, Serialize};
use time::OffsetDateTime;
// This is the local static that is initialized at runtime (technically at
// the first request to the info endpoint) and contains the data served
// by the info endpoint.
pub static INSTANCE_INFO: Lazy<InstanceInfo> = Lazy::new(InstanceInfo::new);
/// Handles instance info endpoint
pub async fn instance_info(req: Request<Body>) -> Result<Response<Body>, String> {
// Retrieve the extension given, or fall back to an empty string, which
// maps to the final match arm: an HTML page.
let extension = req.param("extension").unwrap_or_default();
let response = match extension.as_str() {
"yaml" | "yml" => info_yaml(),
"txt" => info_txt(),
"json" => info_json(),
"html" | "" => info_html(&req),
_ => {
let error = ErrorTemplate {
msg: "Error: Invalid info extension".into(),
prefs: Preferences::new(&req),
url: req.uri().to_string(),
}
.render()
.unwrap();
Response::builder().status(404).header("content-type", "text/html; charset=utf-8").body(error.into())
}
};
response.map_err(|err| format!("{err}"))
}
fn info_json() -> Result<Response<Body>, Error> {
if let Ok(body) = serde_json::to_string(&*INSTANCE_INFO) {
Response::builder().status(200).header("content-type", "application/json").body(body.into())
} else {
Response::builder()
.status(500)
.header("content-type", "text/plain")
.body(Body::from("Error serializing JSON"))
}
}
fn info_yaml() -> Result<Response<Body>, Error> {
if let Ok(body) = serde_yaml::to_string(&*INSTANCE_INFO) {
// We can use `application/yaml` as media type, though there is no guarantee
// that browsers will honor it. But we'll do it anyway. See:
// https://github.com/ietf-wg-httpapi/mediatypes/blob/main/draft-ietf-httpapi-yaml-mediatypes.md#media-type-applicationyaml-application-yaml
Response::builder().status(200).header("content-type", "application/yaml").body(body.into())
} else {
Response::builder()
.status(500)
.header("content-type", "text/plain")
.body(Body::from("Error serializing YAML."))
}
}
fn info_txt() -> Result<Response<Body>, Error> {
Response::builder()
.status(200)
.header("content-type", "text/plain")
.body(Body::from(INSTANCE_INFO.to_string(&StringType::Raw)))
}
fn info_html(req: &Request<Body>) -> Result<Response<Body>, Error> {
let message = MessageTemplate {
title: String::from("Instance information"),
body: INSTANCE_INFO.to_string(&StringType::Html),
prefs: Preferences::new(req),
url: req.uri().to_string(),
}
.render()
.unwrap();
Response::builder().status(200).header("content-type", "text/html; charset=utf8").body(Body::from(message))
}
#[derive(Serialize, Deserialize, Default)]
pub struct InstanceInfo {
package_name: String,
crate_version: String,
pub git_commit: String,
deploy_date: String,
compile_mode: String,
deploy_unix_ts: i64,
config: Config,
}
impl InstanceInfo {
pub fn new() -> Self {
Self {
package_name: env!("CARGO_PKG_NAME").to_string(),
crate_version: env!("CARGO_PKG_VERSION").to_string(),
git_commit: env!("GIT_HASH").to_string(),
deploy_date: OffsetDateTime::now_local().unwrap_or_else(|_| OffsetDateTime::now_utc()).to_string(),
#[cfg(debug_assertions)]
compile_mode: "Debug".into(),
#[cfg(not(debug_assertions))]
compile_mode: "Release".into(),
deploy_unix_ts: OffsetDateTime::now_local().unwrap_or_else(|_| OffsetDateTime::now_utc()).unix_timestamp(),
config: CONFIG.clone(),
}
}
fn to_table(&self) -> String {
let mut container = Container::default();
let convert = |o: &Option<String>| -> String { o.clone().unwrap_or_else(|| "<span class=\"unset\"><i>Unset</i></span>".to_owned()) };
if let Some(banner) = &self.config.banner {
container.add_header(3, "Instance banner");
container.add_raw("<br />");
container.add_paragraph(banner);
container.add_raw("<br />");
}
container.add_table(
Table::from([
["Package name", &self.package_name],
["Crate version", &self.crate_version],
["Git commit", &self.git_commit],
["Deploy date", &self.deploy_date],
["Deploy timestamp", &self.deploy_unix_ts.to_string()],
["Compile mode", &self.compile_mode],
["SFW only", &convert(&self.config.sfw_only)],
["Pushshift frontend", &convert(&self.config.pushshift)],
["RSS enabled", &convert(&self.config.enable_rss)],
["Full URL", &convert(&self.config.full_url)],
//TODO: fallback to crate::config::DEFAULT_PUSHSHIFT_FRONTEND
])
.with_header_row(["Settings"]),
);
container.add_raw("<br />");
container.add_table(
Table::from([
["Hide awards", &convert(&self.config.default_hide_awards)],
["Hide score", &convert(&self.config.default_hide_score)],
["Theme", &convert(&self.config.default_theme)],
["Mascot", &convert(&self.config.default_mascot)],
["Front page", &convert(&self.config.default_front_page)],
["Layout", &convert(&self.config.default_layout)],
["Wide", &convert(&self.config.default_wide)],
["Comment sort", &convert(&self.config.default_comment_sort)],
["Post sort", &convert(&self.config.default_post_sort)],
["Blur Spoiler", &convert(&self.config.default_blur_spoiler)],
["Show NSFW", &convert(&self.config.default_show_nsfw)],
["Blur NSFW", &convert(&self.config.default_blur_nsfw)],
["Use HLS", &convert(&self.config.default_use_hls)],
["Use FFmpeg", &convert(&self.config.default_ffmpeg_video_downloads)],
["Hide HLS notification", &convert(&self.config.default_hide_hls_notification)],
["Subscriptions", &convert(&self.config.default_subscriptions)],
["Filters", &convert(&self.config.default_filters)],
["Quick Access Feeds", &convert(&self.config.default_quicklist)],
])
.with_header_row(["Default preferences"]),
);
container.to_html_string().replace("<th>", "<th colspan=\"2\">")
}
fn to_string(&self, string_type: &StringType) -> String {
match string_type {
StringType::Raw => {
format!(
"Package name: {}\n
Crate version: {}\n
Git commit: {}\n
Deploy date: {}\n
Deploy timestamp: {}\n
Compile mode: {}\n
SFW only: {:?}\n
Pushshift frontend: {:?}\n
RSS enabled: {:?}\n
Full URL: {:?}\n
Config:\n
Banner: {:?}\n
Hide awards: {:?}\n
Hide score: {:?}\n
Default theme: {:?}\n
Default mascot: {:?}\n
Default front page: {:?}\n
Default layout: {:?}\n
Default wide: {:?}\n
Default comment sort: {:?}\n
Default post sort: {:?}\n
Default blur Spoiler: {:?}\n
Default show NSFW: {:?}\n
Default blur NSFW: {:?}\n
Default use HLS: {:?}\n
Default use FFmpeg: {:?}\n
Default hide HLS notification: {:?}\n
Default subscriptions: {:?}\n
Default filters: {:?}\n
Default quicklist: {:?}\n",
self.package_name,
self.crate_version,
self.git_commit,
self.deploy_date,
self.deploy_unix_ts,
self.compile_mode,
self.config.sfw_only,
self.config.pushshift,
self.config.enable_rss,
self.config.full_url,
self.config.banner,
self.config.default_hide_awards,
self.config.default_hide_score,
self.config.default_theme,
self.config.default_mascot,
self.config.default_front_page,
self.config.default_layout,
self.config.default_wide,
self.config.default_comment_sort,
self.config.default_post_sort,
self.config.default_blur_spoiler,
self.config.default_show_nsfw,
self.config.default_blur_nsfw,
self.config.default_use_hls,
self.config.default_ffmpeg_video_downloads,
self.config.default_hide_hls_notification,
self.config.default_subscriptions,
self.config.default_filters,
self.config.default_quicklist,
)
}
StringType::Html => self.to_table(),
}
}
}
enum StringType {
Raw,
Html,
}
#[derive(Template)]
#[template(path = "message.html")]
struct MessageTemplate {
title: String,
body: String,
prefs: Preferences,
url: String,
}

View File

@ -1,13 +0,0 @@
pub mod client;
pub mod config;
pub mod duplicates;
pub mod instance_info;
pub mod oauth;
pub mod oauth_resources;
pub mod post;
pub mod search;
pub mod server;
pub mod settings;
pub mod subreddit;
pub mod user;
pub mod utils;

View File

@ -1,471 +1,69 @@
// Global specifiers
#![forbid(unsafe_code)]
#![allow(clippy::cmp_owned)]
// Import Crates
use actix_web::{get, middleware::NormalizePath, web, App, HttpResponse, HttpServer};
use cached::proc_macro::cached;
use clap::{Arg, ArgAction, Command};
use std::str::FromStr;
use futures_lite::FutureExt;
use hyper::Uri;
use hyper::{header::HeaderValue, Body, Request, Response};
use log::info;
use once_cell::sync::Lazy;
use redsunlib::client::{canonical_path, proxy, CLIENT};
use redsunlib::server::{self, RequestExt};
use redsunlib::utils::{error, redirect, MascotAssets, ThemeAssets};
use redsunlib::{config, duplicates, headers, instance_info, post, search, settings, subreddit, user};
use redsunlib::client::OAUTH_CLIENT;
// Reference local files
mod popular;
mod post;
mod proxy;
mod subreddit;
mod user;
mod utils;
// Create Services
// Required for the manifest to be valid
async fn pwa_logo() -> Result<Response<Body>, String> {
Ok(
Response::builder()
.status(200)
.header("content-type", "image/png")
.body(include_bytes!("../static/logo.png").as_ref().into())
.unwrap_or_default(),
)
async fn style() -> HttpResponse {
HttpResponse::Ok().content_type("text/css").body(include_str!("../static/style.css"))
}
// Required for iOS App Icons
async fn iphone_logo() -> Result<Response<Body>, String> {
Ok(
Response::builder()
.status(200)
.header("content-type", "image/png")
.body(include_bytes!("../static/apple-touch-icon.png").as_ref().into())
.unwrap_or_default(),
)
async fn robots() -> HttpResponse {
HttpResponse::Ok().body(include_str!("../static/robots.txt"))
}
async fn favicon() -> Result<Response<Body>, String> {
Ok(
Response::builder()
.status(200)
.header("content-type", "image/vnd.microsoft.icon")
.header("Cache-Control", "public, max-age=1209600, s-maxage=86400")
.body(include_bytes!("../static/favicon.ico").as_ref().into())
.unwrap_or_default(),
)
#[get("/favicon.ico")]
async fn favicon() -> HttpResponse {
HttpResponse::Ok().body("")
}
async fn font() -> Result<Response<Body>, String> {
Ok(
Response::builder()
.status(200)
.header("content-type", "font/woff2")
.header("Cache-Control", "public, max-age=1209600, s-maxage=86400")
.body(include_bytes!("../static/Inter.var.woff2").as_ref().into())
.unwrap_or_default(),
)
}
#[actix_web::main]
async fn main() -> std::io::Result<()> {
let args: Vec<String> = std::env::args().collect();
let mut address = "0.0.0.0:8080".to_string();
async fn ffmpeg() -> Result<Response<Body>, String> {
Ok(
Response::builder()
.status(200)
.header("content-type", "application/wasm")
.header("Cache-Control", "public, max-age=1209600, s-maxage=86400")
.body(include_bytes!("../static/ffmpeg/ffmpeg-core.wasm").as_ref().into())
.unwrap_or_default(),
)
}
async fn resource(body: &str, content_type: &str, cache: bool) -> Result<Response<Body>, String> {
let mut res = Response::builder()
.status(200)
.header("content-type", content_type)
.body(body.to_string().into())
.unwrap_or_default();
if cache {
if let Ok(val) = HeaderValue::from_str("public, max-age=1209600, s-maxage=86400") {
res.headers_mut().insert("Cache-Control", val);
if args.len() > 1 {
for arg in args {
if arg.starts_with("--address=") || arg.starts_with("-a=") {
let split: Vec<&str> = arg.split("=").collect();
address = split[1].to_string();
}
}
}
Ok(res)
}
async fn style() -> Result<Response<Body>, String> {
let mut res = include_str!("../static/style.css").to_string();
for file in ThemeAssets::iter() {
res.push('\n');
let theme = ThemeAssets::get(file.as_ref()).unwrap();
res.push_str(std::str::from_utf8(theme.data.as_ref()).unwrap());
}
Ok(
Response::builder()
.status(200)
.header("content-type", "text/css")
.header("Cache-Control", "public, max-age=1209600, s-maxage=86400")
.body(res.to_string().into())
.unwrap_or_default(),
)
}
/// Serve mascot
async fn mascot_image(req: Request<Body>) -> Result<Response<Body>, String> {
let res = MascotAssets::get(&req.param("name").unwrap()).unwrap_or(MascotAssets::get("redsunlib.png").unwrap());
Ok(
Response::builder()
.status(200)
.header("content-type", "image/png")
.header("Cache-Control", "public, max-age=1209600, s-maxage=86400")
.body(res.data.into())
.unwrap_or_default(),
)
}
#[tokio::main]
async fn main() {
// Load environment variables
_ = dotenvy::dotenv();
// Initialize logger
pretty_env_logger::init();
let matches = Command::new("Redlib")
.version(env!("CARGO_PKG_VERSION"))
.about("Private front-end for Reddit written in Rust ")
.arg(
Arg::new("redirect-https")
.short('r')
.long("redirect-https")
.help("Redirect all HTTP requests to HTTPS (no longer functional)")
.num_args(0),
)
.arg(
Arg::new("address")
.short('a')
.long("address")
.value_name("ADDRESS")
.help("Sets address to listen on")
.default_value("[::]")
.num_args(1),
)
.arg(
Arg::new("port")
.short('p')
.long("port")
.value_name("PORT")
.env("PORT")
.help("Port to listen on")
.default_value("8080")
.action(ArgAction::Set)
.num_args(1),
)
.arg(
Arg::new("hsts")
.short('H')
.long("hsts")
.value_name("EXPIRE_TIME")
.help("HSTS header to tell browsers that this site should only be accessed over HTTPS")
.default_value("604800")
.num_args(1),
)
.get_matches();
let address = matches.get_one::<String>("address").unwrap();
let port = matches.get_one::<String>("port").unwrap();
let hsts = matches.get_one("hsts").map(|m: &String| m.as_str());
let listener = [address, ":", port].concat();
println!("Starting Redsunlib...");
// Begin constructing a server
let mut app = server::Server::new();
// Force evaluation of statics. In instance_info case, we need to evaluate
// the timestamp so deploy date is accurate - in config case, we need to
// evaluate the configuration to avoid paying penalty at first request -
// in OAUTH case, we need to retrieve the token to avoid paying penalty
// at first request
info!("Evaluating config.");
Lazy::force(&config::CONFIG);
info!("Evaluating instance info.");
Lazy::force(&instance_info::INSTANCE_INFO);
info!("Creating OAUTH client.");
Lazy::force(&OAUTH_CLIENT);
// Define default headers (added to all responses)
app.default_headers = headers! {
"Referrer-Policy" => "no-referrer",
"X-Content-Type-Options" => "nosniff",
"X-Frame-Options" => "DENY",
"Content-Security-Policy" => "default-src 'none'; font-src 'self'; script-src 'self' 'wasm-unsafe-eval' blob:; manifest-src 'self'; media-src 'self' data: blob: about:; style-src 'self' 'unsafe-inline'; base-uri 'none'; img-src 'self' data:; form-action 'self'; frame-ancestors 'none'; connect-src 'self'; worker-src 'self' blob:;"
};
if let Some(expire_time) = hsts {
if let Ok(val) = HeaderValue::from_str(&format!("max-age={expire_time}")) {
app.default_headers.insert("Strict-Transport-Security", val);
}
}
// Read static files
app.at("/style.css").get(|_| style().boxed());
app
.at("/manifest.json")
.get(|_| resource(include_str!("../static/manifest.json"), "application/json", false).boxed());
app.at("/robots.txt").get(|_| {
resource(
if match config::get_setting("REDLIB_ROBOTS_DISABLE_INDEXING") {
Some(val) => val == "on",
None => false,
} {
"User-agent: *\nDisallow: /"
} else {
"User-agent: *\nDisallow: /u/\nDisallow: /user/"
},
"text/plain",
true,
)
.boxed()
});
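	// Illustrative note: with REDLIB_ROBOTS_DISABLE_INDEXING=on, the entire
	// instance is disallowed for crawlers; otherwise only user-profile pages
	// (/u/ and /user/) are excluded from indexing.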
app.at("/favicon.ico").get(|_| favicon().boxed());
app.at("/logo.png").get(|_| pwa_logo().boxed());
app.at("/Inter.var.woff2").get(|_| font().boxed());
app.at("/touch-icon-iphone.png").get(|_| iphone_logo().boxed());
app.at("/apple-touch-icon.png").get(|_| iphone_logo().boxed());
app
.at("/videoUtils.js")
.get(|_| resource(include_str!("../static/videoUtils.js"), "text/javascript", false).boxed());
app
.at("/hls.min.js")
.get(|_| resource(include_str!("../static/hls.min.js"), "text/javascript", false).boxed());
app
.at("/highlighted.js")
.get(|_| resource(include_str!("../static/highlighted.js"), "text/javascript", false).boxed());
app
.at("/check_update.js")
.get(|_| resource(include_str!("../static/check_update.js"), "text/javascript", false).boxed());
app.at("/commits.json").get(|_| async move { proxy_commit_info().await }.boxed());
app.at("/instances.json").get(|_| async move { proxy_instances().await }.boxed());
// FFmpeg
app
.at("/ffmpeg/814.ffmpeg.js")
.get(|_| resource(include_str!("../static/ffmpeg/814.ffmpeg.js"), "text/javascript", false).boxed());
app
.at("/ffmpeg/814.ffmpeg.js.map")
.get(|_| resource(include_str!("../static/ffmpeg/814.ffmpeg.js.map"), "text/javascript", false).boxed());
app
.at("/ffmpeg/ffmpeg-core.js")
.get(|_| resource(include_str!("../static/ffmpeg/ffmpeg-core.js"), "text/javascript", false).boxed());
app.at("/ffmpeg/ffmpeg-core.wasm").get(|_| ffmpeg().boxed());
app
.at("/ffmpeg/ffmpeg-util.js")
.get(|_| resource(include_str!("../static/ffmpeg/ffmpeg-util.js"), "text/javascript", false).boxed());
app
.at("/ffmpeg/ffmpeg.js")
.get(|_| resource(include_str!("../static/ffmpeg/ffmpeg.js"), "text/javascript", false).boxed());
app
.at("/ffmpeg/ffmpeg.js.map")
.get(|_| resource(include_str!("../static/ffmpeg/ffmpeg.js.map"), "text/javascript", false).boxed());
// Proxy media through Redlib
app.at("/vid/:id/:size").get(|r| proxy(r, "https://v.redd.it/{id}/DASH_{size}").boxed());
app.at("/hls/:id/*path").get(|r| proxy(r, "https://v.redd.it/{id}/{path}").boxed());
app.at("/img/*path").get(|r| proxy(r, "https://i.redd.it/{path}").boxed());
app.at("/thumb/:point/:id").get(|r| proxy(r, "https://{point}.thumbs.redditmedia.com/{id}").boxed());
app.at("/emoji/:id/:name").get(|r| proxy(r, "https://emoji.redditmedia.com/{id}/{name}").boxed());
app
.at("/emote/:subreddit_id/:filename")
.get(|r| proxy(r, "https://reddit-econ-prod-assets-permanent.s3.amazonaws.com/asset-manager/{subreddit_id}/{filename}").boxed());
app
.at("/preview/:loc/award_images/:fullname/:id")
.get(|r| proxy(r, "https://{loc}view.redd.it/award_images/{fullname}/{id}").boxed());
app.at("/preview/:loc/:id").get(|r| proxy(r, "https://{loc}view.redd.it/{id}").boxed());
app.at("/style/*path").get(|r| proxy(r, "https://styles.redditmedia.com/{path}").boxed());
app.at("/static/*path").get(|r| proxy(r, "https://www.redditstatic.com/{path}").boxed());
// Browse user profile
app
.at("/u/:name")
.get(|r| async move { Ok(redirect(&format!("/user/{}", r.param("name").unwrap_or_default()))) }.boxed());
app.at("/u/:name/comments/:id/:title").get(|r| post::item(r).boxed());
app.at("/u/:name/comments/:id/:title/:comment_id").get(|r| post::item(r).boxed());
app.at("/user/[deleted]").get(|req| error(req, "User has deleted their account").boxed());
app.at("/user/:name.rss").get(|r| user::rss(r).boxed());
app.at("/user/:name").get(|r| user::profile(r).boxed());
app.at("/user/:name/:listing").get(|r| user::profile(r).boxed());
app.at("/user/:name/comments/:id").get(|r| post::item(r).boxed());
app.at("/user/:name/comments/:id/:title").get(|r| post::item(r).boxed());
app.at("/user/:name/comments/:id/:title/:comment_id").get(|r| post::item(r).boxed());
// Configure settings
app.at("/settings").get(|r| settings::get(r).boxed()).post(|r| settings::set(r).boxed());
app.at("/settings/restore").get(|r| settings::restore(r).boxed());
app.at("/settings/update").get(|r| settings::update(r).boxed());
// Mascots
app.at("/mascot/:name").get(|r| mascot_image(r).boxed());
// RSS Subscriptions
app.at("/r/:sub.rss").get(|r| subreddit::rss(r).boxed());
// Subreddit services
app
.at("/r/:sub")
.get(|r| subreddit::community(r).boxed())
.post(|r| subreddit::add_quarantine_exception(r).boxed());
app
.at("/r/u_:name")
.get(|r| async move { Ok(redirect(&format!("/user/{}", r.param("name").unwrap_or_default()))) }.boxed());
app.at("/r/:sub/subscribe").post(|r| subreddit::subscriptions_filters_quicklists(r).boxed());
app.at("/r/:sub/unsubscribe").post(|r| subreddit::subscriptions_filters_quicklists(r).boxed());
app.at("/r/:sub/filter").post(|r| subreddit::subscriptions_filters_quicklists(r).boxed());
app.at("/r/:sub/unfilter").post(|r| subreddit::subscriptions_filters_quicklists(r).boxed());
app.at("/r/:sub/quicklist").post(|r| subreddit::subscriptions_filters_quicklists(r).boxed());
app.at("/r/:sub/unquicklist").post(|r| subreddit::subscriptions_filters_quicklists(r).boxed());
app.at("/r/:sub/comments/:id").get(|r| post::item(r).boxed());
app.at("/r/:sub/comments/:id/:title").get(|r| post::item(r).boxed());
app.at("/r/:sub/comments/:id/:title/:comment_id").get(|r| post::item(r).boxed());
app.at("/comments/:id").get(|r| post::item(r).boxed());
app.at("/comments/:id/comments").get(|r| post::item(r).boxed());
app.at("/comments/:id/comments/:comment_id").get(|r| post::item(r).boxed());
app.at("/comments/:id/:title").get(|r| post::item(r).boxed());
app.at("/comments/:id/:title/:comment_id").get(|r| post::item(r).boxed());
app.at("/r/:sub/duplicates/:id").get(|r| duplicates::item(r).boxed());
app.at("/r/:sub/duplicates/:id/:title").get(|r| duplicates::item(r).boxed());
app.at("/duplicates/:id").get(|r| duplicates::item(r).boxed());
app.at("/duplicates/:id/:title").get(|r| duplicates::item(r).boxed());
app.at("/r/:sub/search").get(|r| search::find(r).boxed());
app
.at("/r/:sub/w")
.get(|r| async move { Ok(redirect(&format!("/r/{}/wiki", r.param("sub").unwrap_or_default()))) }.boxed());
app
.at("/r/:sub/w/*page")
.get(|r| async move { Ok(redirect(&format!("/r/{}/wiki/{}", r.param("sub").unwrap_or_default(), r.param("wiki").unwrap_or_default()))) }.boxed());
app.at("/r/:sub/wiki").get(|r| subreddit::wiki(r).boxed());
app.at("/r/:sub/wiki/*page").get(|r| subreddit::wiki(r).boxed());
app.at("/r/:sub/about/sidebar").get(|r| subreddit::sidebar(r).boxed());
app.at("/r/:sub/:sort").get(|r| subreddit::community(r).boxed());
// Front page
app.at("/").get(|r| subreddit::community(r).boxed());
// View Reddit wiki
app.at("/w").get(|_| async { Ok(redirect("/wiki")) }.boxed());
app
.at("/w/*page")
.get(|r| async move { Ok(redirect(&format!("/wiki/{}", r.param("page").unwrap_or_default()))) }.boxed());
app.at("/wiki").get(|r| subreddit::wiki(r).boxed());
app.at("/wiki/*page").get(|r| subreddit::wiki(r).boxed());
// Search all of Reddit
app.at("/search").get(|r| search::find(r).boxed());
// Handle about pages
app.at("/about").get(|req| error(req, "About pages aren't added yet").boxed());
// Instance info page
app.at("/info").get(|r| instance_info::instance_info(r).boxed());
app.at("/info.:extension").get(|r| instance_info::instance_info(r).boxed());
	// Handle obfuscated share links.
	// Note that this still forces the server to follow the share link to reach
	// the post, so this may want a confirmation page before it follows the link.
app.at("/r/:sub/s/:id").get(|req: Request<Body>| {
Box::pin(async move {
let sub = req.param("sub").unwrap_or_default();
match req.param("id").as_deref() {
// Share link
Some(id) if (8..12).contains(&id.len()) => match canonical_path(format!("/r/{sub}/s/{id}"), 3).await {
Ok(Some(path)) => Ok(redirect(&path)),
Ok(None) => error(req, "Post ID is invalid. It may point to a post on a community that has been banned.").await,
Err(e) => error(req, &e).await,
},
// Error message for unknown pages
_ => error(req, "Nothing here").await,
}
})
});
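	// Illustrative example (hypothetical ID): GET /r/rust/s/abcdefgh makes the
	// server resolve the share link via canonical_path and redirect the client
	// to the full /r/rust/comments/... permalink.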
app.at("/:id").get(|req: Request<Body>| {
Box::pin(async move {
match req.param("id").as_deref() {
// Sort front page
Some("best" | "hot" | "new" | "top" | "rising" | "controversial") => subreddit::community(req).await,
// Short link for post
Some(id) if (5..8).contains(&id.len()) => match canonical_path(format!("/{id}"), 3).await {
Ok(path_opt) => match path_opt {
Some(path) => Ok(redirect(&path)),
None => error(req, "Post ID is invalid. It may point to a post on a community that has been banned.").await,
},
Err(e) => error(req, &e).await,
},
// Error message for unknown pages
_ => error(req, "Nothing here").await,
}
})
});
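	// Illustrative example (hypothetical ID): GET /abc12 (5 to 7 characters) is
	// resolved through canonical_path to its permalink, while /hot, /new, etc.
	// fall through to the front-page sorts handled above.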
// Default service in case no routes match
app.at("/*").get(|req| error(req, "Nothing here").boxed());
println!("Running Redsunlib v{} on {listener}!", env!("CARGO_PKG_VERSION"));
let server = app.listen(&listener);
// Run this server for... forever!
if let Err(e) = server.await {
eprintln!("Server error: {e}");
}
}
pub async fn proxy_commit_info() -> Result<Response<Body>, String> {
Ok(
Response::builder()
.status(200)
.header("content-type", "application/atom+xml")
.body(Body::from(fetch_commit_info().await))
.unwrap_or_default(),
)
}
#[cached(time = 600)]
async fn fetch_commit_info() -> String {
let uri = Uri::from_str("https://git.stardust.wtf/api/v1/repos/iridium/redsunlib/commits?verification=false&stat=false").expect("Invalid URI");
let resp: Body = CLIENT.get(uri).await.expect("Failed to request git.stardust.wtf").into_body();
	String::from_utf8_lossy(&hyper::body::to_bytes(resp).await.expect("Failed to read body")).into_owned()
}
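// Illustrative note: `#[cached(time = 600)]` memoizes fetch_commit_info's
// result for ten minutes, so git.stardust.wtf is queried at most once per
// interval no matter how often /commits.json is requested.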
pub async fn proxy_instances() -> Result<Response<Body>, String> {
Ok(
Response::builder()
.status(200)
.header("content-type", "application/json")
.body(Body::from(fetch_instances().await))
.unwrap_or_default(),
)
}
#[cached(time = 600)]
async fn fetch_instances() -> String {
let uri = Uri::from_str("https://raw.githubusercontent.com/redlib-org/redlib-instances/refs/heads/main/instances.json").expect("Invalid URI");
let resp: Body = CLIENT.get(uri).await.expect("Failed to request GitHub").into_body();
	String::from_utf8_lossy(&hyper::body::to_bytes(resp).await.expect("Failed to read body")).into_owned()
}
// start http server
println!("Running Libreddit v{} on {}!", env!("CARGO_PKG_VERSION"), address.clone());
HttpServer::new(|| {
App::new()
// TRAILING SLASH MIDDLEWARE
.wrap(NormalizePath::default())
// GENERAL SERVICES
.route("/style.css/", web::get().to(style))
.route("/favicon.ico/", web::get().to(|| HttpResponse::Ok()))
.route("/robots.txt/", web::get().to(robots))
// PROXY SERVICE
.route("/proxy/{url:.*}/", web::get().to(proxy::handler))
// USER SERVICES
.route("/u/{username}/", web::get().to(user::page))
.route("/user/{username}/", web::get().to(user::page))
// SUBREDDIT SERVICES
.route("/r/{sub}/", web::get().to(subreddit::page))
// POPULAR SERVICES
.route("/", web::get().to(popular::page))
// POST SERVICES
.route("/{id:.{5,6}}/", web::get().to(post::short))
.route("/r/{sub}/comments/{id}/{title}/", web::get().to(post::page))
.route("/r/{sub}/comments/{id}/{title}/{comment_id}/", web::get().to(post::comment))
})
.bind(address.clone())
	.unwrap_or_else(|_| panic!("Cannot bind to the address: {}", address))
.run()
.await
}

View File

@ -1,235 +0,0 @@
use std::{collections::HashMap, sync::atomic::Ordering, time::Duration};
use crate::{
client::{CLIENT, OAUTH_CLIENT, OAUTH_IS_ROLLING_OVER, OAUTH_RATELIMIT_REMAINING},
oauth_resources::ANDROID_APP_VERSION_LIST,
};
use base64::{engine::general_purpose, Engine as _};
use hyper::{client, Body, Method, Request};
use log::{debug, error, info, trace};
use serde_json::json;
use tokio::time::{error::Elapsed, timeout};
static REDDIT_ANDROID_OAUTH_CLIENT_ID: &str = "ohXpoqrZYub1kg";
static AUTH_ENDPOINT: &str = "https://www.reddit.com";
// Spoofed client for Android devices
#[derive(Debug, Clone, Default)]
pub struct Oauth {
pub(crate) initial_headers: HashMap<String, String>,
pub(crate) headers_map: HashMap<String, String>,
pub(crate) token: String,
expires_in: u64,
device: Device,
}
impl Oauth {
/// Create a new OAuth client
pub(crate) async fn new() -> Self {
// Call new_internal until it succeeds
loop {
let attempt = Self::new_with_timeout().await;
match attempt {
Ok(Some(oauth)) => {
info!("[✅] Successfully created OAuth client");
return oauth;
}
Ok(None) => {
error!("Failed to create OAuth client. Retrying in 5 seconds...");
continue;
}
Err(duration) => {
error!("Failed to create OAuth client in {duration:?}. Retrying in 5 seconds...");
}
}
}
}
async fn new_with_timeout() -> Result<Option<Self>, Elapsed> {
let mut oauth = Self::default();
timeout(Duration::from_secs(5), oauth.login()).await.map(|result| result.map(|_| oauth))
}
pub(crate) fn default() -> Self {
// Generate a device to spoof
let device = Device::new();
let headers_map = device.headers.clone();
let initial_headers = device.initial_headers.clone();
// For now, just insert headers - no token request
Self {
headers_map,
initial_headers,
token: String::new(),
expires_in: 0,
device,
}
}
async fn login(&mut self) -> Option<()> {
// Construct URL for OAuth token
let url = format!("{AUTH_ENDPOINT}/auth/v2/oauth/access-token/loid");
let mut builder = Request::builder().method(Method::POST).uri(&url);
// Add headers from spoofed client
for (key, value) in &self.initial_headers {
builder = builder.header(key, value);
}
// Set up HTTP Basic Auth - basically just the const OAuth ID's with no password,
// Base64-encoded. https://en.wikipedia.org/wiki/Basic_access_authentication
// This could be constant, but I don't think it's worth it. OAuth ID's can change
// over time and we want to be flexible.
let auth = general_purpose::STANDARD.encode(format!("{}:", self.device.oauth_id));
builder = builder.header("Authorization", format!("Basic {auth}"));
// Set JSON body. I couldn't tell you what this means. But that's what the client sends
let json = json!({
"scopes": ["*","email"]
});
let body = Body::from(json.to_string());
// Build request
let request = builder.body(body).unwrap();
trace!("Sending token request...");
// Send request
let client: &once_cell::sync::Lazy<client::Client<_, Body>> = &CLIENT;
let resp = client.request(request).await.ok()?;
trace!("Received response with status {} and length {:?}", resp.status(), resp.headers().get("content-length"));
		// Parse headers - the loid header _should_ be saved and sent on subsequent token refreshes.
// Technically it's not needed, but it's easy for Reddit API to check for this.
// It's some kind of header that uniquely identifies the device.
// Not worried about the privacy implications, since this is randomly changed
// and really only as privacy-concerning as the OAuth token itself.
if let Some(header) = resp.headers().get("x-reddit-loid") {
self.headers_map.insert("x-reddit-loid".to_owned(), header.to_str().ok()?.to_string());
}
// Same with x-reddit-session
if let Some(header) = resp.headers().get("x-reddit-session") {
self.headers_map.insert("x-reddit-session".to_owned(), header.to_str().ok()?.to_string());
}
trace!("Serializing response...");
// Serialize response
let body_bytes = hyper::body::to_bytes(resp.into_body()).await.ok()?;
let json: serde_json::Value = serde_json::from_slice(&body_bytes).ok()?;
trace!("Accessing relevant fields...");
// Save token and expiry
self.token = json.get("access_token")?.as_str()?.to_string();
self.expires_in = json.get("expires_in")?.as_u64()?;
self.headers_map.insert("Authorization".to_owned(), format!("Bearer {}", self.token));
info!("[✅] Success - Retrieved token \"{}...\", expires in {}", &self.token[..32], self.expires_in);
Some(())
}
}
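// A minimal sketch (not part of the original file) of the Basic-auth value
// built in `login` above: the OAuth client ID with an empty password,
// Base64-encoded per RFC 7617.
#[cfg(test)]
mod basic_auth_sketch {
	use base64::{engine::general_purpose, Engine as _};
	#[test]
	fn encodes_client_id_with_empty_password() {
		let auth = general_purpose::STANDARD.encode(format!("{}:", super::REDDIT_ANDROID_OAUTH_CLIENT_ID));
		assert_eq!(format!("Basic {auth}"), "Basic b2hYcG9xclpZdWIxa2c6");
	}
}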
pub async fn token_daemon() {
// Monitor for refreshing token
loop {
// Get expiry time - be sure to not hold the read lock
let expires_in = { OAUTH_CLIENT.load_full().expires_in };
// sleep for the expiry time minus 2 minutes
let duration = Duration::from_secs(expires_in - 120);
info!("[⏳] Waiting for {duration:?} seconds before refreshing OAuth token...");
tokio::time::sleep(duration).await;
info!("[⌛] {duration:?} Elapsed! Refreshing OAuth token...");
// Refresh token - in its own scope
{
force_refresh_token().await;
}
}
}
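// Worked example (illustrative): with a token lifetime of 86400 seconds, the
// daemon sleeps for 86280 seconds (24 h minus the 2-minute safety margin) and
// then forces a refresh before the old token can expire.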
pub async fn force_refresh_token() {
if OAUTH_IS_ROLLING_OVER.compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst).is_err() {
trace!("Skipping refresh token roll over, already in progress");
return;
}
debug!("Rolling over refresh token. Current rate limit: {}", OAUTH_RATELIMIT_REMAINING.load(Ordering::SeqCst));
let new_client = Oauth::new().await;
OAUTH_CLIENT.swap(new_client.into());
OAUTH_RATELIMIT_REMAINING.store(99, Ordering::SeqCst);
OAUTH_IS_ROLLING_OVER.store(false, Ordering::SeqCst);
}
#[derive(Debug, Clone, Default)]
struct Device {
oauth_id: String,
initial_headers: HashMap<String, String>,
headers: HashMap<String, String>,
}
impl Device {
fn android() -> Self {
// Generate uuid
let uuid = uuid::Uuid::new_v4().to_string();
// Generate random user-agent
let android_app_version = choose(ANDROID_APP_VERSION_LIST).to_string();
let android_version = fastrand::u8(9..=14);
let android_user_agent = format!("Reddit/{android_app_version}/Android {android_version}");
// Android device headers
let headers = HashMap::from([
("Client-Vendor-Id".into(), uuid.clone()),
("X-Reddit-Device-Id".into(), uuid.clone()),
("User-Agent".into(), android_user_agent),
]);
info!("[🔄] Spoofing Android client with headers: {headers:?}, uuid: \"{uuid}\", and OAuth ID \"{REDDIT_ANDROID_OAUTH_CLIENT_ID}\"");
Self {
oauth_id: REDDIT_ANDROID_OAUTH_CLIENT_ID.to_string(),
headers: headers.clone(),
initial_headers: headers,
}
}
fn new() -> Self {
// See https://github.com/redlib-org/redlib/issues/8
Self::android()
}
}
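// Illustrative example (hypothetical values): a spoofed device might send
// User-Agent "Reddit/Version 2024.22.1/Build 1652272/Android 13" together with
// one random UUID reused for both Client-Vendor-Id and X-Reddit-Device-Id.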
fn choose<T: Copy>(list: &[T]) -> T {
*fastrand::choose_multiple(list.iter(), 1)[0]
}
#[tokio::test(flavor = "multi_thread")]
async fn test_oauth_client() {
assert!(!OAUTH_CLIENT.load_full().token.is_empty());
}
#[tokio::test(flavor = "multi_thread")]
async fn test_oauth_client_refresh() {
force_refresh_token().await;
}
#[tokio::test(flavor = "multi_thread")]
async fn test_oauth_token_exists() {
assert!(!OAUTH_CLIENT.load_full().token.is_empty());
}
#[tokio::test(flavor = "multi_thread")]
async fn test_oauth_headers_len() {
assert!(OAUTH_CLIENT.load_full().headers_map.len() >= 3);
}
#[test]
fn test_creating_device() {
Device::new();
}

View File

@ -1,158 +0,0 @@
// This file was generated by scripts/update_oauth_resources.sh
// Rerun scripts/update_oauth_resources.sh to update this file
// Please do not edit manually
// Filled in with real app versions
pub static _IOS_APP_VERSION_LIST: &[&str; 1] = &[""];
pub static ANDROID_APP_VERSION_LIST: &[&str; 150] = &[
"Version 2023.48.0/Build 1319123",
"Version 2023.49.0/Build 1321715",
"Version 2023.49.1/Build 1322281",
"Version 2023.50.0/Build 1332338",
"Version 2023.50.1/Build 1345844",
"Version 2024.02.0/Build 1368985",
"Version 2024.03.0/Build 1379408",
"Version 2024.04.0/Build 1391236",
"Version 2024.05.0/Build 1403584",
"Version 2024.06.0/Build 1418489",
"Version 2024.07.0/Build 1429651",
"Version 2024.08.0/Build 1439531",
"Version 2024.10.0/Build 1470045",
"Version 2024.10.1/Build 1478645",
"Version 2024.11.0/Build 1480707",
"Version 2024.12.0/Build 1494694",
"Version 2024.13.0/Build 1505187",
"Version 2024.14.0/Build 1520556",
"Version 2024.15.0/Build 1536823",
"Version 2024.16.0/Build 1551366",
"Version 2024.17.0/Build 1568106",
"Version 2024.18.0/Build 1577901",
"Version 2024.18.1/Build 1585304",
"Version 2024.19.0/Build 1593346",
"Version 2024.20.0/Build 1612800",
"Version 2024.20.1/Build 1615586",
"Version 2024.20.2/Build 1624969",
"Version 2024.21.0/Build 1631686",
"Version 2024.22.0/Build 1645257",
"Version 2024.22.1/Build 1652272",
"Version 2023.21.0/Build 956283",
"Version 2023.22.0/Build 968223",
"Version 2023.23.0/Build 983896",
"Version 2023.24.0/Build 998541",
"Version 2023.25.0/Build 1014750",
"Version 2023.25.1/Build 1018737",
"Version 2023.26.0/Build 1019073",
"Version 2023.27.0/Build 1031923",
"Version 2023.28.0/Build 1046887",
"Version 2023.29.0/Build 1059855",
"Version 2023.30.0/Build 1078734",
"Version 2023.31.0/Build 1091027",
"Version 2023.32.0/Build 1109919",
"Version 2023.32.1/Build 1114141",
"Version 2023.33.1/Build 1129741",
"Version 2023.34.0/Build 1144243",
"Version 2023.35.0/Build 1157967",
"Version 2023.36.0/Build 1168982",
"Version 2023.37.0/Build 1182743",
"Version 2023.38.0/Build 1198522",
"Version 2023.39.0/Build 1211607",
"Version 2023.39.1/Build 1221505",
"Version 2023.40.0/Build 1221521",
"Version 2023.41.0/Build 1233125",
"Version 2023.41.1/Build 1239615",
"Version 2023.42.0/Build 1245088",
"Version 2023.43.0/Build 1257426",
"Version 2023.44.0/Build 1268622",
"Version 2023.45.0/Build 1281371",
"Version 2023.47.0/Build 1303604",
"Version 2022.42.0/Build 638508",
"Version 2022.43.0/Build 648277",
"Version 2022.44.0/Build 664348",
"Version 2022.45.0/Build 677985",
"Version 2023.01.0/Build 709875",
"Version 2023.02.0/Build 717912",
"Version 2023.03.0/Build 729220",
"Version 2023.04.0/Build 744681",
"Version 2023.05.0/Build 755453",
"Version 2023.06.0/Build 775017",
"Version 2023.07.0/Build 788827",
"Version 2023.07.1/Build 790267",
"Version 2023.08.0/Build 798718",
"Version 2023.09.0/Build 812015",
"Version 2023.09.1/Build 816833",
"Version 2023.10.0/Build 821148",
"Version 2023.11.0/Build 830610",
"Version 2023.12.0/Build 841150",
"Version 2023.13.0/Build 852246",
"Version 2023.14.0/Build 861593",
"Version 2023.14.1/Build 864826",
"Version 2023.15.0/Build 870628",
"Version 2023.16.0/Build 883294",
"Version 2023.16.1/Build 886269",
"Version 2023.17.0/Build 896030",
"Version 2023.17.1/Build 900542",
"Version 2023.18.0/Build 911877",
"Version 2023.19.0/Build 927681",
"Version 2023.20.0/Build 943980",
"Version 2023.20.1/Build 946732",
"Version 2022.20.0/Build 487703",
"Version 2022.21.0/Build 492436",
"Version 2022.22.0/Build 498700",
"Version 2022.23.0/Build 502374",
"Version 2022.23.1/Build 506606",
"Version 2022.24.0/Build 510950",
"Version 2022.24.1/Build 513462",
"Version 2022.25.0/Build 515072",
"Version 2022.25.1/Build 516394",
"Version 2022.25.2/Build 519915",
"Version 2022.26.0/Build 521193",
"Version 2022.27.0/Build 527406",
"Version 2022.27.1/Build 529687",
"Version 2022.28.0/Build 533235",
"Version 2022.30.0/Build 548620",
"Version 2022.31.0/Build 556666",
"Version 2022.31.1/Build 562612",
"Version 2022.32.0/Build 567875",
"Version 2022.33.0/Build 572600",
"Version 2022.34.0/Build 579352",
"Version 2022.35.0/Build 588016",
"Version 2022.35.1/Build 589034",
"Version 2022.36.0/Build 593102",
"Version 2022.37.0/Build 601691",
"Version 2022.38.0/Build 607460",
"Version 2022.39.0/Build 615385",
"Version 2022.39.1/Build 619019",
"Version 2022.40.0/Build 624782",
"Version 2022.41.0/Build 630468",
"Version 2022.41.1/Build 634168",
"Version 2021.39.1/Build 372418",
"Version 2021.41.0/Build 376052",
"Version 2021.42.0/Build 378193",
"Version 2021.43.0/Build 382019",
"Version 2021.44.0/Build 385129",
"Version 2021.45.0/Build 387663",
"Version 2021.46.0/Build 392043",
"Version 2021.47.0/Build 394342",
"Version 2022.10.0/Build 429896",
"Version 2022.1.0/Build 402829",
"Version 2022.11.0/Build 433004",
"Version 2022.12.0/Build 436848",
"Version 2022.13.0/Build 442084",
"Version 2022.13.1/Build 444621",
"Version 2022.14.1/Build 452742",
"Version 2022.15.0/Build 455453",
"Version 2022.16.0/Build 462377",
"Version 2022.17.0/Build 468480",
"Version 2022.18.0/Build 473740",
"Version 2022.19.1/Build 482464",
"Version 2022.2.0/Build 405543",
"Version 2022.3.0/Build 408637",
"Version 2022.4.0/Build 411368",
"Version 2022.5.0/Build 414731",
"Version 2022.6.0/Build 418391",
"Version 2022.6.1/Build 419585",
"Version 2022.6.2/Build 420562",
"Version 2022.7.0/Build 420849",
"Version 2022.8.0/Build 423906",
"Version 2022.9.0/Build 426592",
];
pub static _IOS_OS_VERSION_LIST: &[&str; 1] = &[""];

src/popular.rs Normal file
View File

@ -0,0 +1,55 @@
// CRATES
use crate::utils::{fetch_posts, ErrorTemplate, Params, Post};
use actix_web::{http::StatusCode, web, HttpResponse, Result};
use askama::Template;
// STRUCTS
#[derive(Template)]
#[template(path = "popular.html", escape = "none")]
struct PopularTemplate {
posts: Vec<Post>,
sort: String,
ends: (String, String),
}
// RENDER
async fn render(sub_name: String, sort: Option<String>, ends: (Option<String>, Option<String>)) -> Result<HttpResponse> {
let sorting = sort.unwrap_or("hot".to_string());
let before = ends.1.clone().unwrap_or(String::new()); // If there is an after, there must be a before
// Build the Reddit JSON API url
let url = match ends.0 {
Some(val) => format!("r/{}/{}.json?before={}&count=25", sub_name, sorting, val),
None => match ends.1 {
Some(val) => format!("r/{}/{}.json?after={}&count=25", sub_name, sorting, val),
None => format!("r/{}/{}.json", sub_name, sorting),
},
};
let items_result = fetch_posts(url, String::new()).await;
if items_result.is_err() {
let s = ErrorTemplate {
message: items_result.err().unwrap().to_string(),
}
.render()
.unwrap();
Ok(HttpResponse::Ok().status(StatusCode::NOT_FOUND).content_type("text/html").body(s))
} else {
let items = items_result.unwrap();
let s = PopularTemplate {
posts: items.0,
sort: sorting,
ends: (before, items.1),
}
.render()
.unwrap();
Ok(HttpResponse::Ok().content_type("text/html").body(s))
}
}
// SERVICES
pub async fn page(params: web::Query<Params>) -> Result<HttpResponse> {
render("popular".to_string(), params.sort.clone(), (params.before.clone(), params.after.clone())).await
}

View File

@ -1,257 +1,181 @@
#![allow(clippy::cmp_owned)]
// CRATES
use crate::client::json;
use crate::config::get_setting;
use crate::server::RequestExt;
use crate::subreddit::{can_access_quarantine, quarantine};
use crate::utils::{
error, format_num, get_filters, nsfw_landing, param, parse_post, rewrite_emotes, setting, template, time, val, Author, Awards, Comment, Flair, FlairPart, Post, Preferences,
};
use hyper::{Body, Request, Response};
use crate::utils::{format_num, format_url, request, val, Comment, ErrorTemplate, Flair, Params, Post};
use actix_web::{http::StatusCode, web, HttpResponse, Result};
use once_cell::sync::Lazy;
use regex::Regex;
use rinja::Template;
use std::collections::{HashMap, HashSet};
use async_recursion::async_recursion;
use askama::Template;
use chrono::{TimeZone, Utc};
// STRUCTS
#[derive(Template)]
#[template(path = "post.html")]
#[template(path = "post.html", escape = "none")]
struct PostTemplate {
comments: Vec<Comment>,
post: Post,
sort: String,
prefs: Preferences,
single_thread: bool,
url: String,
url_without_query: String,
comment_query: String,
}
static COMMENT_SEARCH_CAPTURE: Lazy<Regex> = Lazy::new(|| Regex::new(r"\?q=(.*)&type=comment").unwrap());
pub async fn item(req: Request<Body>) -> Result<Response<Body>, String> {
// Build Reddit API path
let mut path: String = format!("{}.json?{}&raw_json=1", req.uri().path(), req.uri().query().unwrap_or_default());
let sub = req.param("sub").unwrap_or_default();
let quarantined = can_access_quarantine(&req, &sub);
let url = req.uri().to_string();
// Set sort to sort query parameter
let sort = param(&path, "sort").unwrap_or_else(|| {
// Grab default comment sort method from Cookies
let default_sort = setting(&req, "comment_sort");
// If there's no sort query but there's a default sort, set sort to default_sort
if default_sort.is_empty() {
String::new()
} else {
path = format!("{}.json?{}&sort={}&raw_json=1", req.uri().path(), req.uri().query().unwrap_or_default(), default_sort);
default_sort
}
});
async fn render(id: String, sort: Option<String>, comment_id: Option<String>) -> Result<HttpResponse> {
// Log the post ID being fetched in debug mode
#[cfg(debug_assertions)]
req.param("id").unwrap_or_default();
dbg!(&id);
let single_thread = req.param("comment_id").is_some();
let highlighted_comment = &req.param("comment_id").unwrap_or_default();
// Handling sort paramater
let sorting: String = sort.unwrap_or("confidence".to_string());
// Build the Reddit JSON API url
let url: String = match comment_id {
None => format!("{}.json?sort={}&raw_json=1", id, sorting),
Some(val) => format!("{}.json?sort={}&comment={}&raw_json=1", id, sorting, val),
};
// Send a request to the url, receive JSON in response
match json(path, quarantined).await {
// Otherwise, grab the JSON output from the request
Ok(response) => {
// Parse the JSON into Post and Comment structs
let post = parse_post(&response[0]["data"]["children"][0]).await;
let req = request(url).await;
let req_url = req.uri().to_string();
			// Return a landing page if Reddit deems this post NSFW but we have
			// disabled the display of NSFW content, or if the instance is SFW-only.
if post.nsfw && crate::utils::should_be_nsfw_gated(&req, &req_url) {
return Ok(nsfw_landing(req, req_url).await.unwrap_or_default());
}
let query_body = match COMMENT_SEARCH_CAPTURE.captures(&url) {
Some(captures) => captures.get(1).unwrap().as_str().replace("%20", " ").replace('+', " "),
None => String::new(),
};
let query_string = format!("q={query_body}&type=comment");
let form = url::form_urlencoded::parse(query_string.as_bytes()).collect::<HashMap<_, _>>();
let query = form.get("q").unwrap().clone().to_string();
let comments = match query.as_str() {
"" => parse_comments(&response[1], &post.permalink, &post.author.name, highlighted_comment, &get_filters(&req), &req),
_ => query_comments(&response[1], &post.permalink, &post.author.name, highlighted_comment, &get_filters(&req), &query, &req),
};
// Use the Post and Comment structs to generate a website to show users
Ok(template(&PostTemplate {
comments,
post,
url_without_query: url.clone().trim_end_matches(&format!("?q={query}&type=comment")).to_string(),
sort,
prefs: Preferences::new(&req),
single_thread,
url: req_url,
comment_query: query,
}))
}
// If the Reddit API returns an error, exit and send error page to user
Err(msg) => {
if msg == "quarantined" || msg == "gated" {
let sub = req.param("sub").unwrap_or_default();
Ok(quarantine(&req, sub, &msg))
} else {
error(req, &msg).await
}
// If the Reddit API returns an error, exit and send error page to user
if req.is_err() {
let s = ErrorTemplate {
message: req.err().unwrap().to_string(),
}
.render()
.unwrap();
return Ok(HttpResponse::Ok().status(StatusCode::NOT_FOUND).content_type("text/html").body(s));
}
// Otherwise, grab the JSON output from the request
let res = req.unwrap();
// Parse the JSON into Post and Comment structs
let post = parse_post(res[0].clone()).await;
let comments = parse_comments(res[1].clone()).await;
// Use the Post and Comment structs to generate a website to show users
let s = PostTemplate {
comments: comments.unwrap(),
post: post.unwrap(),
sort: sorting,
}
.render()
.unwrap();
Ok(HttpResponse::Ok().content_type("text/html").body(s))
}
// SERVICES
pub async fn short(web::Path(id): web::Path<String>, params: web::Query<Params>) -> Result<HttpResponse> {
render(id, params.sort.clone(), None).await
}
pub async fn comment(web::Path((_sub, id, _title, comment_id)): web::Path<(String, String, String, String)>, params: web::Query<Params>) -> Result<HttpResponse> {
render(id, params.sort.clone(), Some(comment_id)).await
}
pub async fn page(web::Path((_sub, id)): web::Path<(String, String)>, params: web::Query<Params>) -> Result<HttpResponse> {
render(id, params.sort.clone(), None).await
}
// UTILITIES
async fn media(data: &serde_json::Value) -> (String, String) {
let post_type: &str;
let url = if !data["preview"]["reddit_video_preview"]["fallback_url"].is_null() {
post_type = "video";
format_url(data["preview"]["reddit_video_preview"]["fallback_url"].as_str().unwrap().to_string()).await
} else if !data["secure_media"]["reddit_video"]["fallback_url"].is_null() {
post_type = "video";
format_url(data["secure_media"]["reddit_video"]["fallback_url"].as_str().unwrap().to_string()).await
} else if data["post_hint"].as_str().unwrap_or("") == "image" {
post_type = "image";
format_url(data["preview"]["images"][0]["source"]["url"].as_str().unwrap().to_string()).await
} else {
post_type = "link";
data["url"].as_str().unwrap().to_string()
};
(post_type.to_string(), url)
}
// POSTS
async fn parse_post(json: serde_json::Value) -> Result<Post, &'static str> {
// Retrieve post (as opposed to comments) from JSON
let post_data: &serde_json::Value = &json["data"]["children"][0];
// Grab UTC time as unix timestamp
let unix_time: i64 = post_data["data"]["created_utc"].as_f64().unwrap().round() as i64;
// Parse post score
let score = post_data["data"]["score"].as_i64().unwrap();
// Determine the type of media along with the media URL
let media = media(&post_data["data"]).await;
// Build a post using data parsed from Reddit post API
let post = Post {
title: val(post_data, "title").await,
community: val(post_data, "subreddit").await,
		body: val(post_data, "selftext_html").await,
author: val(post_data, "author").await,
author_flair: Flair(
val(post_data, "author_flair_text").await,
val(post_data, "author_flair_background_color").await,
val(post_data, "author_flair_text_color").await,
),
url: val(post_data, "permalink").await,
score: format_num(score),
post_type: media.0,
flair: Flair(
val(post_data, "link_flair_text").await,
val(post_data, "link_flair_background_color").await,
if val(post_data, "link_flair_text_color").await == "dark" {
"black".to_string()
} else {
"white".to_string()
},
),
nsfw: post_data["data"]["over_18"].as_bool().unwrap_or(false),
media: media.1,
time: Utc.timestamp(unix_time, 0).format("%b %e %Y %H:%M UTC").to_string(),
};
Ok(post)
}
// COMMENTS
#[async_recursion]
async fn parse_comments(json: serde_json::Value) -> Result<Vec<Comment>, &'static str> {
// Separate the comment JSON into a Vector of comments
let comment_data = json["data"]["children"].as_array().unwrap();
fn parse_comments(json: &serde_json::Value, post_link: &str, post_author: &str, highlighted_comment: &str, filters: &HashSet<String>, req: &Request<Body>) -> Vec<Comment> {
// Parse the comment JSON into a Vector of Comments
let comments = json["data"]["children"].as_array().map_or(Vec::new(), std::borrow::ToOwned::to_owned);
let mut comments: Vec<Comment> = Vec::new();
// For each comment, retrieve the values to build a Comment object
comments
.into_iter()
.map(|comment| {
let data = &comment["data"];
let replies: Vec<Comment> = if data["replies"].is_object() {
parse_comments(&data["replies"], post_link, post_author, highlighted_comment, filters, req)
} else {
Vec::new()
};
build_comment(&comment, data, replies, post_link, post_author, highlighted_comment, filters, req)
})
.collect()
}
fn query_comments(
json: &serde_json::Value,
post_link: &str,
post_author: &str,
highlighted_comment: &str,
filters: &HashSet<String>,
query: &str,
req: &Request<Body>,
) -> Vec<Comment> {
let comments = json["data"]["children"].as_array().map_or(Vec::new(), std::borrow::ToOwned::to_owned);
let mut results = Vec::new();
for comment in comments {
let data = &comment["data"];
// If this comment contains replies, handle those too
if data["replies"].is_object() {
results.append(&mut query_comments(&data["replies"], post_link, post_author, highlighted_comment, filters, query, req));
for comment in comment_data {
let unix_time: i64 = comment["data"]["created_utc"].as_f64().unwrap_or(0.0).round() as i64;
if unix_time == 0 {
continue;
}
let c = build_comment(&comment, data, Vec::new(), post_link, post_author, highlighted_comment, filters, req);
if c.body.to_lowercase().contains(&query.to_lowercase()) {
results.push(c);
}
}
let score = comment["data"]["score"].as_i64().unwrap_or(0);
let body = val(comment, "body_html").await;
results
}
#[allow(clippy::too_many_arguments)]
fn build_comment(
comment: &serde_json::Value,
data: &serde_json::Value,
replies: Vec<Comment>,
post_link: &str,
post_author: &str,
highlighted_comment: &str,
filters: &HashSet<String>,
req: &Request<Body>,
) -> Comment {
let id = val(comment, "id");
let body = if (val(comment, "author") == "[deleted]" && val(comment, "body") == "[removed]") || val(comment, "body") == "[ Removed by Reddit ]" {
format!(
"<div class=\"md\"><p>[removed] — <a href=\"https://{}{post_link}{id}\">view removed comment</a></p></div>",
get_setting("REDLIB_PUSHSHIFT_FRONTEND").unwrap_or_else(|| String::from(crate::config::DEFAULT_PUSHSHIFT_FRONTEND)),
)
} else {
rewrite_emotes(&data["media_metadata"], val(comment, "body_html"))
};
let kind = comment["kind"].as_str().unwrap_or_default().to_string();
let unix_time = data["created_utc"].as_f64().unwrap_or_default();
let (rel_time, created) = time(unix_time);
let edited = data["edited"].as_f64().map_or((String::new(), String::new()), time);
let score = data["score"].as_i64().unwrap_or(0);
// The JSON API only provides comments up to some threshold.
// Further comments have to be loaded by subsequent requests.
// The "kind" value will be "more" and the "count"
// shows how many more (sub-)comments exist in the respective nesting level.
// Note that in certain (seemingly random) cases, the count is simply wrong.
let more_count = data["count"].as_i64().unwrap_or_default();
let awards: Awards = Awards::parse(&data["all_awardings"]);
let parent_kind_and_id = val(comment, "parent_id");
let parent_info = parent_kind_and_id.split('_').collect::<Vec<&str>>();
let highlighted = id == highlighted_comment;
let author = Author {
name: val(comment, "author"),
flair: Flair {
flair_parts: FlairPart::parse(
data["author_flair_type"].as_str().unwrap_or_default(),
data["author_flair_richtext"].as_array(),
data["author_flair_text"].as_str(),
),
text: val(comment, "link_flair_text"),
background_color: val(comment, "author_flair_background_color"),
foreground_color: val(comment, "author_flair_text_color"),
},
distinguished: val(comment, "distinguished"),
};
let is_filtered = filters.contains(&["u_", author.name.as_str()].concat());
// Many subreddits have a default comment posted about the sub's rules etc.
// Many Redlib users do not wish to see this kind of comment by default.
// Reddit does not tell us which users are "bots", so a good heuristic is to
// collapse stickied moderator comments.
let is_moderator_comment = data["distinguished"].as_str().unwrap_or_default() == "moderator";
let is_stickied = data["stickied"].as_bool().unwrap_or_default();
let collapsed = (is_moderator_comment && is_stickied) || is_filtered;
Comment {
id,
kind,
parent_id: parent_info[1].to_string(),
parent_kind: parent_info[0].to_string(),
post_link: post_link.to_string(),
post_author: post_author.to_string(),
body,
author,
score: if data["score_hidden"].as_bool().unwrap_or_default() {
("\u{2022}".to_string(), "Hidden".to_string())
let replies: Vec<Comment> = if comment["data"]["replies"].is_object() {
parse_comments(comment["data"]["replies"].clone()).await.unwrap_or(Vec::new())
} else {
format_num(score)
},
rel_time,
created,
edited,
replies,
highlighted,
awards,
collapsed,
is_filtered,
more_count,
prefs: Preferences::new(req),
Vec::new()
};
comments.push(Comment {
id: val(comment, "id").await,
			body,
author: val(comment, "author").await,
score: format_num(score),
time: Utc.timestamp(unix_time, 0).format("%b %e %Y %H:%M UTC").to_string(),
			replies,
flair: Flair(
val(comment, "author_flair_text").await,
val(comment, "author_flair_background_color").await,
val(comment, "author_flair_text_color").await,
),
});
}
Ok(comments)
}

src/proxy.rs Normal file
View File

@ -0,0 +1,29 @@
use actix_web::{client::Client, web, Error, HttpResponse, Result};
#[cfg(feature = "proxy")]
use base64::decode;
pub async fn handler(web::Path(url): web::Path<String>) -> Result<HttpResponse> {
if cfg!(feature = "proxy") {
let media: String;
#[cfg(not(feature = "proxy"))]
let media = url;
#[cfg(feature = "proxy")]
match decode(url) {
Ok(bytes) => media = String::from_utf8(bytes).unwrap(),
Err(_e) => return Ok(HttpResponse::Ok().body("")),
};
let client = Client::default();
client
.get(media.replace("&amp;", "&"))
.send()
.await
.map_err(Error::from)
.and_then(|res| Ok(HttpResponse::build(res.status()).streaming(res)))
} else {
Ok(HttpResponse::Ok().body(""))
}
}

View File

@ -1,132 +0,0 @@
use std::{collections::HashMap, fmt::Display, io::Write};
use clap::{Parser, ValueEnum};
use common_words_all::{get_top, Language, NgramSize};
use redsunlib::utils::Post;
#[derive(Parser)]
#[command(name = "my_cli")]
#[command(about = "A simple CLI example", long_about = None)]
struct Cli {
#[arg(short = 's', long = "sub")]
sub: String,
#[arg(long = "sort")]
sort: SortOrder,
#[arg(short = 'f', long = "format", value_enum)]
format: Format,
#[arg(short = 'o', long = "output")]
output: Option<String>,
}
#[derive(Debug, Clone, ValueEnum)]
enum SortOrder {
Hot,
Rising,
New,
Top,
Controversial,
}
impl Display for SortOrder {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
SortOrder::Hot => write!(f, "hot"),
SortOrder::Rising => write!(f, "rising"),
SortOrder::New => write!(f, "new"),
SortOrder::Top => write!(f, "top"),
SortOrder::Controversial => write!(f, "controversial"),
}
}
}
#[derive(Debug, Clone, ValueEnum)]
enum Format {
Json,
}
#[tokio::main]
async fn main() {
pretty_env_logger::init();
let cli = Cli::parse();
let (sub, sort, format, output) = (cli.sub, cli.sort, cli.format, cli.output);
let initial = format!("/r/{sub}/{sort}.json?&raw_json=1");
let (posts, mut after) = Post::fetch(&initial, false).await.unwrap();
let mut hashmap = HashMap::new();
hashmap.extend(posts.into_iter().map(|post| (post.id.clone(), post)));
loop {
print!("\r");
let path = format!("/r/{sub}/{sort}.json?sort={sort}&t=&after={after}&raw_json=1");
let (new_posts, new_after) = Post::fetch(&path, false).await.unwrap();
let old_len = hashmap.len();
// convert to hashmap and extend hashmap
let new_posts = new_posts.into_iter().map(|post| (post.id.clone(), post)).collect::<HashMap<String, Post>>();
let len = new_posts.len();
hashmap.extend(new_posts);
if hashmap.len() - old_len < 3 {
break;
}
let x = hashmap.len() - old_len;
after = new_after;
// Print number of posts fetched
print!("Fetched {len} posts (+{x})",);
std::io::stdout().flush().unwrap();
}
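	// Illustrative note: the loop above stops once a page contributes fewer
	// than three previously-unseen posts, i.e. when paging through the listing
	// has effectively been exhausted.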
println!("\n\n");
// additionally search if final count not reached
for word in get_top(Language::English, 10_000, NgramSize::One) {
let mut retrieved_posts_from_search = 0;
let initial = format!("/r/{sub}/search.json?q={word}&restrict_sr=on&include_over_18=on&raw_json=1&sort={sort}");
println!("Grabbing posts with word {word}.");
let (posts, mut after) = Post::fetch(&initial, false).await.unwrap();
hashmap.extend(posts.into_iter().map(|post| (post.id.clone(), post)));
'search: loop {
let path = format!("/r/{sub}/search.json?q={word}&restrict_sr=on&include_over_18=on&raw_json=1&sort={sort}&after={after}");
let (new_posts, new_after) = Post::fetch(&path, false).await.unwrap();
if new_posts.is_empty() || new_after.is_empty() {
println!("No more posts for word {word}");
break 'search;
}
retrieved_posts_from_search += new_posts.len();
let old_len = hashmap.len();
let new_posts = new_posts.into_iter().map(|post| (post.id.clone(), post)).collect::<HashMap<String, Post>>();
let len = new_posts.len();
hashmap.extend(new_posts);
let delta = hashmap.len() - old_len;
after = new_after;
// Print number of posts fetched
println!("Fetched {len} posts (+{delta})",);
if retrieved_posts_from_search > 1000 {
println!("Reached 1000 posts from search");
break 'search;
}
}
// Need to save incrementally. atomic save + move
let tmp_file = output.clone().unwrap_or_else(|| format!("{sub}.json.tmp"));
let perm_file = output.clone().unwrap_or_else(|| format!("{sub}.json"));
write_posts(&hashmap.values().collect(), tmp_file.clone());
// move file
std::fs::rename(tmp_file, perm_file).unwrap();
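		// Illustrative note: writing to a temp file and renaming over the
		// destination is atomic on POSIX filesystems, so a reader never sees a
		// half-written JSON file.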
}
println!("\n\n");
println!("Size of hashmap: {}", hashmap.len());
let posts: Vec<&Post> = hashmap.values().collect();
match format {
Format::Json => {
let filename: String = output.unwrap_or_else(|| format!("{sub}.json"));
write_posts(&posts, filename);
}
}
}
fn write_posts(posts: &Vec<&Post>, filename: String) {
let json = serde_json::to_string(&posts).unwrap();
std::fs::write(filename, json).unwrap();
}

View File

@ -1,190 +0,0 @@
#![allow(clippy::cmp_owned)]
// CRATES
use crate::utils::{self, catch_random, error, filter_posts, format_num, format_url, get_filters, param, redirect, setting, template, val, Post, Preferences};
use crate::{
client::json,
server::RequestExt,
subreddit::{can_access_quarantine, quarantine},
};
use hyper::{Body, Request, Response};
use once_cell::sync::Lazy;
use regex::Regex;
use rinja::Template;
// STRUCTS
struct SearchParams {
q: String,
sort: String,
t: String,
before: String,
after: String,
restrict_sr: String,
typed: String,
}
// STRUCTS
struct Subreddit {
name: String,
url: String,
icon: String,
description: String,
subscribers: (String, String),
}
#[derive(Template)]
#[template(path = "search.html")]
struct SearchTemplate {
posts: Vec<Post>,
subreddits: Vec<Subreddit>,
sub: String,
params: SearchParams,
prefs: Preferences,
url: String,
/// Whether the subreddit itself is filtered.
is_filtered: bool,
/// Whether all fetched posts are filtered (to differentiate between no posts fetched in the first place,
/// and all fetched posts being filtered).
all_posts_filtered: bool,
/// Whether all posts were hidden because they are NSFW (and user has disabled show NSFW)
all_posts_hidden_nsfw: bool,
no_posts: bool,
}
// Regex matched against search queries to determine if they are reddit urls.
static REDDIT_URL_MATCH: Lazy<Regex> = Lazy::new(|| Regex::new(r"^https?://([^\./]+\.)*reddit.com/").unwrap());
// SERVICES
pub async fn find(req: Request<Body>) -> Result<Response<Body>, String> {
// This ensures that during a search, no NSFW posts are fetched at all
let nsfw_results = if setting(&req, "show_nsfw") == "on" && !utils::sfw_only() {
"&include_over_18=on"
} else {
""
};
let uri_path = req.uri().path().replace("+", "%2B");
let path = format!("{}.json?{}{}&raw_json=1", uri_path, req.uri().query().unwrap_or_default(), nsfw_results);
let mut query = param(&path, "q").unwrap_or_default();
query = REDDIT_URL_MATCH.replace(&query, "").to_string();
if query.is_empty() {
return Ok(redirect("/"));
}
if query.starts_with("r/") || query.starts_with("user/") {
return Ok(redirect(&format!("/{query}")));
}
if query.starts_with("u/") {
return Ok(redirect(&format!("/user{}", &query[1..])));
}
let sub = req.param("sub").unwrap_or_default();
let quarantined = can_access_quarantine(&req, &sub);
// Handle random subreddits
if let Ok(random) = catch_random(&sub, "/find").await {
return Ok(random);
}
let typed = param(&path, "type").unwrap_or_default();
let sort = param(&path, "sort").unwrap_or_else(|| "relevance".to_string());
let filters = get_filters(&req);
// If search is not restricted to this subreddit, show other subreddits in search results
let subreddits = if param(&path, "restrict_sr").is_none() {
let mut subreddits = search_subreddits(&query, &typed).await;
subreddits.retain(|s| !filters.contains(s.name.as_str()));
subreddits
} else {
Vec::new()
};
let url = String::from(req.uri().path_and_query().map_or("", |val| val.as_str()));
// If all requested subs are filtered, we don't need to fetch posts.
if sub.split('+').all(|s| filters.contains(s)) {
Ok(template(&SearchTemplate {
posts: Vec::new(),
subreddits,
sub,
params: SearchParams {
q: query.replace('"', "&quot;"),
sort,
t: param(&path, "t").unwrap_or_default(),
before: param(&path, "after").unwrap_or_default(),
after: String::new(),
restrict_sr: param(&path, "restrict_sr").unwrap_or_default(),
typed,
},
prefs: Preferences::new(&req),
url,
is_filtered: true,
all_posts_filtered: false,
all_posts_hidden_nsfw: false,
no_posts: false,
}))
} else {
match Post::fetch(&path, quarantined).await {
Ok((mut posts, after)) => {
let (_, all_posts_filtered) = filter_posts(&mut posts, &filters);
let no_posts = posts.is_empty();
let all_posts_hidden_nsfw = !no_posts && (posts.iter().all(|p| p.flags.nsfw) && setting(&req, "show_nsfw") != "on");
Ok(template(&SearchTemplate {
posts,
subreddits,
sub,
params: SearchParams {
q: query.replace('"', "&quot;"),
sort,
t: param(&path, "t").unwrap_or_default(),
before: param(&path, "after").unwrap_or_default(),
after,
restrict_sr: param(&path, "restrict_sr").unwrap_or_default(),
typed,
},
prefs: Preferences::new(&req),
url,
is_filtered: false,
all_posts_filtered,
all_posts_hidden_nsfw,
no_posts,
}))
}
Err(msg) => {
if msg == "quarantined" || msg == "gated" {
let sub = req.param("sub").unwrap_or_default();
Ok(quarantine(&req, sub, &msg))
} else {
error(req, &msg).await
}
}
}
}
}
async fn search_subreddits(q: &str, typed: &str) -> Vec<Subreddit> {
let limit = if typed == "sr_user" { "50" } else { "3" };
let subreddit_search_path = format!("/subreddits/search.json?q={}&limit={limit}", q.replace(' ', "+"));
// Send a request to the url
json(subreddit_search_path, false).await.unwrap_or_default()["data"]["children"]
.as_array()
.map(ToOwned::to_owned)
.unwrap_or_default()
.iter()
.map(|subreddit| {
// For each subreddit from subreddit list
// Fetch subreddit icon either from the community_icon or icon_img value
let icon = subreddit["data"]["community_icon"].as_str().map_or_else(|| val(subreddit, "icon_img"), ToString::to_string);
Subreddit {
name: val(subreddit, "display_name"),
url: val(subreddit, "url"),
icon: format_url(&icon),
description: val(subreddit, "public_description"),
subscribers: format_num(subreddit["data"]["subscribers"].as_f64().unwrap_or_default() as i64),
}
})
.collect::<Vec<Subreddit>>()
}

View File

@ -1,744 +0,0 @@
#![allow(dead_code)]
#![allow(clippy::cmp_owned)]
use brotli::enc::{BrotliCompress, BrotliEncoderParams};
use cached::proc_macro::cached;
use cookie::Cookie;
use core::f64;
use futures_lite::{future::Boxed, Future, FutureExt};
use hyper::{
body,
body::HttpBody,
header,
service::{make_service_fn, service_fn},
HeaderMap,
};
use hyper::{Body, Method, Request, Response, Server as HyperServer};
use libflate::gzip;
use route_recognizer::{Params, Router};
use std::{
cmp::Ordering,
fmt::Display,
io,
pin::Pin,
result::Result,
str::{from_utf8, Split},
string::ToString,
};
use time::Duration;
use crate::dbg_msg;
type BoxResponse = Pin<Box<dyn Future<Output = Result<Response<Body>, String>> + Send>>;
/// Compressors for the response Body, in ascending order of preference.
#[derive(Copy, Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
enum CompressionType {
Passthrough,
Gzip,
Brotli,
}
/// All browsers support gzip, so if we are given `Accept-Encoding: *`, deliver
/// gzipped-content.
///
/// Brotli would be nice universally, but Safari (iOS, iPhone, macOS) reportedly
/// doesn't support it yet.
const DEFAULT_COMPRESSOR: CompressionType = CompressionType::Gzip;
impl CompressionType {
/// Returns a `CompressionType` given a content coding
/// in [RFC 7231](https://datatracker.ietf.org/doc/html/rfc7231#section-5.3.4)
/// format.
fn parse(s: &str) -> Option<Self> {
let c = match s {
// Compressors we support.
"gzip" => Self::Gzip,
"br" => Self::Brotli,
// The wildcard means that we can choose whatever
// compression we prefer. In this case, use the
// default.
"*" => DEFAULT_COMPRESSOR,
// Compressor not supported.
_ => return None,
};
Some(c)
}
}
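// A minimal sketch (not part of the original file): `parse` accepts only the
// content codings this server can produce and maps the wildcard onto the
// default compressor.
#[cfg(test)]
mod compression_parse_sketch {
	use super::{CompressionType, DEFAULT_COMPRESSOR};
	#[test]
	fn parses_content_codings() {
		assert_eq!(CompressionType::parse("gzip"), Some(CompressionType::Gzip));
		assert_eq!(CompressionType::parse("br"), Some(CompressionType::Brotli));
		assert_eq!(CompressionType::parse("*"), Some(DEFAULT_COMPRESSOR));
		assert_eq!(CompressionType::parse("zstd"), None);
	}
}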
impl Display for CompressionType {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Self::Gzip => write!(f, "gzip"),
Self::Brotli => write!(f, "br"),
Self::Passthrough => Ok(()),
}
}
}
pub struct Route<'a> {
router: &'a mut Router<fn(Request<Body>) -> BoxResponse>,
path: String,
}
pub struct Server {
pub default_headers: HeaderMap,
router: Router<fn(Request<Body>) -> BoxResponse>,
}
#[macro_export]
macro_rules! headers(
{ $($key:expr => $value:expr),+ } => {
{
let mut m = hyper::HeaderMap::new();
$(
if let Ok(val) = hyper::header::HeaderValue::from_str($value) {
m.insert($key, val);
}
)+
m
}
};
);
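// A minimal sketch (not part of the original file): `headers!` expands each
// `key => value` pair into a fallible insert, silently skipping any value that
// is not a valid header value.
#[cfg(test)]
mod headers_macro_sketch {
	#[test]
	fn builds_header_map_and_skips_invalid_values() {
		let m = crate::headers! {
			"X-Frame-Options" => "DENY",
			"Referrer-Policy" => "bad\nvalue"
		};
		assert_eq!(m.get("X-Frame-Options").map(|v| v.to_str().unwrap_or_default()), Some("DENY"));
		assert!(m.get("Referrer-Policy").is_none());
	}
}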
pub trait RequestExt {
fn params(&self) -> Params;
fn param(&self, name: &str) -> Option<String>;
fn set_params(&mut self, params: Params) -> Option<Params>;
fn cookies(&self) -> Vec<Cookie<'_>>;
fn cookie(&self, name: &str) -> Option<Cookie<'_>>;
}
pub trait ResponseExt {
fn cookies(&self) -> Vec<Cookie<'_>>;
fn insert_cookie(&mut self, cookie: Cookie<'_>);
fn remove_cookie(&mut self, name: String);
}
impl RequestExt for Request<Body> {
fn params(&self) -> Params {
self.extensions().get::<Params>().unwrap_or(&Params::new()).clone()
// self.extensions()
// .get::<RequestMeta>()
// .and_then(|meta| meta.route_params())
// .expect("Routerify: No RouteParams added while processing request")
}
fn param(&self, name: &str) -> Option<String> {
self.params().find(name).map(std::borrow::ToOwned::to_owned)
}
fn set_params(&mut self, params: Params) -> Option<Params> {
self.extensions_mut().insert(params)
}
fn cookies(&self) -> Vec<Cookie<'_>> {
self.headers().get("Cookie").map_or(Vec::new(), |header| {
header
.to_str()
.unwrap_or_default()
.split("; ")
.map(|cookie| Cookie::parse(cookie).unwrap_or_else(|_| Cookie::from("")))
.collect()
})
}
fn cookie(&self, name: &str) -> Option<Cookie<'_>> {
self.cookies().into_iter().find(|c| c.name() == name)
}
}
impl ResponseExt for Response<Body> {
fn cookies(&self) -> Vec<Cookie<'_>> {
self.headers().get("Cookie").map_or(Vec::new(), |header| {
header
.to_str()
.unwrap_or_default()
.split("; ")
.map(|cookie| Cookie::parse(cookie).unwrap_or_else(|_| Cookie::from("")))
.collect()
})
}
fn insert_cookie(&mut self, cookie: Cookie<'_>) {
if let Ok(val) = header::HeaderValue::from_str(&cookie.to_string()) {
self.headers_mut().append("Set-Cookie", val);
}
}
fn remove_cookie(&mut self, name: String) {
let mut cookie = Cookie::from(name);
cookie.set_path("/");
cookie.set_max_age(Duration::seconds(1));
if let Ok(val) = header::HeaderValue::from_str(&cookie.to_string()) {
self.headers_mut().append("Set-Cookie", val);
}
}
}
impl Route<'_> {
fn method(&mut self, method: &Method, dest: fn(Request<Body>) -> BoxResponse) -> &mut Self {
self.router.add(&format!("/{}{}", method.as_str(), self.path), dest);
self
}
/// Add an endpoint for `GET` requests
pub fn get(&mut self, dest: fn(Request<Body>) -> BoxResponse) -> &mut Self {
self.method(&Method::GET, dest)
}
/// Add an endpoint for `POST` requests
pub fn post(&mut self, dest: fn(Request<Body>) -> BoxResponse) -> &mut Self {
self.method(&Method::POST, dest)
}
}
impl Default for Server {
fn default() -> Self {
Self::new()
}
}
impl Server {
pub fn new() -> Self {
Self {
default_headers: HeaderMap::new(),
router: Router::new(),
}
}
pub fn at(&mut self, path: &str) -> Route<'_> {
Route {
path: path.to_owned(),
router: &mut self.router,
}
}
pub fn listen(self, addr: &str) -> Boxed<Result<(), hyper::Error>> {
let make_svc = make_service_fn(move |_conn| {
// For correct borrowing, these values need to be borrowed
let router = self.router.clone();
let default_headers = self.default_headers.clone();
// This is the `Service` that will handle the connection.
// `service_fn` is a helper to convert a function that
// returns a Response into a `Service`.
// let shared_router = router.clone();
async move {
Ok::<_, String>(service_fn(move |req: Request<Body>| {
let req_headers = req.headers().clone();
let def_headers = default_headers.clone();
// Remove double slashes and decode encoded slashes
let mut path = req.uri().path().replace("//", "/").replace("%2F", "/");
// Remove trailing slashes
if path != "/" && path.ends_with('/') {
path.pop();
}
// Match the visited path with an added route
match router.recognize(&format!("/{}{}", req.method().as_str(), path)) {
// If a route was configured for this path
Ok(found) => {
let mut parammed = req;
parammed.set_params(found.params().clone());
// Run the route's function
let func = (found.handler().to_owned().to_owned())(parammed);
async move {
match func.await {
Ok(mut res) => {
res.headers_mut().extend(def_headers);
let _ = compress_response(&req_headers, &mut res).await;
Ok(res)
}
Err(msg) => new_boilerplate(def_headers, req_headers, 500, Body::from(msg)).await,
}
}
.boxed()
}
// If there was a routing error
Err(e) => new_boilerplate(def_headers, req_headers, 404, e.into()).boxed(),
}
}))
}
});
// Build SocketAddr from provided address
let address = &addr.parse().unwrap_or_else(|_| panic!("Cannot parse {addr} as address (example format: 0.0.0.0:8080)"));
// Bind server to address specified above. Gracefully shut down if CTRL+C is pressed
let server = HyperServer::bind(address).serve(make_svc).with_graceful_shutdown(async {
// Wait for the CTRL+C signal
tokio::signal::ctrl_c().await.expect("Failed to install CTRL+C signal handler");
});
server.boxed()
}
}
/// Create a boilerplate Response for error conditions. This response will be
/// compressed if requested by client.
async fn new_boilerplate(
default_headers: HeaderMap<header::HeaderValue>,
req_headers: HeaderMap<header::HeaderValue>,
status: u16,
body: Body,
) -> Result<Response<Body>, String> {
match Response::builder().status(status).body(body) {
Ok(mut res) => {
let _ = compress_response(&req_headers, &mut res).await;
res.headers_mut().extend(default_headers.clone());
Ok(res)
}
Err(msg) => Err(msg.to_string()),
}
}
/// Determines the desired compressor based on the Accept-Encoding header.
///
/// This function will honor the [q-value](https://developer.mozilla.org/en-US/docs/Glossary/Quality_values)
/// for each compressor. The q-value is an optional parameter, a decimal value
/// on \[0..1\], to order the compressors by preference. An Accept-Encoding value
/// with no q-values is also accepted.
///
/// Here are [examples](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Accept-Encoding#examples)
/// of valid Accept-Encoding headers.
///
/// ```http
/// Accept-Encoding: gzip
/// Accept-Encoding: gzip, compress, br
/// Accept-Encoding: br;q=1.0, gzip;q=0.8, *;q=0.1
/// ```
#[cached]
fn determine_compressor(accept_encoding: String) -> Option<CompressionType> {
if accept_encoding.is_empty() {
return None;
};
// Keep track of the compressor candidate based on both the client's
// preference and our own. Concrete examples:
//
// 1. "Accept-Encoding: gzip, br" => assuming we like brotli more than
// gzip, and the browser supports brotli, we choose brotli
//
// 2. "Accept-Encoding: gzip;q=0.8, br;q=0.3" => the client has stated a
// preference for gzip over brotli, so we choose gzip
//
// To do this, we need to define a struct which contains the requested
// compressor (abstracted as a CompressionType enum) and the
// q-value. If no q-value is defined for the compressor, we assume one of
// 1.0. We first compare compressor candidates by comparing q-values, and
// then CompressionTypes. We keep track of whatever is the greatest per our
// ordering.
struct CompressorCandidate {
alg: CompressionType,
q: f64,
}
impl Ord for CompressorCandidate {
fn cmp(&self, other: &Self) -> Ordering {
// Compare q-values. Break ties with the
// CompressionType values.
match self.q.total_cmp(&other.q) {
Ordering::Equal => self.alg.cmp(&other.alg),
ord => ord,
}
}
}
impl PartialOrd for CompressorCandidate {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl PartialEq for CompressorCandidate {
fn eq(&self, other: &Self) -> bool {
(self.q == other.q) && (self.alg == other.alg)
}
}
impl Eq for CompressorCandidate {}
// This is the current candidate.
//
// Assume no candidate so far. We do this by assigning the sentinel value
// of negative infinity to the q-value. If this value is negative infinity,
// that means there was no viable compressor candidate.
let mut cur_candidate = CompressorCandidate {
alg: CompressionType::Passthrough,
q: f64::NEG_INFINITY,
};
// This loop reads the requested compressors and keeps track of whichever
// one has the highest priority per our heuristic.
for val in accept_encoding.split(',') {
let mut q: f64 = 1.0;
// The compressor and q-value (if the latter is defined)
// will be delimited by semicolons.
let mut spl: Split<'_, char> = val.split(';');
// Get the compressor. For example, in
// gzip;q=0.8
// this grabs "gzip" in the string. It
// will further validate the compressor against the
// list of those we support. If it is not supported,
// we move onto the next one.
let compressor: CompressionType = match spl.next() {
// CompressionType::parse will return the appropriate enum given
// a string. For example, it will return CompressionType::Gzip
// when given "gzip".
Some(s) => match CompressionType::parse(s.trim()) {
Some(candidate) => candidate,
// We don't support the requested compression algorithm.
None => continue,
},
// We should never get here, but I'm paranoid.
None => continue,
};
// Get the q-value. This might not be defined, in which case assume
// 1.0.
if let Some(s) = spl.next() {
if !(s.len() > 2 && s.starts_with("q=")) {
// If the q-value is malformed, the header is malformed, so
// abort.
return None;
}
match s[2..].parse::<f64>() {
Ok(val) => {
if (0.0..=1.0).contains(&val) {
q = val;
} else {
// If the value is outside [0..1], header is malformed.
// Abort.
return None;
};
}
Err(_) => {
// If this isn't a f64, then assume a malformed header
// value and abort.
return None;
}
}
};
// If new_candidate > cur_candidate, make new_candidate the new
// cur_candidate. But do this safely! It is very possible that
// someone gave us the string "NAN", which (&str).parse::<f64>
// will happily translate to f64::NAN.
let new_candidate = CompressorCandidate { alg: compressor, q };
if let Some(ord) = new_candidate.partial_cmp(&cur_candidate) {
if ord == Ordering::Greater {
cur_candidate = new_candidate;
}
};
}
if cur_candidate.q == f64::NEG_INFINITY {
None
} else {
Some(cur_candidate.alg)
}
}
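// Illustrative outcomes, mirroring the unit tests at the bottom of this file:
//
//     "gzip, br"             => Some(CompressionType::Brotli)
//     "gzip;q=0.8, br;q=0.3" => Some(CompressionType::Gzip)
//     "gzip;q=NAN"           => None (malformed q-value)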
/// Compress the response body, if possible or desirable. The Body will be
/// compressed in place, and a new header Content-Encoding will be set
/// indicating the compression algorithm.
///
/// This function deems the Body eligible for compression if and only if the following
/// conditions are met:
///
/// 1. the HTTP client requests a compression encoding in the Accept-Encoding
/// header (hence the need for the `req_headers`);
///
/// 2. the content encoding corresponds to a compression algorithm we support;
///
/// 3. the media type in the Content-Type response header is text with any
/// subtype (e.g. text/plain) or application/json.
///
/// `compress_response` returns Ok on successful compression, or if not all three
/// conditions above are met. It returns Err if there was a problem decoding
/// any header in either `req_headers` or res, but res will remain intact.
///
/// This function logs errors to stderr, but only in debug mode. No information
/// is logged in release builds.
async fn compress_response(req_headers: &HeaderMap<header::HeaderValue>, res: &mut Response<Body>) -> Result<(), String> {
// Check if the data is eligible for compression.
if let Some(hdr) = res.headers().get(header::CONTENT_TYPE) {
match from_utf8(hdr.as_bytes()) {
Ok(val) => {
let s = val.to_string();
// TODO: better determination of what is eligible for compression
if !(s.starts_with("text/") || s.starts_with("application/json")) {
return Ok(());
};
}
Err(e) => {
dbg_msg!(e);
return Err(e.to_string());
}
};
} else {
// Response declares no Content-Type. Assume for simplicity that it
// cannot be compressed.
return Ok(());
};
// Don't bother if the size of the response body will fit
// within an IP frame (less the bytes that make up the TCP/IP and HTTP
// headers).
if res.body().size_hint().lower() < 1452 {
return Ok(());
};
// Check to see which compressor is requested, and if we can use it.
let accept_encoding: String = match req_headers.get(header::ACCEPT_ENCODING) {
None => return Ok(()), // Client requested no compression.
Some(hdr) => match String::from_utf8(hdr.as_bytes().into()) {
Ok(val) => val,
#[cfg(debug_assertions)]
Err(e) => {
dbg_msg!(e);
return Ok(());
}
#[cfg(not(debug_assertions))]
Err(_) => return Ok(()),
},
};
let compressor: CompressionType = match determine_compressor(accept_encoding) {
Some(c) => c,
None => return Ok(()),
};
// Get the body from the response.
let body_bytes: Vec<u8> = match body::to_bytes(res.body_mut()).await {
Ok(b) => b.to_vec(),
Err(e) => {
dbg_msg!(e);
return Err(e.to_string());
}
};
// Compress!
match compress_body(compressor, body_bytes) {
Ok(compressed) => {
// We get here iff the compression was successful. Replace the body
// with the compressed payload, and add the appropriate
// Content-Encoding header in the response.
res.headers_mut().insert(header::CONTENT_ENCODING, compressor.to_string().parse().unwrap());
*(res.body_mut()) = Body::from(compressed);
}
Err(e) => return Err(e),
}
Ok(())
}
/// Compresses a `Vec<u8>` given a [`CompressionType`].
///
/// This is a helper function for [`compress_response`] and should not be
/// called directly.
// I've chosen a TTL of 600 (== 10 minutes) since compression is
// computationally expensive and we don't want to be doing it often. This is
// larger than client::json's TTL, but that's okay, because if client::json
// returns a new serde_json::Value, body_bytes changes, so this function will
// execute again.
#[cached(size = 100, time = 600, result = true)]
fn compress_body(compressor: CompressionType, body_bytes: Vec<u8>) -> Result<Vec<u8>, String> {
// io::Cursor implements io::Read, required for our encoders.
let mut reader = io::Cursor::new(body_bytes);
let compressed: Vec<u8> = match compressor {
CompressionType::Gzip => {
let mut gz: gzip::Encoder<Vec<u8>> = match gzip::Encoder::new(Vec::new()) {
Ok(gz) => gz,
Err(e) => {
dbg_msg!(e);
return Err(e.to_string());
}
};
match io::copy(&mut reader, &mut gz) {
Ok(_) => match gz.finish().into_result() {
Ok(compressed) => compressed,
Err(e) => {
dbg_msg!(e);
return Err(e.to_string());
}
},
Err(e) => {
dbg_msg!(e);
return Err(e.to_string());
}
}
}
CompressionType::Brotli => {
// We may want to make the compression parameters configurable
// in the future. For now, the defaults are sufficient.
let brotli_params = BrotliEncoderParams::default();
let mut compressed = Vec::<u8>::new();
match BrotliCompress(&mut reader, &mut compressed, &brotli_params) {
Ok(_) => compressed,
Err(e) => {
dbg_msg!(e);
return Err(e.to_string());
}
}
}
// This arm is for any requested compressor for which we don't yet
// have an implementation.
CompressionType::Passthrough => {
let msg = "unsupported compressor".to_string();
return Err(msg);
}
};
Ok(compressed)
}
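// A sketch of the helper's contract (assuming gzip support):
//
//     let compressed = compress_body(CompressionType::Gzip, b"hello".to_vec())?;
//
// Thanks to #[cached], an identical (compressor, body_bytes) pair seen within
// the 10-minute TTL is served from the cache instead of being recompressed.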
#[cfg(test)]
mod tests {
use super::*;
use brotli::Decompressor as BrotliDecompressor;
use futures_lite::future::block_on;
use lipsum::lipsum;
use std::{boxed::Box, io};
#[test]
fn test_determine_compressor() {
// Single compressor given.
assert_eq!(determine_compressor("unsupported".to_string()), None);
assert_eq!(determine_compressor("gzip".to_string()), Some(CompressionType::Gzip));
assert_eq!(determine_compressor("*".to_string()), Some(DEFAULT_COMPRESSOR));
// Multiple compressors.
assert_eq!(determine_compressor("gzip, br".to_string()), Some(CompressionType::Brotli));
assert_eq!(determine_compressor("gzip;q=0.8, br;q=0.3".to_string()), Some(CompressionType::Gzip));
assert_eq!(determine_compressor("br, gzip".to_string()), Some(CompressionType::Brotli));
assert_eq!(determine_compressor("br;q=0.3, gzip;q=0.4".to_string()), Some(CompressionType::Gzip));
// Invalid q-values.
assert_eq!(determine_compressor("gzip;q=NAN".to_string()), None);
}
#[test]
fn test_compress_response() {
// This macro generates an Accept-Encoding header value given any number of
// compressors.
macro_rules! ae_gen {
($x:expr) => {
$x.to_string().as_str()
};
($x:expr, $($y:expr),+) => {
format!("{}, {}", $x.to_string(), ae_gen!($($y),+)).as_str()
};
}
for accept_encoding in [
"*",
ae_gen!(CompressionType::Gzip),
ae_gen!(CompressionType::Brotli, CompressionType::Gzip),
ae_gen!(CompressionType::Brotli),
] {
// Determine what the expected encoding should be based on the
// specific encodings we accept.
let expected_encoding: CompressionType = match determine_compressor(accept_encoding.to_string()) {
Some(s) => s,
None => panic!("determine_compressor(accept_encoding.to_string()) => None"),
};
// Build headers with our Accept-Encoding.
let mut req_headers = HeaderMap::new();
req_headers.insert(header::ACCEPT_ENCODING, header::HeaderValue::from_str(accept_encoding).unwrap());
// Build test response.
let lorem_ipsum: String = lipsum(10000);
let expected_lorem_ipsum = Vec::<u8>::from(lorem_ipsum.as_str());
let mut res = Response::builder()
.status(200)
.header(header::CONTENT_TYPE, "text/plain")
.body(Body::from(lorem_ipsum))
.unwrap();
// Perform the compression.
if let Err(e) = block_on(compress_response(&req_headers, &mut res)) {
panic!("compress_response(&req_headers, &mut res) => Err(\"{e}\")");
};
// If the content was compressed, we expect the Content-Encoding
// header to be modified.
assert_eq!(
res
.headers()
.get(header::CONTENT_ENCODING)
.unwrap_or_else(|| panic!("missing content-encoding header"))
.to_str()
.unwrap_or_else(|_| panic!("failed to convert Content-Encoding header::HeaderValue to String")),
expected_encoding.to_string()
);
// Decompress body and make sure it's equal to what we started
// with.
//
// In the case of no compression, just make sure the "new" body in
// the Response is the same as what we started with.
let body_vec = match block_on(body::to_bytes(res.body_mut())) {
Ok(b) => b.to_vec(),
Err(e) => panic!("{e}"),
};
if expected_encoding == CompressionType::Passthrough {
assert!(body_vec.eq(&expected_lorem_ipsum));
continue;
}
// This provides an io::Read for the underlying body.
let mut body_cursor: io::Cursor<Vec<u8>> = io::Cursor::new(body_vec);
// Match the appropriate decompressor for the given
// expected_encoding.
let mut decoder: Box<dyn io::Read> = match expected_encoding {
CompressionType::Gzip => match gzip::Decoder::new(&mut body_cursor) {
Ok(dgz) => Box::new(dgz),
Err(e) => panic!("{e}"),
},
CompressionType::Brotli => Box::new(BrotliDecompressor::new(body_cursor, expected_lorem_ipsum.len())),
_ => panic!("no decompressor for {}", expected_encoding),
};
let mut decompressed = Vec::<u8>::new();
if let Err(e) = io::copy(&mut decoder, &mut decompressed) {
panic!("{e}");
};
assert!(decompressed.eq(&expected_lorem_ipsum));
}
}
}

View File

@ -1,153 +0,0 @@
#![allow(clippy::cmp_owned)]
use std::collections::HashMap;
// CRATES
use crate::server::ResponseExt;
use crate::utils::{redirect, template, Preferences};
use cookie::Cookie;
use futures_lite::StreamExt;
use hyper::{Body, Request, Response};
use rinja::Template;
use time::{Duration, OffsetDateTime};
// STRUCTS
#[derive(Template)]
#[template(path = "settings.html")]
struct SettingsTemplate {
prefs: Preferences,
url: String,
}
// CONSTANTS
const PREFS: [&str; 22] = [
"theme",
"mascot",
"redsunlib_colorway",
"front_page",
"layout",
"wide",
"comment_sort",
"post_sort",
"blur_spoiler",
"show_nsfw",
"blur_nsfw",
"use_hls",
"ffmpeg_video_downloads",
"hide_hls_notification",
"autoplay_videos",
"hide_sidebar_and_summary",
"hide_banner",
"fixed_navbar",
"hide_awards",
"hide_score",
"disable_visit_reddit_confirmation",
"video_quality",
];
// FUNCTIONS
// Render the settings page with the user's current preferences
pub async fn get(req: Request<Body>) -> Result<Response<Body>, String> {
let url = req.uri().to_string();
Ok(template(&SettingsTemplate {
prefs: Preferences::new(&req),
url,
}))
}
// Set cookies using response "Set-Cookie" header
pub async fn set(req: Request<Body>) -> Result<Response<Body>, String> {
// Split the body into parts
let (parts, mut body) = req.into_parts();
// Grab existing cookies
let _cookies: Vec<Cookie<'_>> = parts
.headers
.get_all("Cookie")
.iter()
.filter_map(|header| Cookie::parse(header.to_str().unwrap_or_default()).ok())
.collect();
// Aggregate the body...
// let whole_body = hyper::body::aggregate(req).await.map_err(|e| e.to_string())?;
let body_bytes = body
.try_fold(Vec::new(), |mut data, chunk| {
data.extend_from_slice(&chunk);
Ok(data)
})
.await
.map_err(|e| e.to_string())?;
let form = url::form_urlencoded::parse(&body_bytes).collect::<HashMap<_, _>>();
let mut response = redirect("/settings");
for &name in &PREFS {
match form.get(name) {
Some(value) => response.insert_cookie(
Cookie::build((name.to_owned(), value.clone()))
.path("/")
.http_only(name != "ffmpeg_video_downloads")
.expires(OffsetDateTime::now_utc() + Duration::weeks(52))
.into(),
),
None => response.remove_cookie(name.to_string()),
};
}
Ok(response)
}
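// For illustration: a submitted form field like theme=dark produces, under
// the builder settings above, a response header roughly of the form
//
//     Set-Cookie: theme=dark; HttpOnly; Path=/; Expires=<now + 52 weeks>
//
// (the exact attribute order and Expires formatting come from the cookie crate).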
fn set_cookies_method(req: Request<Body>, remove_cookies: bool) -> Response<Body> {
// Split the body into parts
let (parts, _) = req.into_parts();
// Grab existing cookies
let _cookies: Vec<Cookie<'_>> = parts
.headers
.get_all("Cookie")
.iter()
.filter_map(|header| Cookie::parse(header.to_str().unwrap_or_default()).ok())
.collect();
let query = parts.uri.query().unwrap_or_default().as_bytes();
let form = url::form_urlencoded::parse(query).collect::<HashMap<_, _>>();
let path = match form.get("redirect") {
Some(value) => format!("/{}", value.replace("%26", "&").replace("%23", "#")),
None => "/".to_string(),
};
let mut response = redirect(&path);
for name in [PREFS.to_vec(), vec!["subscriptions", "filters", "quicklist"]].concat() {
match form.get(name) {
Some(value) => response.insert_cookie(
Cookie::build((name.to_owned(), value.clone()))
.path("/")
.http_only(name != "ffmpeg_video_downloads")
.expires(OffsetDateTime::now_utc() + Duration::weeks(52))
.into(),
),
None => {
if remove_cookies {
response.remove_cookie(name.to_string());
}
}
};
}
response
}
// Set cookies using response "Set-Cookie" header
pub async fn restore(req: Request<Body>) -> Result<Response<Body>, String> {
Ok(set_cookies_method(req, true))
}
pub async fn update(req: Request<Body>) -> Result<Response<Body>, String> {
Ok(set_cookies_method(req, false))
}

View File

@ -1,566 +1,105 @@
#![allow(clippy::cmp_owned)]
use crate::{config, utils};
// CRATES
use crate::utils::{
catch_random, error, filter_posts, format_num, format_url, get_filters, nsfw_landing, param, redirect, rewrite_urls, setting, template, val, Post, Preferences, Subreddit,
};
use crate::{client::json, server::RequestExt, server::ResponseExt};
use cookie::Cookie;
use hyper::{Body, Request, Response};
use rinja::Template;
use once_cell::sync::Lazy;
use regex::Regex;
use time::{macros::format_description, Duration, OffsetDateTime};
use log::trace;
use crate::utils::{fetch_posts, format_num, format_url, request, val, ErrorTemplate, Params, Post, Subreddit};
use actix_web::{http::StatusCode, web, HttpResponse, Result};
use askama::Template;
use std::convert::TryInto;
// STRUCTS
#[derive(Template)]
#[template(path = "subreddit.html")]
#[template(path = "subreddit.html", escape = "none")]
struct SubredditTemplate {
sub: Subreddit,
posts: Vec<Post>,
sort: (String, String),
sort: String,
ends: (String, String),
prefs: Preferences,
url: String,
redirect_url: String,
/// Whether the subreddit itself is filtered.
is_filtered: bool,
/// Whether all fetched posts are filtered (to differentiate between no posts fetched in the first place,
/// and all fetched posts being filtered).
all_posts_filtered: bool,
/// Whether all posts were hidden because they are NSFW (and user has disabled show NSFW)
all_posts_hidden_nsfw: bool,
no_posts: bool,
}
#[derive(Template)]
#[template(path = "wiki.html")]
struct WikiTemplate {
sub: String,
wiki: String,
page: String,
prefs: Preferences,
url: String,
}
#[derive(Template)]
#[template(path = "wall.html")]
struct WallTemplate {
title: String,
sub: String,
msg: String,
prefs: Preferences,
url: String,
}
static GEO_FILTER_MATCH: Lazy<Regex> = Lazy::new(|| Regex::new(r"geo_filter=(?<region>\w+)").unwrap());
// SERVICES
pub async fn community(req: Request<Body>) -> Result<Response<Body>, String> {
// Build Reddit API path
let root = req.uri().path() == "/";
let query = req.uri().query().unwrap_or_default().to_string();
let subscribed = setting(&req, "subscriptions");
let front_page = setting(&req, "front_page");
let post_sort = req.cookie("post_sort").map_or_else(|| "hot".to_string(), |c| c.value().to_string());
let sort = req.param("sort").unwrap_or_else(|| req.param("id").unwrap_or(post_sort));
let sub_name = req.param("sub").unwrap_or(if front_page == "default" || front_page.is_empty() {
if subscribed.is_empty() {
"popular".to_string()
} else {
subscribed.clone()
}
} else {
front_page.clone()
});
let quarantined = can_access_quarantine(&req, &sub_name) || root;
// Handle random subreddits
if let Ok(random) = catch_random(&sub_name, "").await {
return Ok(random);
}
if req.param("sub").is_some() && sub_name.starts_with("u_") {
return Ok(redirect(&["/user/", &sub_name[2..]].concat()));
}
// Request subreddit metadata
let sub = if !sub_name.contains('+') && sub_name != subscribed && sub_name != "popular" && sub_name != "all" {
// Regular subreddit
subreddit(&sub_name, quarantined).await.unwrap_or_default()
} else if sub_name == subscribed {
// Subscription feed
if req.uri().path().starts_with("/r/") {
subreddit(&sub_name, quarantined).await.unwrap_or_default()
} else {
Subreddit::default()
}
} else {
// Multireddit, all, popular
Subreddit {
name: sub_name.clone(),
..Subreddit::default()
}
};
let req_url = req.uri().to_string();
// Return the landing page if this is an NSFW community but the user
// has disabled the display of NSFW content or if the instance is SFW-only.
if sub.nsfw && crate::utils::should_be_nsfw_gated(&req, &req_url) {
return Ok(nsfw_landing(req, req_url).await.unwrap_or_default());
}
let mut params = String::from("&raw_json=1");
if sub_name == "popular" {
let geo_filter = match GEO_FILTER_MATCH.captures(&query) {
Some(geo_filter) => geo_filter["region"].to_string(),
None => "GLOBAL".to_owned(),
};
params.push_str(&format!("&geo_filter={geo_filter}"));
}
let path = format!("/r/{}/{sort}.json?{}{params}", sub_name.replace('+', "%2B"), req.uri().query().unwrap_or_default());
let url = String::from(req.uri().path_and_query().map_or("", |val| val.as_str()));
let redirect_url = url[1..].replace('?', "%3F").replace('&', "%26").replace('+', "%2B");
let filters = get_filters(&req);
// If all requested subs are filtered, we don't need to fetch posts.
if sub_name.split('+').all(|s| filters.contains(s)) {
Ok(template(&SubredditTemplate {
sub,
posts: Vec::new(),
sort: (sort, param(&path, "t").unwrap_or_default()),
ends: (param(&path, "after").unwrap_or_default(), String::new()),
prefs: Preferences::new(&req),
url,
redirect_url,
is_filtered: true,
all_posts_filtered: false,
all_posts_hidden_nsfw: false,
no_posts: false,
}))
} else {
match Post::fetch(&path, quarantined).await {
Ok((mut posts, after)) => {
let (_, all_posts_filtered) = filter_posts(&mut posts, &filters);
let no_posts = posts.is_empty();
let all_posts_hidden_nsfw = !no_posts && (posts.iter().all(|p| p.flags.nsfw) && setting(&req, "show_nsfw") != "on");
if sort == "new" {
posts.sort_by(|a, b| b.created_ts.cmp(&a.created_ts));
posts.sort_by(|a, b| b.flags.stickied.cmp(&a.flags.stickied));
}
Ok(template(&SubredditTemplate {
sub,
posts,
sort: (sort, param(&path, "t").unwrap_or_default()),
ends: (param(&path, "after").unwrap_or_default(), after),
prefs: Preferences::new(&req),
url,
redirect_url,
is_filtered: false,
all_posts_filtered,
all_posts_hidden_nsfw,
no_posts,
}))
}
Err(msg) => match msg.as_str() {
"quarantined" | "gated" => Ok(quarantine(&req, sub_name, &msg)),
"private" => error(req, &format!("r/{sub_name} is a private community")).await,
"banned" => error(req, &format!("r/{sub_name} has been banned from Reddit")).await,
_ => error(req, &msg).await,
},
}
}
#[allow(dead_code)]
pub async fn page(web::Path(sub): web::Path<String>, params: web::Query<Params>) -> Result<HttpResponse> {
render(sub, params.sort.clone(), (params.before.clone(), params.after.clone())).await
}
pub fn quarantine(req: &Request<Body>, sub: String, restriction: &str) -> Response<Body> {
let wall = WallTemplate {
title: format!("r/{sub} is {restriction}"),
msg: "Please click the button below to continue to this subreddit.".to_string(),
url: req.uri().to_string(),
sub,
prefs: Preferences::new(req),
};
Response::builder()
.status(403)
.header("content-type", "text/html")
.body(wall.render().unwrap_or_default().into())
.unwrap_or_default()
}
pub async fn add_quarantine_exception(req: Request<Body>) -> Result<Response<Body>, String> {
let subreddit = req.param("sub").ok_or("Invalid URL")?;
let redir = param(&format!("?{}", req.uri().query().unwrap_or_default()), "redir").ok_or("Invalid URL")?;
let mut response = redirect(&redir);
response.insert_cookie(
Cookie::build((&format!("allow_quaran_{}", subreddit.to_lowercase()), "true"))
.path("/")
.http_only(true)
.expires(cookie::Expiration::Session)
.into(),
);
Ok(response)
}
pub fn can_access_quarantine(req: &Request<Body>, sub: &str) -> bool {
// Determine if the subreddit can be accessed
setting(req, &format!("allow_quaran_{}", sub.to_lowercase())).parse().unwrap_or_default()
}
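// e.g. a cookie allow_quaran_edgy=true (as set by add_quarantine_exception
// above) lets requests for r/edgy pass this check.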
// Sub, filter, unfilter, quicklist, unquicklist or unsub by setting subscription cookie using response "Set-Cookie" header
pub async fn subscriptions_filters_quicklists(req: Request<Body>) -> Result<Response<Body>, String> {
let sub = req.param("sub").unwrap_or_default();
let action: Vec<String> = req.uri().path().split('/').map(String::from).collect();
// Handle random subreddits
if sub == "random" || sub == "randnsfw" {
if action.contains(&"filter".to_string()) || action.contains(&"unfilter".to_string()) {
return Err("Can't filter random subreddit!".to_string());
}
return Err("Can't subscribe to random subreddit!".to_string());
}
let query = req.uri().query().unwrap_or_default().to_string();
let preferences = Preferences::new(&req);
let mut sub_list = preferences.subscriptions;
let mut filters = preferences.filters;
let mut quicklist = preferences.quicklist;
// Retrieve list of posts for these subreddits to extract display names
let posts = json(format!("/r/{sub}/hot.json?raw_json=1"), true).await;
let display_lookup: Vec<(String, &str)> = match &posts {
Ok(posts) => posts["data"]["children"]
.as_array()
.map(|list| {
list
.iter()
.map(|post| {
let display_name = post["data"]["subreddit"].as_str().unwrap_or_default();
(display_name.to_lowercase(), display_name)
})
.collect::<Vec<_>>()
})
.unwrap_or_default(),
Err(_) => vec![],
};
// Find each subreddit name (separated by '+') in sub parameter
for part in sub.split('+').filter(|x| x != &"") {
// Retrieve display name for the subreddit
let display;
let part = if part.starts_with("u_") {
part
} else if let Some(&(_, display)) = display_lookup.iter().find(|x| x.0 == part.to_lowercase()) {
// This is already known, so it doesn't require a separate request
display
} else {
// This subreddit display name isn't known, retrieve it
let path: String = format!("/r/{part}/about.json?raw_json=1");
display = json(path, true).await;
match &display {
Ok(display) => display["data"]["display_name"].as_str(),
Err(_) => None,
}
.unwrap_or(part)
};
// Modify sub list based on action
if action.contains(&"subscribe".to_string()) && !sub_list.contains(&part.to_owned()) {
// Add each sub name to the subscribed list
sub_list.push(part.to_owned());
filters.retain(|s| s.to_lowercase() != part.to_lowercase());
// Reorder sub names alphabetically
sub_list.sort_by_key(|a| a.to_lowercase());
filters.sort_by_key(|a| a.to_lowercase());
} else if action.contains(&"unsubscribe".to_string()) {
// Remove sub name from subscribed list
sub_list.retain(|s| s.to_lowercase() != part.to_lowercase());
} else if action.contains(&"filter".to_string()) && !filters.contains(&part.to_owned()) {
// Add each sub name to the filtered list
filters.push(part.to_owned());
sub_list.retain(|s| s.to_lowercase() != part.to_lowercase());
// Reorder sub names alphabetically
filters.sort_by_key(|a| a.to_lowercase());
sub_list.sort_by_key(|a| a.to_lowercase());
} else if action.contains(&"unfilter".to_string()) {
// Remove sub name from filtered list
filters.retain(|s| s.to_lowercase() != part.to_lowercase());
} else if action.contains(&"quicklist".to_string()) && !quicklist.contains(&part.to_owned()) {
// Add each sub name to the quicklist
quicklist.push(part.to_owned());
// Reorder quicklist alphabetically
quicklist.sort_by_key(|a| a.to_lowercase());
} else if action.contains(&"unquicklist".to_string()) {
// Remove sub name from the quicklist
quicklist.retain(|s| s.to_lowercase() != part.to_lowercase());
}
}
// Redirect back to subreddit
// check for redirect parameter if unsubscribing/unfiltering from outside sidebar
let path = if let Some(redirect_path) = param(&format!("?{query}"), "redirect") {
format!("/{redirect_path}")
} else {
format!("/r/{sub}")
};
let mut response = redirect(&path);
// Delete cookie if empty, else set
if sub_list.is_empty() {
response.remove_cookie("subscriptions".to_string());
} else {
response.insert_cookie(
Cookie::build(("subscriptions", sub_list.join("+")))
.path("/")
.http_only(true)
.expires(OffsetDateTime::now_utc() + Duration::weeks(52))
.into(),
);
}
if filters.is_empty() {
response.remove_cookie("filters".to_string());
} else {
response.insert_cookie(
Cookie::build(("filters", filters.join("+")))
.path("/")
.http_only(true)
.expires(OffsetDateTime::now_utc() + Duration::weeks(52))
.into(),
);
}
if quicklist.is_empty() {
response.remove_cookie("quicklist".to_string());
} else {
response.insert_cookie(
Cookie::build(("quicklist", quicklist.join("+")))
.path("/")
.http_only(true)
.expires(OffsetDateTime::now_utc() + Duration::weeks(52))
.into(),
);
}
Ok(response)
}
pub async fn wiki(req: Request<Body>) -> Result<Response<Body>, String> {
let sub = req.param("sub").unwrap_or_else(|| "reddit.com".to_string());
let quarantined = can_access_quarantine(&req, &sub);
// Handle random subreddits
if let Ok(random) = catch_random(&sub, "/wiki").await {
return Ok(random);
}
let page = req.param("page").unwrap_or_else(|| "index".to_string());
let path: String = format!("/r/{sub}/wiki/{page}.json?raw_json=1");
let url = req.uri().to_string();
match json(path, quarantined).await {
Ok(response) => Ok(template(&WikiTemplate {
sub,
wiki: rewrite_urls(response["data"]["content_html"].as_str().unwrap_or("<h3>Wiki not found</h3>")),
page,
prefs: Preferences::new(&req),
url,
})),
Err(msg) => {
if msg == "quarantined" || msg == "gated" {
Ok(quarantine(&req, sub, &msg))
} else {
error(req, &msg).await
}
}
}
}
pub async fn sidebar(req: Request<Body>) -> Result<Response<Body>, String> {
let sub = req.param("sub").unwrap_or_else(|| "reddit.com".to_string());
let quarantined = can_access_quarantine(&req, &sub);
// Handle random subreddits
if let Ok(random) = catch_random(&sub, "/about/sidebar").await {
return Ok(random);
}
pub async fn render(sub_name: String, sort: Option<String>, ends: (Option<String>, Option<String>)) -> Result<HttpResponse> {
let sorting = sort.unwrap_or("hot".to_string());
let before = ends.1.clone().unwrap_or(String::new()); // If there is an after, there must be a before
// Build the Reddit JSON API url
let path: String = format!("/r/{sub}/about.json?raw_json=1");
let url = req.uri().to_string();
let url = match ends.0 {
Some(val) => format!("r/{}/{}.json?before={}&count=25", sub_name, sorting, val),
None => match ends.1 {
Some(val) => format!("r/{}/{}.json?after={}&count=25", sub_name, sorting, val),
None => format!("r/{}/{}.json", sub_name, sorting),
},
};
// Send a request to the url
match json(path, quarantined).await {
// If success, receive JSON in response
Ok(response) => Ok(template(&WikiTemplate {
wiki: rewrite_urls(&val(&response, "description_html")),
// wiki: format!(
// "{}<hr><h1>Moderators</h1><br><ul>{}</ul>",
// rewrite_urls(&val(&response, "description_html"),
// moderators(&sub, quarantined).await.unwrap_or(vec!["Could not fetch moderators".to_string()]).join(""),
// ),
sub,
page: "Sidebar".to_string(),
prefs: Preferences::new(&req),
url,
})),
Err(msg) => {
if msg == "quarantined" || msg == "gated" {
Ok(quarantine(&req, sub, &msg))
} else {
error(req, &msg).await
}
let sub_result = if !&sub_name.contains("+") {
subreddit(&sub_name).await
} else {
Ok(Subreddit::default())
};
let items_result = fetch_posts(url, String::new()).await;
if sub_result.is_err() || items_result.is_err() {
let s = ErrorTemplate {
message: sub_result.err().unwrap().to_string(),
}
.render()
.unwrap();
Ok(HttpResponse::Ok().status(StatusCode::NOT_FOUND).content_type("text/html").body(s))
} else {
let sub = sub_result.unwrap();
let items = items_result.unwrap();
let s = SubredditTemplate {
sub: sub,
posts: items.0,
sort: sorting,
ends: (before, items.1),
}
.render()
.unwrap();
Ok(HttpResponse::Ok().content_type("text/html").body(s))
}
}
// pub async fn moderators(sub: &str, quarantined: bool) -> Result<Vec<String>, String> {
// // Retrieve and format the html for the moderators list
// Ok(
// moderators_list(sub, quarantined)
// .await?
// .iter()
// .map(|m| format!("<li><a style=\"color: var(--accent)\" href=\"/u/{name}\">{name}</a></li>", name = m))
// .collect(),
// )
// }
// async fn moderators_list(sub: &str, quarantined: bool) -> Result<Vec<String>, String> {
// // Build the moderator list URL
// let path: String = format!("/r/{}/about/moderators.json?raw_json=1", sub);
// // Retrieve response
// json(path, quarantined).await.map(|response| {
// // Traverse json tree and format into list of strings
// response["data"]["children"]
// .as_array()
// .unwrap_or(&Vec::new())
// .iter()
// .filter_map(|moderator| {
// let name = moderator["name"].as_str().unwrap_or_default();
// if name.is_empty() {
// None
// } else {
// Some(name.to_string())
// }
// })
// .collect::<Vec<_>>()
// })
// }
// SUBREDDIT
async fn subreddit(sub: &str, quarantined: bool) -> Result<Subreddit, String> {
async fn subreddit(sub: &String) -> Result<Subreddit, &'static str> {
// Build the Reddit JSON API url
let path: String = format!("/r/{sub}/about.json?raw_json=1");
let url: String = format!("r/{}/about.json?raw_json=1", sub);
// Send a request to the url
let res = json(path, quarantined).await?;
// Send a request to the url, receive JSON in response
let req = request(url).await;
trace!("Subreddit info from r/{} : {}", sub, res["data"]);
// Metadata regarding the subreddit
let members: i64 = res["data"]["subscribers"].as_u64().unwrap_or_default() as i64;
let active: i64 = res["data"]["accounts_active"].as_u64().unwrap_or_default() as i64;
// Grab creation date as unix timestamp
let created_unix = res["data"]["created"].as_f64().unwrap_or(0.0).round() as i64;
let created = OffsetDateTime::from_unix_timestamp(created_unix).unwrap_or(OffsetDateTime::UNIX_EPOCH);
// Fetch subreddit icon either from the community_icon or icon_img value
let community_icon: &str = res["data"]["community_icon"].as_str().unwrap_or_default();
let icon = if community_icon.is_empty() { val(&res, "icon_img") } else { community_icon.to_string() };
// Fetch subreddit banner either from the banner_background_image or banner_img value
let banner_background_image: &str = res["data"]["banner_background_image"].as_str().unwrap_or_default();
let banner = if banner_background_image.is_empty() { val(&res, "banner_img") } else { banner_background_image.to_string() };
Ok(Subreddit {
name: val(&res, "display_name"),
title: val(&res, "title"),
description: val(&res, "public_description"),
info: rewrite_urls(&val(&res, "description_html")),
// moderators: moderators_list(sub, quarantined).await.unwrap_or_default(),
icon: format_url(&icon),
banner: format_url(&banner),
members: format_num(members),
active: format_num(active),
created: created.format(format_description!("[month repr:short] [day] '[year repr:last_two]")).unwrap_or_default(),
wiki: res["data"]["wiki_enabled"].as_bool().unwrap_or_default(),
nsfw: res["data"]["over18"].as_bool().unwrap_or_default(),
})
}
pub async fn rss(req: Request<Body>) -> Result<Response<Body>, String> {
if config::get_setting("REDLIB_ENABLE_RSS").is_none() {
return Ok(error(req, "RSS is disabled on this instance.").await.unwrap_or_default());
// If the Reddit API returns an error, exit this function
if req.is_err() {
return Err(req.err().unwrap());
}
use hyper::header::CONTENT_TYPE;
use rss::{ChannelBuilder, Item};
// Otherwise, grab the JSON output from the request
let res = req.unwrap();
// Get subreddit
let sub = req.param("sub").unwrap_or_default();
let post_sort = req.cookie("post_sort").map_or_else(|| "hot".to_string(), |c| c.value().to_string());
let sort = req.param("sort").unwrap_or_else(|| req.param("id").unwrap_or(post_sort));
// Metadata regarding the subreddit
let members = res["data"]["subscribers"].as_u64().unwrap_or(0);
let active = res["data"]["accounts_active"].as_u64().unwrap_or(0);
// Get path
let path = format!("/r/{sub}/{sort}.json?{}", req.uri().query().unwrap_or_default());
// Fetch subreddit icon either from the community_icon or icon_img value
let community_icon: &str = res["data"]["community_icon"].as_str().unwrap().split("?").collect::<Vec<&str>>()[0];
let icon = if community_icon.is_empty() {
val(&res, "icon_img").await
} else {
community_icon.to_string()
};
// Get subreddit data
let subreddit = subreddit(&sub, false).await?;
let sub = Subreddit {
name: val(&res, "display_name").await,
title: val(&res, "title").await,
description: val(&res, "public_description").await,
info: val(&res, "description_html").await.replace("\\", ""),
icon: format_url(icon).await,
members: format_num(members.try_into().unwrap()),
active: format_num(active.try_into().unwrap()),
};
// Get posts
let (posts, _) = Post::fetch(&path, false).await?;
// Build the RSS feed
let channel = ChannelBuilder::default()
.title(&subreddit.title)
.description(&subreddit.description)
.items(
posts
.into_iter()
.map(|post| Item {
title: Some(post.title.to_string()),
link: Some(utils::get_post_url(&post)),
author: Some(post.author.name),
content: Some(rewrite_urls(&post.body)),
description: Some(format!(
"<a href='{}{}'>Comments</a>",
config::get_setting("REDLIB_FULL_URL").unwrap_or_default(),
post.permalink
)),
..Default::default()
})
.collect::<Vec<_>>(),
)
.build();
// Serialize the feed to RSS
let body = channel.to_string().into_bytes();
// Create the HTTP response
let mut res = Response::new(Body::from(body));
res.headers_mut().insert(CONTENT_TYPE, hyper::header::HeaderValue::from_static("application/rss+xml"));
Ok(res)
}
#[tokio::test(flavor = "multi_thread")]
async fn test_fetching_subreddit() {
let subreddit = subreddit("rust", false).await;
assert!(subreddit.is_ok());
}
#[tokio::test(flavor = "multi_thread")]
async fn test_gated_and_quarantined() {
let quarantined = subreddit("edgy", true).await;
assert!(quarantined.is_ok());
let gated = subreddit("drugs", true).await;
assert!(gated.is_ok());
Ok(sub)
}

View File

@ -1,192 +1,89 @@
#![allow(clippy::cmp_owned)]
// CRATES
use crate::client::json;
use crate::server::RequestExt;
use crate::utils::{error, filter_posts, format_url, get_filters, nsfw_landing, param, setting, template, Post, Preferences, User};
use crate::{config, utils};
use hyper::{Body, Request, Response};
use log::trace;
use rinja::Template;
use time::{macros::format_description, OffsetDateTime};
use crate::utils::{fetch_posts, format_url, nested_val, request, ErrorTemplate, Params, Post, User};
use actix_web::{http::StatusCode, web, HttpResponse, Result};
use askama::Template;
use chrono::{TimeZone, Utc};
// STRUCTS
#[derive(Template)]
#[template(path = "user.html")]
#[template(path = "user.html", escape = "none")]
struct UserTemplate {
user: User,
posts: Vec<Post>,
sort: (String, String),
sort: String,
ends: (String, String),
/// "overview", "comments", or "submitted"
listing: String,
prefs: Preferences,
url: String,
redirect_url: String,
/// Whether the user themself is filtered.
is_filtered: bool,
/// Whether all fetched posts are filtered (to differentiate between no posts fetched in the first place,
/// and all fetched posts being filtered).
all_posts_filtered: bool,
/// Whether all posts were hidden because they are NSFW (and user has disabled show NSFW)
all_posts_hidden_nsfw: bool,
no_posts: bool,
}
// FUNCTIONS
pub async fn profile(req: Request<Body>) -> Result<Response<Body>, String> {
let listing = req.param("listing").unwrap_or_else(|| "overview".to_string());
async fn render(username: String, sort: Option<String>, ends: (Option<String>, Option<String>)) -> Result<HttpResponse> {
let sorting = sort.unwrap_or("new".to_string());
// Build the Reddit JSON API path
let path = format!(
"/user/{}/{listing}.json?{}&raw_json=1",
req.param("name").unwrap_or_else(|| "reddit".to_string()),
req.uri().query().unwrap_or_default(),
);
let url = String::from(req.uri().path_and_query().map_or("", |val| val.as_str()));
let redirect_url = url[1..].replace('?', "%3F").replace('&', "%26");
let before = ends.1.clone().unwrap_or(String::new()); // If there is an after, there must be a before
// Retrieve other variables from Redlib request
let sort = param(&path, "sort").unwrap_or_default();
let username = req.param("name").unwrap_or_default();
// Build the Reddit JSON API url
let url = match ends.0 {
Some(val) => format!("user/{}/.json?sort={}&before={}&count=25&raw_json=1", username, sorting, val),
None => match ends.1 {
Some(val) => format!("user/{}/.json?sort={}&after={}&count=25&raw_json=1", username, sorting, val),
None => format!("user/{}/.json?sort={}&raw_json=1", username, sorting),
},
};
// Retrieve info from the user's about page.
let user = user(&username).await.unwrap_or_default();
let user = user(&username).await;
let posts = fetch_posts(url, "Comment".to_string()).await;
let req_url = req.uri().to_string();
// Return the landing page if Reddit deems this user NSFW but the user
// has disabled the display of NSFW content or if the instance
// is SFW-only.
if user.nsfw && crate::utils::should_be_nsfw_gated(&req, &req_url) {
return Ok(nsfw_landing(req, req_url).await.unwrap_or_default());
}
let filters = get_filters(&req);
if filters.contains(&["u_", &username].concat()) {
Ok(template(&UserTemplate {
user,
posts: Vec::new(),
sort: (sort, param(&path, "t").unwrap_or_default()),
ends: (param(&path, "after").unwrap_or_default(), String::new()),
listing,
prefs: Preferences::new(&req),
url,
redirect_url,
is_filtered: true,
all_posts_filtered: false,
all_posts_hidden_nsfw: false,
no_posts: false,
}))
} else {
// Request user posts/comments from Reddit
match Post::fetch(&path, false).await {
Ok((mut posts, after)) => {
let (_, all_posts_filtered) = filter_posts(&mut posts, &filters);
let no_posts = posts.is_empty();
let all_posts_hidden_nsfw = !no_posts && (posts.iter().all(|p| p.flags.nsfw) && setting(&req, "show_nsfw") != "on");
Ok(template(&UserTemplate {
user,
posts,
sort: (sort, param(&path, "t").unwrap_or_default()),
ends: (param(&path, "after").unwrap_or_default(), after),
listing,
prefs: Preferences::new(&req),
url,
redirect_url,
is_filtered: false,
all_posts_filtered,
all_posts_hidden_nsfw,
no_posts,
}))
}
// If there is an error show error page
Err(msg) => error(req, &msg).await,
if user.is_err() || posts.is_err() {
let s = ErrorTemplate {
message: user.err().unwrap().to_string(),
}
.render()
.unwrap();
Ok(HttpResponse::Ok().status(StatusCode::NOT_FOUND).content_type("text/html").body(s))
} else {
let posts_unwrapped = posts.unwrap();
let s = UserTemplate {
user: user.unwrap(),
posts: posts_unwrapped.0,
sort: sorting,
ends: (before, posts_unwrapped.1)
}
.render()
.unwrap();
Ok(HttpResponse::Ok().content_type("text/html").body(s))
}
}
// SERVICES
pub async fn page(web::Path(username): web::Path<String>, params: web::Query<Params>) -> Result<HttpResponse> {
render(username, params.sort.clone(), (params.before.clone(), params.after.clone())).await
}
// USER
async fn user(name: &str) -> Result<User, String> {
// Build the Reddit JSON API path
let path: String = format!("/user/{name}/about.json?raw_json=1");
async fn user(name: &String) -> Result<User, &'static str> {
// Build the Reddit JSON API url
let url: String = format!("user/{}/about.json", name);
// Send a request to the url
json(path, false).await.map(|res| {
trace!("User info from r/{} : {}", name, res["data"]);
// Grab creation date as unix timestamp
let created_unix = res["data"]["created"].as_f64().unwrap_or(0.0).round() as i64;
let created = OffsetDateTime::from_unix_timestamp(created_unix).unwrap_or(OffsetDateTime::UNIX_EPOCH);
// Send a request to the url, receive JSON in response
let req = request(url).await;
// Closure used to parse JSON from Reddit APIs
let about = |item| res["data"]["subreddit"][item].as_str().unwrap_or_default().to_string();
// If the Reddit API returns an error, exit this function
if req.is_err() {
return Err(req.err().unwrap());
}
// Parse the JSON output into a User struct
User {
name: res["data"]["name"].as_str().unwrap_or(name).to_owned(),
title: about("title"),
icon: format_url(&about("icon_img")),
karma: res["data"]["total_karma"].as_i64().unwrap_or(0),
created: created.format(format_description!("[month repr:short] [day] '[year repr:last_two]")).unwrap_or_default(),
banner: about("banner_img"),
description: about("public_description"),
nsfw: res["data"]["subreddit"]["over_18"].as_bool().unwrap_or_default(),
}
// Otherwise, grab the JSON output from the request
let res = req.unwrap();
// Grab creation date as unix timestamp
let created: i64 = res["data"]["created"].as_f64().unwrap().round() as i64;
// Parse the JSON output into a User struct
Ok(User {
name: name.to_string(),
icon: format_url(nested_val(&res, "subreddit", "icon_img").await).await,
karma: res["data"]["total_karma"].as_i64().unwrap(),
created: Utc.timestamp(created, 0).format("%b %e, %Y").to_string(),
banner: nested_val(&res, "subreddit", "banner_img").await,
description: nested_val(&res, "subreddit", "public_description").await,
})
}
pub async fn rss(req: Request<Body>) -> Result<Response<Body>, String> {
if config::get_setting("REDLIB_ENABLE_RSS").is_none() {
return Ok(error(req, "RSS is disabled on this instance.").await.unwrap_or_default());
}
use crate::utils::rewrite_urls;
use hyper::header::CONTENT_TYPE;
use rss::{ChannelBuilder, Item};
// Get user
let user_str = req.param("name").unwrap_or_default();
let listing = req.param("listing").unwrap_or_else(|| "overview".to_string());
// Get path
let path = format!("/user/{user_str}/{listing}.json?{}&raw_json=1", req.uri().query().unwrap_or_default(),);
// Get user
let user_obj = user(&user_str).await.unwrap_or_default();
// Get posts
let (posts, _) = Post::fetch(&path, false).await?;
// Build the RSS feed
let channel = ChannelBuilder::default()
.title(user_str)
.description(user_obj.description)
.items(
posts
.into_iter()
.map(|post| Item {
title: Some(post.title.to_string()),
link: Some(utils::get_post_url(&post)),
author: Some(post.author.name),
content: Some(rewrite_urls(&post.body)),
..Default::default()
})
.collect::<Vec<_>>(),
)
.build();
// Serialize the feed to RSS
let body = channel.to_string().into_bytes();
// Create the HTTP response
let mut res = Response::new(Body::from(body));
res.headers_mut().insert(CONTENT_TYPE, hyper::header::HeaderValue::from_static("application/rss+xml"));
Ok(res)
}
#[tokio::test(flavor = "multi_thread")]
async fn test_fetching_user() {
let user = user("spez").await;
assert!(user.is_ok());
assert!(user.unwrap().karma > 100);
}

File diff suppressed because it is too large Load Diff

Binary file not shown.

Binary file not shown.

Before: 60 KiB

View File

@ -1,55 +0,0 @@
async function checkInstanceUpdateStatus() {
try {
const response = await fetch('/commits.json');
const text = await response.text();
const entries = JSON.parse(text);
const localCommit = document.getElementById('git_commit').dataset.value;
let statusMessage = '';
if (entries.length > 0) {
const commitHashes = Array.from(entries).map(entry => {
return entry.sha
});
const commitIndex = commitHashes.indexOf(localCommit);
if (commitIndex === 0) {
statusMessage = '✅ Instance is up to date.';
} else if (commitIndex > 0) {
statusMessage = `⚠️ This instance is not up to date and is ${commitIndex} commits old. Test and confirm on an up-to-date instance before reporting.`;
document.getElementById('error-318').remove();
} else {
statusMessage = `⚠️ This instance is not up to date and is at least ${commitHashes.length} commits old. Test and confirm on an up-to-date instance before reporting.`;
document.getElementById('error-318').remove();
}
} else {
statusMessage = '⚠️ Unable to fetch commit information.';
}
document.getElementById('update-status').innerText = statusMessage;
} catch (error) {
console.error('Error fetching commits:', error);
document.getElementById('update-status').innerText = '⚠️ Error checking update status.';
}
}
async function checkOtherInstances() {
try {
const response = await fetch('/instances.json');
const data = await response.json();
const randomInstance = data.instances[Math.floor(Math.random() * data.instances.length)];
const instanceUrl = randomInstance.url;
// Set the href of the <a> tag to the instance URL with path included
document.getElementById('random-instance').href = instanceUrl + window.location.pathname;
//document.getElementById('random-instance').innerText = "Visit Random Instance";
} catch (error) {
console.error('Error fetching instances:', error);
document.getElementById('update-status').innerText = '⚠️ Error fetching other instances.';
}
}
// Set the target URL when the page loads
window.addEventListener('load', checkOtherInstances);
checkInstanceUpdateStatus();

Binary file not shown.

Before: 13 KiB

Binary file not shown.

Before: 4.3 KiB

View File

@ -1,2 +0,0 @@
!function(e,t){"object"==typeof exports&&"object"==typeof module?module.exports=t():"function"==typeof define&&define.amd?define([],t):"object"==typeof exports?exports.FFmpegWASM=t():e.FFmpegWASM=t()}(self,(()=>(()=>{var e={454:e=>{function t(e){return Promise.resolve().then((()=>{var t=new Error("Cannot find module '"+e+"'");throw t.code="MODULE_NOT_FOUND",t}))}t.keys=()=>[],t.resolve=t,t.id=454,e.exports=t}},t={};function r(a){var o=t[a];if(void 0!==o)return o.exports;var s=t[a]={exports:{}};return e[a](s,s.exports,r),s.exports}return r.o=(e,t)=>Object.prototype.hasOwnProperty.call(e,t),(()=>{"use strict";var e;!function(e){e.LOAD="LOAD",e.EXEC="EXEC",e.WRITE_FILE="WRITE_FILE",e.READ_FILE="READ_FILE",e.DELETE_FILE="DELETE_FILE",e.RENAME="RENAME",e.CREATE_DIR="CREATE_DIR",e.LIST_DIR="LIST_DIR",e.DELETE_DIR="DELETE_DIR",e.ERROR="ERROR",e.DOWNLOAD="DOWNLOAD",e.PROGRESS="PROGRESS",e.LOG="LOG",e.MOUNT="MOUNT",e.UNMOUNT="UNMOUNT"}(e||(e={}));const t=new Error("unknown message type"),a=new Error("ffmpeg is not loaded, call `await ffmpeg.load()` first"),o=(new Error("called FFmpeg.terminate()"),new Error("failed to import ffmpeg-core.js"));let s;self.onmessage=async({data:{id:n,type:E,data:i}})=>{const c=[];let p;try{if(E!==e.LOAD&&!s)throw a;switch(E){case e.LOAD:p=await(async({coreURL:t="https://unpkg.com/@ffmpeg/core@0.12.1/dist/umd/ffmpeg-core.js",wasmURL:a,workerURL:n})=>{const E=!s,i=t,c=a||t.replace(/.js$/g,".wasm"),p=n||t.replace(/.js$/g,".worker.js");try{importScripts(i)}catch{if(self.createFFmpegCore=(await r(454)(i)).default,!self.createFFmpegCore)throw o}return s=await self.createFFmpegCore({mainScriptUrlOrBlob:`${i}#${btoa(JSON.stringify({wasmURL:c,workerURL:p}))}`}),s.setLogger((t=>self.postMessage({type:e.LOG,data:t}))),s.setProgress((t=>self.postMessage({type:e.PROGRESS,data:t}))),E})(i);break;case e.EXEC:p=(({args:e,timeout:t=-1})=>{s.setTimeout(t),s.exec(...e);const r=s.ret;return s.reset(),r})(i);break;case e.WRITE_FILE:p=(({path:e,data:t})=>(s.FS.writeFile(e,t),!0))(i);break;case e.READ_FILE:p=(({path:e,encoding:t})=>s.FS.readFile(e,{encoding:t}))(i);break;case e.DELETE_FILE:p=(({path:e})=>(s.FS.unlink(e),!0))(i);break;case e.RENAME:p=(({oldPath:e,newPath:t})=>(s.FS.rename(e,t),!0))(i);break;case e.CREATE_DIR:p=(({path:e})=>(s.FS.mkdir(e),!0))(i);break;case e.LIST_DIR:p=(({path:e})=>{const t=s.FS.readdir(e),r=[];for(const a of t){const t=s.FS.stat(`${e}/${a}`),o=s.FS.isDir(t.mode);r.push({name:a,isDir:o})}return r})(i);break;case e.DELETE_DIR:p=(({path:e})=>(s.FS.rmdir(e),!0))(i);break;case e.MOUNT:p=(({fsType:e,options:t,mountPoint:r})=>{let a=e,o=s.FS.filesystems[a];return!!o&&(s.FS.mount(o,t,r),!0)})(i);break;case e.UNMOUNT:p=(({mountPoint:e})=>(s.FS.unmount(e),!0))(i);break;default:throw t}}catch(t){return void self.postMessage({id:n,type:e.ERROR,data:t.toString()})}p instanceof Uint8Array&&c.push(p.buffer),self.postMessage({id:n,type:E,data:p},c)}})(),{}})()));
//# sourceMappingURL=814.ffmpeg.js.map

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

Binary file not shown.

View File

@ -1 +0,0 @@
!function(e,t){"object"==typeof exports&&"object"==typeof module?module.exports=t():"function"==typeof define&&define.amd?define([],t):"object"==typeof exports?exports.FFmpegUtil=t():e.FFmpegUtil=t()}(self,(()=>(()=>{"use strict";var e={591:(e,t)=>{Object.defineProperty(t,"__esModule",{value:!0}),t.HeaderContentLength=void 0,t.HeaderContentLength="Content-Length"},431:(e,t)=>{Object.defineProperty(t,"__esModule",{value:!0}),t.ERROR_INCOMPLETED_DOWNLOAD=t.ERROR_RESPONSE_BODY_READER=void 0,t.ERROR_RESPONSE_BODY_READER=new Error("failed to get response body reader"),t.ERROR_INCOMPLETED_DOWNLOAD=new Error("failed to complete download")},915:function(e,t,o){var r=this&&this.__awaiter||function(e,t,o,r){return new(o||(o=Promise))((function(n,i){function d(e){try{l(r.next(e))}catch(e){i(e)}}function a(e){try{l(r.throw(e))}catch(e){i(e)}}function l(e){var t;e.done?n(e.value):(t=e.value,t instanceof o?t:new o((function(e){e(t)}))).then(d,a)}l((r=r.apply(e,t||[])).next())}))};Object.defineProperty(t,"__esModule",{value:!0}),t.toBlobURL=t.downloadWithProgress=t.importScript=t.fetchFile=void 0;const n=o(431),i=o(591);t.fetchFile=e=>r(void 0,void 0,void 0,(function*(){let t;if("string"==typeof e)t=/data:_data\/([a-zA-Z]*);base64,([^"]*)/.test(e)?atob(e.split(",")[1]).split("").map((e=>e.charCodeAt(0))):yield(yield fetch(e)).arrayBuffer();else if(e instanceof URL)t=yield(yield fetch(e)).arrayBuffer();else{if(!(e instanceof File||e instanceof Blob))return new Uint8Array;t=yield(o=e,new Promise(((e,t)=>{const r=new FileReader;r.onload=()=>{const{result:t}=r;t instanceof ArrayBuffer?e(new Uint8Array(t)):e(new Uint8Array)},r.onerror=e=>{var o,r;t(Error(`File could not be read! Code=${(null===(r=null===(o=null==e?void 0:e.target)||void 0===o?void 0:o.error)||void 0===r?void 0:r.code)||-1}`))},r.readAsArrayBuffer(o)})))}var o;return new Uint8Array(t)})),t.importScript=e=>r(void 0,void 0,void 0,(function*(){return new Promise((t=>{const o=document.createElement("script"),r=()=>{o.removeEventListener("load",r),t()};o.src=e,o.type="text/javascript",o.addEventListener("load",r),document.getElementsByTagName("head")[0].appendChild(o)}))})),t.downloadWithProgress=(e,t)=>r(void 0,void 0,void 0,(function*(){var o;const r=yield fetch(e);let d;try{const a=parseInt(r.headers.get(i.HeaderContentLength)||"-1"),l=null===(o=r.body)||void 0===o?void 0:o.getReader();if(!l)throw n.ERROR_RESPONSE_BODY_READER;const c=[];let s=0;for(;;){const{done:o,value:r}=yield l.read(),i=r?r.length:0;if(o){if(-1!=a&&a!==s)throw n.ERROR_INCOMPLETED_DOWNLOAD;t&&t({url:e,total:a,received:s,delta:i,done:o});break}c.push(r),s+=i,t&&t({url:e,total:a,received:s,delta:i,done:o})}const f=new Uint8Array(s);let u=0;for(const e of c)f.set(e,u),u+=e.length;d=f.buffer}catch(o){console.log("failed to send download progress event: ",o),d=yield r.arrayBuffer(),t&&t({url:e,total:d.byteLength,received:d.byteLength,delta:0,done:!0})}return d})),t.toBlobURL=(e,o,n=!1,i)=>r(void 0,void 0,void 0,(function*(){const r=n?yield(0,t.downloadWithProgress)(e,i):yield(yield fetch(e)).arrayBuffer(),d=new Blob([r],{type:o});return URL.createObjectURL(d)}))}},t={};return function o(r){var n=t[r];if(void 0!==n)return n.exports;var i=t[r]={exports:{}};return e[r].call(i.exports,i,i.exports,o),i.exports}(915)})()));

View File

@ -1,2 +0,0 @@
!function(e,t){"object"==typeof exports&&"object"==typeof module?module.exports=t():"function"==typeof define&&define.amd?define([],t):"object"==typeof exports?exports.FFmpegWASM=t():e.FFmpegWASM=t()}(self,(()=>(()=>{"use strict";var e={m:{},d:(t,s)=>{for(var r in s)e.o(s,r)&&!e.o(t,r)&&Object.defineProperty(t,r,{enumerable:!0,get:s[r]})},u:e=>e+".ffmpeg.js"};e.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return this")()}catch(e){if("object"==typeof window)return window}}(),e.o=(e,t)=>Object.prototype.hasOwnProperty.call(e,t),e.r=e=>{"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},(()=>{var t;e.g.importScripts&&(t=e.g.location+"");var s=e.g.document;if(!t&&s&&(s.currentScript&&(t=s.currentScript.src),!t)){var r=s.getElementsByTagName("script");if(r.length)for(var a=r.length-1;a>-1&&!t;)t=r[a--].src}if(!t)throw new Error("Automatic publicPath is not supported in this browser");t=t.replace(/#.*$/,"").replace(/\?.*$/,"").replace(/\/[^\/]+$/,"/"),e.p=t})(),e.b=document.baseURI||self.location.href;var t,s={};e.r(s),e.d(s,{FFmpeg:()=>i}),function(e){e.LOAD="LOAD",e.EXEC="EXEC",e.WRITE_FILE="WRITE_FILE",e.READ_FILE="READ_FILE",e.DELETE_FILE="DELETE_FILE",e.RENAME="RENAME",e.CREATE_DIR="CREATE_DIR",e.LIST_DIR="LIST_DIR",e.DELETE_DIR="DELETE_DIR",e.ERROR="ERROR",e.DOWNLOAD="DOWNLOAD",e.PROGRESS="PROGRESS",e.LOG="LOG",e.MOUNT="MOUNT",e.UNMOUNT="UNMOUNT"}(t||(t={}));const r=(()=>{let e=0;return()=>e++})(),a=(new Error("unknown message type"),new Error("ffmpeg is not loaded, call `await ffmpeg.load()` first")),o=new Error("called FFmpeg.terminate()");new Error("failed to import ffmpeg-core.js");class i{#e=null;#t={};#s={};#r=[];#a=[];loaded=!1;#o=()=>{this.#e&&(this.#e.onmessage=({data:{id:e,type:s,data:r}})=>{switch(s){case t.LOAD:this.loaded=!0,this.#t[e](r);break;case t.MOUNT:case t.UNMOUNT:case t.EXEC:case t.WRITE_FILE:case t.READ_FILE:case t.DELETE_FILE:case t.RENAME:case t.CREATE_DIR:case t.LIST_DIR:case t.DELETE_DIR:this.#t[e](r);break;case t.LOG:this.#r.forEach((e=>e(r)));break;case t.PROGRESS:this.#a.forEach((e=>e(r)));break;case t.ERROR:this.#s[e](r)}delete this.#t[e],delete this.#s[e]})};#i=({type:e,data:t},s=[],o)=>this.#e?new Promise(((a,i)=>{const n=r();this.#e&&this.#e.postMessage({id:n,type:e,data:t},s),this.#t[n]=a,this.#s[n]=i,o?.addEventListener("abort",(()=>{i(new DOMException(`Message # ${n} was aborted`,"AbortError"))}),{once:!0})})):Promise.reject(a);on(e,t){"log"===e?this.#r.push(t):"progress"===e&&this.#a.push(t)}off(e,t){"log"===e?this.#r=this.#r.filter((e=>e!==t)):"progress"===e&&(this.#a=this.#a.filter((e=>e!==t)))}load=(s={},{signal:r}={})=>(this.#e||(this.#e=new Worker(new URL(e.p+e.u(814),e.b),{type:void 0}),this.#o()),this.#i({type:t.LOAD,data:s},void 0,r));exec=(e,s=-1,{signal:r}={})=>this.#i({type:t.EXEC,data:{args:e,timeout:s}},void 0,r);terminate=()=>{const e=Object.keys(this.#s);for(const t of e)this.#s[t](o),delete this.#s[t],delete this.#t[t];this.#e&&(this.#e.terminate(),this.#e=null,this.loaded=!1)};writeFile=(e,s,{signal:r}={})=>{const a=[];return s instanceof Uint8Array&&a.push(s.buffer),this.#i({type:t.WRITE_FILE,data:{path:e,data:s}},a,r)};mount=(e,s,r)=>this.#i({type:t.MOUNT,data:{fsType:e,options:s,mountPoint:r}},[]);unmount=e=>this.#i({type:t.UNMOUNT,data:{mountPoint:e}},[]);readFile=(e,s="binary",{signal:r}={})=>this.#i({type:t.READ_FILE,data:{path:e,encoding:s}},void 
0,r);deleteFile=(e,{signal:s}={})=>this.#i({type:t.DELETE_FILE,data:{path:e}},void 0,s);rename=(e,s,{signal:r}={})=>this.#i({type:t.RENAME,data:{oldPath:e,newPath:s}},void 0,r);createDir=(e,{signal:s}={})=>this.#i({type:t.CREATE_DIR,data:{path:e}},void 0,s);listDir=(e,{signal:s}={})=>this.#i({type:t.LIST_DIR,data:{path:e}},void 0,s);deleteDir=(e,{signal:s}={})=>this.#i({type:t.DELETE_DIR,data:{path:e}},void 0,s)}return s})()));
//# sourceMappingURL=ffmpeg.js.map
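The FFmpeg class in the bundle above proxies every call to a Web Worker using a promise-per-message-id handshake: each request posts {id, type, data}, parks its resolve/reject callbacks under that id, and the worker's reply (or an ERROR message) settles the matching promise. Below is a minimal sketch of that pattern, assuming a hypothetical worker.js; it is an illustration, not the vendored implementation.

// Minimal sketch of an id-keyed worker RPC (illustrative only).
let nextId = 0;
const pending = {}; // id -> { resolve, reject }
const worker = new Worker("worker.js"); // hypothetical worker script

worker.onmessage = ({ data: { id, type, data } }) => {
  if (!(id in pending)) return;
  (type === "ERROR" ? pending[id].reject : pending[id].resolve)(data);
  delete pending[id];
};

function send(type, data, transfer = []) {
  return new Promise((resolve, reject) => {
    const id = nextId++;
    pending[id] = { resolve, reject };
    worker.postMessage({ id, type, data }, transfer);
  });
}

// e.g. await send("EXEC", { args: ["-i", "in.mp4", "out.mp4"], timeout: -1 });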

File diff suppressed because one or more lines are too long


@@ -1 +0,0 @@
document.querySelector('#commentQueryForms').scrollIntoView();

static/hls.min.js (vendored): 3 changes

File diff suppressed because one or more lines are too long

Binary file not shown. (Before: 60 KiB)


@@ -1,33 +0,0 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<svg
version="1.1"
viewBox="0 0 512 512"
id="svg2"
width="512"
height="512"
xmlns="http://www.w3.org/2000/svg"
xmlns:svg="http://www.w3.org/2000/svg">
<defs id="defs2" />
<rect width="512" height="512" fill="#4c082a" />
<g
transform="matrix(0.75272,0,0,0.75272,-1.1596187,-0.37987125)"
id="g2">
<circle
fill="#1a1a1a"
id="circle1"
style="fill:#4c082a;fill-opacity:0"
r="340.10001"
cy="340.32001"
cx="341.10999" />
<path
d="m 320.64,126.73 v 300.8 h 92.264 V 219.61 h 75.803 v -92.83 h -75.803 v -0.0508 z"
fill="#f83240"
id="path1"
style="fill:#f83240;fill-opacity:1" />
<path
d="M 193.1,126.74 V 510.7 h 0.006 v 43.543 h 295.82 v -92.338 h -202.74 v -335.16 z"
fill="#f83240"
id="path2"
style="fill:#f83240;fill-opacity:1" />
</g>
</svg>


Binary file not shown. (Before: 219 KiB)


@@ -1,24 +0,0 @@
{
"name": "Redlib",
"short_name": "Redlib",
"display": "standalone",
"background_color": "#1f1f1f",
"description": "An alternative private front-end to Reddit",
"theme_color": "#1f1f1f",
"start_url": "/",
"icons": [
{
"src": "logo.png",
"sizes": "512x512",
"type": "image/png"
},
{
"src": "apple-touch-icon.png",
"sizes": "180x180"
},
{
"src": "favicon.ico",
"sizes": "32x32"
}
]
}

Binary file not shown. (Before: 9.5 KiB)

Binary file not shown. (Before: 24 KiB)

Binary file not shown. (Before: 5.8 KiB)

Binary file not shown. (Before: 4.8 KiB)


@@ -1,11 +0,0 @@
<OpenSearchDescription xmlns="http://a9.com/-/spec/opensearch/1.1/"
xmlns:moz="http://www.mozilla.org/2006/browser/search/">
<ShortName>Search Redlib</ShortName>
<Description>Search for whatever you want on Redlib, awesome Reddit frontend</Description>
<InputEncoding>UTF-8</InputEncoding>
<Image width="32" height="32" type="image/x-icon">/favicon.ico</Image>
<Url type="text/html" template="/search">
<Param name="q" value="{searchTerms}"/>
</Url>
<moz:SearchForm>/search</moz:SearchForm>
</OpenSearchDescription>

static/robots.txt (new file): 2 additions

@@ -0,0 +1,2 @@
User-agent: *
Allow: /

File diff suppressed because it is too large


@@ -1,14 +0,0 @@
/* Black theme setting */
.black {
--accent: #bb2b3b;
--green: #00a229;
--text: white;
--foreground: #0f0f0f;
--background: black;
--outside: black;
--post: black;
--panel-border: 2px solid #0f0f0f;
--highlighted: #0f0f0f;
--visited: #aaa;
--shadow: 0 1px 3px rgba(0, 0, 0, 0.1);
}


@@ -1,17 +0,0 @@
/* Catppuccin theme setting */
.catppuccin {
--accent: #b4befe; /* lavender */
--green: #a6e3a1; /* green */
--text: #cdd6f4; /* text */
--foreground: #181825; /* mantle */
--background: #1e1e2e; /* base */
--outside: #11111b; /* crust */
--post: #11111b; /* crust */
--panel-border: none;
--highlighted: #313244; /* surface0 */
--visited: #6c7086; /* overlay0 */
--shadow: 0 0 0 transparent;
--nsfw: #fab387; /* peach */
--admin: #eba0ac; /* maroon */
}


@@ -1,14 +0,0 @@
/* Dark theme setting */
.dark{
--accent: #d54455;
--green: #5cff85;
--text: white;
--foreground: #222;
--background: #0f0f0f;
--outside: #1f1f1f;
--post: #161616;
--panel-border: 1px solid #333;
--highlighted: #333;
--visited: #aaa;
--shadow: 0 1px 3px rgba(0, 0, 0, 0.5);
}


@@ -1,13 +0,0 @@
.doomone {
--accent: #51afef;
--green: #00a229;
--text: #bbc2cf;
--foreground: #3d4148;
--background: #282c34;
--outside: #52565c;
--post: #24272e;
--panel-border: 2px solid #52565c;
--highlighted: #686b70;
--visited: #969692;
--shadow: 0 1px 3px rgba(0, 0, 0, 0.1);
}


@@ -1,14 +0,0 @@
/* Dracula theme setting */
.dracula {
--accent: #bd93f9;
--green: #50fa7b;
--text: #f8f8f2;
--foreground: #3d4051;
--background: #282a36;
--outside: #393c4d;
--post: #333544;
--panel-border: 2px solid #44475a;
--highlighted: #4e5267;
--visited: #969692;
--shadow: 0 1px 3px rgba(0, 0, 0, 0.1);
}


@@ -1,14 +0,0 @@
/* Gold theme setting */
.gold {
--accent: #f2aa4c;
--green: #5cff85;
--text: white;
--foreground: #234;
--background: #101820;
--outside: #1b2936;
--post: #1b2936;
--panel-border: 0px solid black;
--highlighted: #234;
--visited: #aaa;
--shadow: 0 2px 5px rgba(0, 0, 0, 0.5);
}


@@ -1,13 +0,0 @@
/* Gruvbox-Dark theme setting */
.gruvboxdark {
--accent: #8ec07c;
--green: #b8bb26;
--text: #ebdbb2;
--foreground: #3c3836;
--background: #282828;
--outside: #3c3836;
--post: #3c3836;
--panel-border: 1px solid #504945;
--highlighted: #282828;
--shadow: 0 1px 3px rgba(0, 0, 0, 0.5);
}


@@ -1,18 +0,0 @@
/* Gruvbox-Light theme setting */
.gruvboxlight {
--accent: #427b58;
--green: #79740e;
--text: #3c3836;
--foreground: #ebdbb2;
--background: #fbf1c7;
--outside: #ebdbb2;
--post: #ebdbb2;
--panel-border: 1px solid #d5c4a1;
--highlighted: #fbf1c7;
--shadow: 0 1px 3px rgba(0, 0, 0, 0.25);
}
html:has(> .gruvboxlight) {
/* Hint color theme to browser for scrollbar */
color-scheme: light;
}


@@ -1,14 +0,0 @@
/* icebergDark theme setting */
.icebergDark {
--accent: #85a0c7;
--green: #b5bf82;
--text: #c6c8d1;
--foreground: #454d73;
--background: #161821;
--outside: #1f2233;
--post: #1f2233;
--panel-border: 1px solid #454d73;
--highlighted: #0f1117;
--visited: #0f1117;
--shadow: 0 1px 3px rgba(0, 0, 0, 0.5);
}


@@ -1,14 +0,0 @@
/* Laserwave theme setting */
.laserwave {
--accent: #eb64b9;
--green: #74dfc4;
--text: #e0dfe1;
--foreground: #302a36;
--background: #27212e;
--outside: #3e3647;
--post: #3e3647;
--panel-border: 2px solid #2f2738;
--highlighted: #302a36;
--visited: #91889b;
--shadow: 0 1px 3px rgba(0, 0, 0, 0.1);
}


@@ -1,14 +0,0 @@
/* Libreddit black theme setting */
.libredditBlack {
--accent: #009a9a;
--green: #00a229;
--text: white;
--foreground: #0f0f0f;
--background: black;
--outside: black;
--post: black;
--panel-border: 2px solid #0f0f0f;
--highlighted: #0f0f0f;
--visited: #aaa;
--shadow: 0 1px 3px rgba(0, 0, 0, 0.1);
}


@@ -1,14 +0,0 @@
/* Libreddit dark theme setting */
.libredditDark{
--accent: aqua;
--green: #5cff85;
--text: white;
--foreground: #222;
--background: #0f0f0f;
--outside: #1f1f1f;
--post: #161616;
--panel-border: 1px solid #333;
--highlighted: #333;
--visited: #aaa;
--shadow: 0 1px 3px rgba(0, 0, 0, 0.5);
}


@@ -1,19 +0,0 @@
/* Libreddit light theme setting */
.libredditLight {
--accent: #009a9a;
--green: #00a229;
--text: black;
--foreground: #f5f5f5;
--background: #ddd;
--outside: #ececec;
--post: #eee;
--panel-border: 1px solid #ccc;
--highlighted: white;
--visited: #555;
--shadow: 0 1px 3px rgba(0, 0, 0, 0.1);
}
html:has(> .libredditLight) {
/* Hint color theme to browser for scrollbar */
color-scheme: light;
}


@@ -1,19 +0,0 @@
/* Light theme setting */
.light {
--accent: #bb2b3b;
--green: #00a229;
--text: black;
--foreground: #f5f5f5;
--background: #ddd;
--outside: #ececec;
--post: #eee;
--panel-border: 1px solid #ccc;
--highlighted: white;
--visited: #555;
--shadow: 0 1px 3px rgba(0, 0, 0, 0.1);
}
html:has(> .light) {
/* Hint color theme to browser for scrollbar */
color-scheme: light;
}


@@ -1,14 +0,0 @@
/* Nord theme setting */
.nord {
--accent: #8fbcbb;
--green: #a3be8c;
--text: #eceff4;
--foreground: #3b4252;
--background: #2e3440;
--outside: #434c5e;
--post: #434c5e;
--panel-border: 2px solid #4c566a;
--highlighted: #3b4252;
--visited: #a3a5aa;
--shadow: 0 1px 3px rgba(0, 0, 0, 0.1);
}


@@ -1,13 +0,0 @@
/* Rosebox theme setting */
.rosebox {
--accent: #a57562;
--green: #a3be8c;
--text: white;
--foreground: #222;
--background: #262626;
--outside: #222;
--post: #222;
--panel-border: 1px solid #222;
--highlighted: #262626;
--shadow: 0 1px 3px rgba(0, 0, 0, 0.5);
}


@@ -1,14 +0,0 @@
/* Tokyo Night theme setting */
.tokyoNight {
--accent: #565f89;
--green: #73daca;
--text: #a9b1d6;
--foreground: #24283b;
--background: #1a1b26;
--outside: #24283b;
--post: #1a1b26;
--panel-border: 1px solid #a9b1d6;
--highlighted: #414868;
--visited: #414868;
--shadow: 0 1px 3px rgba(0, 0, 0, 0.5);
}


@@ -1,14 +0,0 @@
/* Violet theme setting */
.violet {
--accent: #7c71dd;
--green: #5cff85;
--text: white;
--foreground: #1F2347;
--background: #12152b;
--outside: #181c3a;
--post: #181c3a;
--panel-border: 1px solid #1F2347;
--highlighted: #1F2347;
--visited: #aaa;
--shadow: 0 2px 5px rgba(0, 0, 0, 0.5);
}


@@ -1,232 +0,0 @@
// @license http://www.gnu.org/licenses/agpl-3.0.html AGPL-3.0
let ffmpeg = null;
let loadingsvg = `<svg class="rotate" xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24"><g fill="none" fill-rule="evenodd"><path d="m12.593 23.258l-.011.002l-.071.035l-.02.004l-.014-.004l-.071-.035q-.016-.005-.024.005l-.004.01l-.017.428l.005.02l.01.013l.104.074l.015.004l.012-.004l.104-.074l.012-.016l.004-.017l-.017-.427q-.004-.016-.017-.018m.265-.113l-.013.002l-.185.093l-.01.01l-.003.011l.018.43l.005.012l.008.007l.201.093q.019.005.029-.008l.004-.014l-.034-.614q-.005-.018-.02-.022m-.715.002a.02.02 0 0 0-.027.006l-.006.014l-.034.614q.001.018.017.024l.015-.002l.201-.093l.01-.008l.004-.011l.017-.43l-.003-.012l-.01-.01z"/><path fill="currentColor" d="M12 4.5a7.5 7.5 0 1 0 0 15a7.5 7.5 0 0 0 0-15M1.5 12C1.5 6.201 6.201 1.5 12 1.5S22.5 6.201 22.5 12S17.799 22.5 12 22.5S1.5 17.799 1.5 12" opacity="0.1"/><path fill="currentColor" d="M12 4.5a7.46 7.46 0 0 0-5.187 2.083a1.5 1.5 0 0 1-2.075-2.166A10.46 10.46 0 0 1 12 1.5a1.5 1.5 0 0 1 0 3"/></g></svg>`;
let downloadsvg = `<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24"><g fill="none"><path d="m12.593 23.258l-.011.002l-.071.035l-.02.004l-.014-.004l-.071-.035q-.016-.005-.024.005l-.004.01l-.017.428l.005.02l.01.013l.104.074l.015.004l.012-.004l.104-.074l.012-.016l.004-.017l-.017-.427q-.004-.016-.017-.018m.265-.113l-.013.002l-.185.093l-.01.01l-.003.011l.018.43l.005.012l.008.007l.201.093q.019.005.029-.008l.004-.014l-.034-.614q-.005-.018-.02-.022m-.715.002a.02.02 0 0 0-.027.006l-.006.014l-.034.614q.001.018.017.024l.015-.002l.201-.093l.01-.008l.004-.011l.017-.43l-.003-.012l-.01-.01z"/><path fill="currentColor" d="M20 15a1 1 0 0 1 1 1v4a2 2 0 0 1-2 2H5a2 2 0 0 1-2-2v-4a1 1 0 1 1 2 0v4h14v-4a1 1 0 0 1 1-1M12 2a1 1 0 0 1 1 1v10.243l2.536-2.536a1 1 0 1 1 1.414 1.414l-4.066 4.066a1.25 1.25 0 0 1-1.768 0L7.05 12.121a1 1 0 1 1 1.414-1.414L11 13.243V3a1 1 0 0 1 1-1"/></g></svg>`;
(function () {
if (Hls.isSupported()) {
var downloadsEnabled = document.cookie.split("; ").find((row) => row.startsWith("ffmpeg_video_downloads="))?.split("=")[1] == "on";
var videoSources = document.querySelectorAll("video source[type='application/vnd.apple.mpegurl']");
videoSources.forEach(function (source) {
var playlist = source.src;
var oldVideo = source.parentNode;
var autoplay = oldVideo.classList.contains("hls_autoplay");
// If HLS is supported natively then don't use hls.js
if (oldVideo.canPlayType(source.type) === "probably" && !downloadsEnabled) {
if (autoplay) {
oldVideo.play();
}
return;
}
// Replace video with copy that will have all "source" elements removed
var newVideo = oldVideo.cloneNode(true);
var allSources = newVideo.querySelectorAll("source");
allSources.forEach(function (source) {
source.remove();
});
// Empty source to enable play event
newVideo.src = "about:blank";
oldVideo.parentNode.replaceChild(newVideo, oldVideo);
function initializeHls() {
newVideo.removeEventListener('play', initializeHls);
var hls = new Hls({ autoStartLoad: false });
hls.loadSource(playlist);
hls.attachMedia(newVideo);
hls.on(Hls.Events.MANIFEST_PARSED, function () {
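// hls.js orders levels from lowest to highest bitrate, so the last index picks the top quality (assumes hls.js's default level sorting).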
hls.loadLevel = hls.levels.length - 1;
var availableLevels = hls.levels.map(function(level) {
return {
height: level.height,
width: level.width,
bitrate: level.bitrate,
};
});
addQualitySelector(newVideo, hls, availableLevels);
if (downloadsEnabled){ addVideoDownload(newVideo, hls); }
hls.startLoad();
newVideo.play();
});
hls.on(Hls.Events.ERROR, function (event, data) {
var errorType = data.type;
var errorFatal = data.fatal;
if (errorFatal) {
switch (errorType) {
case Hls.ErrorType.NETWORK_ERROR:
hls.startLoad();
break;
case Hls.ErrorType.MEDIA_ERROR:
hls.recoverMediaError();
break;
default:
hls.destroy();
break;
}
}
console.error("HLS error", data);
});
}
if (downloadsEnabled){
const { fetchFile } = FFmpegUtil;
const { FFmpeg } = FFmpegWASM;
function addVideoDownload(videoElement, hlsInstance) {
var mediaStream = [];
var downloadButton = document.createElement('button');
downloadButton.classList.add('video-options','download');
downloadButton.innerHTML = loadingsvg;
const mergeStreams = async () => {
if (ffmpeg === null) {
ffmpeg = new FFmpeg();
await ffmpeg.load({
coreURL: "/ffmpeg/ffmpeg-core.js",
});
ffmpeg.on("log", ({ message }) => {
console.log(message); // Quite noisy, but kept so ffmpeg's output stays visible for debugging
})
ffmpeg.on("progress", ({ progress, time }) => { // Progress TODO: show progress ring around button not just ⏳
// console.log("ffmpeg prog:",progress * 100)
});
}
// Combine Video Audio Streams
await ffmpeg.writeFile("video", await fetchFile(concatBlob(mediaStream['video'])));
await ffmpeg.writeFile("audio", await fetchFile(concatBlob(mediaStream['audio'])));
console.time('ffmpeg-exec');
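// The exec below mirrors the CLI: ffmpeg -i video -i audio -c:v copy -c:a aac output.mp4 (copy the video track, encode audio to AAC)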
await ffmpeg.exec(['-i', "video", '-i', "audio",'-c:v', "copy", '-c:a', "aac", 'output.mp4']);
console.timeEnd('ffmpeg-exec')
// Save
const toSlug = (str) => {
return str
.normalize('NFD')
.replace(/[\u0300-\u036f]/g, '')
.replace(/[\W_]+/g, '-')
.toLowerCase()
.replace(/^-+|-+$/g, '');
}
var filename = toSlug(videoElement.parentNode.parentNode.id || document.title);
const data = await ffmpeg.readFile('output.mp4');
saveAs(new Blob([data.buffer], { type: 'video/mp4' }), filename);
}
function saveAs(blob, filename) { // Download helper: click a temporary object-URL anchor
var url = URL.createObjectURL(blob);
var a = document.createElement("a");
document.body.appendChild(a);
a.style = "display: none";
a.href = url;
a.download = filename;
a.click();
window.URL.revokeObjectURL(url);
}
function concatBlob(inputArray) {
var totalLength = inputArray.reduce(function (prev, cur) {
return prev + cur.length
}, 0);
var result = new Uint8Array(totalLength);
var offset = 0;
inputArray.forEach(function (element) {
result.set(element, offset);
offset += element.length;
});
return new Blob([result], {
type: 'application/octet-stream'
});
}
function getStreams() {
var video = document.createElement('video');
video.autoplay = true;
var dataStreams = {
'video': [],
'audio': []
};
mediaStream = dataStreams; // Expose the captured buffers to mergeStreams
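// BUFFER_APPENDING fires with each fragment's bytes before they reach the MediaSource; data.type tells video and audio apart.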
hlsInstance.on(Hls.Events.BUFFER_APPENDING, function (event, data) {
dataStreams[data.type].push(data.data);
});
var isDownloading = false;
function startDownload() {
if (isDownloading) { return; }
isDownloading = true;
downloadButton.innerHTML = loadingsvg;
mergeStreams()
.then(_ => {
isDownloading = false;
downloadButton.innerHTML = downloadsvg;
});
}
function waitForLoad() {
const poll = resolve => {
if(hlsInstance._media.buffered.length === 1 &&
hlsInstance._media.buffered.start(0) === 0 &&
hlsInstance._media.buffered.end(0) === hlsInstance._media.duration)
resolve();
else setTimeout(_ => poll(resolve), 400);
}
return new Promise(poll);
}
waitForLoad() // resolves once the whole video is buffered
.then(_ => {
downloadButton.innerHTML = downloadsvg
downloadButton.addEventListener('click', startDownload);
});
}
videoElement.parentNode.appendChild(downloadButton);
getStreams()
}
}
function addQualitySelector(videoElement, hlsInstance, availableLevels) {
var qualitySelector = document.createElement('select');
qualitySelector.classList.add('video-options');
var last = availableLevels.length - 1;
availableLevels.forEach(function (level, index) {
var option = document.createElement('option');
option.value = index.toString();
var bitrate = (level.bitrate / 1_000).toFixed(0);
option.text = level.height + 'p (' + bitrate + ' kbps)';
if (index === last) {
option.selected = "selected";
}
qualitySelector.appendChild(option);
});
qualitySelector.selectedIndex = availableLevels.length - 1;
qualitySelector.addEventListener('change', function () {
var selectedIndex = qualitySelector.selectedIndex;
hlsInstance.nextLevel = selectedIndex;
hlsInstance.startLoad();
});
videoElement.parentNode.appendChild(qualitySelector);
}
newVideo.addEventListener('play', initializeHls);
if (autoplay) {
newVideo.play();
}
});
} else {
var videos = document.querySelectorAll("video.hls_autoplay");
videos.forEach(function (video) {
video.setAttribute("autoplay", "");
});
}
})();
// @license-end


@@ -1,104 +1,29 @@
{% import "utils.html" as utils %}
<!DOCTYPE html>
<html lang="en" class="{% if prefs.fixed_navbar == "on" %}fixed_navbar{% endif %}">
<html lang="en">
<head>
{% block head %}
<title>{% block title %}Redlib{% endblock %}</title>
<title>{% block title %}Libreddit{% endblock %}</title>
<meta http-equiv="Referrer-Policy" content="no-referrer">
<meta http-equiv="Content-Security-Policy" content="default-src 'self'; style-src 'self' 'unsafe-inline'; base-uri 'none'; form-action 'self';">
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
<meta name="description" content="View on Redlib, an alternative private front-end to Reddit.">
<meta name="description" content="View on Libreddit, an alternative private front-end to Reddit.">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
{% if crate::utils::disable_indexing() %}
<meta name="robots" content="noindex, nofollow">
{% endif %}
<!-- General PWA -->
<meta name="theme-color" content="#1F1F1F">
<!-- iOS Application -->
<meta name="apple-mobile-web-app-title" content="Redlib">
<meta name="apple-mobile-web-app-capable" content="yes">
<meta name="apple-mobile-web-app-status-bar-style" content="default">
<!-- Android -->
<meta name="mobile-web-app-capable" content="yes">
<!-- iOS Logo -->
<link href="/touch-icon-iphone.png" rel="apple-touch-icon">
<!-- OpenSearch description file -->
<link rel="search" type="application/opensearchdescription+xml" title="Search Redlib" href="/opensearch.xml">
<!-- PWA Manifest -->
<link rel="manifest" type="application/json" href="/manifest.json">
<link rel="shortcut icon" type="image/x-icon" href="/favicon.ico">
<link rel="stylesheet" type="text/css" href="/style.css?v={{ env!("CARGO_PKG_VERSION") }}">
<!-- Video quality -->
<div id="video_quality" data-value="{{ prefs.video_quality }}"></div>
<link rel="stylesheet" href="/style.css">
{% endblock %}
</head>
<body class="
{% if prefs.layout != "" %}{{ prefs.layout }}{% endif %}
{% if prefs.wide == "on" || prefs.layout == "old" || prefs.layout == "waterfall" %} wide{% endif %}
{% if prefs.theme != "system" %} {{ prefs.theme }}{% endif %}
{% if prefs.fixed_navbar == "on" %} fixed_navbar{% endif %}">
<!-- NAVIGATION BAR -->
<nav class="
{% if prefs.fixed_navbar == "on" %} fixed_navbar{% endif %}">
<div id="logo">
<a id="redlib" href="/">
<span id="lib" {% if prefs.redsunlib_colorway == "on" %}style="color: #ff8585;"{% endif %}">red</span><span id="reddit" {% if prefs.redsunlib_colorway == "on" %}style="color: #ffbfbf;"{% endif %}>sun</span><span id="lib" {% if prefs.redsunlib_colorway == "on" %}style="color: #ff8585;"{% endif %}>lib.</span>
</a>
{% block subscriptions %}{% endblock %}
</div>
{% block search %}{% endblock %}
<div id="links">
<a id="reddit_link" {% if prefs.disable_visit_reddit_confirmation != "on" %}href="#popup"{% else %}href="https://www.reddit.com{{ url }}" rel="nofollow"{% endif %}>
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24">
<title>redirect to reddit</title>
<g fill="none">
<path d="m12.594 23.258l-.012.002l-.071.035l-.02.004l-.014-.004l-.071-.036q-.016-.004-.024.006l-.004.01l-.017.428l.005.02l.01.013l.104.074l.015.004l.012-.004l.104-.074l.012-.016l.004-.017l-.017-.427q-.004-.016-.016-.018m.264-.113l-.014.002l-.184.093l-.01.01l-.003.011l.018.43l.005.012l.008.008l.201.092q.019.005.029-.008l.004-.014l-.034-.614q-.005-.019-.02-.022m-.715.002a.02.02 0 0 0-.027.006l-.006.014l-.034.614q.001.018.017.024l.015-.002l.201-.093l.01-.008l.003-.011l.018-.43l-.003-.012l-.01-.01z" />
<path fill="currentColor" d="M6.301 6a4 4 0 0 1 3.312 1.756l.118.186l4.253 7.087a2 2 0 0 0 1.553.965L15.7 16h1.194l.02-.415l.022-.36l.012-.159c.027-.346.352-.557.631-.41l.306.164l.36.203l.198.117l.43.263l.229.147l.463.31l.21.147l.377.273l.315.24l.133.104c.236.188.225.566-.023.762l-.28.217l-.34.252l-.4.282l-.456.305l-.462.291l-.416.249l-.365.205l-.307.165c-.275.143-.572-.036-.598-.36l-.025-.347l-.024-.415l-.01-.23H15.7a4 4 0 0 1-3.312-1.756l-.118-.186l-4.253-7.087a2 2 0 0 0-1.553-.965L6.3 8H4a1 1 0 0 1-.117-1.993L4 6zm3.714 7.643a1 1 0 0 1 .342 1.371l-.626 1.044A4 4 0 0 1 6.301 18H4a1 1 0 1 1 0-2h2.301a2 2 0 0 0 1.715-.971l.627-1.043a1 1 0 0 1 1.371-.344Zm7.563-8.988l.306.165l.36.203l.198.117l.43.263l.229.147l.463.31l.21.147l.377.273l.315.24l.133.104c.236.188.225.566-.023.762l-.28.217l-.34.252q-.186.135-.4.282l-.456.305l-.462.291l-.416.249l-.365.206l-.307.164c-.275.143-.572-.036-.598-.36l-.025-.347l-.024-.415l-.01-.23H15.7a2 2 0 0 0-1.627.836l-.088.135l-.626 1.043a1 1 0 0 1-1.77-.925l.055-.104l.626-1.043a4 4 0 0 1 3.209-1.936l.22-.006h1.195l.02-.415l.022-.36l.012-.159c.027-.346.352-.557.631-.41Z" />
</g>
</svg>
<span>reddit</span>
</a>
{% if prefs.disable_visit_reddit_confirmation != "on" %}
{% call utils::visit_reddit_confirmation(url) %}
{% endif %}
<a id="settings_link" href="/settings">
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24">
<g fill="none" fill-rule="evenodd">
<title>settings</title>
<path d="m12.593 23.258l-.011.002l-.071.035l-.02.004l-.014-.004l-.071-.035q-.016-.005-.024.005l-.004.01l-.017.428l.005.02l.01.013l.104.074l.015.004l.012-.004l.104-.074l.012-.016l.004-.017l-.017-.427q-.004-.016-.017-.018m.265-.113l-.013.002l-.185.093l-.01.01l-.003.011l.018.43l.005.012l.008.007l.201.093q.019.005.029-.008l.004-.014l-.034-.614q-.005-.018-.02-.022m-.715.002a.02.02 0 0 0-.027.006l-.006.014l-.034.614q.001.018.017.024l.015-.002l.201-.093l.01-.008l.004-.011l.017-.43l-.003-.012l-.01-.01z" />
<path fill="currentColor" d="M14.035 2.809c.37-.266.89-.39 1.401-.203a10 10 0 0 1 2.982 1.725c.417.35.57.861.524 1.313c-.075.753.057 1.48.42 2.106c.32.557.802.997 1.39 1.307l.225.11c.414.187.782.576.875 1.113a10 10 0 0 1 0 3.44c-.083.484-.39.847-.753 1.051l-.122.063c-.69.31-1.254.79-1.616 1.416c-.362.627-.494 1.353-.419 2.106c.045.452-.107.964-.524 1.313a10 10 0 0 1-2.982 1.725a1.51 1.51 0 0 1-1.4-.203C13.42 20.75 12.723 20.5 12 20.5s-1.42.249-2.035.691a1.51 1.51 0 0 1-1.401.203a10 10 0 0 1-2.982-1.725a1.51 1.51 0 0 1-.524-1.313c.075-.753-.058-1.48-.42-2.106a3.4 3.4 0 0 0-1.39-1.307l-.225-.11a1.51 1.51 0 0 1-.875-1.113a10 10 0 0 1 0-3.44c.083-.484.39-.847.753-1.051l.122-.062c.69-.311 1.254-.79 1.616-1.417c.361-.626.494-1.353.419-2.106a1.51 1.51 0 0 1 .524-1.313a10 10 0 0 1 2.982-1.725a1.51 1.51 0 0 1 1.4.203c.615.442 1.312.691 2.036.691s1.42-.249 2.035-.691m.957 1.769c-.866.57-1.887.922-2.992.922s-2.126-.353-2.992-.922A8 8 0 0 0 7.068 5.7c.06 1.033-.145 2.093-.697 3.05c-.553.956-1.368 1.663-2.293 2.128a8 8 0 0 0 0 2.242c.925.465 1.74 1.172 2.293 2.13c.552.955.757 2.015.697 3.048a8 8 0 0 0 1.94 1.123c.866-.57 1.887-.922 2.992-.922s2.126.353 2.992.922a8 8 0 0 0 1.94-1.122c-.06-1.034.145-2.094.697-3.05c.552-.957 1.368-1.664 2.293-2.13a8 8 0 0 0 0-2.24c-.925-.466-1.74-1.173-2.293-2.13c-.552-.956-.757-2.016-.697-3.05a8 8 0 0 0-1.94-1.122ZM12 8a4 4 0 1 1 0 8a4 4 0 0 1 0-8m0 2a2 2 0 1 0 0 4a2 2 0 0 0 0-4" />
</g>
</svg>
<span>settings</span>
</a>
</div>
</head>
<body style="visibility: hidden;">
{% block navbar %}
<nav>
<a href="/"><span id="lib">lib</span>reddit. <span id="version">v{{ env!("CARGO_PKG_VERSION") }}</span></a>
<a id="github" href="https://github.com/spikecodes/libreddit">GITHUB</a>
</nav>
{% if prefs.mascot != "none" && prefs.mascot != "" %}
<!-- MASCOT -->
<div class="mascot">
<img src="/mascot/{{ prefs.mascot }}.png">
</div>
{% endif %}
{% endblock %}
<!-- MAIN CONTENT -->
{% block body %}
<main>
{% block content %}
{% endblock %}
</main>
{% endblock %}
<!-- FOOTER -->
{% block footer %}
<footer>
<p id="version">v{{ env!("CARGO_PKG_VERSION") }}</p>
<div class="footer-button">
<a href="/info" title="View instance information">ⓘ View instance info</a>
</div>
<div class="footer-button">
<a href="https://git.stardust.wtf/iridium/redsunlib" title="View code on git.stardust.wtf">&lt;&gt; Code</a>
</div>
</footer>
{% endblock %}
</body>
</html>
</html>


@@ -1,47 +0,0 @@
{% import "utils.html" as utils %}
{% if kind == "more" && parent_kind == "t1" %}
<a class="deeper_replies" href="{{ post_link }}{{ parent_id }}">&rarr; More replies ({{ more_count }})</a>
{% else if kind == "t1" %}
<div id="{{ id }}" class="comment">
<div class="comment_left">
<p class="comment_score" title="{{ score.1 }}">
{% if prefs.hide_score != "on" %}
{{ score.0 }}
{% else %}
&#x2022;
{% endif %}
</p>
<div class="line"></div>
</div>
<details class="comment_right" {% if !collapsed || highlighted %}open{% endif %}>
<summary class="comment_data">
{% if author.name != "[deleted]" %}
<a class="comment_author {{ author.distinguished }} {% if author.name == post_author %}op{% endif %}" href="/user/{{ author.name }}">u/{{ author.name }}</a>
{% else %}
<span class="comment_author {{ author.distinguished }}">u/[deleted]</span>
{% endif %}
{% if author.flair.flair_parts.len() > 0 %}
<small class="author_flair">{% call utils::render_flair(author.flair.flair_parts) %}</small>
{% endif %}
<a href="{{ post_link }}{{ id }}/?context=3#{{ id }}" class="created" title="{{ created }}">{{ rel_time }}</a>
{% if edited.0 != "".to_string() %}<span class="edited" title="{{ edited.1 }}">edited {{ edited.0 }}</span>{% endif %}
{% if !awards.is_empty() && prefs.hide_awards != "on" %}
<span class="dot">&bull;</span>
{% for award in awards.clone() %}
<span class="award" title="{{ award.name }}">
<img alt="{{ award.name }}" src="{{ award.icon_url }}" width="16" height="16"/>
</span>
{% endfor %}
{% endif %}
</summary>
{% if is_filtered %}
<div class="comment_body_filtered {% if highlighted %}highlighted{% endif %}">(Filtered content)</div>
{% else %}
<div class="comment_body {% if highlighted %}highlighted{% endif %}">{{ body|safe }}</div>
{% endif %}
<blockquote class="replies">{% for c in replies -%}{{ c.render().unwrap()|safe }}{%- endfor %}
</bockquote>
</details>
</div>
{% endif %}

Some files were not shown because too many files have changed in this diff.