Compare commits

..

No commits in common. "ollama" and "master" have entirely different histories.

10 changed files with 173 additions and 638 deletions

1
.gitignore vendored
View file

@ -3,4 +3,3 @@
Cargo.lock
config.json
.netrc
config.json.instagram

View file

@ -1,17 +1,14 @@
[package]
name = "pixelfed_batch_uploader"
version = "1.2.0"
version = "1.0.3"
edition = "2021"
[build]
rustflags = ["--out-dir", "target/output"]
[dependencies]
serde = { version = "1.0.218", features = ["derive"] }
serde_json = "1.0.140"
reqwest = { version = "0.12", features = ["blocking", "json", "multipart", "stream"] }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
reqwest = { version = "0.11", features = ["blocking", "json", "multipart"] }
tokio = { version = "1.0", features = ["full"] }
base64 = "0.22.1"
clap = { version = "4.5.3", features = ["derive"] }
log = "0.4"
env_logger = "0.11"
base64 = "0.21"

View file

@ -1,63 +1,20 @@
# Instagram Pixelfed Batch Upload
![Tool illustration](assets/pixelfed-bot_illustration_small.png "Little Robot describing the pictures for you.")
This program takes a folder, iterates over the images and creates Pixelfed postings with a specified batch size.
There are 4 different options for the image description (ALT text) supported now:
1. ChatGPT/OpenAI mode: generating the image description using the OpenAI API and respective model. You'll need to provide your OpenAI API access key and the model you want to use.
2. Local/Ollama mode: generate the image description using a local/own installation of Ollama. You'll have to configure the base URL of your ollama installation, an (optional) access key and the model to be used.
3. File mode: reading the image description from a text file with the same name as the image + a configurable extension.
4. No description: don't generate an image description, just batch upload the pictures.
The description of the post can be given via the `config.json`.
This program takes a folder and iterates over the images and creates Pixelfed postings with a specified batch size.
The description of the post can be given via the config.json.
Two variables can be given in the post description (see the `config.json.example`).
```
Usage: pixelfed_batch_uploader [OPTIONS] --title <TITLE> --image-path <IMAGE_PATH>
Options:
-m, --mode <MODE>
Image description mode
[default: file]
Possible values:
- chat-gpt: Use ChatGTP
- file: Taking from a file
- local: Local LLM
- none
-t, --title <TITLE>
The title of the posting
-i, --image-path <IMAGE_PATH>
The path to the file to read
-c, --config <FILE>
Sets a custom config file
-v, --visibility <private>
-h, --help
Print help (see a summary with '-h')
-V, --version
Print version
```
Example: `./pixelfed_batch_uploader -i ../../Downloads/Instagram-Backup/media/posts/201406 --title "June 2014" -m local`
Usage: `./pixelfed_batch_uploader ../../Downloads/Instagram-Backup/media/posts/201406 --title "June 2014"`
The `config.json` must be in the same directory the program is called from (`$PWD`)
[![asciicast](https://asciinema.mxhdr.net/a/6.svg)](https://asciinema.mxhdr.net/a/6)
Check the [package of this repo](https://repos.mxhdr.net/maxheadroom/insta-import-pixelfed/packages) to get pre-compiled binaries for macOS (Apple Silicon), Linux x86_64, Windows ARM
## OpenAI Integration for Image Description
Added OpenAI integration to generate image descriptions and put them into the ALT text for each image. If an `openai_api_key` is present in the `config.json` then the Image description is fetched from the OpenAI API.

Binary file not shown.

Before

(image error) Size: 5.4 MiB

Binary file not shown.

Before

(image error) Size: 694 KiB

View file

@ -1,15 +1,12 @@
{
"pixelfed_url": "https://pxl.mxhdr.net",
"pixelfed_access_token": "longstringofpersonalaccesstokenfromyourpixelfedaccount",
"pixelfed_visibility": "public",
"pixelfed_batch_size": 20,
"pixelfed_default_text": "Instagram dump from @title@ @batch@ #instabackup #instaimport #instaexit",
"openai_api_key": "youropenapiaccesskey",
"pixelfed_url": "https://pixelfed.example.com",
// See https://docs.pixelfed.org/running-pixelfed/installation.html#setting-up-services
"access_token": "sdg;lkjrglksjh;lkshj;lksjthrst;hoijrt;ihj;sithj;itjh",
"visibility": "unlisted",
"batch_size": 10,
"default_text": "Instagram dump from @title@ @batch@ #instabackup #instaimport #instaexit",
// https://help.openai.com/en/articles/4936850-where-do-i-find-my-openai-api-key
"openai_api_key": "0bff275feca7baab5ac508e635543f59fff42d4436c9918cd37c330f9adb4eb4fda643c212794b800bb05fb26016f55425c6755a3525c64792197e4d0fbe95d5",
"openai_api_url": "https://api.openai.com/v1/chat/completions",
"openai_model": "gpt-4o",
"ollama_api_key": "yourollamaaccesskey_mightbeoptional",
"ollama_api_url": "http://localhost:11434/api/generate",
"ollama_model": "llama3.2-vision:11b-instruct-q8_0",
"caption_extension": ".caption.txt",
"prompt": "Imagine you are posting on social media and want to provide image descriptions for people with vision disabilities. Those people might use screen readers. Thats why you want to put an ALT text for images so those people can understand what is shown in the picture. Try to detect the main topic and mood or intention of the picture. If there is visible and readable text in the image, then also provide that text. Complete the sentence: “This picture shows” and respond in plain text format with less than 5000 characters?"
"openai_model": "gpt-4o"
}

View file

@ -1,6 +1,6 @@
#!/bin/bash -x
RELEASE_VERSION="1.1.0"
RELEASE_VERSION="1.0.3"
PLATFORM=$(rustc -vV | grep host | cut -d ' ' -f2)

View file

@ -1,193 +0,0 @@
use base64::{engine::general_purpose::STANDARD, Engine as _};
use log::{debug, error, info};
use serde::Deserialize;
use serde::Serialize;
use serde_json::json;
use std::str;
use std::time::Duration;
// module to hold all code for generating/fetching image descriptions
// Input is the image name
// Output is a String containing the image description
// Settings needed to request an image description from the OpenAI API.
pub struct ChatGPTConfig {
    pub openai_api_key: String, // sent as a Bearer token in the Authorization header
    pub openai_api_url: String, // full endpoint URL, e.g. https://api.openai.com/v1/chat/completions
    pub openai_model: String,   // model identifier placed in the request payload, e.g. "gpt-4o"
    pub openai_prompt: String,  // prompt text sent together with the base64-encoded image
}
// Settings needed to request an image description from a local Ollama server.
pub struct OllamaConfig {
    // NOTE(review): never attached to the request in get_description_from_ollama
    // (only Content-Type is set) — confirm whether authentication is required.
    pub ollama_api_key: String,
    pub ollama_api_url: String, // endpoint URL, e.g. http://localhost:11434/api/generate
    pub ollama_model: String,   // model identifier placed in the request payload
    pub ollama_prompt: String,  // prompt text sent together with the base64-encoded image
}
// Settings for reading captions from sidecar text files.
pub struct FileConfig {
    // Appended to the image file name to locate its caption file,
    // e.g. ".caption.txt" turns "IMG_1.jpg" into "IMG_1.jpg.caption.txt".
    pub caption_extension: String,
}
// Typed shape of an Ollama generate-request payload.
// NOTE(review): not referenced anywhere in this module's visible code —
// get_description_from_ollama builds its payload ad hoc with json!() instead;
// this struct looks like dead code. Confirm before removing.
#[derive(Serialize, Deserialize)]
struct LlamaModel {
    model: String,       // model identifier
    prompt: String,      // instruction text sent with the image
    stream: bool,        // streaming flag
    format: String,
    suffix: String,
    images: Vec<String>, // base64-encoded image payloads
    keep_alive: i8,
}
/// Fetch the image description from a sidecar caption file named like the
/// image plus `file_config.caption_extension` (e.g. "IMG_1.jpg.caption.txt").
///
/// Returns the caption file's contents, or an `Err` describing why the file
/// could not be read. Previously this called `std::process::exit(1)` on a
/// missing caption file, which contradicted the `Result` return type and
/// prevented callers from handling the error; it now propagates an `Err`.
pub fn get_description_from_file(
    image_name: String,
    file_config: FileConfig,
) -> Result<String, Box<dyn std::error::Error>> {
    // Caption path = image path + configured extension.
    let caption_extension = file_config.caption_extension;
    let captionname = format!("{}{}", &image_name, caption_extension);
    debug!("Looking for {}", &captionname);
    // Library code must not abort the whole process — report the failure
    // to the caller and let it decide how to proceed.
    let caption_data = match std::fs::read_to_string(&captionname) {
        Ok(caption) => caption,
        Err(e) => {
            error!("Failed to find caption file: {}", &e.to_string());
            return Err(format!("Failed to read caption file {}: {}", captionname, e).into());
        }
    };
    println!(
        "Description fetched successfully from FILE for {}",
        &image_name
    );
    Ok(caption_data)
}
/// Fetch an image description from the OpenAI chat-completions API.
///
/// Reads the image at `image_name`, base64-encodes it, and sends it with the
/// configured prompt/model to `openai_api_url`. Returns the text content of
/// the first choice in the response.
///
/// Fixes over the previous version: the `send()` result is propagated with
/// `?` instead of being `unwrap()`ed (which panicked on any transport error),
/// and a non-success HTTP status now returns a descriptive `Err` — the old
/// status-check code was commented out and dead.
pub fn get_description_from_chatgpt(
    image_name: String,
    chatgpt_config: self::ChatGPTConfig,
) -> Result<String, Box<dyn super::Error>> {
    // Read and base64-encode the image for the API payload.
    let image_data = std::fs::read(&image_name)?;
    let base64_image = STANDARD.encode(image_data);
    // Build and send the chat-completions request.
    let client = reqwest::blocking::Client::new();
    let response = client
        .post(chatgpt_config.openai_api_url)
        .header(
            "Authorization",
            format!("Bearer {}", chatgpt_config.openai_api_key),
        )
        .header("Content-Type", "application/json")
        .json(&super::json!({
            "model": chatgpt_config.openai_model,
            "max_tokens": 300,
            "messages": [
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "text",
                            "text": chatgpt_config.openai_prompt.to_string(),
                        },
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": format!("data:image/jpeg;base64,{}", base64_image)
                            }
                        }
                    ]
                }
            ]
        }))
        .send()?; // propagate transport errors instead of panicking later
    // Surface API-level failures (non-2xx) with the response body.
    if !response.status().is_success() {
        let error_text = response.text()?;
        return Err(format!("OpenAI API error: {}", error_text).into());
    }
    let result: super::Value = response.json()?;
    // Pull choices[0].message.content out of the response, with explicit
    // errors for unexpected shapes.
    let description = result["choices"].get(0).ok_or("No choices in response")?["message"]
        ["content"]
        .as_str()
        .ok_or("Invalid content format in response")?
        .to_string();
    println!(
        "Description generated successfully from ChatGPT for {}",
        &image_name
    );
    Ok(description)
}
/// Fetch an image description from a self-hosted Ollama server.
///
/// Reads the image at `image_name`, base64-encodes it, and posts it with the
/// configured prompt/model to `ollama_api_url` (non-streaming). Returns the
/// "response" field of the reply, or the "error" field / a fallback string
/// when no response text is present.
pub fn get_description_from_ollama(
    image_name: String,
    ollama_config: OllamaConfig,
) -> Result<String, Box<dyn super::Error>> {
    // Read and base64-encode the image for the Ollama API payload.
    let image_data = std::fs::read(&image_name)?;
    let base64_image = STANDARD.encode(image_data);
    // Build the generate-request payload; "stream": false asks for a single
    // JSON reply rather than a stream of chunks.
    let payload = json!({
        "model": ollama_config.ollama_model.to_string(),
        "prompt": ollama_config.ollama_prompt.to_string(),
        "stream": false,
        "images": [base64_image]
    });
    debug!("Payload for image OLLAMA API: \n{}", payload.clone());
    // Generous timeouts: local vision models can take minutes per image.
    // NOTE(review): ollama_config.ollama_api_key is never used here — confirm
    // whether the server requires authentication.
    let client = reqwest::blocking::ClientBuilder::new()
        .connect_timeout(Duration::new(30, 0))
        .timeout(Duration::new(300, 0))
        .connection_verbose(true)
        .build()?;
    let response = client
        .post(ollama_config.ollama_api_url)
        .header("Content-Type", "application/json")
        .json(&payload)
        .send();
    let response = match response {
        Ok(response) => {
            info!("success!");
            response
        }
        Err(e) => {
            error!("Failed to send request to OLLAMA: {}", e);
            return Err(Box::from(e));
        }
    };
    // Extract the response text; fall back to the error field, then to a
    // fixed message. The replace() calls turn literal "\n" / "\"" escape
    // sequences in the model output into real newlines and quotes.
    let result: super::Value = response.json()?;
    let description = if let Some(response_text) = result["response"].as_str() {
        response_text.replace("\\n", "\n").replace("\\\"", "\"")
    } else if let Some(error_text) = result["error"].as_str() {
        error_text.replace("\\n", "\n").replace("\\\"", "\"")
    } else {
        "Could not find response or error from OLLAMA".to_string()
    };
    info!("Description generated by OLLAMA: {}", &description);
    println!(
        "Description generated successfully from OLLAMA for {}",
        &image_name
    );
    Ok(description)
}

View file

@ -1,82 +1,30 @@
use clap::{Parser, ValueEnum};
use log::info;
use std::fs;
use serde::Deserialize;
use serde_json::{json, Value};
use reqwest::blocking::Client;
use reqwest::{self};
use std::error::Error;
use std::fs;
use std::fs::File;
use std::io::BufReader;
use std::path::PathBuf;
pub mod image_description;
mod pixelfed;
#[derive(Parser)]
#[command(name = "Pixelfed Image Bulk Uploader")]
#[command(version = "1.0")]
#[command(about = "Bulk uploads images to Pixelfed with image descriptions", long_about = None)]
#[command(version, about, long_about = None)]
struct Cli {
/// Image description mode
#[arg(short, long, default_value = "file")]
mode: Mode,
/// The title of the posting
#[arg(short, long)]
title: String,
/// The path to the file to read
#[arg(short, long)]
image_path: String,
/// Sets a custom config file
#[arg(short, long, value_name = "FILE")]
config: Option<String>,
#[arg(short, long, value_name = "private")]
visibility: Option<String>,
}
/// Source of the per-image ALT-text description.
/// The `///` doc comments double as the `--mode` help text rendered by clap.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, ValueEnum)]
enum Mode {
    /// Use ChatGPT
    ChatGPT,
    /// Taking from a file
    File,
    /// Local LLM
    Local,
    /// No image description
    None,
}
use base64::{Engine as _, engine::general_purpose::STANDARD};
#[derive(Debug, Deserialize)]
struct Config {
pixelfed_url: String,
pixelfed_access_token: String,
pixelfed_visibility: String, // Should be "unlisted"
pixelfed_default_text: String,
pixelfed_batch_size: usize,
access_token: String,
visibility: String, // Should be "unlisted"
default_text: String,
batch_size: usize,
openai_api_key: String,
openai_api_url: String,
openai_model: String,
ollama_api_key: String,
ollama_api_url: String,
ollama_model: String,
caption_extension: String,
prompt: String,
openai_model: String
}
fn load_config(config_file: String) -> Result<Config, Box<dyn Error>> {
//let config_str = fs::read_to_string("config.json")?;
// Open the file in read-only mode with buffer.
let file = File::open(PathBuf::from(config_file))?;
let reader = BufReader::new(file);
// Read the JSON contents of the file as an instance of `User`.
let config: Config = serde_json::from_reader(reader)?;
fn load_config() -> Result<Config, Box<dyn Error>> {
let config_str = fs::read_to_string("config.json")?;
let config: Config = serde_json::from_str(&config_str)?;
Ok(config)
}
// get all the JPEG files from the give directory
fn get_jpeg_files(directory: &str) -> Vec<String> {
let mut images = Vec::new();
if let Ok(entries) = fs::read_dir(directory) {
@ -92,62 +40,149 @@ fn get_jpeg_files(directory: &str) -> Vec<String> {
images
}
fn main() -> Result<(), Box<dyn Error>> {
env_logger::init();
async fn get_image_description(config: &Config, image_path: &String) -> Result<String, Box<dyn Error>> {
// Read and encode image
let image_data = tokio::fs::read(image_path)
.await
.map_err(|e| format!("Failed to read image file: {}", e))?;
let base64_image = STANDARD.encode(&image_data);
let args = Cli::parse();
// Create ChatGPT API request
let client = reqwest::Client::new();
let response = client
.post(&config.openai_api_url)
.header("Authorization", format!("Bearer {}", config.openai_api_key))
.header("Content-Type", "application/json")
.json(&json!({
"model": config.openai_model,
"max_tokens": 300,
"messages": [
{
"role": "user",
"content": [
{
"type": "text",
"text": "Please describe this image concisely for use as an alt text description. Focus on key visual elements and context."
},
{
"type": "image_url",
"image_url": {
"url": format!("data:image/jpeg;base64,{}", base64_image)
}
}
]
}
]
}))
.send()
.await?;
//if args.len() < 2 || args.len() > 3 {
// eprintln!("Usage: {} <directory> [--title <title>]", args[0]);
// eprintln!("Usage: {} <directory> -ready [--title <title>]", args[0]);
// std::process::exit(1);
//}
let title = args.title;
let my_config: String;
match args.config {
Some(configstring) => my_config = configstring,
None => my_config = "config.json".to_string(),
}
info!("effective config file: {}", &my_config);
let mut config = load_config(my_config).unwrap();
info!("Config OK?: ");
// Overwrite config values with command line arguments
match args.visibility {
Some(visibility) => config.pixelfed_visibility = visibility,
None => {}
// Improved error handling for API response
if !response.status().is_success() {
let error_text = response.text().await?;
return Err(format!("OpenAI API error: {}", error_text).into());
}
// get list of all the images in the gives path
let images = get_jpeg_files(&args.image_path);
info!("Images empty? {}", &images.is_empty().to_string());
let result: Value = response.json().await?;
// More detailed error handling for JSON parsing
let description = result["choices"]
.get(0)
.ok_or("No choices in response")?
["message"]["content"]
.as_str()
.ok_or("Invalid content format in response")?
.to_string();
// knowing now the total number of images, calculate the number of batches
let total_batches =
(images.len() + config.pixelfed_batch_size - 1) / config.pixelfed_batch_size;
println!(
"Found a total of {} images to upload. Will take {} batches",
&images.len(),
&total_batches
);
Ok(description)
}
// now iterate over all images in batches of batch_size
for (i, chunk) in images.chunks(config.pixelfed_batch_size).enumerate() {
info!("{}", i.clone());
match pixelfed::bulk_upload_images(&config, chunk, i + 1, total_batches, &title, &args.mode)
{
Ok(_) => {
println!("Upload of batch {} suceeded", &i + 1);
}
Err(e) => {
println!("Upload of batch {} failed: {}", &i + 1, e.to_string());
}
};
/// Expand the `@batch@` and `@title@` placeholders of the configured post
/// template. `@batch@` becomes "Batch <batch_num> out of <total_batches>".
fn format_post_text(template: &str, batch_num: usize, total_batches: usize, title: &str) -> String {
    let progress = format!("Batch {} out of {}", batch_num, total_batches);
    let mut text = template.replace("@batch@", &progress);
    text = text.replace("@title@", title);
    text
}
// Upload one batch of images to Pixelfed and publish them as a single status.
//
// For each image: when `openai_api_key` is non-empty in the config, an
// ALT-text description is fetched via get_image_description(); the image is
// then uploaded to POST /api/v1/media, and finally one status referencing all
// collected media ids is created via POST /api/v1/statuses.
//
// NOTE(review): this is an async fn but the uploads use the *blocking*
// reqwest client (`Client` / `reqwest::blocking::multipart`), while the
// description fetch is async — mixing blocking HTTP into a tokio task is
// suspect; confirm this cannot stall or panic the runtime.
async fn upload_images_batch(client: &Client, config: &Config, images: &[String], batch_num: usize, total_batches: usize, title: &str) -> Result<(), Box<dyn Error>> {
    let url = format!("{}/api/v1/media", config.pixelfed_url);
    let mut media_ids = Vec::new();          // media ids Pixelfed assigned to the uploads
    let mut media_descriptions = Vec::new(); // ALT texts, pushed only when a description was fetched
    for image_path in images {
        println!("Fetching image description from OpenAI for {}", image_path.to_string());
        let image_description: String;
        // Descriptions are optional: only fetched when an OpenAI key is configured.
        if !config.openai_api_key.is_empty() {
            image_description = get_image_description(&config, &image_path).await?;
            println!("DESC:\n {} \n", &image_description);
            media_descriptions.push(image_description.clone());
        } else {
            image_description = String::new();
        }
        println!("Uploading image {}", image_path.to_string());
        // Multipart form: description goes into the media "description" field,
        // the image itself into "file".
        let form = reqwest::blocking::multipart::Form::new().text("description", image_description.clone())
            .file("file", image_path)?;
        let res = client.post(&url)
            .bearer_auth(&config.access_token)
            .multipart(form)
            .send()?;
        // Remember the assigned media id; responses without an "id" are
        // silently skipped (no error is raised for a failed upload here).
        let json: serde_json::Value = res.json()?;
        if let Some(id) = json["id"].as_str() {
            media_ids.push(id.to_string());
        }
    }
    // Create the status only when at least one upload produced a media id.
    if !media_ids.is_empty() {
        let post_url = format!("{}/api/v1/statuses", config.pixelfed_url);
        let post_text = format_post_text(&config.default_text, batch_num, total_batches, title);
        let body = serde_json::json!({
            "status": post_text,
            "media_ids": media_ids,
            "alt_texts": media_descriptions,
            "visibility": config.visibility,
        });
        println!("Posting batch {} out of {} with media {}", batch_num, total_batches, media_ids.len());
        client.post(&post_url)
            .bearer_auth(&config.access_token)
            .json(&body)
            .send()?;
    }
    Ok(())
}
/// Entry point: parse the target directory and optional `--title` flag from
/// the raw argument list, load `config.json`, then upload all JPEG files in
/// the directory to Pixelfed in batches of `config.batch_size`.
#[tokio::main]
async fn main() -> Result<(), Box<dyn Error>> {
    let args: Vec<String> = std::env::args().collect();
    // The directory argument is mandatory.
    if args.len() < 2 {
        eprintln!("Usage: {} <directory> [--title <title>]", args[0]);
        std::process::exit(1);
    }
    // Optional `--title <title>` flag: take the token following the flag,
    // defaulting to an empty title when absent or dangling.
    let title = args
        .iter()
        .position(|a| a == "--title")
        .and_then(|i| args.get(i + 1))
        .cloned()
        .unwrap_or_default();
    let config = load_config()?;
    let images = get_jpeg_files(&args[1]);
    let client = Client::new();
    // Ceiling division: number of batches needed for all images.
    let total_batches = (images.len() + config.batch_size - 1) / config.batch_size;
    println!("Found a total of {} images to upload. Will take {} batches", images.len(), total_batches);
    for (batch_index, chunk) in images.chunks(config.batch_size).enumerate() {
        upload_images_batch(&client, &config, chunk, batch_index + 1, total_batches, &title).await?;
    }
    println!("All images uploaded successfully.");
    Ok(())
}

View file

@ -1,257 +0,0 @@
use log::{debug, error, info};
use reqwest::{self};
use std::error::Error;
use std::time::Duration;
// Pixelfed-specific subset of the global configuration, copied out of
// `super::Config` by bulk_upload_images so the upload code only carries
// the fields it actually needs.
struct PixelfedConfig {
    pixelfed_url: String,          // base URL of the Pixelfed instance
    pixelfed_access_token: String, // bearer token used for the /api/v1 calls
    pixelfed_visibility: String,   // Should be "unlisted"
    pixelfed_default_text: String, // post-text template with @title@/@batch@ placeholders
}
/// Expand the `@batch@` and `@title@` placeholders of a post-text template.
/// `@batch@` becomes "Batch <batch_num> out of <total_batches>".
fn format_post_text(template: &str, batch_num: usize, total_batches: usize, title: &str) -> String {
    let progress = format!("Batch {} out of {}", batch_num, total_batches);
    let expanded = template.replace("@batch@", &progress);
    expanded.replace("@title@", title)
}
// Upload a batch of images to Pixelfed and publish them as a single status.
//
// For every image: obtain an ALT-text description according to `caption_mode`
// (ChatGPT, sidecar file, local Ollama, or none), then upload the image via
// POST /api/v1/media. Once all images are uploaded, one status referencing the
// collected media ids is created via POST /api/v1/statuses.
//
// `batch_num`, `total_batches` and `title` only feed the @batch@/@title@
// placeholders of the configured post text. Any failed description fetch or
// media upload aborts the whole batch with an Err; a failed status post is
// only logged (see the note near the end).
pub fn bulk_upload_images(
    config: &super::Config,
    images: &[String],
    batch_num: usize,
    total_batches: usize,
    title: &str,
    caption_mode: &super::Mode,
) -> Result<(), Box<dyn Error>> {
    // ids returned by Pixelfed for each uploaded image, attached to the status
    let mut media_ids = Vec::new();
    // one ALT text per image, posted alongside the media ids
    let mut media_descriptions = Vec::new();
    // generate our Pixelfed specific Configuration from the given global config
    let pxl_config = PixelfedConfig {
        pixelfed_url: config.pixelfed_url.clone(),
        pixelfed_access_token: config.pixelfed_access_token.clone(),
        pixelfed_visibility: config.pixelfed_visibility.clone(),
        pixelfed_default_text: config.pixelfed_default_text.clone(),
    };
    // Blocking client with generous timeouts: image uploads can be large/slow.
    let client = match reqwest::blocking::ClientBuilder::new()
        .connect_timeout(Duration::new(60, 0))
        .timeout(Duration::new(300, 0))
        .connection_verbose(true)
        .build()
    {
        Ok(client) => client,
        Err(e) => {
            error!("Failed to build HTTP client: {}", e);
            return Err(Box::from(e));
        }
    };
    // construct the full URL for the Pixelfed media upload
    let url = format!("{}/api/v1/media", pxl_config.pixelfed_url.clone());
    // NOTE(review): i8 overflows (panics in debug builds) past 127 images in a
    // single batch — confirm batch sizes stay small or widen the type.
    let mut image_counter: i8 = 0;
    // Iterate over all the images we were given
    for image_path in images {
        let description: String;
        image_counter = image_counter + 1;
        debug!(
            "Handling image #{} at {}",
            &image_counter,
            &image_path.to_string()
        );
        // get image description depending on the caption_mode
        match caption_mode {
            super::Mode::ChatGPT => {
                let im_config = super::image_description::ChatGPTConfig {
                    openai_model: config.openai_model.clone(),
                    openai_api_key: config.openai_api_key.clone(),
                    openai_api_url: config.openai_api_url.clone(),
                    openai_prompt: config.prompt.clone(),
                };
                info!(
                    "Fetching image description from ChatGPT for {}",
                    &image_path.to_string()
                );
                description = match super::image_description::get_description_from_chatgpt(
                    image_path.clone().to_string(),
                    im_config,
                ) {
                    Ok(description) => description,
                    Err(e) => {
                        error!("Failed to fetch image description from ChatGPT for {}", e);
                        return Err(Box::from(e));
                    }
                };
                media_descriptions.push(description.clone());
            }
            super::Mode::File => {
                let im_config = super::image_description::FileConfig {
                    caption_extension: config.caption_extension.clone(),
                };
                info!(
                    "Fetching image description from File for {}",
                    &image_path.to_string()
                );
                description = match super::image_description::get_description_from_file(
                    image_path.clone().to_string(),
                    im_config,
                ) {
                    Ok(description) => description,
                    Err(e) => {
                        error!("Failed to fetch image description from File for {}", e);
                        return Err(Box::from(e));
                    }
                };
                // NOTE(review): log text says "ChatGPT" but this is the File
                // branch — looks like a copy/paste slip in the message.
                info!("Description generated by ChatGPT: {}", &description.clone());
                media_descriptions.push(description.clone());
            }
            super::Mode::Local => {
                let im_config = super::image_description::OllamaConfig {
                    ollama_api_key: config.ollama_api_key.clone(),
                    ollama_api_url: config.ollama_api_url.clone(),
                    ollama_model: config.ollama_model.clone(),
                    ollama_prompt: config.prompt.clone(),
                };
                info!(
                    "Fetching image description from OLLAMA for {}",
                    &image_path.to_string()
                );
                description = match super::image_description::get_description_from_ollama(
                    image_path.clone().to_string(),
                    im_config,
                ) {
                    Ok(description) => description,
                    Err(e) => {
                        error!("Failed to fetch image description from OLLAMA for {}", e);
                        return Err(Box::from(e));
                    }
                };
                info!("Description generated by OLLAMA: {}", &description.clone());
                media_descriptions.push(description.clone());
            }
            super::Mode::None => {
                // No image description wanted: upload with an empty ALT text
                description = "".to_string();
                media_descriptions.push(description.clone())
            }
        }
        println!(
            "Uploading image #{} from {} to Pixelfed",
            &image_counter,
            &image_path.to_string()
        );
        // construct the upload form for Pixelfed Upload of a single image including image description
        let form = match reqwest::blocking::multipart::Form::new()
            .text("description", description.clone())
            .file("file", image_path)
        {
            Ok(f) => f,
            Err(e) => {
                error!("Failed to construct multipart form for Pixelfed: {}", e);
                return Err(Box::from(e));
            }
        };
        // upload the form to Pixelfed
        let res = match client
            .post(&url)
            .bearer_auth(&pxl_config.pixelfed_access_token)
            .multipart(form)
            .send()
        {
            Ok(result) => result,
            Err(e) => {
                error!("Failed to send request to Pixelfed: {}", e);
                return Err(Box::from(e));
            }
        };
        let status = res.status();
        // Check if the response status indicates success
        if status.is_success() {
            // Parse the success response and remember the assigned media id
            let success_response: serde_json::Value = match res.json() {
                Ok(response_json) => response_json,
                Err(e) => {
                    error!(
                        "Could not decode the Pixelfed response JSON: {}",
                        e.to_string()
                    );
                    return Err(Box::from(e));
                }
            };
            let image_id: serde_json::Value = success_response["id"].clone();
            media_ids.push(image_id);
        } else {
            // Non-2xx: surface the raw response body as the error
            let error_text = res.text()?;
            return Err(Box::from(format!(
                "Failed to upload image to Pixelfed: {}",
                error_text
            )));
        }
    }
    println!("Done uploading all images: {}", media_ids.len());
    // Only create the status when at least one media upload succeeded.
    if !media_ids.is_empty() {
        let post_url = format!("{}/api/v1/statuses", pxl_config.pixelfed_url);
        let post_text = format_post_text(
            &pxl_config.pixelfed_default_text,
            batch_num,
            total_batches,
            title,
        );
        let body = serde_json::json!({
            "status": post_text.to_string(),
            "media_ids": media_ids,
            "alt_texts": media_descriptions,
            "visibility": pxl_config.pixelfed_visibility.to_string(),
        });
        debug!("Body: \n{}", &body.to_string());
        info!("MediaIDs: {}", &media_ids.len());
        info!("Alt_texts: {}", &media_descriptions.len());
        println!(
            "Posting batch {} out of {} with media {}",
            batch_num,
            total_batches,
            media_ids.len()
        );
        let res = client
            .post(&post_url)
            .bearer_auth(&pxl_config.pixelfed_access_token)
            .json(&body)
            .send();
        match res {
            Ok(response) => {
                if response.status().is_success() {
                    info!("Post on Pixelfed created successfully!");
                } else {
                    // A failed status post is logged but NOT returned as an
                    // error: the uploaded media stay behind as orphans.
                    error!(
                        "Failed to create post on Pixelfed. Status: {:?}",
                        response
                            .text()
                            .unwrap_or_else(|_| "Unknown error from Pixelfed".to_string())
                    )
                }
            }
            Err(e) => {
                error!("Failed to send request to Pixelfed: {}", e);
                return Err(Box::from(format!(
                    "Failed to upload image to Pixelfed: {}",
                    e
                )));
            }
        }
    }
    Ok(())
}