# python-pipeline

From [jamditis/claude-skills-journalism](https://github.com/jamditis/claude-skills-journalism): Claude Code skills for journalism, media, and academia - verification, FOIA, data journalism, academic writing, and more.
Install the skill:

```sh
npx skills add https://github.com/jamditis/claude-skills-journalism --skill python-pipeline
```
## Python data pipeline development
Patterns for building production-quality data processing pipelines with Python.
### Architecture patterns

#### Modular processor architecture

```
src/
├── workflow.py               # Main orchestrator
├── dispatcher.py             # Content-type router
├── processors/
│   ├── __init__.py
│   ├── base.py               # Abstract base class
│   ├── article_processor.py
│   ├── video_processor.py
│   └── audio_processor.py
├── services/
│   ├── sheets_service.py     # Google Sheets integration
│   ├── drive_service.py      # Google Drive integration
│   └── ai_service.py         # Gemini API wrapper
├── utils/
│   ├── logger.py
│   └── rate_limiter.py
└── config.py                 # Environment configuration
```
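The tree references an abstract base class in `processors/base.py`. A minimal sketch of what it might contain (the `BaseProcessor` name and its method set are assumptions, mirroring the `Processor` protocol shown below):

```python
# processors/base.py - illustrative sketch; names are assumptions
from abc import ABC, abstractmethod


class BaseProcessor(ABC):
    """Common interface every content processor implements."""

    @abstractmethod
    def can_process(self, url: str) -> bool:
        """Return True if this processor handles the given URL."""

    @abstractmethod
    def process(self, url: str, metadata: dict) -> dict:
        """Fetch and transform the content, returning extracted fields."""
```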
#### Dispatcher pattern

```python
from typing import Protocol
from urllib.parse import urlparse


class Processor(Protocol):
    def can_process(self, url: str) -> bool: ...
    def process(self, url: str, metadata: dict) -> dict: ...


class Dispatcher:
    def __init__(self):
        self.processors: list[Processor] = [
            ArticleProcessor(),
            VideoProcessor(),
            AudioProcessor(),
            SocialProcessor(),
        ]

    def dispatch(self, url: str, metadata: dict) -> dict:
        for processor in self.processors:
            if processor.can_process(url):
                return processor.process(url, metadata)
        raise ValueError(f"No processor found for URL: {url}")


# Pattern-based routing
class ArticleProcessor:
    DOMAINS = ['nytimes.com', 'washingtonpost.com', 'medium.com']

    def can_process(self, url: str) -> bool:
        domain = urlparse(url).netloc.replace('www.', '')
        return any(d in domain for d in self.DOMAINS)
```
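Registration order matters: `dispatch` returns on the first processor whose `can_process` succeeds, so list specific processors before generic fallbacks. A hypothetical call site (the URL and metadata values are illustrative):

```python
# Hypothetical usage: route a single URL to the matching processor
dispatcher = Dispatcher()
result = dispatcher.dispatch(
    'https://www.nytimes.com/example-article.html',
    metadata={'id': 'rec-001'},
)
print(result.get('title'))
```

Because `Processor` is a `typing.Protocol`, the concrete processors never need to inherit from it; anything with matching `can_process` and `process` methods type-checks.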
### CSV-based pipeline workflow

```python
import csv
from pathlib import Path
from dataclasses import dataclass, asdict
from typing import Iterator


@dataclass
class Record:
    id: str
    url: str
    title: str | None = None
    content: str | None = None
    status: str = 'pending'


def read_input(path: Path) -> Iterator[Record]:
    with open(path, 'r', encoding='utf-8') as f:
        reader = csv.DictReader(f)
        for row in reader:
            yield Record(**{k: v for k, v in row.items() if k in Record.__annotations__})


def write_output(records: list[Record], path: Path):
    with open(path, 'w', encoding='utf-8', newline='') as f:
        writer = csv.DictWriter(f, fieldnames=list(Record.__annotations__.keys()))
        writer.writeheader()
        writer.writerows(asdict(r) for r in records)


def process_batch(input_path: Path, output_path: Path):
    dispatcher = Dispatcher()
    results = []
    for record in read_input(input_path):
        try:
            processed = dispatcher.dispatch(record.url, asdict(record))
            record.status = 'completed'
            record.title = processed.get('title')
            record.content = processed.get('content')
        except Exception as e:
            record.status = f'failed: {e}'
        results.append(record)
    write_output(results, output_path)
```
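Because every `Record` carries a `status` field, the output CSV doubles as a checkpoint: a later run can read the previous output and skip rows already marked `completed`. A hypothetical invocation (the file paths are placeholders):

```python
# Hypothetical usage: process one batch, writing statuses alongside results
from pathlib import Path

process_batch(Path('data/input.csv'), Path('data/output.csv'))
```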
### Google Sheets integration

```python
import gspread
from google.oauth2.service_account import Credentials

SCOPES = [
    'https://www.googleapis.com/auth/spreadsheets',
    'https://www.googleapis.com/auth/drive',
]


class SheetsService:
    def __init__(self, credentials_path: str):
        creds = Credentials.from_service_account_file(credentials_path, scopes=SCOPES)
        self.client = gspread.authorize(creds)

    def get_worksheet(self, spreadsheet_id: str, sheet_name: str):
        spreadsheet = self.client.open_by_key(spreadsheet_id)
        return spreadsheet.worksheet(sheet_name)

    def read_all(self, worksheet) -> list[dict]:
        return worksheet.get_all_records()

    def append_row(self, worksheet, row: list):
        worksheet.append_row(row, value_input_option='USER_ENTERED')

    def batch_update(self, worksheet, updates: list[dict]):
        """Update multiple cells in one API call.

        Format: [{'range': 'A1', 'values': [[value]]}]
        """
        worksheet.batch_update(updates, value_input_option='USER_ENTERED')

    def find_row_by_id(self, worksheet, id_value: str, id_column: int = 1) -> int | None:
        """Find the row number holding an ID value, or None if absent."""
        try:
            cell = worksheet.find(id_value, in_column=id_column)
        except gspread.exceptions.CellNotFound:  # raised by gspread < 6
            return None
        return cell.row if cell is not None else None  # gspread >= 6 returns None
```
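Batching matters because the Sheets API enforces per-minute quotas: a single `batch_update` call replaces one write per cell. A hypothetical status-update pass (the credentials file, spreadsheet ID, sheet name, and status column are placeholders):

```python
# Hypothetical usage: mark several records as completed in one API call
service = SheetsService('service_account.json')
ws = service.get_worksheet('SPREADSHEET_ID', 'records')

rows = [service.find_row_by_id(ws, rid) for rid in ('rec-001', 'rec-002')]
updates = [
    {'range': f'E{row}', 'values': [['completed']]}  # column E holds status
    for row in rows
    if row is not None
]
service.batch_update(ws, updates)
```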
### Rate limiting

```python
import time

import requests
from ratelimit import limits, sleep_and_retry


# Simple rate limiter
@sleep_and_retry
@limits(calls=10, period=60)  # 10 calls per minute
def rate_limited_api_call(url: str):
    return requests.get(url)


# Custom rate limiter with backoff
class RateLimiter:
    def __init__(self, calls_per_minute: int = 10):
        self.delay = 60 / calls_per_minute
        self.last_call = 0.0

    def wait(self):
        """Block just long enough to stay at the configured rate."""
        elapsed = time.time() - self.last_call
        if elapsed < self.delay:
            time.sleep(self.delay - elapsed)
        self.last_call = time.time()

    def backoff(self, attempt: int, base: float = 2.0, cap: float = 60.0):
        """Sleep with capped exponential backoff after failed attempt 0, 1, 2, ..."""
        time.sleep(min(cap, base ** attempt))
```
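The `backoff` helper above is sketched as standard capped exponential backoff; a typical call site pairs it with `wait()`, rate-limiting before every request and backing off between retries after a failure (the `fetch_url` wrapper and retry count are hypothetical):

```python
# Hypothetical usage: rate-limited fetch with retry and backoff
limiter = RateLimiter(calls_per_minute=10)


def fetch_url(url: str, max_retries: int = 3) -> requests.Response:
    for attempt in range(max_retries):
        limiter.wait()
        try:
            response = requests.get(url, timeout=30)
            response.raise_for_status()
            return response
        except requests.RequestException:
            if attempt == max_retries - 1:
                raise
            limiter.backoff(attempt)
```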