Compare commits
38 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| ca0cb3db20 | |||
| e61a7839f4 | |||
| 53705128d5 | |||
| fdba772b88 | |||
| 3bcecdd7b5 | |||
| 82e01341c9 | |||
| 18af137472 | |||
| 259c73634f | |||
| eb0c6c35b5 | |||
| 31901b10ce | |||
| 25851414fe | |||
| 01cbef4604 | |||
| c4321633f5 | |||
| b0e2cd52d8 | |||
|
|
0a5054d79b | ||
| c9e223da40 | |||
| e02a0af16f | |||
| 365d86051d | |||
| 15e32c49d2 | |||
| 2fdff679f5 | |||
| 87b70ad855 | |||
| 7fad7c319a | |||
| db29743a9c | |||
| 1f66392a2b | |||
| c3eb9e3064 | |||
| b09b638dca | |||
| a5782a3357 | |||
| 0caecb2369 | |||
| de78bb34ba | |||
| f66fd4f745 | |||
| 43258686e8 | |||
| 52a727af42 | |||
| 859ec57f70 | |||
| b0e3126cbf | |||
| 1c9cd3bd7e | |||
| 8f386c5b5a | |||
| a828238c58 | |||
| 73fe8534ca |
31
.gitignore
vendored
Normal file
@@ -0,0 +1,31 @@
|
||||
# Build output
|
||||
public/
|
||||
resources/_gen/
|
||||
|
||||
# Hugo lock file
|
||||
.hugo_build.lock
|
||||
|
||||
# Editor/IDE
|
||||
.vscode/
|
||||
.idea/
|
||||
*.swp
|
||||
*.swo
|
||||
*~
|
||||
|
||||
# OS files
|
||||
.DS_Store
|
||||
Thumbs.db
|
||||
|
||||
# Environment/secrets
|
||||
.env
|
||||
*.env
|
||||
|
||||
# Python
|
||||
__pycache__/
|
||||
*.pyc
|
||||
.venv/
|
||||
venv/
|
||||
scripts/config.py
|
||||
|
||||
# Claude Code
|
||||
.claude/
|
||||
3
.gitmodules
vendored
@@ -1,3 +1,6 @@
|
||||
[submodule "themes/hugo.386"]
|
||||
path = themes/hugo.386
|
||||
url = https://gitlab.com/jmfergeau/hugo.386
|
||||
[submodule "themes/poison"]
|
||||
path = themes/poison
|
||||
url = https://github.com/lukeorth/poison.git
|
||||
|
||||
39
README.md
Normal file
@@ -0,0 +1,39 @@
|
||||
# marcus-web
|
||||
|
||||
Personal blog powered by Hugo. Many references to I Saw The TV Glow; it's a fantastic movie.
|
||||
|
||||
## Clone
|
||||
|
||||
```bash
|
||||
git clone ssh://git@git.sdf.org/mnw/marcus-web.git
|
||||
cd marcus-web
|
||||
```
|
||||
|
||||
## Setup Scripts
|
||||
|
||||
```bash
|
||||
./scripts/setup.sh
|
||||
source .venv/bin/activate
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
See `scripts/execution-notes.txt` for full details, but the short version:
|
||||
|
||||
```bash
|
||||
# Movies (Frank's Couch)
|
||||
python scripts/import_letterboxd.py
|
||||
|
||||
# Beer Calls (Luna Juice)
|
||||
python scripts/new_beercall.py
|
||||
|
||||
# Beer Events
|
||||
python scripts/new_lunajuice.py
|
||||
|
||||
# Tech Posts (Fun Center)
|
||||
python scripts/new_techpost.py
|
||||
```
|
||||
|
||||
## Build
|
||||
|
||||
Hugo builds happen on SDF, but using this repo and the scripts I can work on it locally where I am.
|
||||
41
archetypes/darkroom.md
Normal file
@@ -0,0 +1,41 @@
|
||||
---
|
||||
title: '{{ replace .Name "-" " " | title }}'
|
||||
date: {{ .Date }}
|
||||
draft: true
|
||||
series: "Found in the Darkroom"
|
||||
summary: "TODO: Add a summary for the homepage"
|
||||
# Fill in IMDB ID, then run: python scripts/fetch_movie_data.py
|
||||
imdb: ""
|
||||
# Auto-filled by fetch_movie_data.py:
|
||||
poster: ""
|
||||
year:
|
||||
runtime:
|
||||
director: ""
|
||||
genres: []
|
||||
# National Film Registry info
|
||||
nfr_year: 2024
|
||||
letterboxd_url: ""
|
||||
tags:
|
||||
- national-film-registry
|
||||
- home-video
|
||||
---
|
||||
{{< imdbposter >}}
|
||||
|
||||
| Date watched | |
|
||||
|------------------------|-----------------------|
|
||||
| TV or Computer | |
|
||||
| Watched Multiple Times | |
|
||||
| Added to NFR | {{ .Params.nfr_year }} |
|
||||
| Letterboxd Rating | |
|
||||
| Favorite Quote | |
|
||||
|
||||
{{< /imdbposter >}}
|
||||
|
||||
## Why It's in the National Film Registry
|
||||
|
||||
[Add information about why this film was selected for preservation]
|
||||
|
||||
## My Thoughts
|
||||
|
||||
This is where our review goes and we talk about the film, its historical significance, and how it holds up today.
|
||||
|
||||
@@ -1,5 +1,10 @@
|
||||
+++
|
||||
title = '{{ replace .File.ContentBaseName "-" " " | title }}'
|
||||
date = {{ .Date }}
|
||||
draft = true
|
||||
+++
|
||||
---
|
||||
title: '{{ replace .File.ContentBaseName "-" " " | title }}'
|
||||
date: {{ .Date }}
|
||||
draft: true
|
||||
series: "Series name"
|
||||
summary: "This is the custom summary"
|
||||
tags:
|
||||
- Tag A
|
||||
- Tag B
|
||||
---
|
||||
|
||||
34
archetypes/homevideo.md
Normal file
@@ -0,0 +1,34 @@
|
||||
---
|
||||
title: '{{ replace .Name "-" " " | title }}'
|
||||
date: {{ .Date }}
|
||||
draft: true
|
||||
series: "Frank's Couch"
|
||||
summary: "TODO: Add a summary for the homepage"
|
||||
# Fill in IMDB ID, then run: python scripts/fetch_movie_data.py
|
||||
imdb: ""
|
||||
# Auto-filled by fetch_movie_data.py:
|
||||
poster: ""
|
||||
year:
|
||||
runtime:
|
||||
director: ""
|
||||
genres: []
|
||||
tags:
|
||||
- home-video
|
||||
- anticipated
|
||||
- no-expectations
|
||||
---
|
||||
{{< imdbposter >}}
|
||||
|
||||
| Date watched | |
|
||||
|------------------------|-----------------------|
|
||||
| Format | |
|
||||
| Watched Multiple Times | |
|
||||
| Kept the Lights On | |
|
||||
| Will It Stick With Me? | |
|
||||
| Did You Cry? | |
|
||||
| Letterboxd Rating | |
|
||||
|
||||
{{< /imdbposter >}}
|
||||
|
||||
This is where our review goes and we talk about life and stuff and how the world works.
|
||||
|
||||
40
archetypes/movie.md
Normal file
@@ -0,0 +1,40 @@
|
||||
---
|
||||
title: '{{ replace .Name "-" " " | title }}'
|
||||
date: {{ .Date }}
|
||||
draft: true
|
||||
series: "Frank's Couch"
|
||||
summary: "TODO: Add a summary for the homepage"
|
||||
# Fill in IMDB ID, then run: python scripts/fetch_movie_data.py
|
||||
imdb: ""
|
||||
# Auto-filled by fetch_movie_data.py:
|
||||
poster: ""
|
||||
year:
|
||||
runtime:
|
||||
director: ""
|
||||
genres: []
|
||||
tags:
|
||||
- gucci
|
||||
- ghost theater
|
||||
- marcel
|
||||
- amc-south
|
||||
- amc-lakeline
|
||||
- anticipated
|
||||
- no-expectations
|
||||
- had pizza
|
||||
---
|
||||
{{< imdbposter >}}
|
||||
|
||||
| Date watched | |
|
||||
|---------------------|-----------------------|
|
||||
| Show Time | |
|
||||
| Theater | |
|
||||
| Theater Number | |
|
||||
| Pizza | |
|
||||
| Tickets | |
|
||||
| Letterboxd Rating | |
|
||||
| Crew | |
|
||||
|
||||
{{< /imdbposter >}}
|
||||
|
||||
This is where our review goes and we talk about life and stuff and how the world works.
|
||||
|
||||
58
content/posts/avatar-fire-and-ash.md
Normal file
@@ -0,0 +1,58 @@
|
||||
---
|
||||
title: Avatar Fire and Ash
|
||||
date: 2025-12-23 03:44:12+00:00
|
||||
draft: false
|
||||
series: Frank's Couch
|
||||
summary: James Cameron and the boys enjoy a hot winter evening.
|
||||
imdb: tt1757678
|
||||
tags:
|
||||
- ghost theater
|
||||
- anticipated
|
||||
poster: /images/posters/avatar-fire-and-ash.jpg
|
||||
runtime: 198
|
||||
year: 2025
|
||||
director: James Cameron
|
||||
genres:
|
||||
- Science Fiction
|
||||
- Adventure
|
||||
- Fantasy
|
||||
---
|
||||
{{< imdbposter >}}
|
||||
|
||||
| Date watched | December 20th |
|
||||
|---------------------|-----------------------|
|
||||
| Show Time | 3:30 pm |
|
||||
| Theater | Ghost |
|
||||
| Theater Number | 3 |
|
||||
| Pizza | No |
|
||||
| Tickets | Box Office |
|
||||
| Letterboxd Rating | 4 |
|
||||
| Crew | Me, Science Bro, and Coach T |
|
||||
|
||||
{{< /imdbposter >}}
|
||||
|
||||
Howdy, long time no blog. I wanted to take a second to say that I am going to try this again. We have still been going to see movies almost every week, but I have not been writing about them or really talking about them with anyone, and I think it is time to give that another shot.
|
||||
|
||||
In 2026, I am going to challenge myself to watch as many films from the National Film Registry as I can. These are movies the Library of Congress has essentially said are worth preserving. There are a lot of older films on the list, including many silent movies, though newer titles continue to be added over time. The first stretch will probably be a bit of a slog, but I will do my best to blog about them regularly.
|
||||
|
||||
I will also write more about Master Pancake shows and regular screenings, since that is largely how I experience movies, and I want to talk about that context as well.
|
||||
|
||||
This past Saturday, I had a haircut appointment. One of the funny things about living in Austin is that if you want a real barbershop haircut, you need to book ahead. Walk-ins tend to be limited to chain places like Sports Clips in a Walmart or strip mall. My appointment was at 11:30, which meant I could not make the early 9:00 or 10:00 a.m. movie showings. That worked out fine, though, because it gave Coach T and me time to go to Golden Corral.
|
||||
|
||||
Neither of us had been there in years. Since I am planning to eat less horribly in 2026, this felt like a good moment to revisit places I probably will not be going to much anymore. There was far more food than I remembered. They even have steaks cooked to order. The first one I got was surprisingly tender, almost like butter. The second was the toughest steak I have had in my adult life. I literally could not bite through it. Impressive, but not in a good way.
|
||||
|
||||
After a very large meal, it was finally time for the movie.
|
||||
|
||||
Avatar: Fire and Ash is about dealing with loss, and about how different people and cultures process grief in different ways. Early in the film, we see the Sully family mourning along separate paths. Jake copes by staying busy, clearing weapons from the bottom of the lagoon and throwing himself into physical work. Lo'ak, the surviving son, spends his time flying and doing the activities he once shared with his brother, trying to feel close to him. Neytiri mourns in a traditional, spiritual way. The younger children are clearly sad as well, though they are too young to articulate grief in the same way.
|
||||
|
||||
This creates tension within the family. Neytiri wants Jake to grieve with her in a traditional way, but he is a Marine who tends to shut down emotionally and does not seem capable of meeting her there. Lo'ak also struggles against his father's expectations, wanting freedom rather than discipline. It is a quiet but effective family dynamic.
|
||||
|
||||
Later, the film introduces the Mangkwan clan and revisits the tulkun. These are groups previously thought to be peaceful, yet both are shown capable of violence. The Mangkwan have been shaped by loss and destruction, and their anger manifests as cruelty. The tulkun, famously pacifist in earlier films, are finally pushed into action after witnessing the horror of one of their own surviving a whaling attack. The themes are there if you want to engage with them, but the film also works on a more surface level if you do not.
|
||||
|
||||
I appreciated that the movie avoids painting anyone as purely good or evil. That refusal to make the world black and white gives it more texture and realism.
|
||||
|
||||
Spider, the human boy left behind on Pandora during the early colonization period, has a substantial subplot. He is granted the ability to breathe Pandora's air, which takes up a fair amount of screen time. There is also a hint of romance between Spider and Kiri, the daughter of Grace Augustine. Kiri is again voiced by Sigourney Weaver, and this was occasionally distracting. Weaver's voice sounds much more mature than a teenager's, and at times it felt like an older consciousness inhabiting a younger body. That is a familiar trope in anime, but here it sometimes felt awkward.
|
||||
|
||||
As a bit of bonus trivia: in November 2024, James Cameron said that Avatar 4 and Avatar 5 would not proceed if Fire and Ash underperformed at the box office, adding that he would write a book instead to resolve the narrative threads left open by the third film if necessary.
|
||||
|
||||
So, is it a good movie? Yes, absolutely. It is fun, visually overwhelming in the best way, and the characters continue to grow in meaningful directions. The plot does echo The Way of Water in places, but it works well enough that it is hard to complain. This is very much a movie that benefits from being seen in a theater, with loud, rumbling bass and massive visuals. Watching it at home or on your phone might be fine, but you would be missing a big part of what makes it special.
|
||||
@@ -2,6 +2,8 @@
|
||||
title = 'Beer Call Log for 2023'
|
||||
date = 2024-09-21T02:10:28Z
|
||||
draft = false
|
||||
summary = 'A listing of all the beer call stops that I remembered to record in 2023'
|
||||
series = "Luna Juice"
|
||||
+++
|
||||
|
||||
I started keeping track of the beer calls in March of 2023 and so these are the beer calls since then.
|
||||
@@ -2,8 +2,65 @@
|
||||
title = 'Beer Call Log for 2024'
|
||||
date = 2024-09-21T02:12:42Z
|
||||
draft = false
|
||||
summary = 'A listing of the beer calls that I have remembered to write down in 2024'
|
||||
series = "Luna Juice"
|
||||
+++
|
||||
|
||||
# Draught House aka DH - October 31
|
||||
| | |
|
||||
| :------------------- | :---------------- |
|
||||
| Location | 4112 Medical Pkwy, Austin, TX 78756 |
|
||||
| Beerlist | https://www.draughthouse.com/drinks |
|
||||
| Attendees | Marcus, Steve, Stephen, Marty, Eric, Dan, Stephen's Neighbor ( I'll ask his name next call I go to ) |
|
||||
| Notes | Trophy Bitter was good enough that we drank two pitchers of it. The cask was rejected by Eric, Steve, and Stephen, but Iain was fine to drink it. I rambled a bit and was not really as mentally checked in as I'd hoped. I think my snoring has moved back into sleep blocking due to my tremendous weight gain. Warned Iain and Dan I'd be out next week. |
|
||||
|
||||
|
||||
# The ABGB (Double Dose) - October 24
|
||||
| | |
|
||||
| :------------------- | :---------------- |
|
||||
| Location | 1305 W Oltorf St, Austin, TX 78704 |
|
||||
| Beerlist | http://www.theabgb.com/ |
|
||||
| Attendees | Marcus, Steve, Stephen, Eric, Marty, Linda (Marty's wife), Linda's German relative, Dan, Francis, Bill's son-in-law, Bill |
|
||||
| Notes | It was a big group with a lot going on — almost too much — and I had trouble engaging. Stayed behind for the music again; this time Eric was there too, and we got to see and hear the Rhinestone Renegades https://www.therhinestonerenegades.com/ |
|
||||
|
||||
|
||||
# The ABGB - October 17
|
||||
| | |
|
||||
| :------------------- | :---------------- |
|
||||
| Location | 1305 W Oltorf St, Austin, TX 78704 |
|
||||
| Beerlist | http://www.theabgb.com/ |
|
||||
| Attendees | Marcus, Steve, Stephen, Eric, Marty, Dan |
|
||||
| Notes | Good beer good times. I stayed behind and watched a guy and his wife sing. Bought 2 slices of pizza too. CD is pretty good music Forrest Mccurren https://forrestmccurren.com/ |
|
||||
|
||||
|
||||
# Lazarus 2 - October 10
|
||||
| | |
|
||||
| :------------------- | :---------------- |
|
||||
| Location | 4803 Airport Blvd, Austin, TX 78751 |
|
||||
| Beerlist | https://lazarusbrewing.com/our-beer/ |
|
||||
| Attendees | Marcus, Steve, Stephen, Marty, maybe Iain (I am recording this on 2024-11-01 and my brain is fuzzy) |
|
||||
| Notes | Beer was ok. Eric continues his streak of missing out on Lazarus. |
|
||||
|
||||
|
||||
# Austin Beer Works (Sprinkle Valley) - October 3
|
||||
| | |
|
||||
| :------------------- | :---------------- |
|
||||
| Location | 10300 Springdale Rd, Austin, TX 78754 |
|
||||
| Beerlist | https://austinbeerworks.com/page/welcome-to-sprinkle-valley |
|
||||
| Attendees | Marcus, Steve, Stephen, Marty, Martin, Martin's Wife and her Parents, Dan, Francis |
|
||||
| Notes | Flavor Country, Vienna Lager, Floaty, IPA. Had a great time; it was fun to catch up with Martin. Wish I were keeping up with the news more — I only had kind of boring tech stuff to talk about. Marty got a cheese sticks order, which wasn't quite the snack we had hoped for, and the taco truck was closed — bummer. |
|
||||
|
||||
|
||||
# Pint House Pizza Burnet - September 26
|
||||
| | |
|
||||
| :------------------- | :---------------- |
|
||||
| Location | 4729 Burnet Rd, Austin, TX 78756 |
|
||||
| Beerlist | https://pinthouse.com/burnet/beer/beer-on-tap |
|
||||
| Attendees | Marcus, Steve, Stephen, Stephen's Neighbor, Eric |
|
||||
| Notes | Darker beer to start; ended up with an IPA |
|
||||
|
||||
|
||||
|
||||
# Burnet Go To - September 19
|
||||
| | |
|
||||
| :------------------- | :---------------- |
|
||||
99
content/posts/beercall/Craft-Beer-Festival-2024.md
Normal file
@@ -0,0 +1,99 @@
|
||||
+++
|
||||
title = 'Texas Craft Brewers Festival 2024'
|
||||
date = 2024-11-19T02:12:42Z
|
||||
draft = false
|
||||
summary = 'A short summary of going to the craft beer festival.'
|
||||
series = "Luna Juice"
|
||||
+++
|
||||
|
||||
On November 16th, 2024, the Texas Craft Brewers Festival took over Fiesta Gardens, running from noon to 6:15 PM. VIPs enjoyed early access at 12:00 PM, while general admission folks like me entered at 2:00 PM. With live music, numerous food trucks, and a fantastic variety of craft beers, it was an event beer enthusiasts wouldn't want to miss.
|
||||
|
||||
The wonderful beer blog [Craft Beer Austin](https://craftbeeraustin.com/tcbf2024-in-photos) captured lots of good photos since I am not yet sure how to get pictures in here properly ;).
|
||||
|
||||
**Getting There**
|
||||
|
||||
Despite a less-than-perfect experience with the bus from the Airport when I got back from a recent trip to Seattle, I decided to give public transit another try. Leaving my house around 12:10 PM, I aimed to arrive at the festival by 1:30 PM, well before the 2:00 PM start for general admission. Navigation apps suggested leaving for the bus stop by 12:30 PM, so I figured I had just enough time to grab a quick bite from the taco shack nearby.
|
||||
|
||||
Plans changed when I reached the bus stop, taco in hand, only to find two buses parked with no drivers - apparently on their lunch breaks. I made the best of it, eating my food on one of the parked buses and waiting for the driver to return. Eventually, we got moving. After a 45-minute ride, I reached my transfer point, waited 20 minutes for the next bus, and finally arrived near the festival. It was easy to spot - crowds of festival goers and lively energy made it clear I was in the right place. The line was long, but it was moving quickly, so I was in by 2:15 pm. Kudos to CapMetro for a relatively smooth trip!
|
||||
|
||||
For the return trip, I opted for a Lyft, which cost $20 plus tip and got me home without any hassle.
|
||||
|
||||
**The Festival Layout**
|
||||
|
||||
If you've been to this festival before, the setup will feel familiar, but for newcomers, here's a breakdown:
|
||||
|
||||
Each brewery occupies a tailgating-style tent, complete with uniform signage. The signs display the brewery's name, their town, and the beers they're serving, with special offerings highlighted. Breweries are lined up in two giant concentric circles around a central pavilion, which hosts live music. Big sponsors like Celis Brewery, Austin Beer Works, Saint Arnold, and Pint House Pizza occupy prime spots close to the stage, while the experimental brewers, like Jester King, were more along the periphery. ( I haven't figured out how to do photos with my posts using hugo yet. I hope I do by the next beer thing and I'll post the photos. )
|
||||
|
||||
The food court anchors one end of the festival, offering plenty of sustenance between beer samplings. Just so you know, the food trucks were:
|
||||
|
||||
- Burro Cheese Kitchen
|
||||
- Crave Hotdogs and BBQ
|
||||
- Garbo's Lobster
|
||||
- Shawarma Point
|
||||
- The Mighty Cone
|
||||
- Yapa Artisan Empanadas
|
||||
|
||||
What sets this festival apart is the sheer variety of breweries representing Texas, from big players to smaller gems. It's a reminder of the vibrant craft beer scene that extends well beyond Austin. You can check out the full details on [their site](https://tickets.texascraftbrewersfestival.org/e/texas-craft-brewers-festival-2024).
|
||||
|
||||
**Beer Sampling: My Highlights**
|
||||
|
||||
The festival operates on a ticket system: your entry includes 10 tickets for 10 beer samples, and you can buy more for $5 per set of 10. Each sample comes in a small 2–3 oz plastic cup. After each pour, rinse your cup at water stations to keep flavors distinct.
|
||||
|
||||
Here are some of the beers I tried, along with my thoughts:
|
||||
|
||||
Twin Peaks Brewing (Irving, TX): Drop Dead Red. A malty red ale that was
|
||||
enjoyable, but didn't blow me away.
|
||||
|
||||
Prost Ale House (Pflugerville): Blitzbahn German Pilsner. Crisp with a
|
||||
pleasant hop profile. Prost is celebrating their anniversary on
|
||||
December 21st, so I might stop by for more.
|
||||
|
||||
Pecan Street Brewing (Johnson City): Tried both their Resin Project IPA and
|
||||
13-Degree Czech Pilsner. The Pilsner stood out for its strong yeast flavor,
|
||||
making it one of my favorites.
|
||||
|
||||
Jester King Brewery: Their German Pilsner featured their signature yeast
|
||||
notes.
|
||||
|
||||
Bird Creek Brewing (Temple, TX): Ghost Beach Rice Lager. This was a surprise
|
||||
hit - sweet, refreshing, and easily my favorite rice-based beer to date.
|
||||
|
||||
Saint Arnold's Brewing Company (Houston, TX): 2013 Pumpkinator. A high-ABV
|
||||
delight, full of rich flavors. It was delightful. I slammed it!
|
||||
|
||||
Live Oak Brewing (Austin, TX): Death Putt Cold IPA. A straightforward cold IPA
|
||||
that lived up to its name.
|
||||
|
||||
Ghost Note Brewing: Foeder-Aged Farmhouse Ale. A high-ABV beer with a balanced
|
||||
flavor - not too sour, not too heavy.
|
||||
|
||||
Southern Roots Brewing Company (Waco, TX): New Zealand Pilsner. Crisp, clean,
|
||||
and easily my favorite Pilsner of the day.
|
||||
|
||||
Brutarium: Confabulator Doppelbock. Sweet, malty, and dark - a classic
|
||||
Doppelbock at 8.3% ABV.
|
||||
|
||||
Vector Brewing: Tamave Pivo 13-Degree Czech Lager. A dark, malty lager with a
|
||||
smooth finish, reminiscent of earlier Czech-style beers I sampled.
|
||||
|
||||
|
||||
**Beer Notes and Brewing Insights**
|
||||
|
||||
Chatting with fellow festival-goers and brewers, I picked up some interesting tidbits:
|
||||
|
||||
French hops, like Strisselspalt, have interesting unique flavor profiles.
|
||||
|
||||
Higher mash temperatures can leave residual sugars for sweetness, but cutting
|
||||
fermentation too early can lead to diacetyl flavors.
|
||||
|
||||
Longer boiling times increase the starting gravity of the beer, impacting its
|
||||
strength and flavor.
|
||||
|
||||
**Wrapping It Up**
|
||||
|
||||
After tons of samples, great conversations, and soaking in the lively atmosphere, I called it a day. This year's Texas Craft Brewers Festival reminded me of how diverse and exciting the Texas craft beer scene is. From traditional Pilsners to experimental ales, there's truly something for everyone.
|
||||
|
||||
If you're a craft beer enthusiast or just curious about the Texas brewing scene, this festival should be on your radar. Here's to next year!
|
||||
|
||||
_Note: I left out the names and stories of the people I met and hung out with along the way because I don't like putting people's business out there. I had a great time. Thanks!_
|
||||
|
||||
47
content/posts/blog-posting.md
Normal file
@@ -0,0 +1,47 @@
|
||||
---
|
||||
title: 'Is it me?'
|
||||
date: 2024-11-19T22:59:30Z
|
||||
draft: false
|
||||
series: "Fun Center"
|
||||
summary: "Marcus shares his blog writing process."
|
||||
tags:
|
||||
- meta
|
||||
- how-i-did-it
|
||||
- technology
|
||||
---
|
||||
|
||||
I've always loved talking to people. Whether it's at work, at home, or out with friends, connecting with others and hearing about their passions brings me so much joy. There's nothing better than a conversation with someone excited to share what they care about.
|
||||
|
||||
I have plenty of interests and experiences I'd love to share with others. But writing them down? That's where things get tricky.
|
||||
|
||||
For years, I've struggled to translate my thoughts into coherent blog posts. I've tried journaling, writing exercises, and reading advice on improving my craft. While those efforts got me to write more, they didn't necessarily help my words flow or make my stories easier to follow. My drafts often felt disjointed, like puzzle pieces that didn't quite fit together.
|
||||
|
||||
Despite this, I've always had the itch to keep trying - and recently, I found a workflow that's actually working for me.
|
||||
|
||||
---
|
||||
|
||||
*How Mac Whisper Changed the Game*
|
||||
|
||||
The first breakthrough came when I discovered Mac Whisper, a voice dictation app powered by AI. While it's not open source, it is freeware, and the free version has been more than enough for my needs.
|
||||
|
||||
Mac Whisper lets me dictate my stream of consciousness directly into text. It feels natural - like I'm telling the story to a friend. I don't have to worry about typing, editing, or overthinking. I just talk, and the app captures it all.
|
||||
|
||||
---
|
||||
|
||||
*Turning Ideas into Stories with ChatGPT*
|
||||
|
||||
Once I have the raw, unfiltered text from Mac Whisper, I use ChatGPT to help shape it into something structured and readable. The AI assists me in organizing my thoughts, refining my ideas, and cutting through the noise to create a story worth sharing.
|
||||
|
||||
It's not perfect, and I do plan to explore open-source tools for this step to keep everything local. But for now, ChatGPT has been a game-changer in helping me turn rambling thoughts into polished posts.
|
||||
|
||||
---
|
||||
|
||||
*Why This Matters*
|
||||
|
||||
Sharing this workflow feels a little strange - almost like admitting I need "help" to write. But I know I'm not alone in this struggle. Writing can be intimidating, and it's easy to feel like your words aren't good enough. Tools like Mac Whisper and ChatGPT don't replace your voice; they enhance it. They help you find clarity and make it easier to share your story.
|
||||
|
||||
I'm still figuring out how to condense my stories - long story short is rarely short with me - but this process has made a huge difference. If you're someone who struggles to write but loves to talk, I highly recommend giving this workflow a try.
|
||||
|
||||
At the end of the day, connecting with people through stories - whether written or spoken - is one of life's greatest joys. And now, I finally feel like I can do it in a way that works for me.
|
||||
|
||||
|
||||
51
content/posts/cloud-atlas-pancake.md
Normal file
@@ -0,0 +1,51 @@
|
||||
---
|
||||
title: Cloud Atlas Pancake
|
||||
date: 2024-11-20 14:49:23+00:00
|
||||
draft: true
|
||||
series: Frank's Couch
|
||||
summary: First post about a Master Pancake movie. Cloud Atlas was confusing but it
|
||||
was fun!
|
||||
imdb: tt1371111
|
||||
tags:
|
||||
- CYOP
|
||||
- mueller
|
||||
- no-pizza
|
||||
poster: /images/posters/cloud-atlas-pancake.jpg
|
||||
runtime: 172
|
||||
year: 2012
|
||||
director:
|
||||
- Lilly Wachowski
|
||||
- Lana Wachowski
|
||||
- Tom Tykwer
|
||||
genres:
|
||||
- Drama
|
||||
- Science Fiction
|
||||
---
|
||||
{{< imdbposter >}}
|
||||
|
||||
| Date watched | 2024-11-1 |
|
||||
|---------------------|-----------------------|
|
||||
| Show Time | 5:55 pm |
|
||||
| Theater | Mueller |
|
||||
| Theater Number | 4 |
|
||||
| Pizza | No |
|
||||
| Tickets | Friend |
|
||||
| Letterboxd Rating | 7.4/10 |
|
||||
| Crew | Me, RRa, RRj |
|
||||
|
||||
{{< /imdbposter >}}
|
||||
|
||||
|
||||
**Introduction to Master Pancake Theater:**
|
||||
Master Pancake Theater is an Austin-based comedy troupe known for their live movie-riffing performances. Combining quick-witted commentary, live sketches, and audience interaction, they hilariously deconstruct films, from beloved classics to cult favorites. The group started at Alamo Drafthouse and has become a staple of the city's comedy scene, celebrated for their ability to turn even the most serious movies into laugh-out-loud experiences. Their unique blend of humor and cinema makes every show a memorable event for film lovers and comedy fans alike.
|
||||
|
||||
**Brief synopsis of the movie**
|
||||
An exploration of how the actions of individual lives impact one another in the past, present and future, as one soul is shaped from a killer into a hero, and an act of kindness ripples across centuries to inspire a revolution.
|
||||
|
||||
**Now to the Movie**
|
||||
|
||||
Players -
|
||||
Voting -
|
||||
Issues -
|
||||
|
||||
|
||||
54
content/posts/joker-folie-a-deux.md
Normal file
@@ -0,0 +1,54 @@
|
||||
---
|
||||
title: Joker Folie a Deux
|
||||
date: 2024-10-07 00:03:15+00:00
|
||||
draft: false
|
||||
series: Frank's Couch
|
||||
summary: The boys get to B serious about Joker 2
|
||||
imdb: tt11315808
|
||||
tags:
|
||||
- gucci
|
||||
- anticipated
|
||||
- no pizza
|
||||
poster: /images/posters/joker-folie-a-deux.jpg
|
||||
runtime: 138
|
||||
year: 2024
|
||||
director: Todd Phillips
|
||||
genres:
|
||||
- Drama
|
||||
- Crime
|
||||
- Thriller
|
||||
---
|
||||
{{< imdbposter >}}
|
||||
|
||||
| Date watched | October 5th |
|
||||
|---------------------|-----------------------|
|
||||
| Show Time | 12:30 |
|
||||
| Theater | Gucci |
|
||||
| Theater Number | 1 |
|
||||
| Pizza | No Pizza |
|
||||
| Tickets | At Box Office |
|
||||
| Letterboxd Rating | ** 1/2 (2.5) |
|
||||
| Crew | Me, Coach T, Science Bro, MeHoniBear |
|
||||
|
||||
{{< /imdbposter >}}
|
||||
|
||||
Waking up at 4 AM all week means that a 12:30 PM movie doesn't exactly feel early, but it does require vigilance to avoid losing track of time. MeHoniBear is trekking in from the northern suburbs, and Coach T is being a total champ, driving him all the way in. I keep telling myself, "Eat something, they might not have pizza this time," but my hopes are perpetually high for pizza. I was also pretty hyped for what looked like a musical sequel to the gritty Beau is Afraid-meets-Joker vibe. Quick side note: NBA player Nikola Jokic, nicknamed "The Joker," would definitely take down the Jokers from both of these movies.
|
||||
|
||||
I park and text the crew, only to realize that Science Bro has also just arrived and parked a space away. We chat a bit, but Coach T and MeHoniBear roll up soon after, and the gang's all here.
|
||||
|
||||
First time in Theater 1! The big one. It even has an elevator. Fancy! But no pizza. Sad face. Consolation prize: a hot dog. We snagged seats in Row B, which turned out to be the perfect spot to B.
|
||||
|
||||
Now, onto the movie.
|
||||
|
||||
It kicks off with a quirky animated short about the Joker's shadow trying to steal the spotlight funny and definitely a little weird. Then, we're dropped straight into Arkham Asylum, though it's portrayed more like a tiny, poorly funded prison. Brendan Gleeson (the "finger man" from Banshees of Inisherin what a movie) plays an antagonistic guard who dishes out plenty of exposition. It's loaded with classic 1970s prison tropes: the yard, questionable relationships, terrible food, and guards who are both friendly and menacing. Oh, and we can't forget Steve Carell's girlfriend from The 40-Year-Old Virgin, Catherine Keener, who also rocked it in Capote such a great movie! And, of course, Lady Gaga. Her singing and performance were magical.
|
||||
|
||||
Costumes and lighting? On point. It's hard to make tattered garments look good, but they totally pulled it off. There's this one scene where Arthur's suit shifts from deep burgundy to a dark, ashy color, and it really captured the mood of the moment.
|
||||
|
||||
Now, onto the plot ... no spoilers, but I wasn't blown away. The pacing dragged, and the dance numbers teased a bigger, more exciting production than what we actually got. At 2 hours and 18 minutes, plus trailers, we were sitting there for about 2:45. After the movie, we kept hashing out what we didn't like for another 90 minutes, so it was nearly 5 PM by the time we finally wrapped up the group grumble.
|
||||
|
||||
If you loved the first Joker, this one might not be your thing. But if you're here for Lady Gaga or just want to vibe with the soundtrack, give it a shot or better yet, grab the Harlequin album from her site https://shop.ladygaga.com/products/harlequin-digital-album
|
||||
|
||||
Next week is ACL, but if we're feeling ambitious, we might catch Terrifier 3 late at night. TBD.
|
||||
|
||||
-Note: For Will and Harper I made an outline and then used AI as a crutch pretty hard. This week I wrote it all out and then asked it to correct grammar and punctuation and help it be more cohesive. I liked the outcome of this way better. I'll keep working on it, and hopefully soon I can get away with just a grammar checker - fingers crossed.
|
||||
|
||||
38
content/posts/lethal-tender.md
Normal file
@@ -0,0 +1,38 @@
|
||||
---
|
||||
title: 'Lethal Tender'
|
||||
date: 2025-12-24T22:47:20Z
|
||||
draft: false
|
||||
series: "Frank's Couch"
|
||||
summary: "I bought this movie off eBay on November 7th, 2016. I was living in a small apartment off Woodrow in Austin, TX at the time and was in the early days of what I was calling my Busey Quest. I was trying to find and watch as many Gary Busey movies as I could."
|
||||
imdb: "tt0119520"
|
||||
poster: "/images/posters/lethal-tender.jpg"
|
||||
year: 1997
|
||||
runtime: 94
|
||||
director: "John Bradshaw"
|
||||
genres:
|
||||
- Action
|
||||
- Thriller
|
||||
tags:
|
||||
- homevideo
|
||||
- buseyquest
|
||||
- no-expectations
|
||||
---
|
||||
{{< imdbposter >}}
|
||||
|
||||
| Date watched | Nov 2016 |
|
||||
|---------------------|-------------------|
|
||||
| Show Time | estimated 5pm |
|
||||
| Theater | Home Video |
|
||||
| Theater Number | Woodrow Apt |
|
||||
| Pizza | Yes |
|
||||
| Tickets | No |
|
||||
| Letterboxd Rating | DNR |
|
||||
| Crew | Just me |
|
||||
|
||||
{{< /imdbposter >}}
|
||||
|
||||
I bought this movie off eBay on November 7th, 2016. I was living in a small apartment off Woodrow in Austin, TX at the time and was in the early days of what I was calling my Busey Quest. I was trying to find and watch as many Gary Busey movies as I could.
|
||||
|
||||
Lethal Tender is one of the great Gary action movies. It kicks off when a routine tour of a Chicago water-filtration plant is taken hostage by a band of criminals. They seize the facility and round up any personnel who are not on strike. Oh, did I forget to mention that? Yes, the workers are on strike but people are still going to this place to take tours. These criminals, domestic terrorists, really, are threatening to contaminate the city’s drinking water as part of their scheme. Detective David Chase (Jeff Fahey), who was there to bust up the strike, becomes the reluctant lone hero trying to outwit the terrorists from inside the plant. Of course, who could be behind such a chaotic yet brilliant scheme? None other than our pal Gary Busey as Mr. Turner, who directs operations from in front of a very odd computer terminal. His partner and leader of the gunmen is another equally watchable, energetic actor, Kim Coates, who plays Montesi. He is one of those 80s/90s villains who is super smart and mad at the world for not being as smart as he is, which makes for great pre-internet smarty-pantsery. As the clock ticks and threats escalate, Chase teams up with plant engineer Melissa Wilkins (Carrie-Anne Moss) to foil the plot and save both the hostages and Chicago’s water supply. It’s been said before by people who watch this movie, but it really is like Die Hard in a water plant: hijinks, questionable master plans, and Busey’s unhinged energy making it uniquely its own.
|
||||
|
||||
It doesn’t end there. If you find this movie maybe too coherent or making too much sense, I recommend checking out another movie about a water plant under threat. It’s called Never Too Young to Die, and it hinges on a plot about poisoning a city’s water supply. It’s got that same wonderfully bonkers 80s action, starring John Stamos and Gene Simmons, who bring everything up to 11. It is quite magical. Make it a double feature and celebrate the memory of having clean drinking water.
|
||||
47
content/posts/megalopolis.md
Normal file
@@ -0,0 +1,47 @@
|
||||
---
|
||||
title: Megalopolis
|
||||
date: 2024-10-02 00:07:14+00:00
|
||||
draft: false
|
||||
series: Frank's Couch
|
||||
summary: The boys catch Megalopolis at Gucci on a Saturday Afternoon.
|
||||
imdb: tt10128846
|
||||
tags:
|
||||
- gucci
|
||||
- anticipated
|
||||
- had pizza
|
||||
poster: /images/posters/megalopolis.jpg
|
||||
runtime: 138
|
||||
year: 2024
|
||||
director: Francis Ford Coppola
|
||||
genres:
|
||||
- Science Fiction
|
||||
- Drama
|
||||
- Fantasy
|
||||
---
|
||||
{{< imdbposter >}}
|
||||
|
||||
| Date watched | September 28 |
|
||||
|---------------------|-------------------------|
|
||||
| Show Time | 3:55pm |
|
||||
| Theater | Gucci |
|
||||
| Theater Number | 7 |
|
||||
| Pizza | Yes |
|
||||
| Tickets | At Box Office |
|
||||
| Letterboxd Rating | **** (4) |
|
||||
| Crew | Me, Coach T, Science Bro |
|
||||
|
||||
{{< /imdbposter >}}
|
||||
|
||||
We've gone to the movies as a group for around three years or something now. Seen some amazing heartwarming films and some absolute garbage. Even the worst of the worst is still a good time. The core people who go most of the time are Me, Coach T, and Science Bro while our friend MeHoneyBear used to be the one always getting us to go do something together. Additional folks who show up from time to time include Nurse Vegas and her man, Purple Oil Esquire, Dr Nasty, or a few others.
|
||||
|
||||
It's not quite 90 degrees and the sun is bright out. The kind of bright that threatens you with a migraine if you don't play nice. I was chugging my clean 'cause uncarbonated yerba mate tea and praying that Honda's lane departure assist didn't let me down. Before doing my best to teleport to Gucci today Science Bro texted in that he was in the area earlier for something else and so he had arrived already. With the current time and temperature it's not the end of the world and since he smokes he'll be fine. Going to need to hustle though. There's a wonderful altered reality you enter when going to a familiar place with loud music playing. The android tablet powered dash entertainment console thing beeps and interrupts Gucci Mane to let me know Coach T has pulled in. Time to cut through the neighborhood. I can see the battleground of I35 in the distance while the side road I am in is filling up with vehicles. The uniformity of the suburbs makes the traffic look almost like ammunition to be fired down the narrow barrel of the "express way". Thanks to construction it is neither express nor really even the right way. I merge to the highway abruptly and just as fast exit. Swanging around the speed bumps I get my sun screen up and it's 3:53pm. We made it!
|
||||
|
||||
Brief hellos and it's time to grab tickets. The theater is number 7 this time so no real Row B. Oh yeah the crew is made of a bunch of BBoys we like row B a lot of the time. Given the choice we were Row A today. The door squeak is a cry of pain or relief who is to say but it isn't a neutral noise and a cold sigh of air conditioning brings us to the lobby. Eyes dart quickly, how long are the lines, how big is the line to get through the one ticket checker person, is the place crazy busy, and most important is there hot Pizza? Short, not bad, only on the far side, and yes. Today is a good day. We usually come early in the day so for the first year or more maybe I never got to have a pizza so now I relish them. Pizza, Icee, small popcorn rewards, yes thank you. Hot dogs familiar friends with Coach T, and the Diet C for Science Bro he loves chemicals.
|
||||
|
||||
I went in blind to Megalopolis. All I knew was that Francis Ford Coppola wrote it and he sold his wine farm to make it. He should have just drank the wine to be honest.
|
||||
|
||||
There are real pros who write reviews; this is all about the _sparkle_ experience _sparkle_. Row A provided a nice lean and the screen lighting was bright enough to eat by even during some darker scenes. My Icee lasted through the whole movie and 0 napkins or naps were used. There were some scenes with bright flashing lights and the threat of a migraine earlier had me closing my eyes or looking off to the side.
|
||||
|
||||
So why 4 stars? To be honest I should go down to 3 but who knows if I will. The story was told in a unique way with beautiful sets, costumes, and special effects. The sound design and everything technical about this movie was magnificent.
|
||||
|
||||
It's close to bedtime so I think maybe that's why this post ran out of gas. I'll try again tomorrow because on Sunday I watched Will & Harper.
|
||||
51
content/posts/recovering-failed-ubuntu-upgrade.md
Normal file
@@ -0,0 +1,51 @@
|
||||
---
|
||||
title: 'Recovering Failed Ubuntu Upgrade'
|
||||
date: 2024-10-07T22:59:30Z
|
||||
draft: false
|
||||
series: "Fun Center"
|
||||
summary: "Marcus recovers from an aborted Ubuntu Upgrade on his VPS"
|
||||
tags:
|
||||
- ubuntu
|
||||
- how-i-did-it
|
||||
- technology
|
||||
---
|
||||
|
||||
Forewarning- I tried to ask ChatGPT to help me with this post but it didn't understand and kept misbehaving so I had to forgo that for this post. Also I don't normally write about techy stuff so I hope I got it all in the right order.
|
||||
|
||||
I use a VPS I got on a Black Friday sale from [Rack Nerd](https://www.racknerd.com/) and it has been solid for uptime and everything. I run a few servers up there including my atuin, ntfy, a budgeting app, and some other random one off stuff. I run the nextcloud and other larger storage things at home thanks to Google Fiber/WebPass.
|
||||
|
||||
Most of the time I use it as a jump server to connect over to SDF or to wherever else online I'm going and so I don't see my MOTD. A few weekends back I was setting up an [Actual Server](https://actualbudget.org/) aka the budgeting app and I saw that I was out of date and Ubuntu 24.04 was available. Because I'm so good at planning I started up the upgrade and then saw I needed to get to the movies. I hit control-c and closed the window. Don't do that!
|
||||
|
||||
So I left everything running for a month or so and decided today I would get it going. First I tried the simple
|
||||
`sudo apt update && sudo apt upgrade`
|
||||
Update was happy but upgrade was mad. It suggested maybe I should run `sudo apt --fix-broken install` which then also told me to try `sudo apt-get clean`. From there I was off to Startpage to see if I could find something. I tried `sudo dpkg --configure -a` and I tried `sudo apt upgrade --fix-missing` and `sudo apt-get -f install`. At this point I was starting to get a little desperate. How desperate? Messing with some system level files desperate!
|
||||
|
||||
I found a Stack Overflow question and the answers suggested moving the dat files from /var/cache/debconf to a backup directory and then trying to upgrade again. That failed but told me that everything failed because it depended on systemd-sysv. Oooh progress!
|
||||
|
||||
At this point when I ran the apt update and then apt upgrade I got a warning about the shadow group cdrom already existing. so I edited the /etc/gshadow file to remove it. Then it was another group. I noticed that they were all groups that had ubuntuf in the last field. I put the cdrom entry back, duplicated the file, and then deleted all the ubuntufs in there. Then I ran `sudo dpkg --configure -a` Hey it finished!
|
||||
|
||||
```
|
||||
Do you want to continue? [Y/n] y
|
||||
(Reading database ... 147882 files and directories currently installed.)
|
||||
Preparing to unpack .../systemd-sysv_255.4-1ubuntu8.4_amd64.deb ...
|
||||
Unpacking systemd-sysv (255.4-1ubuntu8.4) over (249.11-0ubuntu3.12) ...
|
||||
Setting up systemd-sysv (255.4-1ubuntu8.4) ...
|
||||
Processing triggers for man-db (2.10.2-1) ...
|
||||
needrestart is being skipped since dpkg has failed
|
||||
```
|
||||
|
||||
I switched over to root to run these because I was done typing sudo sudo sudo so much
|
||||
|
||||
Let's try the apt update .. so far so good
|
||||
|
||||
Lets try upgrade .. no not quite there yet
|
||||
|
||||
Ok how about `apt --fix-broken install`
|
||||
|
||||
Wooo it was installing like crazy.
|
||||
|
||||
It wasn't quite done though. I ran apt update and we still had quite a few packages to work on.
|
||||
|
||||
One more apt upgrade and then it was time to restart.
|
||||
|
||||
After reboot we were on 24.04. Now I need to upgrade PostgreSQL eventually...
|
||||
68
content/posts/terrifier-3.md
Normal file
@@ -0,0 +1,68 @@
|
||||
---
|
||||
title: Terrifier 3
|
||||
date: 2024-10-15 00:28:11+00:00
|
||||
draft: false
|
||||
series: Frank's Couch
|
||||
summary: The Boys get terrified, late at night.
|
||||
imdb: tt27911000
|
||||
tags:
|
||||
- ghost theater
|
||||
- anticipated
|
||||
- no-pizza
|
||||
poster: /images/posters/terrifier-3.jpg
|
||||
runtime: 125
|
||||
year: 2024
|
||||
director: Damien Leone
|
||||
genres:
|
||||
- Horror
|
||||
- Thriller
|
||||
---
|
||||
{{< imdbposter >}}
|
||||
|
||||
| Date watched | October 12th |
|
||||
|---------------------|-----------------------|
|
||||
| Show Time | 9:45 pm |
|
||||
| Theater | Ghost |
|
||||
| Theater Number | 6 |
|
||||
| Pizza | No |
|
||||
| Tickets | Box Office |
|
||||
| Letterboxd Rating | *** (3) |
|
||||
| Crew | Me, Science Bro, and Coach T |
|
||||
|
||||
{{< /imdbposter >}}
|
||||
|
||||
# Early October in Austin and Terrifier 3
|
||||
|
||||
There's a special time in Austin when the weather starts to cool, hurricane season comes to an end, and Austin City Limits (ACL) takes over Zilker Park. This music festival keeps growing every year, and its impact on the park grows with it. The ground this year is pounded into a hard smooth dirt floor more suited to heavy equipment than family picnics. Fortunately the huge revenues are able to pay for a miraculous recovery to be completed just in time for the next ACL.
|
||||
|
||||
I've been going off and on over the past ten years, but the last two years I've made it a point to attend. Last year, the whole movie crew went, but this year it was just me and Nurse Vegas. I went all day Friday, but when I woke up on Saturday, my feet hurt so bad that I could barely make it to the bathroom. I texted Nurse Vegas to let them know I might skip that day, and I ended up going back to sleep for a few hours.
|
||||
|
||||
Later that afternoon, I checked my phone and realized I'd marked the wrong weekend on my calendar. Instead of ACL, I had other plans, like movies and a Master Pancake show. It was a happy coincidence, because my feet were still killing me and sitting down sounded great. So instead, I went to see *Ghost Rider* at a Drafthouse up north. It might technically still be Austin but spiritually it is definitely Cedar Park or some other kind of suburb.
|
||||
|
||||
After the movie, I messaged the group chat to see if we were still seeing *Terrifier 3*. It was showing at 9:45 PM, and I was feeling a good bit better, thanks to ibuprofen. I'd enjoyed *Terrifier 2*, so I was excited to check out the third one. I headed down to Ghost Theater, which is in deep South Austin, but hey, beggars can't be choosers. Also we've kind of learned that Gucci Theater is usually worse late at night, so it was the best option.
|
||||
|
||||
I met up with Science Bro and Coach T, and we hung around outside the theater for a bit before the movie. The boys didn’t know I skipped ACL that day, but they were cool about it. I was still excited for Sunday’s lineup at ACL, especially to see [Chappell Roan](https://en.wikipedia.org/wiki/Chappell_Roan) and [Tyler, the Creator](https://en.wikipedia.org/wiki/Tyler%2C_the_Creator).
|
||||
|
||||
At the concession stand, I grabbed a *Terrifier* cup and a large Icy, which set me back 8 bucks (wild). I also got some Reese's Thins. Ghost Theater doesn’t have ready-to-eat hot food, so I skipped ordering something that would end up in an awkward wait at the end of the concession line.
|
||||
|
||||
Normally, we spread out along row B leaving a gap between us to put our snacks and stuff, but a guy behind me had seat 11, so I ended up sitting next to Coach T. The other guy didn't realize how close Row B was and eventually moved to row three, but I was already settled and didn't wanna bother moving.
|
||||
|
||||
Now, onto the movie! *Terrifier 3* is a solid two hours and five minutes of Art the Clown terrorizing the residents of Miles County on Christmas Eve. There are a few returning characters from *Terrifier 2*, like the brother Jonathan, but his screen time was surprisingly short. Sienna had a much bigger role, and it was cool to see how they’re building out this universe.
|
||||
|
||||
One of the more interesting additions was Art’s new sidekick, a woman from a mental ward with some body mutations including no eyelid on one side and the other eye completely gone. I appreciated that they didn't take some known existing medical condition and cast it as horrific. There were adult situations in the movie, but it made me laugh because it was so unexpected.
|
||||
|
||||
As for the kills, more than one happened off-screen, which was a bit of a letdown. I loved the chainsaw action, though. If you’re a fan of gory horror, you’ll have fun with *Terrifier 3*. It's got that same wild energy as the other movies in the series, but if you haven’t seen *Terrifier 1* or *Terrifier 2*, you might miss some of the context. Watching *Terrifier 2* is a good litmus test to see if you’re into the series.
|
||||
|
||||
After the movie, we hung around outside the theater, talking about the film. It was a good time overall. On Letterboxd, I gave it 3 stars. Coach T didn’t rate it, but Science Bro also gave it 3 stars. Definitely a fun time if you're into the *Terrifier* series!
|
||||
|
||||
---
|
||||
|
||||
*Other Movie Details:*
|
||||
|
||||
- IMDB ID for *Ghost Rider*: [tt0259324](https://www.imdb.com/title/tt0259324)
|
||||
- IMDB ID for *Saturday Night*: [tt27657135](https://www.imdb.com/title/tt27657135)
|
||||
|
||||
Until next time!
|
||||
|
||||
|
||||
|
||||
49
content/posts/the-housemaid.md
Normal file
@@ -0,0 +1,49 @@
|
||||
---
|
||||
title: The Housemaid
|
||||
date: 2026-01-01 05:54:14+00:00
|
||||
draft: false
|
||||
series: Frank's Couch
|
||||
summary: Marcus goes it alone on New Year's Eve to learn about manipulation and the
|
||||
cost of privilege.
|
||||
imdb: tt27543632
|
||||
poster: /images/posters/the-housemaid.jpg
|
||||
tags:
|
||||
- marcel
|
||||
- no-expectations
|
||||
runtime: 131
|
||||
year: 2025
|
||||
director: Paul Feig
|
||||
genres:
|
||||
- Mystery
|
||||
- Thriller
|
||||
---
|
||||
{{< imdbposter >}}
|
||||
|
||||
| Date watched | December 31, 2025 |
|
||||
|---------------------|-------------------|
|
||||
| Show Time | 1:25pm |
|
||||
| Theater | Marcel |
|
||||
| Theater Number | 11 |
|
||||
| Pizza | No |
|
||||
| Tickets | Online - Email QR Code |
|
||||
| Letterboxd Rating | **** (4.0) |
|
||||
| Crew | Just Me |
|
||||
|
||||
{{< /imdbposter >}}
|
||||
|
||||
What an exciting and fun ride The Housemaid turned out to be.
|
||||
|
||||
I went in only knowing what I’d seen in the trailer that depicted an interview for a job. Well, don't worry, that happens very close to the beginning! It also is nice because I won’t be dropping any major spoilers talking about it. We immediately get a sense of how the characters and story will develop. Everything looks picturesque and perfect, but it isn’t what it seems. Millie (Sydney Sweeney) arrives with an impressive résumé and a story about finding her calling in housekeeping. She wears glasses and tries her best to make a good impression. Nina Winchester (Amanda Seyfried) tells Millie she’s expecting, but also asks her to not tell her husband. The whole first act we have a lot of this: Nina says one thing in private and another in public, creating a pattern of tension and manipulation right away. Is Nina manipulating Millie or is Millie an unreliable narrator manipulating Nina?
|
||||
|
||||
This world felt so different from mine that, at first, I wasn’t sure I was going to really "get" the movie. I can watch wizards and spaceships and absurdity and fall right in, but this story relies on the psychological interplay between people, social expectations, and emotional boundaries. Once that clicked, the movie opened up for me. I feel like that was my greatest takeaway and it wasn't even explicitly part of the movie. The social relationships and society's pressure on people are not fixed. You can play with those too. I know this sounds so juvenile but anyway I want to share in case someone out there didn't get it and maybe this will help them try again. We mostly follow the hero's journey: Millie starts with nothing, struggles against the family and the situation, pays a personal cost, and eventually finds reward ending at the beginning.
|
||||
|
||||
Andrew (Brandon Sklenar), Nina’s husband, was vaguely familiar to me. I looked him up, he was in 1923, the Yellowstone spinoff—but otherwise he’s not in a ton of high-profile roles. Think a midpoint between Chris Evans and Josh Hartnett. There are multiple sex scenes between Millie and Andrew, but there’s also a scene between Andrew and Nina that reads as clear marital SA / r*pe scene which may be triggering for some viewers.
|
||||
|
||||
The movie has a small cast of 16 named roles, 10 of them women. I think that’s worth noting. If I'm off please tell me, I'm an older white guy, but I think the film works because its psychological tension relies on expectations placed on women, and how those expectations are shaped by patriarchy, the prison-industrial complex, and outdated mental health systems. Those structures help explain how characters are controlled.
|
||||
|
||||
Since I’m already in the meta: The Housemaid opened the same weekend as Avatar: Fire and Ash, a new SpongeBob movie, and Angel Studios’ David. It made about 8 million dollars USD on Friday and 19 million for the weekend. Rotten Tomatoes scored around 74 percent and Metacritic averaged out around 65.
|
||||
|
||||
Speaking of Angel Studios: they released The Sound of Freedom, marketed as a true story about stopping child trafficking, but the man the film was based on was later arrested for kidnapping. They originally started as a filtering service that pirated TV shows and edited them to align with their religious values; in 2016, they were sued by Hollywood studios for copyright violations. They used crowdfunding to pay their settlement and now use crowdfunding to produce content. The company is valued at around $1.6 billion. They launched the company publicly on September 11, 2025, which tells you something about their branding priorities. They grind my gears in a special way.
|
||||
|
||||
I hope this is a worthwhile and quick read. I’m still figuring out how much to discuss without spoiling too much. The point of this blog is to talk about movies, so I’ll keep working at the balance. Thanks for reading. Have a happy New Year, and I’ll see you in 2026.
|
||||
|
||||
51
content/posts/the-secret-agent.md
Normal file
@@ -0,0 +1,51 @@
|
||||
---
|
||||
title: The Secret Agent
|
||||
date: 2025-12-26 00:37:12+00:00
|
||||
draft: false
|
||||
series: Frank's Couch
|
||||
summary: It's Christmas, let's go watch a movie or two. The Secret Agent is a Brazilian 70s
|
||||
Spy Thriller
|
||||
imdb: tt27847051
|
||||
poster: /images/posters/the-secret-agent.jpg
|
||||
tags:
|
||||
- no-expectations
|
||||
- alamo-drafthouse
|
||||
runtime: 161
|
||||
year: 2025
|
||||
director: Kleber Mendonça Filho
|
||||
genres:
|
||||
- Crime
|
||||
- Drama
|
||||
- Thriller
|
||||
---
|
||||
{{< imdbposter >}}
|
||||
|
||||
| Date watched | December 25, 2025 |
|
||||
|---------------------|-------------------|
|
||||
| Show Time | 11:15 am |
|
||||
| Theater | Alamo Drafthouse S.Lamar |
|
||||
| Theater Number | 9 |
|
||||
| Pizza | No |
|
||||
| Tickets | Via App |
|
||||
| Letterboxd Rating | **** 1/2 (4.5) |
|
||||
| Crew | Just Me |
|
||||
|
||||
{{< /imdbposter >}}
|
||||
|
||||
|
||||
I watched "The Secret Agent" (2025) this Christmas morning and it was so much different than I had expected. Set in 1977 Brazil during Carnival, it follows Marcelo (Wagner Moura) or Armando depending on who you ask, as he moves through Recife, the state capital north of Rio, trying to survive and reconnect with his son in a world that feels hot and muggy but also full of life with a hint of government corruption mixed in.
|
||||
|
||||
{{< figure src="/images/article-art/recife-map.png" title="Map from Wikipedia showing Recife" >}}
|
||||
|
||||
The movie is not simply about a spy or what I thought of when I heard "The Secret Agent". It feels more like a weird road trip movie. There are colorful clothes, great music and sound work, dancing, and unexpected bits of comedy. The movie has various cuts where we fade out but the story keeps moving. It creates this sense we are watching through the eyes of someone else.
|
||||
|
||||
Those cuts interestingly enough leave you missing information about Marcelo and what he is doing until the narrative begins slowly piecing it together. I was not totally clear on who the "secret agent" was until much later. At first it feels like he is just wandering, staying at halfway houses and running from who knows what, but it leads to something surprisingly sweet with his kid.
|
||||
|
||||
This made me think about another recent Brazil film I liked. "I'm Still Here" (2024) from Walter Salles. That one is based on the disappearance of Rubens Paiva and his wife Eunice's struggle after. Much heavier, but worth watching if you liked the political backdrop here.
|
||||
|
||||
Anyway, fun watch, weird and colorful, and not what I expected from the title. Maybe I gave it an extra star because I loved *I'm Still Here*, I don't know.
|
||||
|
||||
My Letterboxd review has a few more thoughts: https://letterboxd.com/marcuseid/film/the-secret-agent-2025/
|
||||
|
||||
|
||||
|
||||
88
content/posts/uptown-saturday-night.md
Normal file
@@ -0,0 +1,88 @@
|
||||
---
|
||||
title: Uptown Saturday Night
|
||||
date: 2026-01-02 04:00:57+00:00
|
||||
draft: false
|
||||
series: Found in the Darkroom
|
||||
summary: 'We embark on our journey to watch movies of the National Film Archive starting with a fun to watch comedy from 1974.'
|
||||
imdb: tt0072351
|
||||
poster: /images/posters/uptown-saturday-night.jpg
|
||||
year: 1974
|
||||
runtime: 104
|
||||
director: Sidney Poitier
|
||||
genres:
|
||||
- Comedy
|
||||
- Crime
|
||||
- Action
|
||||
nfr_year: 2024
|
||||
letterboxd_url: 'https://letterboxd.com/marcuseid/film/uptown-saturday-night/'
|
||||
tags:
|
||||
- national-film-registry
|
||||
- home-video
|
||||
---
|
||||
{{< imdbposter >}}
|
||||
|
||||
| Date watched | |
|
||||
|------------------------|-----------------------|
|
||||
| Format | 4k TV w/Panasonic BRPlayer |
|
||||
| Watched Multiple Times | Second Watch |
|
||||
| Added to NFR | 2024 |
|
||||
| Letterboxd Rating | **** (4.0) |
|
||||
| Personal Notes | Fun and full of laughs |
|
||||
|
||||
{{< /imdbposter >}}
|
||||
|
||||
## Why It's in the National Film Registry
|
||||
|
||||
Preserved as Sidney Poitier's directorial effort "dispelling stereotypes" of the Blaxploitation era through an entertaining crime comedy ensemble cast.
|
||||
|
||||
*Source: [Library of Congress National Film Registry 2024 announcement](https://newsroom.loc.gov/news/25-films-named-to-national-film-registry-for-preservation/)*
|
||||
|
||||
## My Thoughts
|
||||
|
||||
My friend Mehone recommended Uptown Saturday Night to me a year or two ago. I watched it back then and enjoyed it, but I never logged it or wrote about it since it was just a fun weekend movie at the time. However, now that I’m working my way through the National Film Registry (starting with the 2024 additions), I decided to revisit it, and I’m finding a lot to like.
|
||||
|
||||
**The Setup**
|
||||
|
||||
The plot follows Steve Jackson (Sidney Poitier), a blue-collar worker who has two weeks of vacation. It looks like a foundry of some kind but I believe they call it a factory in the movie; for some reason it's something that is sticking in my mind. Since his wife is still working, he plans to just relax around the house. However, his best friend, Wardell Franklin (a young _bearded_ Bill Cosby), thinks Steve needs to unwind a little more aggressively.
|
||||
|
||||
Wardell suggests going to Madame Zenobia’s, a high-class, after-hours club. Everyone knows about it, but not everyone can get in. Wardell manages to forge a letter of introduction using his wife's employer's stationery, claiming they are important players in the diamond business. Surprisingly, this gets them in the door.
|
||||
The Incident
|
||||
|
||||
The club is a showcase of the hottest looks of the era with big hats, wonderful hairstyles, and wild outfits. Steve and Wardell eventually find their way to a room behind the red door where gambling is taking place. In the 70s there was also an early adult film called "Behind the Green Door" I dont know if it is a nod to that or not but it is a little funny to have that in mind when you are watching this scene play out. The casino is obviously a high-stakes environment and the bouncer warns the guys that just watching isn’t allowed and the buy-in is 150 bucks. Steve lends Wardell some cash, and he starts betting on Leggy Peggy who is on a hot streak. He's turned his 50 dollar bet into a few hundred!
|
||||
|
||||
Unfortunately, the celebration is cut short when a crew busts in to rob the place. In a strange twist, or maybe it was just a thing they did at the time, the robbers force everyone to strip down to their underwear. They say in case one of you has a heater which I think means a concealed weapon. It also helps to prevent anyone from chasing them immediately. (One woman claims she isn't wearing underwear, but they make her strip anyway! The movie keeps it PG and doesn't show anything explicit).
|
||||
|
||||
The robbery sets up the central conflict: Steve had a lottery ticket in his wallet containing his lucky numbers. The next day, he sees those numbers hit the jackpot in the newspaper, and realizes the ticket is lost! The ticket is worth $50,000 (which is over $320,000 adjusted for inflation). It’s a life-changing amount of money that would allow his family to move out of the inner city. The rest of the movie is a hilarious race to find the wallet without letting anyone know what is in it.
|
||||
|
||||
**A Star-Studded Cast**
|
||||
|
||||
The film features a massive roster of wonderful actors, and everyone gets a moment to shine.
|
||||
|
||||
Harry Belafonte plays Geechee Dan, a menacing gangster who looks like he has bad allergies or is just really sick.
|
||||
|
||||
Flip Wilson plays the Reverend who preaches "No joy juice at the picnic!"
|
||||
|
||||
Richard Pryor has a cameo as Sharp Eye Washington, a con man, who posed as a private eye briefly.
|
||||
|
||||
Calvin Lockhart plays Silky Slim a rival gangster to Geechee Dan.
|
||||
|
||||
Rosalind Cash plays Steve’s wife, Sarah Jackson. She has some of the best lines!
|
||||
|
||||
Paula Kelly and Lee Chamberlin (Madame Zenobia) also turn in great performances.
|
||||
|
||||
One of the funniest performances comes from Roscoe Lee Browne, who plays Congressman Lincoln. His character satirizes politicians embracing the Black Power movement for votes. When constituents arrive, he hurriedly flips a picture of Richard Nixon around to reveal a portrait of Malcolm X and changes from a suit into a dashiki to play the part. It was a hilarious reminder of how post-Watergate movies depicted politicians as chameleons willing to say anything for a vote.
|
||||
The Elephant in the Room
|
||||
|
||||
It is impossible to discuss this film without acknowledging that Bill Cosby has since been convicted of heinous sex crimes. However, looking strictly at the film as a 1970s comedy, the character he plays does not have any romantic entanglements or questionable "adult" situations. If you can, as they say, separate the art from the artist's off-screen behavior, his dynamic with Poitier is genuinely funny.
|
||||
|
||||
**The 70s Aesthetic**
|
||||
|
||||
There are some very specific "70s things" in this movie that I really enjoyed. For one, the church picnic scene is massive. The church actually owns a Greyhound style bus to transport the congregation to a fairground for food and games it really captures the community vibe of the era.
|
||||
|
||||
The fashion is also incredible. As a child of the 80s and 90s, I used to think bellbottoms and wide lapels looked ridiculous. Watching it now, I totally get it. The lines, the colors, and the energy that the right pair of shoes or jacket conveyed allowed people to really stand out. Plus, the slang is a blast to listen to you don't hear people getting called "turkeys" enough anymore.
|
||||
Preservation and Quality
|
||||
|
||||
To wrap things up, I was struck by how well preserved this film is. Because it is on film it was also possible to be scanned in at high resolution and detail. The audio is just as crisp as when it was recorded.
|
||||
|
||||
It makes me wonder about our current digital era. We record video just to compress it immediately. Many movies were left behind on VHS, then DVD, then Blu-ray. In 30 years, when we are watching on "2060's Hottest new Retina 20K" displays, our current digital footage might look blown out and pixelated. But Uptown Saturday Night? It will still look sharp.
|
||||
|
||||
50
content/posts/urchin.md
Normal file
@@ -0,0 +1,50 @@
|
||||
---
|
||||
title: Urchin
|
||||
date: 2025-12-25 15:53:35+00:00
|
||||
draft: false
|
||||
series: Frank's Couch
|
||||
summary: I watched a movie that felt very real and led me to reflect on my life. Felt
|
||||
almost like I could have escaped into the tv screen.
|
||||
imdb: tt35715953
|
||||
poster: /images/posters/urchin.jpg
|
||||
tags:
|
||||
- homevideo
|
||||
- no-expectations
|
||||
runtime: 100
|
||||
year: 2025
|
||||
director: Harris Dickinson
|
||||
genres:
|
||||
- Drama
|
||||
---
|
||||
{{< imdbposter >}}
|
||||
|
||||
| Date watched | December 14, 2025 |
|
||||
|---------------------|-------------------|
|
||||
| Show Time | Evening |
|
||||
| Theater | Home Video |
|
||||
| Theater Number | Living Room |
|
||||
| Pizza | No |
|
||||
| Media | Online |
|
||||
| Letterboxd Rating | ***** (5.0) |
|
||||
| Screen | 4k TV |
|
||||
|
||||
{{< /imdbposter >}}
|
||||
|
||||
I wrote a short paragraph on Letterboxd right after watching *Urchin (2025)*. If you want the quick version, read it here:
|
||||
[My Letterboxd review](https://letterboxd.com/marcuseid/film/urchin-2025/)
|
||||
|
||||
---
|
||||
|
||||
Directed by **Frank Delaney** and starring **Mike Carter (Will Parker)** and **Nathan Reed (Julian Pierce)**, *Urchin* focuses on homelessness, addiction, and survival without the typical cinematic buffer that sanitizes those topics. It’s not poverty as an aesthetic or a redemption tourism kind of story. It’s practical: where you wake up, where you can pee, whether you have a blanket, how you get through the hour before you can think about the day. Survival is the real drumbeat of the movie.
|
||||
|
||||
In the early 2000s, I spent time either homeless or crashing wherever I could. I’m not Mike, and I haven’t had the same relationship to addiction, but the urgency, dull terror, and logistical problem-solving of "simply existing" rang painfully true. The film understands that the path down and the path back up are not the same and are unique to each individual. People may fall at any point. Luck is as important as genuine effort. There’s no moral grading scale for survival.
|
||||
|
||||
The supporting characters feel lived-in rather than just popping into existence. **Andrea (Marissa Vale)**, the RE counselor holding everything together with spit and hope, is painfully believable. It’s my understanding that in the US there is even less support once you get out. You have to rely on religious organizations for the most part. The friend with a couch isn’t a trope; she’s the connective tissue of the community. Nathan’s shift from dealing drugs to being a kept man isn’t framed as a downfall or escape, it’s just a path. A path you might not take, but still recognize.
|
||||
|
||||
Homelessness here feels like slipping into a parallel layer of the city—almost like putting on the ring of power in Lord of the Rings. You aren’t literally gone, but to most people you might as well be. They don’t see you; they sense you the way someone senses a draft in the room. Only those already inside that layer — other "ring-wearers", people who know the signs and the reality — can actually perceive you. In that hidden layer, there are networks and hierarchies, violence and kindness, boredom and jokes, grudges and rules. A small world, but a complete one. The film captures how it can feel both tiny and overwhelmingly complex at the same time.
|
||||
|
||||
I don’t want to overshare or claim authority I don’t have; I got lucky in ways others didn’t. My mother giving me her old car at eighteen, and being willing to take me back in when I realized I was not going to get back on my feet alone really changed the trajectory of my life. The film never pretends that Mike’s path applies universally. That’s why it works. Stories aren’t interchangeable.
|
||||
|
||||
There’s a line about **"a gap in empathy"** that stuck with me. It’s the gap people fall into long before they fall between jobs or into the street—the gap where being unseen turns into being unreal. *Urchin* doesn’t try to rescue anyone from that gap. It just asks you to look into it.
|
||||
|
||||
---
|
||||
76
content/posts/will-and-harper.md
Normal file
@@ -0,0 +1,76 @@
|
||||
---
|
||||
title: Will and Harper
|
||||
date: 2024-10-05 16:26:32+00:00
|
||||
draft: false
|
||||
series: Frank's Couch
|
||||
summary: I watched Will and Harper at Home on a Sunday afternoon.
|
||||
imdb: tt30321133
|
||||
tags:
|
||||
- no-expectations
|
||||
- no pizza
|
||||
poster: /images/posters/will-and-harper.jpg
|
||||
runtime: 114
|
||||
year: 2024
|
||||
director: Josh Greenbaum
|
||||
genres:
|
||||
- Documentary
|
||||
- Comedy
|
||||
---
|
||||
{{< imdbposter >}}
|
||||
|
||||
| Date watched | September 9th |
|
||||
|---------------------|-----------------------|
|
||||
| Show Time | 2:08pm |
|
||||
| Theater | My Desk |
|
||||
| Theater Number | The big screen computer |
|
||||
| Pizza | No Pizza |
|
||||
| Tickets | Its on Netflix |
|
||||
| Letterboxd Rating | ****(4) |
|
||||
| Crew | Team Me |
|
||||
|
||||
{{< /imdbposter >}}
|
||||
|
||||
I haven't written more than a couple of paragraphs in an email in a long time and I'm finding it hard to string together my thoughts. What I did here is dictate my thoughts on the movie, feed them through ChatGPT, and then edit the output. I think this workflow will help me make more coherent and cohesive posts without sounding like a robot. It's a challenge to get these thoughts out. Maybe over time I'll depend on it less and less. That's my hope anyway.
|
||||
|
||||
Another note is that this blog is called the Double Lunch Dispatch because in I Saw the TV Glow there is a bar that is in both the Pink Opaque and in the midnight realm where I think the movie takes place. The idea being this could be a zine or a newsletter that you found in the bar talking about the latest movies or beers or whatever out there. Frank is Owen's dad and he is always watching TV so we are watching movies on Frank's Couch for a series name. Luna juice is what Mr Melancholie's henchpeople make you drink to come under his control or so you die so that's what the Beer series is called. I'll probably use other TV Glow references but just so you know where it all comes from.
|
||||
|
||||
The other night, instead of watching the Packers lose in Jordan Love's first game back, I found myself looking for something more entertaining at half time. That's when I stumbled upon Will and Harper, a documentary featuring Harper Steele and Will Ferrell. What I expected to be a light road trip film quickly revealed itself to be much deeper, touching on the complexities of gender identity and accepting your friends where they are.
|
||||
|
||||
#### Gender and Society: A Lens on Privilege
|
||||
|
||||
In the United States, and throughout a lot of the West, systems of oppression are layered. There is race — are you a member of the dominant group? Then gender — are you a man in the patriarchal society we have? This is a big one. Then there's economic status, and whether someone is disabled or has any physical differences. I'm sure there are many more; I'm a white guy working a tech job. I try to be aware of things, but there are likely many privileges I am not even aware of.
|
||||
|
||||
For Harper, this means facing the world without the same privilege and experiencing the world through a different lens, one that comes with new dangers and risks. Throughout the trip, we see the constant caution she must exercise simply because she's a woman, and then sometimes even more care because she is a trans woman. This vulnerability is especially clear as they travel through more conservative parts of the country, where traditional gender roles are deeply entrenched, and hostility towards the LGBTQ+ community is tolerated or encouraged by government policy.
|
||||
|
||||
Will, with his fame and privilege, moves through the world without fear, while Harper is experiencing in some places a brand new sensation of constant vigilance against the potential threats around her. This contrast between their experiences helps to highlight roles both friends have to play for each other.
|
||||
|
||||
#### The Complexities of Gender Roles and Expectations
|
||||
|
||||
Another element I found powerful in Will and Harper is how it confronts societal expectations of gender. Harper's transition doesn't just change how she is seen; it also challenges deeply held beliefs about what it means to be a man or a woman. In many ways, her journey is a confrontation with a patriarchal society that sees women, and especially trans women, as "lesser" or "weaker." The film doesn't shy away from showing the microaggressions and outright hostility Harper faces simply for existing in a space where she doesn't conform to social expectations.
|
||||
|
||||
The documentary captures the emotional toll of navigating a world that isn't always accepting, and it makes you think about the lengths women, and particularly trans women, have to go to protect themselves. There is a moment near the beginning when many old SNL friends are all sitting together and I think it was Tina Fey who just casually mentions "yeah, you can't do that" when referring to going down a dark alley alone in the city. It was so matter-of-fact that I didn't really notice it the first time, but when I rewatched the scene I really felt it. Like there is an acceptable level of violence against women and you have to just work around it if you want to exist. Like watching that scene in Hidden Figures — if you search for "No more colored bathroom, no more white bathroom" it should come up. Privileges are often invisible to those who have them, and it was just another light coming on for me.
|
||||
|
||||
#### Friendship and Reconnection: Navigating Change Together
|
||||
|
||||
While gender plays a central role in the film, Will and Harper is also a story of friendship. Will Ferrell and Harper Steele have known each other for years, but this road trip is their first time reconnecting since Harper's transition. There is a wonderful love in how they navigate the changes in their lives while still holding onto the core of their bond.
|
||||
|
||||
Watching them interact reminded me of that universal experience of reconnecting with an old friend. When people change, especially in big ways like Harper has, you often wonder if they're still the same person underneath it all. Will and Harper shows that despite the physical and emotional changes Harper has gone through, their friendship is still very much intact. Will's support of Harper throughout their journey is a testament to the resilience of true friendship and a reminder that friendship, when it's real, can survive transformation.
|
||||
|
||||
#### Privilege and the Reality of Safety on the Road
|
||||
|
||||
As the film follows their journey across the country, the stark differences in how Will and Harper experience the trip are impossible to ignore. Will, as a wealthy and famous man, can enjoy the freedom of the open road without a second thought. For Harper, every stop, whether it's a gas station or a small-town diner, comes with a risk. There's an underlying tension in the film that reflects the reality faced by many women and trans people in America today.
|
||||
|
||||
This disparity in safety and privilege was a major takeaway for me. It's a reminder of how much we take for granted based on our societal status and identity. Harper's past experiences of traveling carefree now seem distant, as she navigates the world with heightened awareness of the dangers that come with her identity. It's a sobering reflection on how privilege, or lack of it, shapes every aspect of life.
|
||||
|
||||
#### A Reflection on Identity and Change
|
||||
|
||||
Ultimately, Will and Harper is about identity, how it evolves and how we come to terms with it. For Will, this road trip is a chance to understand Harper's transition and what it means for their friendship. For Harper, it's a way to confront the fears and challenges that come with being a trans woman in a society that doesn't always accept her.
|
||||
|
||||
The documentary doesn't offer easy answers, but it does provide a thoughtful exploration of what it means to live authentically in a world that often resists change. The road trip serves as a metaphor for the journey of self-discovery, and by the end, both Will and Harper have learned something new about each other and themselves.
|
||||
|
||||
#### Final Thoughts
|
||||
|
||||
Will and Harper is far more than a road trip movie. It's a powerful exploration of gender, identity, and friendship in a society where these issues are still deeply contested. The film shows us that even in a world that is often hostile to change, true friendship and self-acceptance can offer a path forward.
|
||||
|
||||
Watching the film reminded me of the importance of empathy, of seeing the world through another's eyes. Harper's story, and Will's support of her, is a reminder that we all have a role to play in creating a world where everyone can feel safe being themselves.
|
||||
|
||||
71
get_posters.py
Normal file
@@ -0,0 +1,71 @@
|
||||
import os
import sys

import requests

# TMDB API endpoint templates; the key is interpolated at call time.
CONFIG_PATTERN = 'http://api.themoviedb.org/3/configuration?api_key={key}'
IMG_PATTERN = 'http://api.themoviedb.org/3/movie/{imdbid}/images?api_key={key}'

# Make scripts/ importable so the local, git-ignored config holding the
# API key can be loaded.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'scripts'))
try:
    from config import TMDB_API_KEY as KEY
except ImportError:
    raise SystemExit("Error: scripts/config.py not found. Copy config.example.py to config.py and add your API key.")
|
||||
|
||||
def _get_json(url):
    """GET `url` and return the decoded JSON body.

    Raises requests.HTTPError on non-2xx responses (instead of silently
    trying to decode an error page) and bounds the request with a timeout
    so a hung connection cannot stall the script forever.
    """
    r = requests.get(url, timeout=30)
    r.raise_for_status()
    return r.json()
|
||||
|
||||
def _download_images(urls, path='.'):
    """Download every image in `urls` to directory `path`.

    Files are named poster_1.<ext>, poster_2.<ext>, ... where <ext> is
    derived from the response's Content-Type header.
    """
    for nr, url in enumerate(urls, start=1):
        r = requests.get(url, timeout=30)
        r.raise_for_status()
        # "image/jpeg; charset=..." -> "jpeg": strip any header parameters
        # before taking the subtype, and default sanely if the header is
        # missing.
        content_type = r.headers.get('content-type', 'image/jpeg')
        filetype = content_type.split(';')[0].split('/')[-1].strip()
        filename = 'poster_{0}.{1}'.format(nr, filetype)
        filepath = os.path.join(path, filename)
        with open(filepath, 'wb') as w:
            w.write(r.content)
|
||||
|
||||
def get_poster_urls(imdbid):
    """Return poster image URLs for an IMDB id.

    Queries themoviedb.org for all posters of the movie and builds full
    URLs using the largest available poster size.

    Args:
        imdbid (str): IMDB id of the movie (e.g. 'tt0095016').

    Returns:
        list: list of str URLs to the poster images (possibly empty).
    """
    config = _get_json(CONFIG_PATTERN.format(key=KEY))
    base_url = config['images']['base_url']
    sizes = config['images']['poster_sizes']

    # Sizes look like ['w92', ..., 'w780', 'original'].  Pick the largest
    # numerically rather than trusting the API's ordering; 'original' is
    # always considered the biggest.
    def size_str_to_int(x):
        return float("inf") if x == 'original' else int(x[1:])

    max_size = max(sizes, key=size_str_to_int)

    # .get(...) guards against error payloads that lack a 'posters' key.
    posters = _get_json(IMG_PATTERN.format(key=KEY, imdbid=imdbid)).get('posters', [])
    return ["{0}{1}{2}".format(base_url, max_size, poster['file_path'])
            for poster in posters]
|
||||
|
||||
def tmdb_posters(imdbid, count=None, outpath='.'):
    """Fetch poster URLs for `imdbid` and download them into `outpath`.

    When `count` is given, only the first `count` posters are downloaded.
    """
    poster_urls = get_poster_urls(imdbid)
    if count is not None:
        poster_urls = poster_urls[:count]
    _download_images(poster_urls, outpath)


if __name__ == "__main__":
    # Smoke test against a well-known title.
    tmdb_posters('tt0095016')
|
||||
|
||||
94
hugo.yml
Normal file
@@ -0,0 +1,94 @@
|
||||
---
|
||||
baseURL: https://mnw.sdf.org/
|
||||
title: The Double Lunch Dispatch
|
||||
languageCode: en
|
||||
DefaultContentLanguage: en
|
||||
theme: poison
|
||||
publishDir: /sdf/arpa/gm/m/mnw/html
|
||||
pagination:
|
||||
pagerSize: 10
|
||||
favicon: favicon.ico
|
||||
pluralizeListTitles: false
|
||||
params:
|
||||
brand: Double Lunch Dispatch
|
||||
#remote_brand_image: https://mnw.sdf.org/blog/images/circular-me.png
|
||||
brand_image: "/images/circular-me-250.png"
|
||||
dark_mode: true
|
||||
description: "A blog about watching movies, drinking beers, and using technology."
|
||||
keywords: "blog, movies, beers, technology, life experiences"
|
||||
author:
|
||||
name: "Marcus Wilson"
|
||||
url: "https://mnw.sdf.org"
|
||||
favicon: favicon.ico
|
||||
lazy_load_images: true
|
||||
# hit counter
|
||||
# goatcounter: "mnw"
|
||||
schema:
|
||||
blog:
|
||||
"@context": "http://schema.org"
|
||||
"@type": "Blog"
|
||||
"name": "The Double Lunch Dispatch"
|
||||
"description": "A blog covering movies, beers, technology, and life experiences."
|
||||
"url": "https://mnw.sdf.org/"
|
||||
menu:
|
||||
- Name: About
|
||||
URL: /about
|
||||
HasChildren: false
|
||||
- Name: Posts
|
||||
URL: /posts/
|
||||
Pre: Recent
|
||||
HasChildren: true
|
||||
Limit: 5
|
||||
- Name: Movies
|
||||
URL: /movies/
|
||||
Pre: Recent
|
||||
HasChildren: true
|
||||
Limit: 5
|
||||
- Name: Beers
|
||||
URL: /beers/
|
||||
Pre: Recent
|
||||
HasChildren: true
|
||||
Limit: 5
|
||||
# email_url: https://keys.openpgp.org/search?q=mnw%40sdf.org
|
||||
gitlab_url: https://git.sdf.org/mnw
|
||||
mastodon_url: https://tilde.zone/@mnw
|
||||
matrix_url: https://matrix.to/#/@mnw:envs.net
|
||||
xmpp_url: xmpp:mnw@disroot.org?message;body=hi
|
||||
rss_icon: true
|
||||
rss_section: posts
|
||||
moon_sun_background_color: "#515151"
|
||||
moon_sun_color: "#FFF"
|
||||
sidebar_a_color: "#FFF"
|
||||
sidebar_bg_color: "#202020"
|
||||
sidebar_h1_color: "#FFF"
|
||||
sidebar_img_border_color: "#515151"
|
||||
sidebar_p_color: "#909090"
|
||||
sidebar_socials_color: "#FFF"
|
||||
code_color: "#000"
|
||||
code_background_color: "#E5E5E5"
|
||||
code_block_color: "#FFF"
|
||||
code_block_background_color: "#272822"
|
||||
content_bg_color: "#FAF9F6"
|
||||
date_color: "#515151"
|
||||
link_color: "#268BD2"
|
||||
list_color: "#5A5A5A"
|
||||
post_title_color: "#303030"
|
||||
table_border_color: "#E5E5E5"
|
||||
table_stripe_color: "#F9F9F9"
|
||||
text_color: "#222"
|
||||
code_color_dark: "#FFF"
|
||||
code_background_color_dark: "#515151"
|
||||
code_block_color_dark: "#FFF"
|
||||
code_block_background_color_dark: "#272822"
|
||||
content_bg_color_dark: "#121212"
|
||||
date_color_dark: "#9A9A9A"
|
||||
link_color_dark: "#268BD2"
|
||||
list_color_dark: "#9D9D9D"
|
||||
post_title_color_dark: "#DBE2E9"
|
||||
table_border_color_dark: "#515151"
|
||||
table_stripe_color_dark: "#202020"
|
||||
text_color_dark: "#EEE"
|
||||
taxonomies:
|
||||
series: series
|
||||
tags: tags
|
||||
|
||||
47
hugonetwatch.py
Normal file
@@ -0,0 +1,47 @@
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
from watchdog.observers import Observer
|
||||
from watchdog.events import FileSystemEventHandler
|
||||
|
||||
# Class for handling file system events
|
||||
class WatchHandler(FileSystemEventHandler):
    """Print a line for every file-level change in the watched directory."""

    def _report(self, label, event):
        # Directory events are noise for this tool; only report files.
        if not event.is_directory:
            print(f"{label}: {event.src_path}")

    def on_modified(self, event):
        self._report("File modified", event)

    def on_created(self, event):
        self._report("New file created", event)

    def on_deleted(self, event):
        self._report("File deleted", event)
|
||||
|
||||
# Resolve the directory to watch: the first CLI argument wins, falling
# back to the HUGO_WATCH_DIR environment variable.
WATCH_DIR = sys.argv[1] if len(sys.argv) > 1 else os.environ.get('HUGO_WATCH_DIR')

if not WATCH_DIR:
    print("Error: Please provide a directory as a command line argument or set the HUGO_WATCH_DIR environment variable.")
    sys.exit(1)

if not os.path.isdir(WATCH_DIR):
    print(f"Error: The provided directory '{WATCH_DIR}' does not exist or is not a directory.")
    sys.exit(1)

# Wire the handler to an observer and start watching (non-recursive).
event_handler = WatchHandler()
observer = Observer()
observer.schedule(event_handler, path=WATCH_DIR, recursive=False)

observer.start()
print(f"Watching directory: {WATCH_DIR}")

try:
    # Idle loop; the observer does its work on a background thread.
    while True:
        time.sleep(1)
except KeyboardInterrupt:
    observer.stop()

observer.join()
|
||||
|
||||
23
layouts/_default/single.html
Normal file
@@ -0,0 +1,23 @@
|
||||
{{/* Single-post layout: post body plus optional newsletter, navigation,
     comments, and analytics partials, each gated on site params. */}}
{{ define "main" -}}
<div class="post">
{{ partial "post/info.html" . }}
{{ .Content }}
{{/* Newsletter signup, only when listmonk is configured */}}
{{ if (.Site.Params.listmonk) }}
{{ partial "post/listmonk_email_newsletters.html" . }}
{{ end }}
{{ partial "post/navigation.html" . }}
{{/* Classic comment systems (remark42 or Disqus) */}}
{{ if or (.Site.Params.remark42) (.Site.Config.Services.Disqus.Shortname) }}
{{ partial "post/comments.html" . }}
{{ end }}
{{/* Mastodon comments - shows if mastodon_id is set in front matter */}}
{{ partial "mastodon-comments.html" . }}
{{- if .Site.Params.goatcounter }}
{{ partial "analytics.html" . -}}
{{- end}}
</div>
{{- end }}
{{/* Sidebar: table of contents unless hidden per-page or site-wide */}}
{{ define "sidebar" }}
{{ if and (not .Params.hideToc) (not .Site.Params.hideToc) }}
{{ partial "table_of_contents.html" . }}
{{ end }}
{{ end }}
|
||||
@@ -1,45 +1,22 @@
|
||||
{{ define "main" }}
|
||||
|
||||
<div class="row-fluid navmargin">
|
||||
<div class="page-header">
|
||||
<h1>{{ .Site.Params.mainpagetitle }}</h1>
|
||||
</div>
|
||||
{{ define "main" -}}
|
||||
{{ partial "whoami.html" . }}
|
||||
<div class="posts">
|
||||
{{ $frontPageTypes := default (slice "posts") .Site.Params.front_page_content }}
|
||||
{{ range (.Paginate (where .Site.RegularPages "Type" "in" $frontPageTypes)).Pages }}
|
||||
<article class="post">
|
||||
{{ partial "post/info.html" . }}
|
||||
{{ if or (.Site.Params.noSummary) (.Params.noSummary) }}
|
||||
{{ .Content }}
|
||||
{{ else }}
|
||||
{{ .Summary }}
|
||||
{{ if .Truncated }}
|
||||
<div class="read-more-link">
|
||||
<a href="{{ .RelPermalink }}">Read More…</a>
|
||||
</div>
|
||||
{{ end }}
|
||||
{{ end }}
|
||||
</article>
|
||||
{{- end }}
|
||||
</div>
|
||||
|
||||
<div class="row-fluid">
|
||||
<div class="span9 bs-docs-sidebar">
|
||||
<p class="lead">{{ .Site.Params.mainpagesubtitle }}</p>
|
||||
<p></p>
|
||||
<p>{{ .Site.Params.mainpagedesc | markdownify }}</p>
|
||||
<p></p>
|
||||
<hr class="soften">
|
||||
<p></p>
|
||||
<h1>{{ i18n "postslist" }}</h1>
|
||||
<ul>
|
||||
{{ range first 10 .Site.RegularPages }}
|
||||
{{ if eq .Type "post" }}
|
||||
<li><a href="{{ .Permalink }}">{{ .Date.Format "2006-01-02" }} | {{ .Title }}</a></li>
|
||||
{{ end }}
|
||||
{{ end }}
|
||||
</ul>
|
||||
</div>
|
||||
|
||||
<div class="span3 bs-docs-sidebar">
|
||||
<h1>{{ i18n "categories" }}</h1>
|
||||
<ul class="nav nav-list bs-docs-sidenav">
|
||||
{{ partial "categories.html" .}}
|
||||
</ul>
|
||||
<p></p>
|
||||
<h1>{{ i18n "tags" }}</h1>
|
||||
<ul class="nav nav-list bs-docs-sidenav">
|
||||
{{ partial "tags.html" .}}
|
||||
</ul>
|
||||
<p></p>
|
||||
<h1>{{ i18n "partials" }}</h1>
|
||||
<ul class="nav nav-list bs-docs-sidenav">
|
||||
{{ partial "socials.html" .}}
|
||||
</ul>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{{ end }}
|
||||
{{ partial "pagination.html" . }}
|
||||
{{- end }}
|
||||
|
||||
11
layouts/partials/analytics.html
Normal file
@@ -0,0 +1,11 @@
|
||||
{{/* GoatCounter hit counter: fetches this page's view count client-side
     and shows it inline.  The counter element must be a span — a nested
     block-level div would break the sentence across lines. */}}
<div>There's still time for <span id="stats"></span> of you to escape. <3 </div>

<script>
  var r = new XMLHttpRequest();
  r.addEventListener('load', function () {
    try {
      document.querySelector('#stats').innerText = JSON.parse(this.responseText).count;
    } catch (e) {
      // Leave the counter blank if the response is not valid JSON.
    }
  });
  r.open('GET', 'https://{{ .Site.Params.goatcounter }}.goatcounter.com/counter/' + encodeURIComponent(location.pathname) + '.json');
  r.send();
</script>
|
||||
|
||||
77
layouts/partials/mastodon-comments.html
Normal file
@@ -0,0 +1,77 @@
|
||||
{{/*
|
||||
Mastodon Comments Partial
|
||||
|
||||
Displays comments from a Mastodon post. Requires mastodon_id in front matter.
|
||||
Comment count is fetched at build time; full comments load on button click.
|
||||
|
||||
Inspired by: https://andreas.scherbaum.la/post/2024-05-23_client-side-comments-with-mastodon-on-a-static-hugo-website/
|
||||
And the vibes of: I Saw the TV Glow
|
||||
*/}}
|
||||
|
||||
{{- $host := "tilde.zone" -}}
|
||||
{{- $username := "mnw" -}}
|
||||
|
||||
{{- if .Params.mastodon_id -}}
|
||||
{{- $id := .Params.mastodon_id -}}
|
||||
|
||||
{{/* Fetch comment count at build time */}}
|
||||
{{- $count := 0 -}}
|
||||
{{- $apiUrl := printf "https://%s/api/v1/statuses/%s/context" $host $id -}}
|
||||
{{- with resources.GetRemote $apiUrl -}}
|
||||
{{- if .Err -}}
|
||||
{{/* API error - show 0 */}}
|
||||
{{- else -}}
|
||||
{{- $data := .Content | transform.Unmarshal -}}
|
||||
{{- if $data.descendants -}}
|
||||
{{- $count = len $data.descendants -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/* Build blocklist from front matter */}}
|
||||
{{- $blocked := slice -}}
|
||||
{{- if .Params.mastodon_blocked -}}
|
||||
{{- $blocked = .Params.mastodon_blocked -}}
|
||||
{{- end -}}
|
||||
|
||||
<div class="mastodon-comments-section">
|
||||
<pre class="comments-header">/* ================================================== */
|
||||
/* COMMENTS */
|
||||
/* via the fediverse / tilde.zone */
|
||||
/* ================================================== */</pre>
|
||||
|
||||
<noscript>
|
||||
<pre class="comments-error">
|
||||
ERROR: JavaScript required to load comments.
|
||||
Enable JS or view discussion directly at:
|
||||
https://{{ $host }}/@{{ $username }}/{{ $id }}
|
||||
</pre>
|
||||
</noscript>
|
||||
|
||||
<p class="comments-intro">
|
||||
++ TRANSMISSION RECEIVED ++<br>
|
||||
Reply to <a href="https://{{ $host }}/@{{ $username }}/{{ $id }}" rel="nofollow">this post on Mastodon</a> to join the discussion.
|
||||
</p>
|
||||
|
||||
<div id="mastodon-comments-list">
|
||||
<button type="button" id="load-comments-btn" onclick="loadMastodonComments()">
|
||||
>> LOAD COMMENTS{{ if gt $count 0 }} ({{ $count }}){{ end }} <<
|
||||
</button>
|
||||
</div>
|
||||
|
||||
<p class="comments-note">
|
||||
<small>// comments loaded from {{ $host }} when you click the button</small>
|
||||
</p>
|
||||
</div>
|
||||
|
||||
<link rel="stylesheet" href="{{ "css/mastodon-comments.css" | relURL }}">
|
||||
<script src="{{ "js/purify.min.js" | relURL }}"></script>
|
||||
<script src="{{ "js/mastodon-comments.js" | relURL }}"></script>
|
||||
<script>
|
||||
var mastodonHost = '{{ $host }}';
|
||||
var mastodonUser = '{{ $username }}';
|
||||
var mastodonId = '{{ $id }}';
|
||||
var blockedToots = [{{ range $blocked }}'{{ . }}',{{ end }}];
|
||||
</script>
|
||||
|
||||
{{- end -}}
|
||||
55
layouts/partials/post/info.html
Normal file
@@ -0,0 +1,55 @@
|
||||
{{/* Post header: title, author, date, reading time, tags, series link,
     and optional featured image. */}}
<div class="info">
    <h1 class="post-title">
        <a href="{{ .Permalink }}">{{ .Title }}</a>
    </h1>

    <div class="headline">
        <div>
            {{ with .Params.author }}
                {{ with site.Taxonomies.author.Get . }}
                    <span><a href="{{ .Page.RelPermalink }}">{{ .Page.LinkTitle }}</a> - </span>
                {{ else }}
                    <span>{{ . }} - </span>
                {{ end }}
            {{ end }}
            {{ if .Date }}
                {{/* The layout string must not start with a space: a
                     leading space produces an invalid datetime attribute. */}}
                <time datetime="{{ .Date.Format "2006-01-02T15:04:05Z0700" }}" class="post-date">
                    {{ .Date.Format "January 2, 2006" }}
                </time>
            {{ end }}
            <span> - </span>
            <span class="reading-time">
                {{ if gt .ReadingTime 1 }}
                    {{ .Scratch.Set "readingTime" "mins" }}
                {{ else }}
                    {{ .Scratch.Set "readingTime" "min" }}
                {{ end }}
                <span>{{ .ReadingTime }} {{ .Scratch.Get "readingTime" }} read</span>
            </span>
        </div>

        {{ if .Params.tags }}
            <ul class="tags">
                {{ range .Params.tags }}
                    <li class="tag-{{ . }}">
                        <a href="{{ "tags/" | absLangURL }}{{ . | urlize }}">{{ . }}</a>
                    </li>
                {{ end }}
            </ul>
        {{ end }}
    </div>

    {{ $Site := .Site }}
    {{ if .Params.series }}
        <p class="seriesname">
            Series: <a href="{{ $Site.BaseURL }}series/{{ .Params.series | urlize }}">{{ .Params.series }}</a>
        </p>
    {{ end }}

    {{ if .Params.featuredImage }}
        <p>
            <img src="{{ .Params.featuredImage }}"><br>
        </p>
    {{ end }}
</div>
|
||||
40
layouts/partials/post/navigation.html
Normal file
@@ -0,0 +1,40 @@
|
||||
{{/* Footer navigation: previous/next within a series when the post
     belongs to one, otherwise previous/next within the section. */}}
<hr>
<div class="footer">
    {{ if .Params.series }}
        {{ $Site := .Site }}
        {{ $postTitle := .Title }}
        {{/* A <p> cannot legally contain a <ul>; use a <div> wrapper so
             browsers don't auto-close the paragraph early. */}}
        <div>
            This is a post in the <b><a href="{{ $Site.BaseURL }}series/{{ .Params.series | urlize }}">{{ .Params.series }}</a></b> series.
            <br>Other posts in this series:
            {{/* Scratch ends up holding the closest earlier/later post in
                 the same series, by date. */}}
            {{ range where .Site.Pages.ByDate "Params.series" .Params.series }}
                {{ if gt $.Date.Unix .Date.Unix }}
                    {{ $.Scratch.Set "prevPost" . }}
                {{ end }}
            {{ end }}
            {{ range where .Site.Pages.ByDate.Reverse "Params.series" .Params.series }}
                {{ if lt $.Date.Unix .Date.Unix }}
                    {{ $.Scratch.Set "nextPost" . }}
                {{ end }}
            {{ end }}
            <ul class="series">
                {{ if $.Scratch.Get "prevPost" }}
                    <li>Previous: <a href="{{ ($.Scratch.Get "prevPost").RelPermalink }}">{{ ($.Scratch.Get "prevPost").Title }}</a></li>
                {{ end }}
                {{ if $.Scratch.Get "nextPost" }}
                    <li>Next: <a href="{{ ($.Scratch.Get "nextPost").RelPermalink }}">{{ ($.Scratch.Get "nextPost").Title }}</a></li>
                {{ else }}
                    <li>Next: <a href="{{ $Site.BaseURL }}">Back home</a></li>
                {{ end }}
            </ul>
        </div>
    {{ else }}
        {{ if .PrevInSection }}
            <a class="previous-post" href="{{ .PrevInSection.Permalink }}?ref=footer"><span style="font-weight:bold;">« Previous</span><br>{{ .PrevInSection.Title | truncate 50 "..." }}</a>
        {{ end }}
        {{ if .NextInSection }}
            <div class="next-post">
                <a href="{{ .NextInSection.Permalink }}?ref=footer"><span style="font-weight:bold;">Next »</span><br>{{ .NextInSection.Title | truncate 50 "..." }}</a>
            </div>
        {{ end }}
    {{ end }}
</div>
|
||||
7
layouts/partials/whoami.html
Executable file
@@ -0,0 +1,7 @@
|
||||
{{/* Front-page greeting and license notice. */}}
<h3>Welcome to the Double Lunch Dispatch</h3>

<div>
  Unless noted all content is licensed under a Creative Commons Attribution-ShareAlike 4.0 International License.
</div>
<br/>
<br/>
|
||||
49
layouts/series/list.html
Normal file
@@ -0,0 +1,49 @@
|
||||
{{ define "main" -}}
|
||||
<h1 class="title">{{ .Title }}</h1>
|
||||
|
||||
{{ if eq .Title "Found in the Darkroom" }}
|
||||
<div style="margin-bottom: 2em; line-height: 1.6;">
|
||||
|
||||
<p>
|
||||
In <em>I Saw the TV Glow</em>, Maddy leaves behind recordings of <em>The Pink Opaque</em> for Owen in the school darkroom. It is a quiet, out-of-the-way space where the episodes can be discovered, plucked out of time for those who need them most. The darkroom becomes less a literal place and more a threshold. The place where a signal from the real world survives long enough to be found.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
Here at the <strong>Double Lunch Dispatch</strong>, we couldn’t help but draw a parallel between those tapes left for Owen and the films selected for inclusion in the Library of Congress’ National Film Registry. Each year, 25 films are added not because they are fashionable or newly released, but because they are deemed “culturally, historically, or aesthetically significant” works of art worth preserving so they can be encountered again by future viewers, long after their original moment has passed. This series of blog posts is going on that journey of finding out what the people who were here before us are trying to say. I hope this makes sense to whoever reads this. So many ideas and attempts go into it. Fingers crossed it’s not garbled up.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
Like the episodes of <em>The Pink Opaque</em> waiting in the darkroom, these films are held in trust, set aside so they don’t disappear into static. You can find the complete list of films preserved in the National Film Registry
|
||||
<a href="https://www.loc.gov/programs/national-film-preservation-board/film-registry/complete-national-film-registry-listing/" target="_blank">here</a>.
|
||||
</p>
|
||||
|
||||
<img src="/images/tvglow-darkroom.gif"
|
||||
alt="I Saw the TV Glow – Pink Opaque tapes in the darkroom"
|
||||
style="max-width: 100%; height: auto; margin-top: 1.5em; border-radius: 6px;" />
|
||||
|
||||
</div>
|
||||
{{ end }}
|
||||
|
||||
|
||||
<ul class="entries">
|
||||
{{ range .Pages.GroupByDate "2006" }}
|
||||
<h3 style="text-align: center;">{{ .Key }}</h3>
|
||||
{{ range .Pages }}
|
||||
<li>
|
||||
<span class="title">
|
||||
<a href="{{ .RelPermalink }}">{{ .Title }}</a>
|
||||
{{ if .Params.year }}
|
||||
<span style="color: #888;">({{ .Params.year }})</span>
|
||||
{{ end }}
|
||||
{{ if .Params.nfr_year }}
|
||||
<span style="color: #888; font-size: 0.9em;"> • NFR {{ .Params.nfr_year }}</span>
|
||||
{{ end }}
|
||||
</span>
|
||||
<span class="published">
|
||||
<time class="pull-right post-list">{{ .Date | time.Format ":date_long" }}</time>
|
||||
</span>
|
||||
</li>
|
||||
{{ end }}
|
||||
{{ end }}
|
||||
</ul>
|
||||
{{ end }}
|
||||
47
layouts/shortcodes/imdbposter.html
Normal file
@@ -0,0 +1,47 @@
|
||||
{{/*
  Shortcode: imdbposter
  Renders a two-column layout: the shortcode's inner markdown (viewing
  details) on the left, and the movie poster plus director/year/runtime
  on the right. All data is read from the page's front matter:
    imdb     - IMDB title id (e.g. tt0477348)
    poster   - optional local poster path; wins over the OMDB-derived URL
    year     - release year
    runtime  - runtime in minutes
    director - a single name or a list of names
*/}}
{{- $imdb := .Page.Params.imdb -}}
{{- $localPoster := .Page.Params.poster -}}
{{- $year := .Page.Params.year -}}
{{- $runtime := .Page.Params.runtime -}}
{{- $director := .Page.Params.director -}}
{{- $poster := "" -}}

{{- if $localPoster -}}
{{- $poster = $localPoster -}}
{{- else if $imdb -}}
{{- /* NOTE(review): this OMDB API key is hardcoded and committed to the
      repo; consider moving it to site params or an environment variable
      and rotating the key. */ -}}
{{- $poster = printf "https://img.omdbapi.com/?i=%s&apikey=d9641e70" $imdb -}}
{{- end -}}

<div style="display: flex; justify-content: flex-start; align-items: flex-start; margin-bottom: 2em;">
  <!-- Left section: viewing details -->
  <div style="flex: 1; padding-right: 20px;">
    {{ .Inner | markdownify }}
  </div>

  <!-- Right section: movie poster and info -->
  <div style="flex-shrink: 0; text-align: center;">
    {{- if $imdb -}}
    <a href="https://www.imdb.com/title/{{ $imdb }}" target="_blank">
      <img src="{{ $poster }}" alt="Movie Poster" style="width: 200px;" />
    </a>
    {{- else if $poster -}}
    <img src="{{ $poster }}" alt="Movie Poster" style="width: 200px;" />
    {{- end -}}
    {{- if or $year $runtime $director -}}
    <div style="font-size: 0.85em; color: #666; margin-top: 8px;">
      {{- if $director -}}
      <div>
        Directed by {{ if reflect.IsSlice $director }}{{ delimit $director ", " }}{{ else }}{{ $director }}{{ end }}
      </div>
      {{- end -}}
      {{- if or $year $runtime -}}
      <div>
        {{- if $year }}{{ $year }}{{ end -}}
        {{- if and $year $runtime }} · {{ end -}}
        {{- if $runtime }}{{ $runtime }} min{{ end -}}
      </div>
      {{- end -}}
    </div>
    {{- end -}}
  </div>
</div>
|
||||
|
||||
2
requirements.txt
Normal file
@@ -0,0 +1,2 @@
|
||||
requests
|
||||
pyyaml
|
||||
341
scripts/NFR_AUTOMATION.md
Normal file
@@ -0,0 +1,341 @@
|
||||
# National Film Registry Automation Guide
|
||||
|
||||
This guide explains how to automatically pull and setup data for National Film Registry movies from any year.
|
||||
|
||||
## Overview
|
||||
|
||||
The NFR automation system consists of:
|
||||
|
||||
1. **`setup_nfr.py`** - Script to fetch LOC announcements and extract film data
|
||||
2. **`new_nfr.py`** - Script to create blog posts for NFR movies
|
||||
3. **ollama** - Local AI to help extract structured data from web pages
|
||||
|
||||
## Quick Start
|
||||
|
||||
### Basic Usage
|
||||
|
||||
```bash
|
||||
# Setup data for a specific year
|
||||
python3 scripts/setup_nfr.py 2023
|
||||
|
||||
# With a known URL
|
||||
python3 scripts/setup_nfr.py 2015 --url "https://newsroom.loc.gov/news/..."
|
||||
|
||||
# Without ollama (basic extraction)
|
||||
python3 scripts/setup_nfr.py 2022 --no-ollama
|
||||
```
|
||||
|
||||
### With Ollama (Recommended)
|
||||
|
||||
Ollama provides much better extraction of film descriptions from the LOC announcements.
|
||||
|
||||
```bash
|
||||
# Default (uses ollama at 192.168.0.109:11434)
|
||||
python3 scripts/setup_nfr.py 2023
|
||||
|
||||
# Custom ollama host
|
||||
python3 scripts/setup_nfr.py 2023 --ollama-host http://localhost:11434
|
||||
|
||||
# Custom model
|
||||
python3 scripts/setup_nfr.py 2023 --ollama-model llama3.2:latest
|
||||
```
|
||||
|
||||
## Setting Up Ollama
|
||||
|
||||
### What is Ollama?
|
||||
|
||||
Ollama is a tool for running large language models locally. We use it to:
|
||||
- Parse HTML content from LOC announcements
|
||||
- Extract film titles, years, and descriptions
|
||||
- Structure the data into Python dictionaries
|
||||
|
||||
### Installing Ollama
|
||||
|
||||
Your server at `192.168.0.109` should already have ollama running. To verify:
|
||||
|
||||
```bash
|
||||
curl http://192.168.0.109:11434/api/tags
|
||||
```
|
||||
|
||||
If you need to install it locally:
|
||||
|
||||
```bash
|
||||
# macOS / Linux
|
||||
curl https://ollama.ai/install.sh | sh
|
||||
|
||||
# Start the server
|
||||
ollama serve
|
||||
|
||||
# Pull a model
|
||||
ollama pull llama3.2
|
||||
```
|
||||
|
||||
### Ollama Configuration
|
||||
|
||||
The script uses these environment variables:
|
||||
|
||||
```bash
|
||||
# Set custom ollama host
|
||||
export OLLAMA_HOST=http://192.168.0.109:11434
|
||||
|
||||
# Set custom model (default: llama3.2)
|
||||
export OLLAMA_MODEL=llama3.2
|
||||
|
||||
# Then run the script
|
||||
python3 scripts/setup_nfr.py 2023
|
||||
```
|
||||
|
||||
### Testing Ollama Connection
|
||||
|
||||
Test if ollama is accessible:
|
||||
|
||||
```bash
|
||||
# Test API endpoint
|
||||
curl http://192.168.0.109:11434/api/tags
|
||||
|
||||
# Test generation
|
||||
curl http://192.168.0.109:11434/api/generate -d '{
|
||||
"model": "llama3.2",
|
||||
"prompt": "Say hello",
|
||||
"stream": false
|
||||
}'
|
||||
```
|
||||
|
||||
## How It Works
|
||||
|
||||
### Step 1: Find the LOC Announcement
|
||||
|
||||
The script needs the URL of the Library of Congress announcement for your year. For example:
|
||||
|
||||
- **2024**: https://newsroom.loc.gov/news/25-films-named-to-national-film-registry-for-preservation/s/55d5285d-916f-4105-b7d4-7fc3ba8664e3
|
||||
- **2023**: Search at https://newsroom.loc.gov/
|
||||
- **Older**: Check https://blogs.loc.gov/now-see-hear/
|
||||
|
||||
You can provide the URL with `--url` or the script will prompt you.
|
||||
|
||||
### Step 2: Fetch the Content
|
||||
|
||||
The script downloads the HTML content from the announcement page.
|
||||
|
||||
### Step 3: Extract Film Data
|
||||
|
||||
**With ollama (recommended):**
|
||||
- Sends the HTML to ollama
|
||||
- Asks it to extract all 25 films with titles, years, and descriptions
|
||||
- Returns structured JSON data
|
||||
|
||||
**Without ollama (fallback):**
|
||||
- Uses regex patterns to find film titles and years
|
||||
- May miss descriptions or get incomplete data
|
||||
- Requires manual review and editing
|
||||
|
||||
### Step 4: Generate Python Dictionary
|
||||
|
||||
Creates a Python file like:
|
||||
|
||||
```python
|
||||
# 2023 National Film Registry inductees with LOC descriptions
|
||||
# Source: https://newsroom.loc.gov/news/...
|
||||
NFR_2023 = {
|
||||
"Film Title": {
|
||||
"year": 1999,
|
||||
"description": "Selected for its groundbreaking..."
|
||||
},
|
||||
# ... more films
|
||||
}
|
||||
```
|
||||
|
||||
### Step 5: Integration
|
||||
|
||||
The generated file is saved to `scripts/nfr_data/nfr_YEAR.py`. You can then:
|
||||
|
||||
1. Review and edit the file
|
||||
2. Copy the dictionary into `scripts/new_nfr.py`
|
||||
3. Update the script to handle the new year
|
||||
|
||||
## Complete Example
|
||||
|
||||
Let's set up 2023 NFR data:
|
||||
|
||||
```bash
|
||||
# 1. Run the setup script
|
||||
python3 scripts/setup_nfr.py 2023
|
||||
|
||||
# The script will prompt:
|
||||
# > Please find the LOC announcement URL for 2023.
|
||||
# > Enter the URL: https://newsroom.loc.gov/news/...
|
||||
|
||||
# 2. Script fetches and extracts (using ollama)
|
||||
# ✓ Extracted 25 films
|
||||
# Preview:
|
||||
# 1. Terminator 2 (1991)
|
||||
# Recognized for groundbreaking visual effects...
|
||||
# ... and 24 more
|
||||
|
||||
# 3. Confirm and save
|
||||
# Save this data? (Y/n): y
|
||||
# ✓ Saved to scripts/nfr_data/nfr_2023.py
|
||||
|
||||
# 4. Review the generated file
|
||||
cat scripts/nfr_data/nfr_2023.py
|
||||
|
||||
# 5. Copy the dictionary into new_nfr.py
|
||||
# (You can do this manually or we can create a script to merge)
|
||||
```
|
||||
|
||||
## Directory Structure
|
||||
|
||||
```
|
||||
scripts/
|
||||
├── setup_nfr.py # Main automation script
|
||||
├── new_nfr.py # Create blog posts
|
||||
├── nfr_data/ # Generated NFR data files
|
||||
│ ├── nfr_2023.py
|
||||
│ ├── nfr_2024.py
|
||||
│ └── ...
|
||||
└── NFR_AUTOMATION.md # This file
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Ollama Connection Errors
|
||||
|
||||
```bash
|
||||
# Check if ollama is running
|
||||
curl http://192.168.0.109:11434/api/tags
|
||||
|
||||
# Check network connectivity
|
||||
ping 192.168.0.109
|
||||
|
||||
# Try with localhost if running locally
|
||||
python3 scripts/setup_nfr.py 2023 --ollama-host http://localhost:11434
|
||||
```
|
||||
|
||||
### Extraction Problems
|
||||
|
||||
If extraction fails:
|
||||
|
||||
```bash
|
||||
# Try without ollama first (gets basic structure)
|
||||
python3 scripts/setup_nfr.py 2023 --no-ollama
|
||||
|
||||
# Then manually edit the descriptions in nfr_data/nfr_2023.py
|
||||
```
|
||||
|
||||
### Model Not Found
|
||||
|
||||
```bash
|
||||
# On the ollama server, pull the model
|
||||
ssh user@192.168.0.109
|
||||
ollama pull llama3.2
|
||||
|
||||
# Or use a different model you have
|
||||
python3 scripts/setup_nfr.py 2023 --ollama-model mistral
|
||||
```
|
||||
|
||||
## Finding LOC Announcements
|
||||
|
||||
### Recent Years (2010-present)
|
||||
|
||||
Check the newsroom:
|
||||
```
|
||||
https://newsroom.loc.gov/
|
||||
```
|
||||
|
||||
Search for "national film registry" + year
|
||||
|
||||
### Older Years
|
||||
|
||||
Check the blog:
|
||||
```
|
||||
https://blogs.loc.gov/now-see-hear/
|
||||
```
|
||||
|
||||
Or the registry page:
|
||||
```
|
||||
https://www.loc.gov/programs/national-film-preservation-board/film-registry/
|
||||
```
|
||||
|
||||
### Complete Registry List
|
||||
|
||||
For a complete list by year:
|
||||
```
|
||||
https://www.loc.gov/programs/national-film-preservation-board/film-registry/complete-national-film-registry-listing/
|
||||
```
|
||||
|
||||
## Advanced Usage
|
||||
|
||||
### Custom Output Location
|
||||
|
||||
```bash
|
||||
python3 scripts/setup_nfr.py 2023 \
|
||||
--output /tmp/nfr_2023.py
|
||||
```
|
||||
|
||||
### Batch Processing Multiple Years
|
||||
|
||||
```bash
|
||||
# Create a simple loop
|
||||
for year in 2020 2021 2022 2023; do
|
||||
python3 scripts/setup_nfr.py $year
|
||||
done
|
||||
```
|
||||
|
||||
### Using Different AI Models
|
||||
|
||||
```bash
|
||||
# Llama 3.2 (default, good balance)
|
||||
python3 scripts/setup_nfr.py 2023 --ollama-model llama3.2
|
||||
|
||||
# Mistral (faster, less accurate)
|
||||
python3 scripts/setup_nfr.py 2023 --ollama-model mistral
|
||||
|
||||
# Larger models for better extraction
|
||||
python3 scripts/setup_nfr.py 2023 --ollama-model llama3.2:70b
|
||||
```
|
||||
|
||||
## Integration with new_nfr.py
|
||||
|
||||
After generating NFR data, integrate it into `new_nfr.py`:
|
||||
|
||||
### Option 1: Manual Copy
|
||||
|
||||
1. Open `scripts/nfr_data/nfr_2023.py`
|
||||
2. Copy the `NFR_2023` dictionary
|
||||
3. Add it to `scripts/new_nfr.py` after `NFR_2024`
|
||||
4. Update the `create_nfr_post` function to check `NFR_2023` too
|
||||
|
||||
### Option 2: Import (Future Enhancement)
|
||||
|
||||
```python
|
||||
# In new_nfr.py
|
||||
from nfr_data.nfr_2023 import NFR_2023
|
||||
from nfr_data.nfr_2024 import NFR_2024
|
||||
|
||||
NFR_DATA = {
|
||||
2023: NFR_2023,
|
||||
2024: NFR_2024,
|
||||
}
|
||||
```
|
||||
|
||||
## Tips
|
||||
|
||||
1. **Always review the output** - AI extraction is good but not perfect
|
||||
2. **Keep source URLs** - Add them to the generated dictionaries
|
||||
3. **Check film counts** - Should be 25 films per year
|
||||
4. **Verify years** - Make sure film years are in reasonable ranges
|
||||
5. **Edit descriptions** - Feel free to trim or rephrase for your blog
|
||||
|
||||
## Next Steps
|
||||
|
||||
1. Generate data for years you want to cover
|
||||
2. Review and edit the descriptions
|
||||
3. Integrate into `new_nfr.py`
|
||||
4. Start creating blog posts with `python3 scripts/new_nfr.py "Film Title"`
|
||||
|
||||
## Questions?
|
||||
|
||||
- Check if ollama is running: `curl http://192.168.0.109:11434/api/tags`
|
||||
- Test the script with 2024 (known working): `python3 scripts/setup_nfr.py 2024`
|
||||
- Use `--no-ollama` to see basic extraction
|
||||
- Look at generated files in `scripts/nfr_data/`
|
||||
271
scripts/README.md
Normal file
@@ -0,0 +1,271 @@
|
||||
# Blog Scripts
|
||||
|
||||
Automation scripts for The Double Lunch Dispatch blog.
|
||||
|
||||
## Quick Start
|
||||
|
||||
### Install Dependencies
|
||||
|
||||
```bash
|
||||
# Create virtual environment (if not already done)
|
||||
python3 -m venv .venv
|
||||
|
||||
# Activate it
|
||||
source .venv/bin/activate
|
||||
|
||||
# Install dependencies
|
||||
pip install -r requirements.txt
|
||||
```
|
||||
|
||||
### Required: TMDB API Key
|
||||
|
||||
All movie scripts require a TMDB API key.
|
||||
|
||||
```bash
|
||||
# Copy the example config
|
||||
cp scripts/config.example.py scripts/config.py
|
||||
|
||||
# Edit and add your TMDB API key
|
||||
# Get one at: https://www.themoviedb.org/settings/api
|
||||
```
|
||||
|
||||
## Scripts Overview
|
||||
|
||||
### National Film Registry (NFR) Series
|
||||
|
||||
**"Found in the Darkroom"** - A series covering the National Film Registry
|
||||
|
||||
#### Setup NFR Data (New!)
|
||||
|
||||
Automatically fetch and setup data for any NFR year:
|
||||
|
||||
```bash
|
||||
# Setup data for a specific year
|
||||
python3 scripts/setup_nfr.py 2023
|
||||
|
||||
# With a known URL
|
||||
python3 scripts/setup_nfr.py 2015 --url "https://newsroom.loc.gov/news/..."
|
||||
|
||||
# See full documentation
|
||||
cat scripts/NFR_AUTOMATION.md
|
||||
```
|
||||
|
||||
#### Create NFR Movie Post
|
||||
|
||||
Create a blog post for an NFR movie:
|
||||
|
||||
```bash
|
||||
# List 2024 NFR films
|
||||
python3 scripts/new_nfr.py --list-2024
|
||||
|
||||
# Create post by title
|
||||
python3 scripts/new_nfr.py "No Country for Old Men"
|
||||
|
||||
# Create post by IMDB ID
|
||||
python3 scripts/new_nfr.py tt0477348
|
||||
|
||||
# Specify NFR year
|
||||
python3 scripts/new_nfr.py "Terminator 2" --nfr-year 2023
|
||||
```
|
||||
|
||||
### Regular Movie Posts
|
||||
|
||||
**"Frank's Couch"** - Named after Owen's dad from *TV Glow*, who watches TV from the couch
|
||||
|
||||
#### Create Movie Post
|
||||
|
||||
Create a new movie post from IMDB ID:
|
||||
|
||||
```bash
|
||||
# From IMDB ID
|
||||
python3 scripts/new_movie.py tt1234567
|
||||
|
||||
# From IMDB URL
|
||||
python3 scripts/new_movie.py https://www.imdb.com/title/tt1234567/
|
||||
```
|
||||
|
||||
#### Import from Letterboxd
|
||||
|
||||
Import movies from your Letterboxd diary:
|
||||
|
||||
```bash
|
||||
# Interactive mode - pick from recent
|
||||
python3 scripts/import_letterboxd.py
|
||||
|
||||
# Import most recent entry
|
||||
python3 scripts/import_letterboxd.py --latest
|
||||
|
||||
# Just list recent entries
|
||||
python3 scripts/import_letterboxd.py --list
|
||||
|
||||
# Skip to theater/home questions
|
||||
python3 scripts/import_letterboxd.py --theater
|
||||
python3 scripts/import_letterboxd.py --home
|
||||
```
|
||||
|
||||
#### Update Movie Metadata
|
||||
|
||||
Fetch and update movie metadata (poster, director, runtime, etc.):
|
||||
|
||||
```bash
|
||||
# Update all posts with IMDB IDs
|
||||
python3 scripts/fetch_movie_data.py
|
||||
|
||||
# Dry run (preview changes)
|
||||
python3 scripts/fetch_movie_data.py --dry-run
|
||||
|
||||
# Force re-fetch even if data exists
|
||||
python3 scripts/fetch_movie_data.py --force
|
||||
```
|
||||
|
||||
### Beer Posts
|
||||
|
||||
#### Add Beer Call Entry
|
||||
|
||||
Add entries to the beer call yearly log:
|
||||
|
||||
```bash
|
||||
# Interactive mode
|
||||
python3 scripts/new_beercall.py
|
||||
|
||||
# Specific date
|
||||
python3 scripts/new_beercall.py --date 2024-12-19
|
||||
|
||||
# List recent Untappd checkins
|
||||
python3 scripts/new_beercall.py --list
|
||||
```
|
||||
|
||||
## Environment Variables
|
||||
|
||||
### Ollama (for NFR automation)
|
||||
|
||||
```bash
|
||||
# Ollama server (default: http://192.168.0.109:11434)
|
||||
export OLLAMA_HOST=http://localhost:11434
|
||||
|
||||
# Model to use (default: llama3.2)
|
||||
export OLLAMA_MODEL=llama3.2
|
||||
```
|
||||
|
||||
## Common Workflows
|
||||
|
||||
### Creating an NFR Movie Post
|
||||
|
||||
```bash
|
||||
# 1. Create the post
|
||||
python3 scripts/new_nfr.py "Beverly Hills Cop"
|
||||
|
||||
# 2. Update metadata (director, runtime, etc.)
|
||||
python3 scripts/fetch_movie_data.py
|
||||
|
||||
# 3. Edit the post
|
||||
# - Add viewing details (format, rating)
|
||||
# - Write your thoughts
|
||||
# - Add Letterboxd URL
|
||||
|
||||
# 4. Build and preview
|
||||
hugo server -D
|
||||
|
||||
# 5. Publish (remove draft: true)
|
||||
```
|
||||
|
||||
### Importing Theater Movie from Letterboxd
|
||||
|
||||
```bash
|
||||
# 1. Import from Letterboxd
|
||||
python3 scripts/import_letterboxd.py --theater
|
||||
|
||||
# 2. Script will:
|
||||
# - Fetch recent Letterboxd entries
|
||||
# - Let you pick one
|
||||
# - Ask for theater details (venue, time, crew, etc.)
|
||||
# - Download poster from TMDB
|
||||
# - Create draft post
|
||||
|
||||
# 3. Edit and publish
|
||||
```
|
||||
|
||||
### Setting Up a New NFR Year
|
||||
|
||||
```bash
|
||||
# 1. Find the LOC announcement URL for the year
|
||||
# Example: https://newsroom.loc.gov/news/...
|
||||
|
||||
# 2. Run setup script (with ollama for best results)
|
||||
python3 scripts/setup_nfr.py 2023 --url "https://newsroom.loc.gov/..."
|
||||
|
||||
# 3. Review generated file
|
||||
cat scripts/nfr_data/nfr_2023.py
|
||||
|
||||
# 4. Integrate into new_nfr.py
|
||||
# (Copy the dictionary into the main script)
|
||||
|
||||
# 5. Start creating posts!
|
||||
python3 scripts/new_nfr.py "Terminator 2" --nfr-year 2023
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### "Module not found" errors
|
||||
|
||||
```bash
|
||||
# Make sure venv is activated
|
||||
source .venv/bin/activate
|
||||
|
||||
# Install/reinstall dependencies
|
||||
pip install -r requirements.txt
|
||||
```
|
||||
|
||||
### "TMDB API key" errors
|
||||
|
||||
```bash
|
||||
# Check config exists
|
||||
ls scripts/config.py
|
||||
|
||||
# If not, copy example and edit
|
||||
cp scripts/config.example.py scripts/config.py
|
||||
# Then add your API key
|
||||
```
|
||||
|
||||
### Ollama connection errors
|
||||
|
||||
```bash
|
||||
# Test ollama server
|
||||
curl http://192.168.0.109:11434/api/tags
|
||||
|
||||
# Use --no-ollama flag to skip
|
||||
python3 scripts/setup_nfr.py 2023 --no-ollama
|
||||
```
|
||||
|
||||
## Documentation
|
||||
|
||||
- **NFR Automation**: `scripts/NFR_AUTOMATION.md` - Detailed guide for NFR automation with ollama
|
||||
- **Config Example**: `scripts/config.example.py` - Template for API keys
|
||||
|
||||
## Directory Structure
|
||||
|
||||
```
|
||||
scripts/
|
||||
├── README.md # This file
|
||||
├── NFR_AUTOMATION.md # NFR automation guide
|
||||
├── config.example.py # Config template
|
||||
├── config.py # Your config (gitignored)
|
||||
├── nfr_data/ # Generated NFR data
|
||||
│ ├── nfr_2023.py
|
||||
│ └── nfr_2024.py
|
||||
├── venues.json # Beer venue database
|
||||
│
|
||||
├── setup_nfr.py # Setup NFR year data
|
||||
├── new_nfr.py # Create NFR movie post
|
||||
├── new_movie.py # Create movie post
|
||||
├── import_letterboxd.py # Import from Letterboxd
|
||||
├── fetch_movie_data.py # Update movie metadata
|
||||
├── new_beercall.py # Add beer call entry
|
||||
└── new_techpost.py # Create tech post
|
||||
```
|
||||
|
||||
## Getting Help
|
||||
|
||||
- Check the specific script's `--help`: `python3 scripts/new_nfr.py --help`
|
||||
- Read `NFR_AUTOMATION.md` for NFR details
|
||||
- Check error messages - they usually point to the issue
|
||||
20
scripts/build.sh
Executable file
@@ -0,0 +1,20 @@
|
||||
#!/bin/bash
# Build script for marcus-web
# Fetches movie data, then builds Hugo site
#
# Usage: ./scripts/build.sh [hugo args...]
# Any arguments are passed straight through to `hugo`.

# -e: abort on any failing command; -u: treat unset variables as errors;
# -o pipefail: a pipeline fails if any stage fails (not just the last).
set -euo pipefail

# Resolve the repo root relative to this script so the build works
# regardless of the caller's current directory.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(dirname "$SCRIPT_DIR")"

cd "$PROJECT_ROOT"

echo "=== Fetching movie data ==="
python3 scripts/fetch_movie_data.py

echo ""
echo "=== Building Hugo site ==="
hugo "$@"

echo ""
echo "=== Done ==="
|
||||
4
scripts/config.example.py
Normal file
@@ -0,0 +1,4 @@
|
||||
# Copy this file to config.py and fill in your values
|
||||
# config.py is gitignored and will not be committed
|
||||
|
||||
TMDB_API_KEY = "your_tmdb_api_key_here"
|
||||
101
scripts/execution-notes.txt
Normal file
@@ -0,0 +1,101 @@
|
||||
================================================================================
|
||||
MARCUS-WEB SCRIPTS CHEAT SHEET
|
||||
================================================================================
|
||||
|
||||
FIRST TIME SETUP
|
||||
----------------
|
||||
After cloning on a new machine:
|
||||
|
||||
cd marcus-web
|
||||
./scripts/setup.sh
|
||||
|
||||
This creates a .venv and installs dependencies (just 'requests').
|
||||
|
||||
Then either activate the venv:
|
||||
|
||||
source .venv/bin/activate
|
||||
python scripts/import_letterboxd.py
|
||||
|
||||
Or run directly:
|
||||
|
||||
.venv/bin/python scripts/import_letterboxd.py
|
||||
|
||||
|
||||
|
||||
MOVIES (Frank's Couch)
|
||||
----------------------
|
||||
Import a movie from Letterboxd:
|
||||
|
||||
python scripts/import_letterboxd.py
|
||||
|
||||
--latest Import most recent without picking
|
||||
--theater Skip straight to theater questions
|
||||
--home Skip straight to home video questions
|
||||
--list Just show recent movies, don't import
|
||||
|
||||
Workflow: Log movie on Letterboxd → Run script → Pick movie → Answer prompts
|
||||
Creates: content/posts/<movie-slug>.md + downloads poster
|
||||
|
||||
|
||||
BEER CALLS (Luna Juice - Weekly Thursday Meetups)
|
||||
-------------------------------------------------
|
||||
Add a beer call to the yearly log:
|
||||
|
||||
python scripts/new_beercall.py
|
||||
|
||||
--date 2024-12-30 Specific date (for Beer Crawl, holidays, etc)
|
||||
--list Just show recent Untappd checkins
|
||||
|
||||
Workflow: Run script → It checks Untappd for where you were → Pick venue → Done
|
||||
Appends to: content/posts/beercall/2024.md (or 2025.md, etc - auto-created)
|
||||
|
||||
|
||||
LUNA JUICE EVENTS (Festivals, Special Occasions)
|
||||
------------------------------------------------
|
||||
Create a standalone beer event post:
|
||||
|
||||
python scripts/new_lunajuice.py
|
||||
python scripts/new_lunajuice.py "Beer Festival 2025"
|
||||
|
||||
Creates: content/posts/beercall/<event-slug>.md
|
||||
|
||||
|
||||
VENUE DATABASE
|
||||
--------------
|
||||
Known venues are stored in: scripts/venues.json
|
||||
New venues are added automatically when you enter them.
|
||||
|
||||
|
||||
TECH POSTS (Fun Center)
|
||||
-----------------------
|
||||
Create a new technology blog post:
|
||||
|
||||
python scripts/new_techpost.py
|
||||
python scripts/new_techpost.py "My Post Title"
|
||||
|
||||
Prompts for:
|
||||
- Type: How I Did It / Grinds My Gears / Quick Tip
|
||||
- Tags: suggests common ones, you add more
|
||||
- Summary: one-liner
|
||||
|
||||
Creates a skeleton outline based on post type so you just fill in the blanks.
|
||||
|
||||
|
||||
MASTODON COMMENTS
|
||||
-----------------
|
||||
After publishing a post and tooting about it:
|
||||
|
||||
1. Get the Mastodon post ID from the URL (the number at the end)
|
||||
2. Add to your post's front matter:
|
||||
|
||||
mastodon_id: "123456789"
|
||||
|
||||
3. Rebuild site - comments will show with count
|
||||
|
||||
To block a reply:
|
||||
|
||||
mastodon_blocked:
|
||||
- "https://tilde.zone/@someone/123456789"
|
||||
|
||||
|
||||
================================================================================
|
||||
274
scripts/fetch_movie_data.py
Executable file
@@ -0,0 +1,274 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Fetch movie data for Hugo posts based on IMDB ID in frontmatter.
|
||||
|
||||
Scans all posts with an `imdb` field and fetches missing data:
|
||||
- Poster (downloaded locally)
|
||||
- Runtime
|
||||
- Year
|
||||
- Director
|
||||
- Genres
|
||||
|
||||
Usage:
|
||||
python scripts/fetch_movie_data.py # Process all movie posts
|
||||
python scripts/fetch_movie_data.py --dry-run # Show what would be updated
|
||||
python scripts/fetch_movie_data.py --force # Re-fetch even if data exists
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
import requests
|
||||
import yaml
|
||||
|
||||
# Configuration
|
||||
try:
|
||||
from config import TMDB_API_KEY
|
||||
except ImportError:
|
||||
raise SystemExit("Error: scripts/config.py not found. Copy config.example.py to config.py and add your API key.")
|
||||
|
||||
# Paths
|
||||
SCRIPT_DIR = Path(__file__).parent
|
||||
PROJECT_ROOT = SCRIPT_DIR.parent
|
||||
CONTENT_DIR = PROJECT_ROOT / "content" / "posts"
|
||||
IMAGES_DIR = PROJECT_ROOT / "static" / "images" / "posters"
|
||||
|
||||
# Regex to split frontmatter from content
|
||||
FRONTMATTER_RE = re.compile(r'^---\s*\n(.*?)\n---\s*\n', re.DOTALL)
|
||||
|
||||
|
||||
def find_movie_by_imdb(imdb_id):
    """Look up a movie on TMDB by its IMDB identifier.

    Args:
        imdb_id: IMDB title id, e.g. "tt0477348".

    Returns:
        The first matching TMDB movie record (dict), or None when TMDB
        has no movie for this IMDB id.

    Raises:
        requests.HTTPError: if the API call returns a non-2xx status.
    """
    response = requests.get(
        f"https://api.themoviedb.org/3/find/{imdb_id}",
        params={"api_key": TMDB_API_KEY, "external_source": "imdb_id"},
        timeout=10,
    )
    response.raise_for_status()
    # TMDB returns a list of candidates; the first entry is the best match.
    matches = response.json().get("movie_results", [])
    return matches[0] if matches else None
|
||||
|
||||
|
||||
def get_movie_details(tmdb_id):
    """Fetch the full TMDB record for a movie, including its credits.

    Args:
        tmdb_id: TMDB numeric movie id.

    Returns:
        The TMDB movie details dict with an embedded "credits" key.

    Raises:
        requests.HTTPError: if the API call returns a non-2xx status.
    """
    endpoint = f"https://api.themoviedb.org/3/movie/{tmdb_id}"
    # append_to_response folds the credits call into this one request.
    query = {"api_key": TMDB_API_KEY, "append_to_response": "credits"}
    response = requests.get(endpoint, params=query, timeout=10)
    response.raise_for_status()
    return response.json()
|
||||
|
||||
|
||||
def get_directors(credits):
    """Return the names of everyone credited as Director.

    Args:
        credits: TMDB credits payload (a dict with a "crew" list).

    Returns:
        List of director names in crew order; empty when none are found
        or the payload has no "crew" key.
    """
    names = []
    for member in credits.get("crew", []):
        if member.get("job") == "Director":
            names.append(member["name"])
    return names
|
||||
|
||||
|
||||
def slugify(title):
    """Turn a movie title into a lowercase, hyphen-separated URL slug.

    Punctuation is stripped, runs of whitespace (or underscores) become
    single hyphens, and leading/trailing hyphens are trimmed.
    """
    lowered = title.lower()
    # Keep only letters, digits, whitespace and hyphens.
    cleaned = re.sub(r"[^a-z0-9\s-]", "", lowered)
    # Whitespace and underscores collapse into hyphens...
    hyphenated = re.sub(r"[\s_]+", "-", cleaned)
    # ...and any resulting hyphen runs collapse to a single one.
    collapsed = re.sub(r"-+", "-", hyphenated)
    return collapsed.strip("-")
|
||||
|
||||
|
||||
def download_poster(poster_path, filename):
    """Download a poster image from TMDB and save it under IMAGES_DIR.

    Args:
        poster_path: TMDB poster path (e.g. "/abc123.jpg"); a falsy value
            means the movie has no poster.
        filename: Local filename to save the image as.

    Returns:
        The site-relative URL of the saved poster, or None when
        poster_path is empty.

    Raises:
        requests.HTTPError: if the image request fails.
    """
    if not poster_path:
        return None

    url = f"https://image.tmdb.org/t/p/w500{poster_path}"
    resp = requests.get(url, timeout=10)
    resp.raise_for_status()

    IMAGES_DIR.mkdir(parents=True, exist_ok=True)
    filepath = IMAGES_DIR / filename
    filepath.write_bytes(resp.content)
    # Bug fix: this previously returned the literal "/images/posters/(unknown)"
    # instead of interpolating the filename, breaking every poster URL
    # written back into the front matter.
    return f"/images/posters/{filename}"
|
||||
|
||||
|
||||
def parse_post(filepath):
    """Split a markdown post into (frontmatter dict, body text).

    Args:
        filepath: Path to the markdown file.

    Returns:
        (frontmatter, content) on success; (None, full_text) when the
        file has no YAML frontmatter delimiters or the YAML is invalid.
    """
    raw = filepath.read_text()
    header = FRONTMATTER_RE.match(raw)
    if header is None:
        return None, raw

    body = raw[header.end():]
    try:
        meta = yaml.safe_load(header.group(1))
    except yaml.YAMLError:
        # Malformed YAML: hand back the whole file untouched.
        return None, raw
    return meta, body
|
||||
|
||||
|
||||
def write_post(filepath, frontmatter, content):
    """Serialize frontmatter and body back into a markdown post file.

    The YAML is emitted in block style (readable diffs), unicode-safe,
    and with keys kept in insertion order rather than sorted.
    """
    rendered = yaml.dump(
        frontmatter,
        default_flow_style=False,
        allow_unicode=True,
        sort_keys=False,
    )
    filepath.write_text(f"---\n{rendered}---\n{content}")
|
||||
|
||||
|
||||
def process_post(filepath, dry_run=False, force=False):
    """Process a single post, fetching missing movie data.

    Reads the post's frontmatter; when it has an "imdb" id and any of
    poster/runtime/year/director is missing (or force is set), looks the
    film up on TMDB, fills in the gaps, and rewrites the file in place.

    Args:
        filepath: Path to the markdown post.
        dry_run: Only report which fields would be fetched; write nothing.
        force: Re-fetch fields even when they are already present.

    Returns:
        True when the post was updated (or would be, in dry-run mode).
    """
    frontmatter, content = parse_post(filepath)
    if frontmatter is None:
        # No parseable frontmatter — nothing to enrich.
        return False

    imdb_id = frontmatter.get("imdb")
    if not imdb_id:
        # Posts without an IMDB id are not movie posts; skip silently.
        return False

    # Check what's missing
    has_poster = bool(frontmatter.get("poster"))
    has_runtime = bool(frontmatter.get("runtime"))
    has_year = bool(frontmatter.get("year"))
    has_director = bool(frontmatter.get("director"))

    needs_update = not (has_poster and has_runtime and has_year and has_director)

    if not needs_update and not force:
        return False

    print(f"\nProcessing: {filepath.name}")
    print(f" IMDB: {imdb_id}")

    if dry_run:
        # Report the missing fields without touching the network or disk.
        missing = []
        if not has_poster:
            missing.append("poster")
        if not has_runtime:
            missing.append("runtime")
        if not has_year:
            missing.append("year")
        if not has_director:
            missing.append("director")
        print(f" Would fetch: {', '.join(missing)}")
        return True

    # Find movie on TMDB
    print(" Finding movie on TMDB...")
    movie = find_movie_by_imdb(imdb_id)
    if not movie:
        print(f" ERROR: Movie not found for IMDB ID: {imdb_id}")
        return False

    tmdb_id = movie["id"]
    print(f" Found: {movie.get('title')} (TMDB: {tmdb_id})")

    # Get full details
    print(" Fetching details...")
    details = get_movie_details(tmdb_id)

    updated = False

    # Update poster
    if not has_poster or force:
        poster_path = details.get("poster_path")
        if poster_path:
            # Poster filename is derived from the post title, not the IMDB id.
            title = frontmatter.get("title", "movie")
            filename = f"{slugify(title)}.jpg"
            print(f" Downloading poster...")
            poster_url = download_poster(poster_path, filename)
            if poster_url:
                frontmatter["poster"] = poster_url
                print(f" Poster saved: {poster_url}")
                updated = True

    # Update runtime
    if not has_runtime or force:
        runtime = details.get("runtime")
        if runtime:
            frontmatter["runtime"] = runtime
            print(f" Runtime: {runtime} minutes")
            updated = True

    # Update year
    if not has_year or force:
        release_date = details.get("release_date", "")
        if release_date:
            # TMDB release_date is "YYYY-MM-DD"; keep just the year.
            year = release_date.split("-")[0]
            frontmatter["year"] = int(year)
            print(f" Year: {year}")
            updated = True

    # Update director
    if not has_director or force:
        credits = details.get("credits", {})
        directors = get_directors(credits)
        if directors:
            # Store as string if single, list if multiple
            if len(directors) == 1:
                frontmatter["director"] = directors[0]
            else:
                frontmatter["director"] = directors
            print(f" Director: {', '.join(directors)}")
            updated = True

    # Update genres (bonus)
    if "genres" not in frontmatter or force:
        genres = [g["name"] for g in details.get("genres", [])]
        if genres:
            frontmatter["genres"] = genres
            updated = True

    if updated:
        # Persist the enriched frontmatter with the untouched body.
        write_post(filepath, frontmatter, content)
        print(" Updated!")

    return updated
|
||||
|
||||
|
||||
def main():
    """CLI entry point: enrich one post, or scan all posts under CONTENT_DIR."""
    parser = argparse.ArgumentParser(description="Fetch movie data for Hugo posts")
    parser.add_argument("--dry-run", action="store_true", help="Show what would be updated")
    parser.add_argument("--force", action="store_true", help="Re-fetch even if data exists")
    parser.add_argument("file", nargs="?", help="Specific file to process")
    args = parser.parse_args()

    if args.file:
        filepath = Path(args.file)
        if not filepath.is_absolute():
            # Relative paths are resolved against the repo root, not the CWD.
            filepath = PROJECT_ROOT / filepath
        if not filepath.exists():
            print(f"File not found: {filepath}")
            sys.exit(1)
        files = [filepath]
    else:
        # No file argument: process every markdown post recursively.
        files = list(CONTENT_DIR.glob("**/*.md"))

    print(f"Scanning {len(files)} posts for movie data...")

    updated = 0
    for filepath in files:
        if process_post(filepath, dry_run=args.dry_run, force=args.force):
            updated += 1

    print(f"\n{'Would update' if args.dry_run else 'Updated'}: {updated} posts")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
477
scripts/import_letterboxd.py
Executable file
@@ -0,0 +1,477 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Import movies from Letterboxd diary to Hugo draft posts.
|
||||
|
||||
Usage:
|
||||
python scripts/import_letterboxd.py # Interactive mode - pick from recent
|
||||
python scripts/import_letterboxd.py --latest # Import most recent entry
|
||||
python scripts/import_letterboxd.py --list # Just list recent entries
|
||||
python scripts/import_letterboxd.py --theater # Skip to theater questions
|
||||
python scripts/import_letterboxd.py --home # Skip to home video questions
|
||||
|
||||
The script will prompt for viewing details (theater vs home) and pre-fill
|
||||
the front matter table accordingly.
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
from urllib.parse import urlparse
|
||||
import xml.etree.ElementTree as ET
|
||||
|
||||
import requests
|
||||
|
||||
# Configuration
LETTERBOXD_USER = "marcusEID"  # Letterboxd account whose diary is imported
# The TMDB key lives in scripts/config.py, which is git-ignored.
try:
    from config import TMDB_API_KEY
except ImportError:
    raise SystemExit("Error: scripts/config.py not found. Copy config.example.py to config.py and add your API key.")
RSS_URL = f"https://letterboxd.com/{LETTERBOXD_USER}/rss/"

# Paths (relative to script location)
SCRIPT_DIR = Path(__file__).parent  # scripts/
PROJECT_ROOT = SCRIPT_DIR.parent  # repository root
CONTENT_DIR = PROJECT_ROOT / "content" / "posts"  # where drafts are written
IMAGES_DIR = PROJECT_ROOT / "static" / "images" / "posters"  # downloaded posters

# XML namespaces in Letterboxd RSS (required by Element.find for prefixed tags)
NAMESPACES = {
    "letterboxd": "https://letterboxd.com",
    "tmdb": "https://themoviedb.org",
    "dc": "http://purl.org/dc/elements/1.1/",
}
|
||||
|
||||
|
||||
def fetch_rss():
    """Fetch the Letterboxd RSS feed and return the parsed XML root.

    Raises:
        requests.HTTPError: On a non-2xx response.
    """
    response = requests.get(RSS_URL, timeout=10)
    response.raise_for_status()
    return ET.fromstring(response.content)
|
||||
|
||||
|
||||
def parse_movies(root):
    """Extract diary film entries from the RSS tree (list items are skipped).

    Returns:
        List of dicts with tmdb_id, title, year, rating, watched_date,
        rewatch and letterboxd_url keys.
    """
    def text_of(item, tag, default=""):
        # Element text for `tag`, or the default when the tag is absent.
        node = item.find(tag, NAMESPACES)
        return node.text if node is not None else default

    entries = []
    for item in root.findall(".//item"):
        # Letterboxd lists have no tmdb:movieId; only film entries carry one.
        movie_id = item.find("tmdb:movieId", NAMESPACES)
        if movie_id is None:
            continue

        entries.append({
            "tmdb_id": movie_id.text,
            "title": text_of(item, "letterboxd:filmTitle", "Unknown"),
            "year": text_of(item, "letterboxd:filmYear"),
            "rating": text_of(item, "letterboxd:memberRating"),
            "watched_date": text_of(item, "letterboxd:watchedDate"),
            "rewatch": text_of(item, "letterboxd:rewatch", "No"),
            "letterboxd_url": text_of(item, "link"),
        })

    return entries
|
||||
|
||||
|
||||
def get_tmdb_details(tmdb_id):
    """Look up a movie on TMDB; return its IMDB id, poster path and overview.

    Raises:
        requests.HTTPError: On a non-2xx response.
    """
    resp = requests.get(
        f"https://api.themoviedb.org/3/movie/{tmdb_id}",
        params={"api_key": TMDB_API_KEY},
        timeout=10,
    )
    resp.raise_for_status()
    payload = resp.json()

    # Missing fields fall back to empty strings so callers can test truthiness.
    return {
        "imdb_id": payload.get("imdb_id", ""),
        "poster_path": payload.get("poster_path", ""),
        "overview": payload.get("overview", ""),
    }
|
||||
|
||||
|
||||
def download_poster(poster_path, filename):
    """Download poster from TMDB to static/images/posters/.

    Args:
        poster_path: TMDB poster path; falsy means no poster exists.
        filename: Basename to save the image under in IMAGES_DIR.

    Returns:
        Site-relative URL of the saved poster, or None when no poster
        path was supplied.

    Raises:
        requests.HTTPError: If the image request returns a non-2xx status.
    """
    if not poster_path:
        print(" No poster available")
        return None

    # Use w500 size for good quality without being huge
    url = f"https://image.tmdb.org/t/p/w500{poster_path}"
    resp = requests.get(url, timeout=10)
    resp.raise_for_status()

    IMAGES_DIR.mkdir(parents=True, exist_ok=True)
    filepath = IMAGES_DIR / filename
    filepath.write_bytes(resp.content)
    print(f" Poster saved: {filepath.relative_to(PROJECT_ROOT)}")
    # BUG FIX: the returned URL must reference the saved filename, not a
    # literal placeholder string.
    return f"/images/posters/{filename}"
|
||||
|
||||
|
||||
def slugify(title):
    """Turn a film title into a lowercase, hyphen-separated URL slug."""
    # Strip punctuation first, then normalize separators to single hyphens.
    slug = re.sub(r"[\s_]+", "-", re.sub(r"[^a-z0-9\s-]", "", title.lower()))
    return re.sub(r"-+", "-", slug).strip("-")
|
||||
|
||||
|
||||
def rating_to_stars(rating):
    """Render a Letterboxd numeric rating as asterisks, e.g. "*** 1/2 (3.5)".

    An empty/falsy rating string yields an empty string.
    """
    if not rating:
        return ""

    value = float(rating)
    whole = int(value)
    display = "*" * whole
    # Any fractional part of 0.5 or more gets a textual half-star.
    if value - whole >= 0.5:
        display += " 1/2"
    return f"{display} ({rating})"
|
||||
|
||||
|
||||
def prompt_viewing_details():
    """Prompt user for viewing location details.

    Loops until the user picks "1" (theater) or "2" (home), then delegates
    to the matching prompt function.

    Returns:
        The viewing-details dict produced by prompt_theater_details() or
        prompt_home_details().
    """
    print("\nWhere did you watch this?")
    print(" 1. Theater")
    print(" 2. Home")

    while True:
        choice = input("Enter 1 or 2: ").strip()
        if choice == "1":
            return prompt_theater_details()
        elif choice == "2":
            return prompt_home_details()
        else:
            print("Please enter 1 or 2")
|
||||
|
||||
|
||||
def prompt_theater_details():
    """Prompt for theater-specific details.

    Returns:
        Dict with type="theater" plus theater name/tag, show time, theater
        number, pizza answer, tickets and crew strings.
    """
    print("\nWhich theater?")
    # (menu number, display name, Hugo tag); "Other" has no predefined tag.
    theaters = [
        ("1", "Gucci", "gucci"),
        ("2", "Ghost Theater", "ghost-theater"),
        ("3", "Marcel", "marcel"),
        ("4", "AMC South", "amc-south"),
        ("5", "AMC Lakeline", "amc-lakeline"),
        ("6", "Other", None),
    ]
    for num, name, _ in theaters:
        print(f" {num}. {name}")

    theater_name = ""
    theater_tag = None
    while True:
        choice = input("Enter number: ").strip()
        for num, name, tag in theaters:
            if choice == num:
                if name == "Other":
                    theater_name = input("Theater name: ").strip()
                else:
                    theater_name = name
                theater_tag = tag
                break
        # NOTE: choosing "Other" and entering an empty name re-loops here.
        if theater_name:
            break
        print("Please enter a valid number")

    show_time = input("Show time (e.g. 7:30pm): ").strip()
    theater_num = input("Theater number: ").strip()
    pizza = input("Pizza? (Yes/No): ").strip() or ""
    tickets = input("Tickets (e.g. 'At Box Office', 'A-List'): ").strip()
    crew = input("Crew (e.g. 'Me, Coach T, Science Bro'): ").strip()

    return {
        "type": "theater",
        "theater": theater_name,
        "theater_tag": theater_tag,
        "show_time": show_time,
        "theater_num": theater_num,
        "pizza": pizza,
        "tickets": tickets,
        "crew": crew,
    }
|
||||
|
||||
|
||||
def prompt_home_details():
    """Prompt for home viewing details.

    Returns:
        Dict shaped like prompt_theater_details()'s result so the post
        template can treat both uniformly: "theater" is fixed to
        "Home Video", and "theater_num" carries the room/location.
    """
    location = input("Location (e.g. 'Living Room', 'Woodrow Apt'): ").strip() or "Home"
    show_time = input("Show time (optional, e.g. 'evening'): ").strip()
    pizza = input("Pizza? (Yes/No): ").strip() or "No"

    # Media format
    print("\nMedia format?")
    media_options = [
        ("1", "Online"),
        ("2", "BluRay"),
        ("3", "DVD"),
        ("4", "VHS"),
    ]
    for num, name in media_options:
        print(f" {num}. {name}")
    # Unrecognized input silently keeps the default.
    media = "Online"
    media_choice = input("Enter number (default 1): ").strip()
    for num, name in media_options:
        if media_choice == num:
            media = name
            break

    # Screen type
    print("\nScreen?")
    screen_options = [
        ("1", "4k TV"),
        ("2", "4k Computer"),
        ("3", "1080p Computer"),
        ("4", "Cell Phone"),
        ("5", "Someone Elses TV"),
    ]
    for num, name in screen_options:
        print(f" {num}. {name}")
    screen = "4k TV"
    screen_choice = input("Enter number (default 1): ").strip()
    for num, name in screen_options:
        if screen_choice == num:
            screen = name
            break

    return {
        "type": "home",
        "theater": "Home Video",
        "theater_tag": "homevideo",
        "show_time": show_time,
        "theater_num": location,
        "pizza": pizza,
        "media": media,
        "screen": screen,
    }
|
||||
|
||||
|
||||
def create_draft_post(movie, tmdb_details, poster_url, viewing_details=None):
    """Create a Hugo draft post for the movie.

    Args:
        movie: Entry dict from parse_movies (title, rating, watched_date...).
        tmdb_details: Dict from get_tmdb_details (imdb_id, poster_path...).
        poster_url: Site-relative poster URL, or None.
        viewing_details: Dict from one of the prompt_* functions; when
            None, the table/tags are emitted with placeholder defaults.

    Returns:
        Path of the new draft, or None when a post with that slug already
        exists (existing posts are never overwritten).
    """
    slug = slugify(movie["title"])
    filename = f"{slug}.md"
    filepath = CONTENT_DIR / filename

    if filepath.exists():
        print(f" Post already exists: {filepath.relative_to(PROJECT_ROOT)}")
        return None

    # Format the date for Hugo
    now = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")

    # Format watched date nicely
    watched = movie["watched_date"]
    if watched:
        try:
            dt = datetime.strptime(watched, "%Y-%m-%d")
            watched_display = dt.strftime("%B %d, %Y")
        except ValueError:
            # Unexpected format: fall back to the raw RSS value.
            watched_display = watched
    else:
        watched_display = ""

    imdb_id = tmdb_details.get("imdb_id", "")
    rating_display = rating_to_stars(movie["rating"])

    # Use viewing details if provided, otherwise use empty defaults
    if viewing_details:
        show_time = viewing_details.get("show_time", "")
        theater = viewing_details.get("theater", "")
        theater_num = viewing_details.get("theater_num", "")
        pizza = viewing_details.get("pizza", "")
        is_home = viewing_details.get("type") == "home"

        # Build tags based on viewing type
        tags = []
        if viewing_details.get("theater_tag"):
            tags.append(viewing_details["theater_tag"])
        tags.extend(["no-expectations"])
        if pizza.lower() == "yes":
            tags.append("had pizza")
        tags_yaml = "\n".join(f" - {tag}" for tag in tags)

        # Different last two rows for home vs theater
        if is_home:
            row5_label = "Media"
            row5_value = viewing_details.get("media", "")
            row7_label = "Screen"
            row7_value = viewing_details.get("screen", "")
        else:
            row5_label = "Tickets"
            row5_value = viewing_details.get("tickets", "")
            row7_label = "Crew"
            row7_value = viewing_details.get("crew", "")
    else:
        # No details were collected: emit the full tag menu for manual pruning.
        show_time = ""
        theater = ""
        theater_num = ""
        pizza = ""
        row5_label = "Tickets"
        row5_value = ""
        row7_label = "Crew"
        row7_value = ""
        tags_yaml = """ - gucci
 - ghost-theater
 - marcel
 - amc-south
 - amc-lakeline
 - anticipated
 - no-expectations
 - had pizza"""

    # Build the frontmatter and content
    # ({{{{ ... }}}} renders as the literal {{< ... >}} Hugo shortcode.)
    content = f'''---
title: '{movie["title"]}'
date: {now}
draft: true
series: "Frank's Couch"
summary: ""
imdb: "{imdb_id}"
poster: "{poster_url or ''}"
tags:
{tags_yaml}
# Mastodon comments: After posting about this on Mastodon, add the post ID below.
# Get the ID from the end of the toot URL, e.g. https://tilde.zone/@mnw/123456789
# mastodon_id: ""
# To block a reply from showing, add its full URL to this list:
# mastodon_blocked:
# - "https://tilde.zone/@someone/123456789"
---
{{{{< imdbposter >}}}}

| Date watched | {watched_display:<17} |
|---------------------|-------------------|
| Show Time | {show_time:<17} |
| Theater | {theater:<17} |
| Theater Number | {theater_num:<17} |
| Pizza | {pizza:<17} |
| {row5_label:<19} | {row5_value:<17} |
| Letterboxd Rating | {rating_display:<17} |
| {row7_label:<19} | {row7_value:<17} |

{{{{< /imdbposter >}}}}

Write your review here...

'''

    filepath.write_text(content)
    print(f" Draft created: {filepath.relative_to(PROJECT_ROOT)}")
    return filepath
|
||||
|
||||
|
||||
def display_movies(movies, limit=10):
    """Print up to `limit` recent Letterboxd entries, numbered from 1."""
    print(f"\nRecent movies from Letterboxd ({LETTERBOXD_USER}):\n")
    for index, entry in enumerate(movies[:limit], start=1):
        rewatch_note = " (rewatch)" if entry["rewatch"] == "Yes" else ""
        rating_note = f" - {entry['rating']}*" if entry["rating"] else ""
        print(f" {index}. {entry['title']} ({entry['year']}){rating_note}{rewatch_note}")
        print(f" Watched: {entry['watched_date']}")
        print()
|
||||
|
||||
|
||||
def import_movie(movie, viewing_mode=None):
    """Import a single movie: fetch details, download poster, create post.

    Args:
        movie: Movie data from Letterboxd RSS
        viewing_mode: 'theater', 'home', or None (will prompt)

    Returns:
        Path of the created draft, or None when the post already existed.
    """
    print(f"\nImporting: {movie['title']} ({movie['year']})")

    # Get viewing details (before any network calls, so a typo doesn't
    # waste an API round-trip)
    if viewing_mode == "theater":
        viewing_details = prompt_theater_details()
    elif viewing_mode == "home":
        viewing_details = prompt_home_details()
    else:
        viewing_details = prompt_viewing_details()

    # Get TMDB details
    print("\n Fetching TMDB details...")
    tmdb = get_tmdb_details(movie["tmdb_id"])

    # Download poster
    poster_url = None
    if tmdb["poster_path"]:
        print(" Downloading poster...")
        poster_filename = f"{slugify(movie['title'])}.jpg"
        poster_url = download_poster(tmdb["poster_path"], poster_filename)

    # Create draft post
    print(" Creating draft post...")
    filepath = create_draft_post(movie, tmdb, poster_url, viewing_details)

    if filepath:
        print(f"\nDone! Edit your draft at: {filepath.relative_to(PROJECT_ROOT)}")
        if tmdb.get("imdb_id"):
            print(f"IMDB: https://www.imdb.com/title/{tmdb['imdb_id']}/")

    return filepath
|
||||
|
||||
|
||||
def main():
    """CLI entry point: list, auto-import, or interactively pick a movie."""
    parser = argparse.ArgumentParser(description="Import Letterboxd movies to Hugo")
    parser.add_argument("--latest", action="store_true", help="Import most recent entry")
    parser.add_argument("--list", action="store_true", help="Just list recent entries")
    parser.add_argument("--count", type=int, default=10, help="Number of entries to show")
    parser.add_argument("--theater", action="store_true", help="Skip viewing prompt, go straight to theater questions")
    parser.add_argument("--home", action="store_true", help="Skip viewing prompt, go straight to home questions")
    args = parser.parse_args()

    # Determine viewing mode from flags (--theater wins if both are given)
    viewing_mode = None
    if args.theater:
        viewing_mode = "theater"
    elif args.home:
        viewing_mode = "home"

    print("Fetching Letterboxd RSS feed...")
    try:
        root = fetch_rss()
    except Exception as e:
        print(f"Error fetching RSS: {e}")
        sys.exit(1)

    movies = parse_movies(root)
    if not movies:
        print("No movies found in feed.")
        sys.exit(1)

    if args.list:
        display_movies(movies, args.count)
        sys.exit(0)

    if args.latest:
        # Feed is newest-first, so index 0 is the most recent diary entry.
        import_movie(movies[0], viewing_mode)
        sys.exit(0)

    # Interactive mode
    display_movies(movies, args.count)

    try:
        choice = input("Enter number to import (or 'q' to quit): ").strip()
        if choice.lower() == 'q':
            sys.exit(0)
        idx = int(choice) - 1
        if 0 <= idx < len(movies):
            import_movie(movies[idx], viewing_mode)
        else:
            print("Invalid selection")
            sys.exit(1)
    except (ValueError, KeyboardInterrupt):
        # Non-numeric input or Ctrl-C both abort cleanly.
        print("\nCancelled")
        sys.exit(1)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
347
scripts/new_beercall.py
Executable file
@@ -0,0 +1,347 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Add a new Beer Call entry to the yearly log.
|
||||
|
||||
Usage:
|
||||
python scripts/new_beercall.py # Interactive, defaults to last Thursday
|
||||
python scripts/new_beercall.py --date 2024-12-19 # Specific date
|
||||
python scripts/new_beercall.py --list # Just show recent Untappd checkins
|
||||
|
||||
Fetches recent checkins from Untappd RSS to help identify venue.
|
||||
Beer calls are typically on Thursdays, except for special events like
|
||||
Beer Crawl (around New Year's) or when holidays fall on Thursday.
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import re
|
||||
import sys
|
||||
import xml.etree.ElementTree as ET
|
||||
from datetime import datetime, timedelta
|
||||
from pathlib import Path
|
||||
|
||||
import requests
|
||||
|
||||
# Configuration
# NOTE(review): this RSS feed key is a credential committed to source
# control; consider loading it from scripts/config.py (like TMDB_API_KEY
# in import_letterboxd.py) and rotating the exposed key.
UNTAPPD_RSS = "https://untappd.com/rss/user/Craniumslows?key=e8110a1087c289fdb992448e75adf35c"

# Paths
SCRIPT_DIR = Path(__file__).parent  # scripts/
PROJECT_ROOT = SCRIPT_DIR.parent  # repository root
BEERCALL_DIR = PROJECT_ROOT / "content" / "posts" / "beercall"  # yearly logs
VENUES_FILE = SCRIPT_DIR / "venues.json"  # persistent known-venue database
|
||||
|
||||
|
||||
def load_venues():
    """Load the venue database; an absent file yields an empty dict."""
    if not VENUES_FILE.exists():
        return {}
    with open(VENUES_FILE) as handle:
        return json.load(handle)
|
||||
|
||||
|
||||
def save_venues(venues):
    """Persist the venue database as pretty-printed JSON."""
    with open(VENUES_FILE, "w") as handle:
        json.dump(venues, handle, indent=2)
|
||||
|
||||
|
||||
def fetch_untappd_rss():
    """Fetch the Untappd RSS feed.

    Returns:
        The parsed XML root, or None when the request or the XML parse
        fails for any reason (a warning is printed instead of raising).
    """
    try:
        response = requests.get(UNTAPPD_RSS, timeout=10)
        response.raise_for_status()
        return ET.fromstring(response.content)
    except Exception as e:
        print(f"Warning: Could not fetch Untappd RSS: {e}")
        return None
|
||||
|
||||
|
||||
def parse_checkins(root):
    """Turn Untappd RSS items into checkin dicts.

    Each dict has date (aware datetime), venue, beer, brewery and notes.
    Returns an empty list when root is None; items whose pubDate cannot
    be parsed are skipped.
    """
    if root is None:
        return []

    checkins = []
    for item in root.findall(".//item"):
        title = item.find("title")
        pub_date = item.find("pubDate")
        description = item.find("description")

        if title is None or pub_date is None:
            continue

        # Titles look like: "Cranium S. is drinking a Beer by Brewery at Venue"
        title_text = title.text or ""

        # Venue is everything after the final " at ".
        venue_match = re.search(r" at (.+)$", title_text)
        venue = venue_match.group(1) if venue_match else ""

        # Beer/brewery sit between "is drinking a(n)" and " at ".
        beer, brewery = "", ""
        beer_match = re.search(r"is drinking an? (.+) by (.+?) at", title_text)
        if beer_match:
            beer, brewery = beer_match.group(1), beer_match.group(2)

        try:
            when = datetime.strptime(pub_date.text, "%a, %d %b %Y %H:%M:%S %z")
        except ValueError:
            continue

        checkins.append({
            "date": when,
            "venue": venue,
            "beer": beer,
            "brewery": brewery,
            "notes": description.text if description is not None else "",
        })

    return checkins
|
||||
|
||||
|
||||
def get_last_thursday():
    """Return the most recent Thursday as a datetime.

    On a Thursday morning (before noon) the beer call presumably hasn't
    happened yet, so the previous Thursday is returned instead.
    """
    now = datetime.now()
    offset = (now.weekday() - 3) % 7  # Thursday is weekday 3
    if offset == 0 and now.hour < 12:
        offset = 7
    return now - timedelta(days=offset)
|
||||
|
||||
|
||||
def find_venue_by_name(venues, name):
    """Match an Untappd venue name against the venue database.

    A venue matches on its canonical name (case-insensitive), on an exact
    alias, or when an alias appears as a substring of the given name.

    Returns:
        (key, venue) for the first match, otherwise (None, None).
    """
    needle = name.lower()
    for key, venue in venues.items():
        if needle == venue["name"].lower():
            return key, venue
        aliases = (alias.lower() for alias in venue.get("aliases", []))
        if any(alias == needle or alias in needle for alias in aliases):
            return key, venue
    return None, None
|
||||
|
||||
|
||||
def display_venues(venues):
    """Print venues alphabetically with 1-based numbers; return the sorted pairs."""
    print("\nKnown venues:")
    ordered = sorted(venues.items(), key=lambda pair: pair[1]["name"])
    for number, (key, venue) in enumerate(ordered, start=1):
        print(f" {number:2}. {venue['name']}")
    return ordered
|
||||
|
||||
|
||||
def get_or_create_year_file(year):
    """Get the path to the year's beer call log, creating if needed.

    A new file is seeded with TOML frontmatter for the "Luna Juice"
    series; existing files are returned untouched.
    """
    BEERCALL_DIR.mkdir(parents=True, exist_ok=True)
    filepath = BEERCALL_DIR / f"{year}.md"

    if not filepath.exists():
        # Create new year file with frontmatter
        # NOTE(review): datetime.now() is local time but is formatted with a
        # trailing "Z" (UTC designator) — confirm whether Hugo cares.
        content = f"""+++
title = 'Beer Call Log for {year}'
date = {datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ')}
draft = false
summary = 'A listing of the beer calls that I have remembered to write down in {year}'
series = "Luna Juice"
+++

"""
        filepath.write_text(content)
        print(f"Created new log file: {filepath.relative_to(PROJECT_ROOT)}")

    return filepath
|
||||
|
||||
|
||||
def format_date_header(date):
    """Format a date for an entry header, e.g. "December 19, 2024".

    Uses date.day directly rather than strftime's "%-d": the no-pad flag
    is a glibc extension and raises/misbehaves on Windows strftime.
    """
    return f"{date.strftime('%B')} {date.day}, {date.year}"
|
||||
|
||||
|
||||
def add_entry(filepath, venue_name, address, beerlist, date, attendees, notes):
    """Insert a new beer-call entry at the top of the year's log file.

    Entries are kept newest-first, so the new block goes immediately
    after the TOML frontmatter (skipping any blank lines that follow it).

    Returns:
        True (the file is always rewritten).
    """
    lines = filepath.read_text().split("\n")

    # Locate the line just past the closing "+++" of the frontmatter.
    frontmatter_end = 0
    seen_opening = False
    for index, line in enumerate(lines):
        if line.strip() != "+++":
            continue
        if seen_opening:
            frontmatter_end = index + 1
            break
        seen_opening = True

    date_str = format_date_header(date)
    entry = f"""
# {venue_name} - {date_str}
| | |
| :------------------- | :---------------- |
| Location | {address} |
| Beerlist | {beerlist} |
| Attendees | {attendees} |
| Notes | {notes} |

"""

    # Skip blank lines directly following the frontmatter.
    insert_pos = frontmatter_end
    while insert_pos < len(lines) and not lines[insert_pos].strip():
        insert_pos += 1

    updated = lines[:insert_pos] + entry.split("\n") + lines[insert_pos:]
    filepath.write_text("\n".join(updated))

    return True
|
||||
|
||||
|
||||
def main():
    """CLI entry point: pick a date and venue, then append a log entry."""
    parser = argparse.ArgumentParser(description="Add a new Beer Call entry")
    parser.add_argument("--date", help="Date of beer call (YYYY-MM-DD), default is last Thursday")
    parser.add_argument("--list", action="store_true", help="Just list recent Untappd checkins")
    args = parser.parse_args()

    # Load venues
    venues = load_venues()

    # Determine target date
    if args.date:
        try:
            target_date = datetime.strptime(args.date, "%Y-%m-%d")
        except ValueError:
            print("Invalid date format. Use YYYY-MM-DD")
            sys.exit(1)
    else:
        target_date = get_last_thursday()

    # NOTE(review): "%-d" is a glibc-only strftime flag; this line breaks on
    # Windows. Consider formatting with target_date.day instead.
    print(f"Beer Call date: {target_date.strftime('%A, %B %-d, %Y')}")

    # Fetch Untappd checkins (best-effort; proceeds with an empty list on failure)
    print("\nFetching Untappd checkins...")
    root = fetch_untappd_rss()
    checkins = parse_checkins(root)

    # Filter to target date
    target_date_str = target_date.strftime("%Y-%m-%d")
    day_checkins = [c for c in checkins if c["date"].strftime("%Y-%m-%d") == target_date_str]

    if args.list:
        print(f"\nRecent Untappd checkins:")
        for c in checkins[:15]:
            print(f" {c['date'].strftime('%Y-%m-%d %H:%M')} - {c['beer']} at {c['venue']}")
        sys.exit(0)

    # Show checkins for the target date
    if day_checkins:
        print(f"\nUntappd checkins on {target_date_str}:")
        venues_seen = set()
        for c in day_checkins:
            if c["venue"] not in venues_seen:
                print(f" - {c['venue']}: {c['beer']} by {c['brewery']}")
                venues_seen.add(c["venue"])

        # Try to suggest a venue
        suggested_venue = None
        for c in day_checkins:
            key, venue = find_venue_by_name(venues, c["venue"])
            if venue:
                suggested_venue = (key, venue, c["venue"])
                break
    else:
        print(f"\nNo Untappd checkins found for {target_date_str}")
        suggested_venue = None

    # Venue selection
    print("\n" + "=" * 50)
    if suggested_venue:
        key, venue, untappd_name = suggested_venue
        print(f"Suggested venue from Untappd: {venue['name']}")
        use_suggested = input("Use this venue? (Y/n): ").strip().lower()
        if use_suggested != "n":
            selected_venue = venue
            selected_key = key
        else:
            selected_venue = None
    else:
        selected_venue = None

    if not selected_venue:
        # Manual selection: known venue, brand-new venue, or a skip entry.
        sorted_venues = display_venues(venues)
        print(f" {len(sorted_venues) + 1}. [New venue]")
        print(f" {len(sorted_venues) + 2}. [Skip/Out of Town]")

        choice = input("\nSelect venue number: ").strip()
        try:
            idx = int(choice) - 1
            if idx == len(sorted_venues):
                # New venue
                venue_name = input("Venue name: ").strip()
                address = input("Address: ").strip()
                beerlist = input("Beer list URL: ").strip()

                # Add to database
                key = venue_name.lower().replace(" ", "-").replace("'", "")
                venues[key] = {
                    "name": venue_name,
                    "aliases": [venue_name],
                    "address": address,
                    "beerlist": beerlist,
                }
                save_venues(venues)
                print(f"Added {venue_name} to venue database!")
                selected_venue = venues[key]
                selected_key = key
            elif idx == len(sorted_venues) + 1:
                # Out of town / skip: write a placeholder entry and exit.
                venue_name = input("Title (e.g., 'Out of Town', 'Holiday'): ").strip() or "Out of Town"
                notes = input("Notes: ").strip()

                filepath = get_or_create_year_file(target_date.year)
                add_entry(filepath, venue_name, "NA", "NA", target_date, "DNR", notes)
                print(f"\nEntry added to {filepath.relative_to(PROJECT_ROOT)}")
                sys.exit(0)
            elif 0 <= idx < len(sorted_venues):
                selected_key, selected_venue = sorted_venues[idx]
            else:
                print("Invalid selection")
                sys.exit(1)
        except ValueError:
            print("Invalid selection")
            sys.exit(1)

    # Collect attendees and notes
    print(f"\nVenue: {selected_venue['name']}")
    print(f"Address: {selected_venue['address']}")
    print(f"Beerlist: {selected_venue['beerlist']}")

    attendees = input("\nAttendees (comma-separated, or 'DNR'): ").strip() or "DNR"
    notes = input("Notes: ").strip()

    # Add to year file
    filepath = get_or_create_year_file(target_date.year)
    add_entry(
        filepath,
        selected_venue["name"],
        selected_venue["address"],
        selected_venue["beerlist"],
        target_date,
        attendees,
        notes,
    )

    print(f"\nEntry added to {filepath.relative_to(PROJECT_ROOT)}")
    print("Done!")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
89
scripts/new_lunajuice.py
Executable file
@@ -0,0 +1,89 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Create a new standalone Luna Juice post (festivals, events, special occasions).
|
||||
|
||||
Usage:
|
||||
python scripts/new_lunajuice.py # Interactive
|
||||
python scripts/new_lunajuice.py "Beer Festival 2025" # With title
|
||||
|
||||
For regular weekly beer calls, use new_beercall.py instead.
|
||||
This script is for special events that deserve their own post.
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import re
|
||||
import sys
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
|
||||
# Paths
|
||||
SCRIPT_DIR = Path(__file__).parent
|
||||
PROJECT_ROOT = SCRIPT_DIR.parent
|
||||
CONTENT_DIR = PROJECT_ROOT / "content" / "posts" / "beercall"
|
||||
|
||||
|
||||
def slugify(title):
    """Return a lowercase, hyphen-separated slug suitable for filenames/URLs."""
    lowered = title.lower()
    # Drop everything except lowercase letters, digits, whitespace, and hyphens.
    cleaned = re.sub(r"[^a-z0-9\s-]", "", lowered)
    # Collapse whitespace/underscore runs into single hyphens.
    hyphenated = re.sub(r"[\s_]+", "-", cleaned)
    # Collapse any resulting hyphen runs, then trim edge hyphens.
    return re.sub(r"-+", "-", hyphenated).strip("-")
|
||||
|
||||
|
||||
def create_post(title, summary=""):
    """Write a new Luna Juice post to CONTENT_DIR.

    Returns the Path of the created file, or None when the user declines to
    overwrite an existing post with the same slug.
    """
    CONTENT_DIR.mkdir(parents=True, exist_ok=True)

    target = CONTENT_DIR / f"{slugify(title)}.md"

    # Refuse to clobber an existing post unless explicitly confirmed.
    if target.exists():
        print(f"Post already exists: {target.relative_to(PROJECT_ROOT)}")
        if input("Overwrite? (y/N): ").strip().lower() != "y":
            return None

    timestamp = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")

    front_matter = f"""+++
title = '{title}'
date = {timestamp}
draft = true
summary = '{summary}'
series = "Luna Juice"
+++

Write about your beer adventure here...

"""

    target.write_text(front_matter)
    return target
|
||||
|
||||
|
||||
def main():
    """CLI entry point: collect a title and summary, then create the post."""
    arg_parser = argparse.ArgumentParser(description="Create a new Luna Juice event post")
    arg_parser.add_argument("title", nargs="?", help="Post title")
    options = arg_parser.parse_args()

    # Fall back to an interactive prompt when no title argument was given.
    title = options.title
    if not title:
        title = input("Event title: ").strip()
        if not title:
            print("Title is required")
            sys.exit(1)

    summary = input("Summary (one line): ").strip()

    created = create_post(title, summary)

    if created:
        print(f"\nCreated: {created.relative_to(PROJECT_ROOT)}")
        print("Edit the file to add your content!")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
232
scripts/new_movie.py
Executable file
@@ -0,0 +1,232 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Create a new movie post from an IMDB ID.
|
||||
|
||||
Usage:
|
||||
python scripts/new_movie.py tt1234567
|
||||
python scripts/new_movie.py tt1234567 --title "Custom Title"
|
||||
python scripts/new_movie.py https://www.imdb.com/title/tt1234567/
|
||||
|
||||
This fetches all metadata (title, year, runtime, director, poster) and creates
|
||||
a ready-to-edit draft post.
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import re
|
||||
import subprocess
|
||||
import sys
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
|
||||
import requests
|
||||
|
||||
# Configuration
|
||||
try:
|
||||
from config import TMDB_API_KEY
|
||||
except ImportError:
|
||||
raise SystemExit("Error: scripts/config.py not found. Copy config.example.py to config.py and add your API key.")
|
||||
|
||||
# Paths
|
||||
SCRIPT_DIR = Path(__file__).parent
|
||||
PROJECT_ROOT = SCRIPT_DIR.parent
|
||||
CONTENT_DIR = PROJECT_ROOT / "content" / "posts"
|
||||
IMAGES_DIR = PROJECT_ROOT / "static" / "images" / "posters"
|
||||
|
||||
|
||||
def extract_imdb_id(input_str):
    """Pull an IMDB ID (e.g. 'tt1234567') out of a raw ID or an IMDB URL.

    Returns the ID string, or None when no 'tt<digits>' token is present.
    """
    found = re.search(r'(tt\d+)', input_str)
    return found.group(1) if found else None
|
||||
|
||||
|
||||
def find_movie_by_imdb(imdb_id):
    """Look up a movie on TMDB by IMDB ID.

    Returns the first TMDB movie result dict, or None when TMDB has no match.
    Raises requests.HTTPError on a failed API call.
    """
    resp = requests.get(
        f"https://api.themoviedb.org/3/find/{imdb_id}",
        params={"api_key": TMDB_API_KEY, "external_source": "imdb_id"},
        timeout=10,
    )
    resp.raise_for_status()

    movie_results = resp.json().get("movie_results", [])
    return movie_results[0] if movie_results else None
|
||||
|
||||
|
||||
def get_movie_details(tmdb_id):
    """Fetch full movie details (with credits appended) from TMDB as a dict.

    Raises requests.HTTPError on a failed API call.
    """
    resp = requests.get(
        f"https://api.themoviedb.org/3/movie/{tmdb_id}",
        params={"api_key": TMDB_API_KEY, "append_to_response": "credits"},
        timeout=10,
    )
    resp.raise_for_status()
    return resp.json()
|
||||
|
||||
|
||||
def get_directors(credits):
    """Return the names of all crew members whose job is 'Director'."""
    names = []
    for member in credits.get("crew", []):
        if member.get("job") == "Director":
            names.append(member["name"])
    return names
|
||||
|
||||
|
||||
def slugify(title):
    """Return a lowercase, hyphen-separated slug suitable for filenames/URLs."""
    working = title.lower()
    # Strip characters outside [a-z0-9], whitespace, and hyphens.
    working = re.sub(r"[^a-z0-9\s-]", "", working)
    # Whitespace/underscore runs -> single hyphen; then collapse hyphen runs.
    working = re.sub(r"[\s_]+", "-", working)
    working = re.sub(r"-+", "-", working)
    return working.strip("-")
|
||||
|
||||
|
||||
def download_poster(poster_path, filename):
    """Download a TMDB poster image and save it under static/images/posters/.

    Args:
        poster_path: TMDB poster path fragment (e.g. '/abc123.jpg'); falsy
            values mean "no poster".
        filename: local filename to save the image as (e.g. 'my-movie.jpg').

    Returns:
        The site-relative URL of the saved poster, or "" when there is no
        poster to download.

    Raises:
        requests.HTTPError: when the image download fails.
    """
    if not poster_path:
        return ""

    # w500 = 500px wide, good for 200px display on retina
    url = f"https://image.tmdb.org/t/p/w500{poster_path}"
    resp = requests.get(url, timeout=10)
    resp.raise_for_status()

    IMAGES_DIR.mkdir(parents=True, exist_ok=True)
    filepath = IMAGES_DIR / filename
    filepath.write_bytes(resp.content)

    # BUG FIX: previously returned a literal "(unknown)" path; the returned
    # URL must reference the file actually written above.
    return f"/images/posters/{filename}"
|
||||
|
||||
|
||||
def create_post(imdb_id, details, poster_path, custom_title=None):
    """Create the Hugo post with all metadata.

    Args:
        imdb_id: IMDB ID string embedded in the frontmatter.
        details: TMDB details dict; reads 'title', 'poster_path',
            'release_date', 'runtime', 'credits', and 'genres'.
        poster_path: NOTE(review): unused — the poster path is re-read from
            `details` below; confirm whether this parameter can be dropped.
        custom_title: optional title override (defaults to the TMDB title).

    Returns:
        Path of the created post file, or None if it already exists.
    """
    title = custom_title or details.get("title", "Untitled")
    slug = slugify(title)
    filename = f"{slug}.md"
    filepath = CONTENT_DIR / filename

    # Never overwrite an existing post from this script.
    if filepath.exists():
        print(f"Post already exists: {filepath}")
        return None

    # Download poster
    poster_url = ""
    if details.get("poster_path"):
        poster_filename = f"{slug}.jpg"
        print(f"Downloading poster...")
        poster_url = download_poster(details["poster_path"], poster_filename)

    # Extract metadata
    release_date = details.get("release_date", "")
    year = int(release_date.split("-")[0]) if release_date else ""
    runtime = details.get("runtime", "")

    credits = details.get("credits", {})
    directors = get_directors(credits)
    # Single director -> plain string; multiple -> list; none -> "".
    director_str = directors[0] if len(directors) == 1 else directors if directors else ""

    genres = [g["name"] for g in details.get("genres", [])]

    now = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")

    # Build frontmatter
    # Using manual formatting to control output
    genres_yaml = "\n".join(f" - {g}" for g in genres) if genres else "[]"
    if isinstance(director_str, list):
        # Multiple directors: emit a YAML block list on the following lines.
        director_yaml = "\n".join(f" - {d}" for d in director_str)
        director_yaml = f"\n{director_yaml}"
    else:
        # Single (or no) director: inline scalar after 'director:'.
        director_yaml = f' "{director_str}"' if director_str else ' ""'

    content = f'''---
title: '{title}'
date: {now}
draft: true
series: "Frank's Couch"
summary: ""
imdb: "{imdb_id}"
poster: "{poster_url}"
year: {year}
runtime: {runtime}
director:{director_yaml}
genres:
{genres_yaml}
tags:
 - gucci
 - ghost theater
 - marcel
 - amc-south
 - amc-lakeline
 - anticipated
 - no-expectations
 - had pizza
---
{{{{< imdbposter >}}}}

| Date watched | |
|---------------------|-------------------|
| Show Time | |
| Theater | |
| Theater Number | |
| Pizza | |
| Tickets | |
| Letterboxd Rating | |
| Crew | |

{{{{< /imdbposter >}}}}

Write your review here...

'''

    filepath.write_text(content)
    return filepath
|
||||
|
||||
|
||||
def main():
    """CLI entry point: resolve an IMDB ID, fetch TMDB data, write the post."""
    parser = argparse.ArgumentParser(
        description="Create a new movie post from IMDB ID",
        epilog="Example: python scripts/new_movie.py tt1234567"
    )
    parser.add_argument("imdb", help="IMDB ID or URL (e.g., tt1234567)")
    parser.add_argument("--title", help="Custom title (default: from TMDB)")
    args = parser.parse_args()

    # Extract IMDB ID
    imdb_id = extract_imdb_id(args.imdb)
    if not imdb_id:
        print(f"Invalid IMDB ID: {args.imdb}")
        print("Expected format: tt1234567 or https://www.imdb.com/title/tt1234567/")
        sys.exit(1)

    print(f"Looking up {imdb_id}...")

    # Find on TMDB
    movie = find_movie_by_imdb(imdb_id)
    if not movie:
        print(f"Movie not found for IMDB ID: {imdb_id}")
        sys.exit(1)

    tmdb_id = movie["id"]
    print(f"Found: {movie.get('title')} ({movie.get('release_date', '')[:4]})")

    # Get full details
    print("Fetching details...")
    details = get_movie_details(tmdb_id)

    # Create post
    filepath = create_post(imdb_id, details, details.get("poster_path"), args.title)

    # create_post returns None when the post already exists.
    if filepath:
        print(f"\nCreated: {filepath.relative_to(PROJECT_ROOT)}")
        print(f"IMDB: https://www.imdb.com/title/{imdb_id}/")
        print(f"\nEdit your post, then run: ./scripts/build.sh")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
438
scripts/new_nfr.py
Executable file
@@ -0,0 +1,438 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Create a new post for National Film Registry movies in the "Found in the Darkroom" series.
|
||||
|
||||
Usage:
|
||||
python scripts/new_nfr.py tt1234567 # From IMDB ID
|
||||
python scripts/new_nfr.py "Movie Title" # From title (searches TMDB)
|
||||
python scripts/new_nfr.py --list-2024 # Show 2024 NFR list
|
||||
python scripts/new_nfr.py --nfr-year 2024 # Set NFR induction year
|
||||
|
||||
The script will:
|
||||
1. Fetch movie data from TMDB (poster, year, director, runtime, genres)
|
||||
2. Download the poster
|
||||
3. Create a draft post using the darkroom archetype
|
||||
4. Pre-fill metadata including NFR year
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
|
||||
import requests
|
||||
|
||||
# Configuration
|
||||
try:
|
||||
from config import TMDB_API_KEY
|
||||
except ImportError:
|
||||
raise SystemExit("Error: scripts/config.py not found. Copy config.example.py to config.py and add your API key.")
|
||||
|
||||
# Paths (relative to script location)
|
||||
SCRIPT_DIR = Path(__file__).parent
|
||||
PROJECT_ROOT = SCRIPT_DIR.parent
|
||||
CONTENT_DIR = PROJECT_ROOT / "content" / "posts"
|
||||
IMAGES_DIR = PROJECT_ROOT / "static" / "images" / "posters"
|
||||
|
||||
# 2024 National Film Registry inductees with LOC descriptions
|
||||
# Source: https://newsroom.loc.gov/news/25-films-named-to-national-film-registry-for-preservation/
|
||||
NFR_2024 = {
|
||||
"Annabelle Serpentine Dance": {
|
||||
"year": 1895,
|
||||
"description": 'Preserved as a foundational cinema work that "enticed and enchanted audiences" during film\'s infancy, demonstrating early technical innovations like hand-tinted color.'
|
||||
},
|
||||
"Koko's Earth Control": {
|
||||
"year": 1928,
|
||||
"description": "Selected for representing the Fleischer Studios' competitive animation style against Disney, featuring innovative techniques like rotoscoping that advanced the medium."
|
||||
},
|
||||
"Angels with Dirty Faces": {
|
||||
"year": 1938,
|
||||
"description": 'Recognized for depicting "Depression-era immigrant, segregated, hardscrabble neighborhoods" while navigating Production Code restrictions through redemptive storytelling.'
|
||||
},
|
||||
"The Pride of the Yankees": {
|
||||
"year": 1942,
|
||||
"description": "Honored as one of cinema's seminal sports films, featuring authentic appearances by former Yankees teammates and Lou Gehrig's iconic farewell speech recreation."
|
||||
},
|
||||
"Invaders from Mars": {
|
||||
"year": 1953,
|
||||
"description": 'Selected for establishing "the visual language of science fiction cinema" and influencing subsequent sci-fi works through post-war paranoia themes.'
|
||||
},
|
||||
"The Miracle Worker": {
|
||||
"year": 1962,
|
||||
"description": 'Preserved for Arthur Penn\'s "stark black and white" presentation of Helen Keller\'s story, told with minimal sentimentality to highlight human potential.'
|
||||
},
|
||||
"The Chelsea Girls": {
|
||||
"year": 1966,
|
||||
"description": 'Recognized as a Warhol experimental work that challenged narrative form through dual-projection and "infinite audience interpretations."'
|
||||
},
|
||||
"Ganja and Hess": {
|
||||
"year": 1973,
|
||||
"description": 'Honored for addressing "complexities of addiction, sexuality and Black identity" through Bill Gunn\'s visionary filmmaking that remained underrecognized.'
|
||||
},
|
||||
"The Texas Chain Saw Massacre": {
|
||||
"year": 1974,
|
||||
"description": 'Selected for establishing "tenets of the gore/slasher/splatter genre" despite initial controversy, becoming a "cultural and filmmaking touchstone."'
|
||||
},
|
||||
"Uptown Saturday Night": {
|
||||
"year": 1974,
|
||||
"description": 'Preserved as Sidney Poitier\'s directorial effort "dispelling stereotypes" of the Blaxploitation era through an entertaining crime comedy ensemble cast.'
|
||||
},
|
||||
"Zora Lathan Student Films": {
|
||||
"year": 1975,
|
||||
"description": "Six short films recognized for showcasing filmmaking techniques and design problem-solving approaches, documenting intimate domestic moments from early 1980s perspectives."
|
||||
},
|
||||
"Up in Smoke": {
|
||||
"year": 1978,
|
||||
"description": 'Selected for arguably establishing the "stoner" film genre and paving "the way for subsequent memorable movie characters" through comic improvisation.'
|
||||
},
|
||||
"Will": {
|
||||
"year": 1981,
|
||||
"description": 'Honored as "the first independent feature-length film directed by a Black woman," documenting early 1980s Harlem while addressing addiction and resilience themes.'
|
||||
},
|
||||
"Star Trek: The Wrath of Khan": {
|
||||
"year": 1982,
|
||||
"description": 'Preserved as "often considered the best of the six original-cast Star Trek theatrical films," featuring expert direction and exploration of sacrifice.'
|
||||
},
|
||||
"Beverly Hills Cop": {
|
||||
"year": 1984,
|
||||
"description": 'Recognized as "Eddie Murphy\'s first feature film on the registry" and establishing his "box-office superstar" status through this buddy-cop action-comedy.'
|
||||
},
|
||||
"Dirty Dancing": {
|
||||
"year": 1987,
|
||||
"description": 'Selected for remaining "influential and imitated" despite addressing serious themes including pregnancy, abortion, and breaking class barriers through dance.'
|
||||
},
|
||||
"Common Threads: Stories from the Quilt": {
|
||||
"year": 1989,
|
||||
"description": 'Honored as an Oscar-winning documentary serving as "a monument to the power of grief and activism" chronicling the AIDS Memorial Quilt\'s creation.'
|
||||
},
|
||||
"Powwow Highway": {
|
||||
"year": 1989,
|
||||
"description": 'Preserved as "one of the first" films treating "Native Americans as ordinary people," departing from Hollywood stereotypes through a witty buddy road narrative.'
|
||||
},
|
||||
"My Own Private Idaho": {
|
||||
"year": 1991,
|
||||
"description": 'Recognized for Gus Van Sant\'s "magnificently original cult classic" reimagining Shakespeare through street hustlers\' journeys with "dream-like vision and hardcore reality."'
|
||||
},
|
||||
"American Me": {
|
||||
"year": 1992,
|
||||
"description": 'Selected for Edward James Olmos\'s directorial debut depicting "dark, brutal realities of Chicano gang life" addressing prison drug trafficking with unflinching honesty.'
|
||||
},
|
||||
"Mi Familia": {
|
||||
"year": 1995,
|
||||
"description": 'Preserved as Gregory Nava\'s "emotional and evocative" multi-generational Mexican-American family story celebrating immigration\'s role in American vitality.'
|
||||
},
|
||||
"Compensation": {
|
||||
"year": 1999,
|
||||
"description": 'Honored for director Zeinabu irene Davis\'s innovative accessibility approach incorporating "American Sign Language and title cards" for deaf and hearing audiences.'
|
||||
},
|
||||
"Spy Kids": {
|
||||
"year": 2001,
|
||||
"description": 'Selected for Robert Rodriguez\'s incorporation of "Hispanic culture" through family-centered storytelling emphasizing "familial bonds and cultural heritage" authenticity.'
|
||||
},
|
||||
"No Country for Old Men": {
|
||||
"year": 2007,
|
||||
"description": 'Preserved as a Coen Brothers modern-day Western adaptation "hailed as a classic nearly from the moment of release," winning Best Picture Oscar recognition.'
|
||||
},
|
||||
"The Social Network": {
|
||||
"year": 2010,
|
||||
"description": 'Recognized for transforming a potentially "dry, geeky" corporate narrative into "a riveting examination" of modern business ethics and technology\'s societal impact.'
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
def slugify(title):
    """Return a lowercase, hyphen-separated slug suitable for filenames/URLs."""
    text = title.lower()
    # Keep only letters, digits, whitespace, and hyphens.
    text = re.sub(r"[^a-z0-9\s-]", "", text)
    # Turn whitespace/underscore runs into hyphens, collapse hyphen runs.
    text = re.sub(r"-+", "-", re.sub(r"[\s_]+", "-", text))
    return text.strip("-")
|
||||
|
||||
|
||||
def search_tmdb_by_title(title, year=None):
    """Search TMDB for a movie by title (optionally narrowed by year).

    Returns the first search-result dict, or None when nothing matches.
    Raises requests.HTTPError on a failed API call.
    """
    query_params = {
        "api_key": TMDB_API_KEY,
        "query": title,
    }
    if year:
        query_params["year"] = year

    resp = requests.get(
        "https://api.themoviedb.org/3/search/movie",
        params=query_params,
        timeout=10,
    )
    resp.raise_for_status()

    matches = resp.json().get("results")
    return matches[0] if matches else None
|
||||
|
||||
|
||||
def get_tmdb_details(tmdb_id):
    """Fetch a movie's details dict from TMDB.

    Raises requests.HTTPError on a failed API call.
    """
    resp = requests.get(
        f"https://api.themoviedb.org/3/movie/{tmdb_id}",
        params={"api_key": TMDB_API_KEY},
        timeout=10,
    )
    resp.raise_for_status()
    return resp.json()
|
||||
|
||||
|
||||
def get_imdb_id_from_tmdb(tmdb_id):
    """Resolve a TMDB ID to its IMDB ID ('' when TMDB records none)."""
    details = get_tmdb_details(tmdb_id)
    return details.get("imdb_id", "")
|
||||
|
||||
|
||||
def get_tmdb_id_from_imdb(imdb_id):
    """Convert an IMDB ID to its TMDB ID.

    Raises:
        ValueError: when TMDB has no movie matching the IMDB ID.
        requests.HTTPError: on a failed API call.
    """
    resp = requests.get(
        f"https://api.themoviedb.org/3/find/{imdb_id}",
        params={"api_key": TMDB_API_KEY, "external_source": "imdb_id"},
        timeout=10,
    )
    resp.raise_for_status()

    matches = resp.json().get("movie_results", [])
    if not matches:
        raise ValueError(f"No TMDB match found for IMDB ID: {imdb_id}")
    return matches[0]["id"]
|
||||
|
||||
|
||||
def download_poster(poster_path, filename):
    """Download a TMDB poster to static/images/posters/.

    Args:
        poster_path: TMDB poster path fragment; falsy means "no poster".
        filename: local filename to save the image as.

    Returns:
        The site-relative URL of the saved poster, or None when there is no
        poster available.

    Raises:
        requests.HTTPError: when the image download fails.
    """
    if not poster_path:
        print(" No poster available")
        return None

    # Use w500 size for good quality without being huge
    url = f"https://image.tmdb.org/t/p/w500{poster_path}"
    resp = requests.get(url, timeout=10)
    resp.raise_for_status()

    IMAGES_DIR.mkdir(parents=True, exist_ok=True)
    filepath = IMAGES_DIR / filename
    filepath.write_bytes(resp.content)
    print(f" Poster saved: {filepath.relative_to(PROJECT_ROOT)}")
    # BUG FIX: previously returned a literal "(unknown)" path; the returned
    # URL must reference the file actually written above.
    return f"/images/posters/{filename}"
|
||||
|
||||
|
||||
def extract_imdb_id(input_str):
    """Extract an IMDB ID from a string (handles raw ID or URL).

    Returns the ID (e.g. 'tt1234567') or None when no 'tt<digits>' token is
    present.

    Simplified: the old `^tt\\d+$` pre-check was redundant — the search below
    already matches a bare ID — and this now mirrors the sibling
    implementation in new_movie.py.
    """
    match = re.search(r'(tt\d+)', input_str)
    if match:
        return match.group(1)
    return None
|
||||
|
||||
|
||||
def format_director(directors):
    """Render a director list as a YAML frontmatter value.

    Empty -> '""'; one name -> a quoted scalar; several -> a quoted flow list.
    """
    if not directors:
        return '""'
    if len(directors) == 1:
        return f'"{directors[0]}"'
    # Multiple directors - use YAML list format
    quoted = (f'"{name}"' for name in directors)
    return "[" + ", ".join(quoted) + "]"
|
||||
|
||||
|
||||
def create_nfr_post(tmdb_data, imdb_id, nfr_year=2024):
    """Create a draft post for an NFR movie.

    Args:
        tmdb_data: TMDB details dict; reads 'title', 'release_date',
            'runtime', 'overview', 'genres', and 'poster_path'.
        imdb_id: IMDB ID string embedded in the frontmatter (may be "").
        nfr_year: National Film Registry induction year; 2024 enables the
            LOC-description lookup in NFR_2024.

    Returns:
        Path of the created post, or None if the user declines to overwrite
        an existing one.
    """
    title = tmdb_data.get("title", "Unknown")
    slug = slugify(title)
    filename = f"{slug}.md"
    filepath = CONTENT_DIR / filename

    # Existing posts are only replaced after explicit confirmation.
    if filepath.exists():
        print(f" Post already exists: {filepath.relative_to(PROJECT_ROOT)}")
        overwrite = input(" Overwrite? (y/N): ").strip().lower()
        if overwrite != 'y':
            return None

    # Format the date for Hugo
    now = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")

    # Extract metadata
    year = tmdb_data.get("release_date", "")[:4] if tmdb_data.get("release_date") else ""
    runtime = tmdb_data.get("runtime", "")
    overview = tmdb_data.get("overview", "")

    # Get directors from crew
    directors = []
    # Note: Full crew info requires a second API call, so we'll leave it blank for now
    # Users can fill it in or we can enhance this later

    # Genres
    genres = [g["name"] for g in tmdb_data.get("genres", [])]
    genres_yaml = "[" + ", ".join(genres) + "]" if genres else "[]"

    # Poster
    poster_url = ""
    if tmdb_data.get("poster_path"):
        print(" Downloading poster...")
        poster_filename = f"{slug}.jpg"
        poster_url = download_poster(tmdb_data["poster_path"], poster_filename)

    # Look up LOC description if this is a 2024 NFR film
    loc_description = ""
    if nfr_year == 2024:
        # Try to match the title to our NFR_2024 dictionary
        # (substring match in either direction to tolerate title variants)
        for nfr_title, nfr_data in NFR_2024.items():
            if title.lower() in nfr_title.lower() or nfr_title.lower() in title.lower():
                loc_description = nfr_data["description"]
                print(f" Found LOC description for NFR 2024: {nfr_title}")
                break

    # Build NFR section content
    if loc_description:
        nfr_section = f"""## Why It's in the National Film Registry

{loc_description}

*Source: [Library of Congress National Film Registry 2024 announcement](https://newsroom.loc.gov/news/25-films-named-to-national-film-registry-for-preservation/)*"""
    else:
        nfr_section = """## Why It's in the National Film Registry

[Add information about why this film was selected for preservation by the Library of Congress]"""

    # Build the frontmatter and content
    content = f'''---
title: '{title}'
date: {now}
draft: true
series: "Found in the Darkroom"
summary: ""
imdb: "{imdb_id}"
poster: "{poster_url or ''}"
year: {year}
runtime: {runtime}
director: ""
genres: {genres_yaml}
nfr_year: {nfr_year}
letterboxd_url: ""
tags:
 - national-film-registry
 - home-video
---
{{{{< imdbposter >}}}}

| Date watched | |
|------------------------|-----------------------|
| Format | |
| Watched Multiple Times | |
| Added to NFR | {nfr_year} |
| Letterboxd Rating | |
| Personal Notes | |

{{{{< /imdbposter >}}}}

{nfr_section}

## My Thoughts

{overview}

'''

    filepath.write_text(content)
    print(f" Draft created: {filepath.relative_to(PROJECT_ROOT)}")
    print(f"\nNext steps:")
    print(f" 1. Fill in director and other metadata by running:")
    print(f" python scripts/fetch_movie_data.py")
    print(f" 2. Add your viewing details and thoughts")
    if not loc_description:
        print(f" 3. Research why it was added to the NFR")
    # Step numbering shifts depending on whether the research step was shown.
    print(f" {'4' if not loc_description else '3'}. Add your Letterboxd URL if you've logged it there")

    return filepath
|
||||
|
||||
|
||||
def list_nfr_2024():
    """Print the numbered list of 2024 NFR inductees to stdout."""
    print("\n2024 National Film Registry Inductees:\n")
    position = 1
    for film_title, film_data in NFR_2024.items():
        print(f" {position:2}. {film_title} ({film_data['year']})")
        position += 1
    print()
|
||||
|
||||
|
||||
def main():
    """CLI entry point: look up the movie (by ID or title) and create its post."""
    parser = argparse.ArgumentParser(
        description="Create NFR movie posts for 'Found in the Darkroom' series"
    )
    parser.add_argument("input", nargs="?", help="IMDB ID (tt1234567) or movie title")
    parser.add_argument("--list-2024", action="store_true", help="List 2024 NFR inductees")
    parser.add_argument("--nfr-year", type=int, default=2024, help="NFR induction year (default: 2024)")
    args = parser.parse_args()

    if args.list_2024:
        list_nfr_2024()
        sys.exit(0)

    if not args.input:
        print("Error: Please provide an IMDB ID or movie title")
        print("\nUsage:")
        print(" python scripts/new_nfr.py tt1234567")
        print(" python scripts/new_nfr.py 'No Country for Old Men'")
        print(" python scripts/new_nfr.py --list-2024")
        sys.exit(1)

    try:
        # Try to extract IMDB ID
        imdb_id = extract_imdb_id(args.input)

        if imdb_id:
            # Direct lookup path: IMDB ID -> TMDB ID -> details.
            print(f"Looking up movie by IMDB ID: {imdb_id}")
            tmdb_id = get_tmdb_id_from_imdb(imdb_id)
            tmdb_data = get_tmdb_details(tmdb_id)
        else:
            # Assume it's a title search
            print(f"Searching for: {args.input}")
            # Try to find year in NFR list
            # (a year hint narrows the TMDB title search)
            year_hint = None
            for title, data in NFR_2024.items():
                if args.input.lower() in title.lower() or title.lower() in args.input.lower():
                    year_hint = data["year"]
                    print(f"Found in NFR 2024 list: {title} ({data['year']})")
                    break

            search_result = search_tmdb_by_title(args.input, year_hint)
            if not search_result:
                print(f"Error: No movie found for '{args.input}'")
                sys.exit(1)

            # Confirm the fuzzy match before writing anything.
            print(f"Found: {search_result['title']} ({search_result.get('release_date', '')[:4]})")
            confirm = input("Is this correct? (Y/n): ").strip().lower()
            if confirm == 'n':
                print("Search cancelled")
                sys.exit(0)

            tmdb_id = search_result["id"]
            tmdb_data = get_tmdb_details(tmdb_id)
            imdb_id = tmdb_data.get("imdb_id", "")

            if not imdb_id:
                print("Warning: No IMDB ID found for this movie")

        print(f"\nCreating post for: {tmdb_data.get('title')}")
        create_nfr_post(tmdb_data, imdb_id, args.nfr_year)

    except Exception as e:
        # Broad catch: report any lookup/IO failure with a traceback, then
        # exit non-zero so callers see the failure.
        print(f"Error: {e}")
        import traceback
        traceback.print_exc()
        sys.exit(1)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
215
scripts/new_techpost.py
Executable file
@@ -0,0 +1,215 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Create a new Fun Center (technology) blog post.
|
||||
|
||||
Usage:
|
||||
python scripts/new_techpost.py
|
||||
python scripts/new_techpost.py "My Post Title"
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import re
|
||||
import sys
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
|
||||
# Paths
|
||||
SCRIPT_DIR = Path(__file__).parent
|
||||
PROJECT_ROOT = SCRIPT_DIR.parent
|
||||
CONTENT_DIR = PROJECT_ROOT / "content" / "posts"
|
||||
|
||||
# Common tags for tech posts
# Shown as suggestions by prompt_tags(); extend freely — order is cosmetic.
COMMON_TAGS = [
    "linux",
    "ubuntu",
    "self-hosted",
    "open-source",
    "privacy",
    "automation",
    "devops",
    "python",
    "homelab",
    "sdf",
    "hugo",
]
|
||||
|
||||
|
||||
def slugify(title):
    """Return a lowercase, hyphen-separated slug suitable for filenames/URLs."""
    result = title.lower()
    # Remove anything that is not a lowercase letter, digit, space, or hyphen.
    result = re.sub(r"[^a-z0-9\s-]", "", result)
    # Normalize separators to single hyphens and trim the edges.
    result = re.sub(r"[\s_]+", "-", result)
    result = re.sub(r"-+", "-", result)
    return result.strip("-")
|
||||
|
||||
|
||||
def prompt_post_type():
    """Ask the user which kind of post this is; return its type slug."""
    print("\nWhat kind of post is this?")
    print(" 1. How I Did It - Problem/solution, troubleshooting, tutorials")
    print(" 2. Grinds My Gears - Opinion, rant, commentary")
    print(" 3. Quick Tip - Short discovery, TIL, neat trick")

    # Map menu numbers to type slugs; loop until a valid choice is given.
    type_by_choice = {"1": "how-to", "2": "opinion", "3": "quick-tip"}
    while True:
        choice = input("Enter 1, 2, or 3: ").strip()
        if choice in type_by_choice:
            return type_by_choice[choice]
        print("Please enter 1, 2, or 3")
|
||||
|
||||
|
||||
def prompt_tags(post_type):
    """Suggest tags based on the post type, then merge in user-entered extras.

    Returns the suggested tags plus any normalized (lowercase, hyphenated)
    additional tags, without duplicates.
    """
    # Suggest tags based on post type
    type_tags = {
        "how-to": ["how-i-did-it", "technology"],
        "opinion": ["opinion", "technology"],
        "quick-tip": ["til", "technology"],
    }
    suggested = type_tags.get(post_type, ["technology"])

    print(f"\nSuggested tags: {', '.join(suggested)}")
    print(f"Common tags: {', '.join(COMMON_TAGS)}")

    additional = input("Additional tags (comma-separated, or Enter to skip): ").strip()

    tags = suggested.copy()
    if additional:
        for raw_tag in additional.split(","):
            normalized = raw_tag.strip().lower().replace(" ", "-")
            if normalized and normalized not in tags:
                tags.append(normalized)

    return tags
|
||||
|
||||
|
||||
def get_skeleton(post_type):
    """Get the content skeleton based on post type.

    Args:
        post_type: one of 'how-to', 'opinion', or 'quick-tip' (anything else
            falls through to the quick-tip skeleton).

    Returns:
        A Markdown template string for the post body.
    """
    if post_type == "how-to":
        return """
## The Problem

What broke? What were you trying to do?


## What I Tried

The journey - commands, dead ends, frustration...


## What Worked

The fix! Include the commands/steps.


## Hindsight

What would you do differently? Any gotchas for future-you?

"""
    elif post_type == "opinion":
        return """
## The Thing

What's on your mind?


## Why It Matters

Some context...


## My Take

Your opinion here...


## Links

- [Reference 1](https://example.com)

"""
    else:  # quick-tip
        return """
Quick tip or discovery here. Keep it short!

```bash
# command or code if relevant
```

"""
|
||||
|
||||
|
||||
def create_post(title, post_type, tags, summary):
    """Create the post file.

    Writes a slugged markdown file under CONTENT_DIR with YAML front
    matter (draft by default) followed by the type-specific skeleton.

    Args:
        title: Human-readable post title; also used to derive the slug.
        post_type: Post type string passed through to get_skeleton().
        tags: List of tag strings for the front matter.
        summary: One-line summary for the front matter.

    Returns:
        Path to the written file, or None if the file existed and the
        user declined to overwrite it.
    """
    CONTENT_DIR.mkdir(parents=True, exist_ok=True)

    slug = slugify(title)
    filename = f"{slug}.md"
    filepath = CONTENT_DIR / filename

    if filepath.exists():
        print(f"\nPost already exists: {filepath.relative_to(PROJECT_ROOT)}")
        overwrite = input("Overwrite? (y/N): ").strip().lower()
        if overwrite != "y":
            return None

    now = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
    tags_yaml = "\n".join(f"  - {tag}" for tag in tags)
    skeleton = get_skeleton(post_type)

    # Escape characters that would break the YAML front matter: inside a
    # single-quoted YAML scalar a ' is doubled; inside a double-quoted
    # scalar backslashes and " are backslash-escaped. Previously a title
    # like "It's Fine" produced unparseable front matter.
    safe_title = title.replace("'", "''")
    safe_summary = summary.replace("\\", "\\\\").replace('"', '\\"')

    content = f'''---
title: '{safe_title}'
date: {now}
draft: true
series: "Fun Center"
summary: "{safe_summary}"
tags:
{tags_yaml}
---
{skeleton}'''

    filepath.write_text(content)
    return filepath
|
||||
|
||||
|
||||
def main():
    """Interactive entry point: gather post metadata and write the file."""
    parser = argparse.ArgumentParser(description="Create a new Fun Center tech post")
    parser.add_argument("title", nargs="?", help="Post title")
    args = parser.parse_args()

    # Title comes from the CLI when given; otherwise prompt for one and
    # bail out if the user enters nothing.
    title = args.title
    if not title:
        title = input("Post title: ").strip()
        if not title:
            print("Title is required")
            sys.exit(1)

    # Collect the remaining metadata interactively.
    post_type = prompt_post_type()
    tags = prompt_tags(post_type)
    summary = input("\nOne-line summary: ").strip()

    filepath = create_post(title, post_type, tags, summary)

    # create_post returns None when the user declined an overwrite.
    if filepath:
        print(f"\nCreated: {filepath.relative_to(PROJECT_ROOT)}")
        print(f"Type: {post_type}")
        print(f"Tags: {', '.join(tags)}")
        print("\nEdit the file to fill in the skeleton!")


if __name__ == "__main__":
    main()
|
||||
2
scripts/nfr_data/.gitkeep
Normal file
@@ -0,0 +1,2 @@
|
||||
# This directory holds generated NFR data files
|
||||
# Files are created by scripts/setup_nfr.py
|
||||
136
scripts/remote_publish.sh
Executable file
@@ -0,0 +1,136 @@
|
||||
#!/bin/bash
# Remote publish script for marcus-web
# Pushes local changes if needed, then builds on SDF
#
# Flow:
#   1. Sanity-check we are inside a git repo and fetch origin.
#   2. If the tree is dirty, offer to stage+commit everything
#      (auto-generating a commit message from the newest staged
#      markdown post when no custom message is given).
#   3. If local commits are unpushed, offer to push first.
#   4. SSH to SDF, pull, build with hugo, publish via mkhomepg.

set -e

SDF_HOST="mnw@sdf.org"
REMOTE_DIR="~/marcus-web"  # quoted tilde: expanded by the remote shell, not locally

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

echo -e "${GREEN}=== Remote Publish ===${NC}"
echo ""

# Check if we're in a git repo
if ! git rev-parse --git-dir > /dev/null 2>&1; then
    echo -e "${RED}Error: Not in a git repository${NC}"
    exit 1
fi

# Fetch latest from remote to compare (best-effort: offline runs continue)
echo "Checking repository status..."
git fetch origin 2>/dev/null || true

# Check for uncommitted changes (staged or unstaged) or untracked files
if ! git diff-index --quiet HEAD -- 2>/dev/null || [ -n "$(git ls-files --others --exclude-standard)" ]; then
    echo -e "${YELLOW}You have uncommitted changes:${NC}"
    git status --short
    echo ""
    read -p "Would you like to commit these changes? (y/n) " -n 1 -r
    echo ""
    if [[ $REPLY =~ ^[Yy]$ ]]; then
        # Stage all changes
        git add -A

        read -p "Custom commit message? (y/n) " -n 1 -r
        echo ""
        if [[ $REPLY =~ ^[Yy]$ ]]; then
            read -p "Enter commit message: " COMMIT_MSG
        else
            # Try to find a post title from recently modified markdown files
            COMMIT_MSG=""
            RECENT_MD=$(git diff --cached --name-only | grep '\.md$' | head -1)
            if [ -n "$RECENT_MD" ] && [ -f "$RECENT_MD" ]; then
                # Extract title from frontmatter (sed strips surrounding quotes)
                TITLE=$(grep -m1 "^title:" "$RECENT_MD" 2>/dev/null | sed "s/^title:[[:space:]]*['\"]*//" | sed "s/['\"].*$//")
                if [ -n "$TITLE" ]; then
                    COMMIT_MSG="Remote Publish Auto Commit - $TITLE"
                fi
            fi
            # Fallback to date-based message
            if [ -z "$COMMIT_MSG" ]; then
                COMMIT_MSG="Auto Commit - $(date '+%Y-%m-%d %H:%M')"
            fi
            echo "Using commit message: $COMMIT_MSG"
        fi

        git commit -m "$COMMIT_MSG"
        echo -e "${GREEN}Changes committed${NC}"
        echo ""
    else
        echo -e "${YELLOW}Continuing without committing...${NC}"
        read -p "Are you sure? Remote will not have these changes. (y/n) " -n 1 -r
        echo ""
        if [[ ! $REPLY =~ ^[Yy]$ ]]; then
            echo "Aborted."
            exit 1
        fi
    fi
fi

# Check if local is ahead of remote.
# LOCAL/REMOTE/BASE comparison distinguishes: behind, ahead, diverged.
LOCAL=$(git rev-parse HEAD 2>/dev/null)
REMOTE=$(git rev-parse @{u} 2>/dev/null || echo "")
BASE=$(git merge-base HEAD @{u} 2>/dev/null || echo "")

if [ -z "$REMOTE" ]; then
    echo -e "${YELLOW}Warning: No upstream branch configured${NC}"
elif [ "$LOCAL" != "$REMOTE" ]; then
    if [ "$LOCAL" = "$BASE" ]; then
        # HEAD is an ancestor of upstream -> we are behind
        echo -e "${YELLOW}Local is behind remote. You may want to pull first.${NC}"
    elif [ "$REMOTE" = "$BASE" ]; then
        # Upstream is an ancestor of HEAD -> we have unpushed commits
        echo -e "${YELLOW}Local commits not pushed to remote:${NC}"
        git log --oneline @{u}..HEAD
        echo ""
        read -p "Push changes before publishing? (y/n) " -n 1 -r
        echo ""
        if [[ $REPLY =~ ^[Yy]$ ]]; then
            echo "Pushing..."
            git push
            echo -e "${GREEN}Pushed successfully${NC}"
        else
            echo -e "${YELLOW}Skipping push. Remote will not have latest changes.${NC}"
            read -p "Continue anyway? (y/n) " -n 1 -r
            echo ""
            if [[ ! $REPLY =~ ^[Yy]$ ]]; then
                echo "Aborted."
                exit 1
            fi
        fi
    else
        echo -e "${YELLOW}Warning: Local and remote have diverged${NC}"
        read -p "Continue anyway? (y/n) " -n 1 -r
        echo ""
        if [[ ! $REPLY =~ ^[Yy]$ ]]; then
            echo "Aborted."
            exit 1
        fi
    fi
else
    echo -e "${GREEN}Repository is up to date with remote${NC}"
fi

echo ""
echo -e "${GREEN}=== Connecting to SDF ===${NC}"
echo ""

# Build the remote command
REMOTE_CMD="cd $REMOTE_DIR && git pull && hugo && mkhomepg -p"

# Try SSH with key auth first, fall back to password prompt
# NOTE(review): both branches run the identical ssh command; the BatchMode
# probe only selects which status line is printed and costs one extra
# connection. Consider collapsing to a single ssh invocation.
if ssh -o BatchMode=yes -o ConnectTimeout=5 "$SDF_HOST" "echo 'SSH key auth successful'" 2>/dev/null; then
    echo "Using SSH key authentication..."
    ssh "$SDF_HOST" "$REMOTE_CMD"
else
    echo "SSH key auth failed or not configured, using password..."
    ssh "$SDF_HOST" "$REMOTE_CMD"
fi

echo ""
echo -e "${GREEN}=== Published successfully! ===${NC}"
echo "Site is live at: https://mnw.sdf.org/"
|
||||
43
scripts/setup.sh
Executable file
@@ -0,0 +1,43 @@
|
||||
#!/bin/bash
#
# Setup script for marcus-web blog scripts
# Run this once after cloning the repo on a new machine
#
# Creates a project-local Python venv at .venv/ and installs
# requirements.txt into it. Safe to re-run: the venv is only created
# when missing, and pip installs are idempotent.

set -e

# Resolve the scripts/ directory and repo root regardless of the cwd
# the script is invoked from.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(dirname "$SCRIPT_DIR")"

echo "Setting up marcus-web scripts..."
echo "Project root: $PROJECT_ROOT"

# Check for Python 3
if ! command -v python3 &> /dev/null; then
    echo "ERROR: python3 not found. Please install Python 3."
    exit 1
fi

# Create virtual environment if it doesn't exist
VENV_DIR="$PROJECT_ROOT/.venv"
if [ ! -d "$VENV_DIR" ]; then
    echo "Creating virtual environment..."
    python3 -m venv "$VENV_DIR"
fi

# Activate and install dependencies
echo "Installing dependencies..."
source "$VENV_DIR/bin/activate"
pip install --quiet --upgrade pip
pip install --quiet -r "$PROJECT_ROOT/requirements.txt"

echo ""
echo "Done! To use the scripts, either:"
echo ""
echo "  1. Activate the venv first:"
echo "     source .venv/bin/activate"
echo "     python scripts/import_letterboxd.py"
echo ""
echo "  2. Or run directly with the venv python:"
echo "     .venv/bin/python scripts/import_letterboxd.py"
echo ""
|
||||
397
scripts/setup_nfr.py
Executable file
@@ -0,0 +1,397 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Setup NFR (National Film Registry) data for a specific year.
|
||||
|
||||
This script fetches the Library of Congress announcement for a given year,
|
||||
extracts film titles and descriptions, and generates a Python dictionary
|
||||
that can be added to new_nfr.py.
|
||||
|
||||
Usage:
|
||||
python3 scripts/setup_nfr.py 2024
|
||||
python3 scripts/setup_nfr.py 2015 --output scripts/nfr_data/nfr_2015.py
|
||||
python3 scripts/setup_nfr.py 2023 --no-ollama # Don't use ollama for extraction
|
||||
|
||||
Requirements:
|
||||
- requests library
|
||||
- access to ollama server (optional, for better extraction)
|
||||
|
||||
The script will:
|
||||
1. Search for the LOC announcement URL for the given year
|
||||
2. Fetch the announcement page
|
||||
3. Use ollama (if available) or basic parsing to extract film data
|
||||
4. Generate a Python dictionary with film titles, years, and descriptions
|
||||
5. Save to a file or print to stdout
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from urllib.parse import urljoin
|
||||
|
||||
import requests
|
||||
|
||||
# Configuration
|
||||
SCRIPT_DIR = Path(__file__).parent
|
||||
PROJECT_ROOT = SCRIPT_DIR.parent
|
||||
NFR_DATA_DIR = SCRIPT_DIR / "nfr_data"
|
||||
|
||||
# Ollama configuration
|
||||
OLLAMA_HOST = os.environ.get("OLLAMA_HOST", "http://192.168.0.109:11434")
|
||||
OLLAMA_MODEL = os.environ.get("OLLAMA_MODEL", "llama3.2") # or whatever model you have
|
||||
|
||||
|
||||
def search_for_nfr_announcement(year):
    """
    Search for the LOC NFR announcement URL for a given year.

    Automated web search is not implemented; the function prints the
    usual LOC announcement locations and asks the user for a URL,
    except for 2024 where the URLs are hard-coded.

    Returns dict with:
      - newsroom_url: Main press release (None if unknown/skipped)
      - blog_url: Blog announcement (if found)
      - webcast_url: Reserved for future use; currently always None
    """
    print(f"Searching for {year} NFR announcement...")

    results = {
        "newsroom_url": None,
        "blog_url": None,
        "webcast_url": None,
    }

    # Point the user at the common LOC announcement locations.
    # (The previous revision also built unused `urls_to_try` /
    # `search_queries` lists here; they were dead code and are removed.)
    print(f"\nPlease find the LOC announcement URL for {year}.")
    print(f"\nCommon places to look:")
    print(f" - https://newsroom.loc.gov/")
    print(f" - https://blogs.loc.gov/now-see-hear/")
    print(f" - https://www.loc.gov/programs/national-film-preservation-board/film-registry/")

    # For 2024, we know the URL
    if year == 2024:
        results["newsroom_url"] = "https://newsroom.loc.gov/news/25-films-named-to-national-film-registry-for-preservation/s/55d5285d-916f-4105-b7d4-7fc3ba8664e3"
        results["blog_url"] = "https://blogs.loc.gov/now-see-hear/2024/12/announcing-the-2024-national-film-registry/"
        return results

    # Prompt user for URL
    url = input(f"\nEnter the LOC announcement URL for {year} (or press Enter to skip): ").strip()
    if url:
        results["newsroom_url"] = url

    return results
|
||||
|
||||
|
||||
def fetch_url_content(url):
    """Fetch content from a URL and return the response body as text.

    Raises requests.HTTPError for non-2xx responses.
    """
    print(f"Fetching {url}...")
    response = requests.get(url, timeout=30)
    response.raise_for_status()
    return response.text
|
||||
|
||||
|
||||
def call_ollama(prompt, model=None, system_prompt=None):
    """
    Call ollama API to process text.

    Args:
        prompt: The user prompt
        model: Model name; defaults to the current OLLAMA_MODEL setting
        system_prompt: Optional system prompt

    Returns:
        The model's response text, or None if the request failed
    """
    # Resolve the model at call time. The previous signature used
    # `model=OLLAMA_MODEL`, which bound the default once at import time,
    # so main()'s `--ollama-model` override of the global was silently
    # ignored for callers that did not pass `model` explicitly.
    if model is None:
        model = OLLAMA_MODEL

    url = f"{OLLAMA_HOST}/api/generate"

    payload = {
        "model": model,
        "prompt": prompt,
        "stream": False,  # single JSON response instead of a token stream
    }

    if system_prompt:
        payload["system"] = system_prompt

    print(f"Calling ollama at {OLLAMA_HOST} with model {model}...")
    try:
        resp = requests.post(url, json=payload, timeout=300)  # 5 min timeout
        resp.raise_for_status()
        data = resp.json()
        return data.get("response", "")
    except requests.exceptions.RequestException as e:
        # Network / HTTP failures are reported, not raised: callers treat
        # None as "ollama unavailable" and fall back to basic parsing.
        print(f"Error calling ollama: {e}")
        return None
|
||||
|
||||
|
||||
def extract_films_with_ollama(html_content, year):
    """
    Use ollama to extract film data from HTML content.

    Sends the (truncated) page HTML to the model with a strict
    JSON-only instruction, then parses the reply.

    Returns a list of dicts with: title, year, description — or None if
    the model call failed or its reply was not parseable JSON.
    """
    system_prompt = """You are a helpful assistant that extracts structured data from web pages.
Your task is to extract information about films from National Film Registry announcements.
Output ONLY valid JSON, nothing else. No markdown formatting, no code blocks, just raw JSON."""

    # Doubled braces ({{ }}) below are literal braces in this f-string.
    user_prompt = f"""From the following HTML content, extract ALL films that were added to the National Film Registry in {year}.

For each film, extract:
1. The exact title
2. The release year of the film
3. The description/reason why it was selected for preservation

Format your response as a JSON array of objects with this structure:
[
  {{
    "title": "Film Title",
    "year": 1999,
    "description": "The reason it was selected..."
  }}
]

IMPORTANT:
- Extract ALL {year} films, typically 25 films
- Keep descriptions concise but complete
- Use the exact text from the announcement
- Output ONLY the JSON array, no other text
- Do not include markdown code blocks

HTML Content:
{html_content[:50000]}
"""  # Limit to first 50k chars to avoid token limits

    response = call_ollama(user_prompt, system_prompt=system_prompt)

    # call_ollama returns None on request failure and "" on an empty reply;
    # both mean there is nothing to parse.
    if not response:
        return None

    # Try to parse JSON from response
    try:
        # Sometimes models wrap in code blocks, try to extract.
        # Greedy [.*] with DOTALL grabs from the first '[' to the last ']'.
        json_match = re.search(r'(\[.*\])', response, re.DOTALL)
        if json_match:
            response = json_match.group(1)

        films = json.loads(response)
        return films
    except json.JSONDecodeError as e:
        print(f"Failed to parse JSON from ollama response: {e}")
        print(f"Response was: {response[:500]}...")
        return None
|
||||
|
||||
|
||||
def extract_films_basic(html_content, year):
    """
    Basic extraction without ollama - looks for common patterns.
    This is a fallback method and may not work for all years.

    Returns a list of film dicts (title/year/description placeholder),
    or None when nothing matched.
    """
    print("Using basic extraction (without ollama)...")
    print("Note: This may not capture all details. Consider using --ollama for better results.")

    results = []
    seen = set()

    # Heuristic: titles are usually rendered as "Some Title (YYYY)" —
    # a capitalized run of non-paren characters followed by a 4-digit year.
    for match in re.finditer(r'([A-Z][^(]{3,50})\s*\((\d{4})\)', html_content):
        candidate = match.group(1).strip()
        # Skip empty/too-short titles and duplicates.
        if not candidate or len(candidate) <= 3 or candidate in seen:
            continue
        try:
            release_year = int(match.group(2))
        except ValueError:
            continue
        # Only accept a plausible film year range.
        if not (1890 <= release_year <= year):
            continue
        results.append({
            "title": candidate,
            "year": release_year,
            "description": "[Description not extracted - please add manually]"
        })
        seen.add(candidate)

    return results if results else None
|
||||
|
||||
|
||||
def generate_python_dict(films, year):
    """
    Generate Python code for the NFR dictionary.

    Args:
        films: List of film dicts with "title", "year", "description"
        year: NFR induction year

    Returns:
        String containing Python code defining NFR_<year>
    """
    output = f'''# {year} National Film Registry inductees with LOC descriptions
# Source: [Add URL here]
NFR_{year} = {{'''

    for film in films:
        # Titles are emitted inside double quotes, so any double quote in
        # the title must be backslash-escaped or the generated module is a
        # SyntaxError (previously only single quotes were escaped).
        title = film["title"].replace("'", "\\'").replace('"', '\\"')
        # Descriptions are emitted inside single quotes; flatten newlines
        # so each entry stays on one generated line.
        desc = film["description"].replace("'", "\\'").replace("\n", " ")

        output += f'''
    "{title}": {{
        "year": {film["year"]},
        "description": '{desc}'
    }},'''

    output += "\n}\n"

    return output
|
||||
|
||||
|
||||
def save_nfr_data(films, year, output_path=None):
    """
    Save NFR data to a file.

    Args:
        films: List of film dicts
        year: NFR induction year
        output_path: Optional path to save to (default: nfr_data/nfr_YEAR.py)
    """
    # Default destination lives under scripts/nfr_data/; an explicit path
    # is used verbatim.
    if output_path is None:
        NFR_DATA_DIR.mkdir(exist_ok=True)
        destination = NFR_DATA_DIR / f"nfr_{year}.py"
    else:
        destination = Path(output_path)

    destination.write_text(generate_python_dict(films, year))

    print(f"\n✓ Saved to {destination}")
    print(f"\nTo use this data:")
    print(f" 1. Review and edit {destination} if needed")
    print(f" 2. Copy the NFR_{year} dictionary into scripts/new_nfr.py")
    print(f" 3. Update the script to handle multiple years")

    return destination
|
||||
|
||||
|
||||
def main():
    """CLI entry point: fetch, extract, preview, and save NFR data."""
    # The `global` declaration must precede any use of these names in the
    # function body. Previously it appeared *after* the argparse defaults
    # below (which read the globals), making the whole function a
    # SyntaxError: "name ... is used prior to global declaration".
    global OLLAMA_HOST, OLLAMA_MODEL

    parser = argparse.ArgumentParser(
        description="Setup NFR data for a specific year"
    )
    parser.add_argument("year", type=int, help="NFR induction year (e.g., 2024)")
    parser.add_argument(
        "--url",
        help="Direct URL to LOC announcement (skip search)"
    )
    parser.add_argument(
        "--output",
        help="Output file path (default: scripts/nfr_data/nfr_YEAR.py)"
    )
    parser.add_argument(
        "--no-ollama",
        action="store_true",
        help="Don't use ollama for extraction (use basic parsing)"
    )
    parser.add_argument(
        "--ollama-host",
        default=OLLAMA_HOST,
        help=f"Ollama server URL (default: {OLLAMA_HOST})"
    )
    parser.add_argument(
        "--ollama-model",
        default=OLLAMA_MODEL,
        help=f"Ollama model to use (default: {OLLAMA_MODEL})"
    )
    args = parser.parse_args()

    # Update ollama config from args
    OLLAMA_HOST = args.ollama_host
    OLLAMA_MODEL = args.ollama_model

    print(f"\n{'='*60}")
    print(f"Setting up NFR data for {args.year}")
    print(f"{'='*60}\n")

    # Get announcement URL
    if args.url:
        urls = {"newsroom_url": args.url}
    else:
        urls = search_for_nfr_announcement(args.year)

    if not urls.get("newsroom_url"):
        print("\nError: No announcement URL found.")
        print("Please provide a URL with --url")
        sys.exit(1)

    # Fetch content
    try:
        html_content = fetch_url_content(urls["newsroom_url"])
    except Exception as e:
        print(f"Error fetching URL: {e}")
        sys.exit(1)

    # Extract films: prefer ollama, fall back to the regex heuristic.
    films = None

    if not args.no_ollama:
        try:
            films = extract_films_with_ollama(html_content, args.year)
        except Exception as e:
            print(f"Error using ollama: {e}")
            print("Falling back to basic extraction...")

    if not films:
        films = extract_films_basic(html_content, args.year)

    if not films:
        print("\nError: Could not extract films from announcement.")
        print("Try:")
        print(" 1. Using --ollama if you skipped it")
        print(" 2. Manually creating the dictionary")
        sys.exit(1)

    print(f"\n✓ Extracted {len(films)} films")

    # Show preview (first five entries, descriptions truncated)
    print("\nPreview of extracted films:")
    for i, film in enumerate(films[:5], 1):
        print(f" {i}. {film['title']} ({film['year']})")
        if len(film['description']) > 100:
            print(f" {film['description'][:100]}...")
        else:
            print(f" {film['description']}")

    if len(films) > 5:
        print(f" ... and {len(films) - 5} more")

    # Confirm (default is yes; only an explicit 'n' cancels)
    confirm = input("\nSave this data? (Y/n): ").strip().lower()
    if confirm == 'n':
        print("Cancelled")
        sys.exit(0)

    # Save
    save_nfr_data(films, args.year, args.output)

    print("\n✓ Done!")


if __name__ == "__main__":
    main()
|
||||
122
scripts/venues.json
Normal file
@@ -0,0 +1,122 @@
|
||||
{
|
||||
"draught-house": {
|
||||
"name": "Draught House",
|
||||
"aliases": ["DH", "Draught House"],
|
||||
"address": "4112 Medical Pkwy, Austin, TX 78756",
|
||||
"beerlist": "https://www.draughthouse.com/drinks"
|
||||
},
|
||||
"abgb": {
|
||||
"name": "The ABGB",
|
||||
"aliases": ["ABGB", "The ABGB"],
|
||||
"address": "1305 W Oltorf St, Austin, TX 78704",
|
||||
"beerlist": "https://theabgb.com/beers/"
|
||||
},
|
||||
"lazarus": {
|
||||
"name": "Lazarus Brewing",
|
||||
"aliases": ["Lazarus", "Lazarus 2"],
|
||||
"address": "4803 Airport Blvd, Austin, TX 78751",
|
||||
"beerlist": "https://lazarusbrewing.com/our-beer/"
|
||||
},
|
||||
"abw": {
|
||||
"name": "Austin Beer Works",
|
||||
"aliases": ["ABW", "Austin Beer Works", "Austin Beerworks"],
|
||||
"address": "3001 Industrial Terrace, Austin, TX 78758",
|
||||
"beerlist": "https://austinbeerworks.com/tap-room"
|
||||
},
|
||||
"abw-sprinkle": {
|
||||
"name": "Austin Beer Works (Sprinkle Valley)",
|
||||
"aliases": ["Sprinkle Valley", "ABW Sprinkle"],
|
||||
"address": "10300 Springdale Rd, Austin, TX 78754",
|
||||
"beerlist": "https://austinbeerworks.com/page/welcome-to-sprinkle-valley"
|
||||
},
|
||||
"pint-house-burnet": {
|
||||
"name": "Pint House Burnet",
|
||||
"aliases": ["PHP Burnet", "Pint House Pizza Burnet", "Pinthouse Burnet"],
|
||||
"address": "4729 Burnet Rd, Austin, TX 78756",
|
||||
"beerlist": "https://pinthouse.com/burnet/beer/beer-on-tap"
|
||||
},
|
||||
"pint-house-south-lamar": {
|
||||
"name": "Pint House South Lamar",
|
||||
"aliases": ["PHP South Lamar", "Pint House Pizza South Lamar", "Pinthouse South Lamar"],
|
||||
"address": "4236 S Lamar Blvd, Austin, TX 78704",
|
||||
"beerlist": "https://pinthouse.com/ben-white/beer/beer-on-tap"
|
||||
},
|
||||
"pint-house-ben-white": {
|
||||
"name": "Pint House Ben White",
|
||||
"aliases": ["PHP Ben White", "Pint House Pizza Ben White"],
|
||||
"address": "2201 E Ben White Blvd, Austin, TX 78744",
|
||||
"beerlist": "https://pinthouse.com/ben-white/beer/beer-on-tap"
|
||||
},
|
||||
"burnet-go-to": {
|
||||
"name": "Burnet Go-To",
|
||||
"aliases": ["Burnet Go To", "Go-To", "Go To"],
|
||||
"address": "6800 Burnet Rd #2, Austin, TX 78757",
|
||||
"beerlist": "https://burnetgoto.com/draught-beer.html"
|
||||
},
|
||||
"black-star": {
|
||||
"name": "Black Star Coop",
|
||||
"aliases": ["Black Star", "Blackstar"],
|
||||
"address": "7020 Easy Wind Dr, Austin, TX 78752",
|
||||
"beerlist": "https://www.blackstar.coop/beer"
|
||||
},
|
||||
"celis": {
|
||||
"name": "Celis Brewery",
|
||||
"aliases": ["Celis"],
|
||||
"address": "10001 Metric Blvd, Austin, TX 78758",
|
||||
"beerlist": "https://www.celisbeers.com/copy-of-core-beers-1"
|
||||
},
|
||||
"batch": {
|
||||
"name": "Batch Kolache",
|
||||
"aliases": ["Batch", "Batch Kolache and Brewing"],
|
||||
"address": "3220 Manor Rd, Austin, TX 78723",
|
||||
"beerlist": "https://batchatx.com/beer/"
|
||||
},
|
||||
"easy-tiger-linc": {
|
||||
"name": "Easy Tiger Linc",
|
||||
"aliases": ["Easy Tiger", "Easy Tiger LINC"],
|
||||
"address": "6406 N Interstate Hwy 35 Ste 1100, Austin, TX 78752",
|
||||
"beerlist": "https://www.easytigerusa.com/location/easy-tiger-linc/"
|
||||
},
|
||||
"live-oak": {
|
||||
"name": "Live Oak Brewing Company",
|
||||
"aliases": ["Live Oak", "Live Oak Brewing"],
|
||||
"address": "1615 Crozier Ln, Del Valle, TX 78617",
|
||||
"beerlist": "https://liveoakbrewing.com/beer/"
|
||||
},
|
||||
"front-page": {
|
||||
"name": "Front Page",
|
||||
"aliases": ["Front Page Austin"],
|
||||
"address": "1023 Springdale Rd #1b, Austin, TX 78721",
|
||||
"beerlist": "https://www.frontpageaustin.com/beers"
|
||||
},
|
||||
"brewtorium": {
|
||||
"name": "Brewtorium",
|
||||
"aliases": ["The Brewtorium"],
|
||||
"address": "6015 Dillard Cir A, Austin, TX 78752",
|
||||
"beerlist": "https://www.thebrewtorium.com/beer"
|
||||
},
|
||||
"meanwhile": {
|
||||
"name": "Meanwhile Brewing",
|
||||
"aliases": ["Meanwhile"],
|
||||
"address": "3901 Promontory Point Dr, Austin, TX 78744",
|
||||
"beerlist": "https://www.meanwhilebeer.com/"
|
||||
},
|
||||
"oskar-blues": {
|
||||
"name": "Oskar Blues",
|
||||
"aliases": ["Oskar Blues Austin"],
|
||||
"address": "10420 Metric BLVD #150, Austin, TX 78758",
|
||||
"beerlist": "https://oskarblues.com/location/austin/#brewery-menu"
|
||||
},
|
||||
"austin-craft": {
|
||||
"name": "Austin Craft Brewing",
|
||||
"aliases": ["Austin Craft Brew"],
|
||||
"address": "4700 Burleson, Austin, TX",
|
||||
"beerlist": "https://www.austincraftbrew.com/"
|
||||
},
|
||||
"poodies": {
|
||||
"name": "Poodie's",
|
||||
"aliases": ["Poodies", "Poodie's Hilltop"],
|
||||
"address": "22308 TX-71, Spicewood, TX 78669",
|
||||
"beerlist": ""
|
||||
}
|
||||
}
|
||||
45
stash/index.html
Normal file
@@ -0,0 +1,45 @@
|
||||
{{ define "main" }}
{{/* Stashed landing-page template: page header, a list of the ten most
     recent "post" pages, and a sidebar with categories, tags and social
     links. Reads site params mainpagetitle / mainpagesubtitle /
     mainpagedesc and i18n keys postslist / categories / tags / partials. */}}

<div class="row-fluid navmargin">
    <div class="page-header">
        <h1>{{ .Site.Params.mainpagetitle }}</h1>
    </div>
</div>

<div class="row-fluid">
    {{/* Main column: intro text plus recent-post list */}}
    <div class="span9 bs-docs-sidebar">
        <p class="lead">{{ .Site.Params.mainpagesubtitle }}</p>
        <p></p>
        <p>{{ .Site.Params.mainpagedesc | markdownify }}</p>
        <p></p>
        <hr class="soften">
        <p></p>
        <h1>{{ i18n "postslist" }}</h1>
        <ul>
            {{/* Filters the first 10 regular pages down to type "post",
                 so fewer than 10 items can be shown. */}}
            {{ range first 10 .Site.RegularPages }}
            {{ if eq .Type "post" }}
            <li><a href="{{ .Permalink }}">{{ .Date.Format "2006-01-02" }} | {{ .Title }}</a></li>
            {{ end }}
            {{ end }}
        </ul>
    </div>

    {{/* Sidebar column */}}
    <div class="span3 bs-docs-sidebar">
        <h1>{{ i18n "categories" }}</h1>
        <ul class="nav nav-list bs-docs-sidenav">
            {{ partial "categories.html" .}}
        </ul>
        <p></p>
        <h1>{{ i18n "tags" }}</h1>
        <ul class="nav nav-list bs-docs-sidenav">
            {{ partial "tags.html" .}}
        </ul>
        <p></p>
        {{/* NOTE(review): the i18n key "partials" reads like an internal
             term; since this heads the socials list, confirm whether the
             key should be something user-facing like "socials". */}}
        <h1>{{ i18n "partials" }}</h1>
        <ul class="nav nav-list bs-docs-sidenav">
            {{ partial "socials.html" .}}
        </ul>
    </div>
</div>

{{ end }}
|
||||
213
static/css/mastodon-comments.css
Normal file
@@ -0,0 +1,213 @@
|
||||
/**
 * Mastodon Comments Stylesheet
 *
 * Teletype / Fax Machine / I Saw the TV Glow aesthetic
 * Works with various Hugo themes
 */

/* --- Section shell: monospace container with dashed divider --- */

.mastodon-comments-section {
  margin-top: 3rem;
  padding-top: 2rem;
  border-top: 2px dashed #888;
  font-family: 'Courier New', Courier, monospace;
}

/* ASCII-art style header rendered as preformatted text */
.comments-header {
  margin: 0 0 1.5rem 0;
  padding: 0;
  font-size: 0.85rem;
  color: #888;
  background: none;
  border: none;
  white-space: pre;
  overflow-x: auto;
}

.comments-intro {
  font-size: 0.9rem;
  margin-bottom: 1.5rem;
  letter-spacing: 0.05em;
}

.comments-intro a {
  text-decoration: underline;
}

.comments-note {
  margin-top: 1rem;
  color: #666;
  font-size: 0.8rem;
}

/* Reserve space so the layout doesn't jump while comments load */
#mastodon-comments-list {
  min-height: 50px;
}

/* --- Load button and status banners --- */

#load-comments-btn {
  font-family: 'Courier New', Courier, monospace;
  font-size: 1rem;
  padding: 0.75rem 1.5rem;
  background: transparent;
  border: 2px solid currentColor;
  cursor: pointer;
  letter-spacing: 0.1em;
  transition: all 0.2s ease;
}

#load-comments-btn:hover {
  background: #333;
  color: #fff;
}

/* Shared banner look for loading / empty / success / error states */
.loading,
.no-comments,
.comments-received,
.comments-error {
  padding: 1rem;
  margin: 1rem 0;
  background: #f5f5f5;
  border-left: 4px solid #888;
  font-size: 0.85rem;
  white-space: pre-wrap;
}

.comments-error {
  border-left-color: #c00;
  color: #900;
}

.comments-received {
  border-left-color: #080;
  color: #060;
}

/* Individual comment styling */
.mastodon-comment {
  margin: 1.5rem 0;
  padding: 1rem;
  background: #fafafa;
  border: 1px solid #ddd;
}

.mastodon-comment .comment-header pre {
  margin: 0 0 0.75rem 0;
  padding: 0;
  font-size: 0.75rem;
  color: #666;
  background: none;
  border: none;
  white-space: pre;
}

/* Author row: avatar next to stacked name/handle */
.mastodon-comment .comment-author {
  display: flex;
  align-items: center;
  gap: 0.75rem;
  margin-bottom: 0.75rem;
}

.mastodon-comment .avatar {
  width: 48px;
  height: 48px;
  border-radius: 4px;
  border: 1px solid #ccc;
}

.mastodon-comment .author-info {
  display: flex;
  flex-direction: column;
}

.mastodon-comment .display-name {
  font-weight: bold;
  text-decoration: none;
}

.mastodon-comment .display-name:hover {
  text-decoration: underline;
}

.mastodon-comment .handle {
  font-size: 0.8rem;
  color: #666;
}

/* Inline custom emoji in display names / content */
.mastodon-comment .emoji {
  height: 18px;
  width: 18px;
  vertical-align: middle;
}

.mastodon-comment .comment-content {
  margin: 1rem 0;
  line-height: 1.5;
}

.mastodon-comment .comment-content p {
  margin: 0.5rem 0;
}

/* Long pasted URLs should wrap rather than overflow the card */
.mastodon-comment .comment-content a {
  word-break: break-all;
}

.mastodon-comment .comment-attachments {
  margin: 1rem 0;
}

.mastodon-comment .comment-attachments img,
.mastodon-comment .comment-attachments video {
  max-width: 100%;
  max-height: 300px;
  border: 1px solid #ccc;
}

/* Footer line: timestamp / permalink / reply count */
.mastodon-comment .comment-meta {
  font-size: 0.8rem;
  color: #666;
  margin-top: 0.75rem;
  padding-top: 0.5rem;
  border-top: 1px dashed #ccc;
}

.mastodon-comment .comment-meta a {
  text-decoration: none;
  color: #666;
}

.mastodon-comment .comment-meta a:hover {
  text-decoration: underline;
}

.mastodon-comment .replies {
  color: #888;
}

/* Dark mode support */
@media (prefers-color-scheme: dark) {
  .mastodon-comment {
    background: #1a1a1a;
    border-color: #444;
  }

  .mastodon-comment .comment-header pre {
    color: #888;
  }

  .mastodon-comment .handle,
  .mastodon-comment .comment-meta,
  .mastodon-comment .comment-meta a {
    color: #999;
  }

  .loading,
  .no-comments,
  .comments-received {
    background: #222;
  }

  /* Hover inverts relative to the light theme */
  #load-comments-btn:hover {
    background: #eee;
    color: #000;
  }
}
|
||||
BIN
static/favicon.ico
Normal file
|
After Width: | Height: | Size: 1.4 KiB |
BIN
static/images/article-art/recife-map.png
Normal file
|
After Width: | Height: | Size: 254 KiB |
BIN
static/images/circular-me-250.png
Normal file
|
After Width: | Height: | Size: 92 KiB |
BIN
static/images/circular-me.png
Normal file
|
After Width: | Height: | Size: 362 KiB |
BIN
static/images/darkglassesmarcus.png
Normal file
|
After Width: | Height: | Size: 713 KiB |
BIN
static/images/favion.ico
Normal file
|
After Width: | Height: | Size: 1.4 KiB |
BIN
static/images/posters/avatar-fire-and-ash.jpg
Normal file
|
After Width: | Height: | Size: 113 KiB |
BIN
static/images/posters/cloud-atlas-pancake.jpg
Normal file
|
After Width: | Height: | Size: 69 KiB |
BIN
static/images/posters/joker-folie-a-deux.jpg
Normal file
|
After Width: | Height: | Size: 67 KiB |
BIN
static/images/posters/lethal-tender.jpg
Normal file
|
After Width: | Height: | Size: 82 KiB |
BIN
static/images/posters/megalopolis.jpg
Normal file
|
After Width: | Height: | Size: 67 KiB |
BIN
static/images/posters/terrifier-3.jpg
Normal file
|
After Width: | Height: | Size: 98 KiB |
BIN
static/images/posters/the-housemaid.jpg
Normal file
|
After Width: | Height: | Size: 72 KiB |
BIN
static/images/posters/the-secret-agent.jpg
Normal file
|
After Width: | Height: | Size: 99 KiB |
BIN
static/images/posters/uptown-saturday-night.jpg
Normal file
|
After Width: | Height: | Size: 73 KiB |
BIN
static/images/posters/urchin.jpg
Normal file
|
After Width: | Height: | Size: 62 KiB |
BIN
static/images/posters/will-and-harper.jpg
Normal file
|
After Width: | Height: | Size: 47 KiB |
BIN
static/images/tvglow-darkroom.gif
Normal file
|
After Width: | Height: | Size: 3.4 MiB |
141
static/js/mastodon-comments.js
Normal file
@@ -0,0 +1,141 @@
|
||||
/**
|
||||
* Mastodon Comments Loader
|
||||
*
|
||||
* Fetches and displays replies to a Mastodon post.
|
||||
* Uses DOMPurify for HTML sanitization.
|
||||
*
|
||||
* Inspired by Andreas Scherbaum's implementation.
|
||||
* Aesthetic inspired by I Saw the TV Glow.
|
||||
*/
|
||||
|
||||
// Guard flag: set after a successful fetch so the thread is only loaded once.
var commentsLoaded = false;
|
||||
|
||||
/**
 * Escape HTML special characters so untrusted text can be embedded
 * safely inside markup assembled by string concatenation.
 *
 * @param {string|null|undefined} unsafe - Raw text; any falsy value yields ''.
 * @returns {string} Entity-escaped text.
 */
function escapeHtml(unsafe) {
  if (!unsafe) return '';
  // Ampersand must be replaced first, otherwise the entities produced
  // by the later replacements would themselves be double-escaped.
  return unsafe
    .replace(/&/g, "&amp;")
    .replace(/</g, "&lt;")
    .replace(/>/g, "&gt;")
    .replace(/"/g, "&quot;")
    .replace(/'/g, "&#039;");
}
|
||||
|
||||
/**
 * Format a date string as "YYYY-MM-DD HH:MM" in the viewer's local time.
 *
 * @param {string} dateStr - Any string accepted by the Date constructor.
 * @returns {string} Zero-padded local timestamp, minute precision.
 */
function formatDate(dateStr) {
  const when = new Date(dateStr);
  const pad2 = (value) => String(value).padStart(2, '0');
  const datePart = when.getFullYear() + '-' + pad2(when.getMonth() + 1) + '-' + pad2(when.getDate());
  const timePart = pad2(when.getHours()) + ':' + pad2(when.getMinutes());
  return datePart + ' ' + timePart;
}
|
||||
|
||||
/**
 * Build a fully-qualified Mastodon handle ("@user@instance").
 *
 * Remote accounts already carry their domain in `acct`; local accounts
 * do not, so the domain is recovered from the account's profile URL.
 *
 * @param {{acct: string, url: string}} account - Mastodon account object.
 * @returns {string} Handle including the instance domain.
 */
function getUserHandle(account) {
  if (account.acct.includes('@')) {
    // Already qualified (remote account).
    return '@' + account.acct;
  }
  const instance = new URL(account.url).hostname;
  return '@' + account.acct + '@' + instance;
}
|
||||
|
||||
/**
 * Render one Mastodon status ("toot") as an HTML comment card.
 *
 * Uses the file-level helpers `escapeHtml`, `getUserHandle` and `formatDate`,
 * plus a `blockedToots` array of status URLs declared elsewhere in the site
 * (presumably injected by the page template — confirm).
 *
 * NOTE(review): `toot.content` is concatenated in as raw HTML; the caller
 * (loadMastodonComments) passes the assembled markup through DOMPurify
 * before inserting it into the DOM.
 *
 * @param {Object} toot - Status object from the Mastodon API.
 * @param {number} depth - Nesting depth; values > 0 indent the card.
 * @returns {string} HTML for the comment, or '' when the toot is blocked.
 */
function renderComment(toot, depth) {
  // Skip blocked toots
  if (blockedToots.includes(toot.url)) {
    return '';
  }

  // Process display name with custom emojis
  // NOTE(review): String.replace with a string pattern swaps only the FIRST
  // occurrence of each :shortcode:, and emoji.shortcode is not escaped in
  // the alt attribute — DOMPurify downstream limits the risk, but confirm.
  var displayName = escapeHtml(toot.account.display_name || toot.account.username);
  toot.account.emojis.forEach(function(emoji) {
    var emojiImg = '<img src="' + escapeHtml(emoji.static_url) + '" alt=":' + emoji.shortcode + ':" class="emoji" height="18" width="18">';
    displayName = displayName.replace(':' + emoji.shortcode + ':', emojiImg);
  });

  // Nested replies are indented 20px per level with a dashed left rule.
  var indent = depth > 0 ? ' style="margin-left: ' + (depth * 20) + 'px; border-left: 2px dashed #666;"' : '';

  var html = '<div class="mastodon-comment"' + indent + '>';

  // ASCII-art header block (retro terminal aesthetic): sender and date.
  html += '<div class="comment-header">';
  html += '<pre>';
  html += '/* ---------------------------------------- */\n';
  html += '/* FROM: ' + getUserHandle(toot.account).padEnd(32) + ' */\n';
  html += '/* DATE: ' + formatDate(toot.created_at).padEnd(32) + ' */\n';
  html += '/* ---------------------------------------- */</pre>';
  html += '</div>';

  // Author row: avatar plus display name (linked) and handle.
  html += '<div class="comment-author">';
  html += '<img src="' + escapeHtml(toot.account.avatar_static) + '" alt="" class="avatar">';
  html += '<div class="author-info">';
  html += '<a href="' + escapeHtml(toot.account.url) + '" rel="nofollow" class="display-name">' + displayName + '</a>';
  html += '<span class="handle">' + escapeHtml(getUserHandle(toot.account)) + '</span>';
  html += '</div>';
  html += '</div>';

  // Toot body (raw HTML — sanitized by the caller, see note above).
  html += '<div class="comment-content">' + toot.content + '</div>';

  // Media attachments
  if (toot.media_attachments && toot.media_attachments.length > 0) {
    html += '<div class="comment-attachments">';
    toot.media_attachments.forEach(function(attachment) {
      if (attachment.type === 'image') {
        // Thumbnail linking to the full-size image.
        html += '<a href="' + escapeHtml(attachment.url) + '" rel="nofollow"><img src="' + escapeHtml(attachment.preview_url) + '" alt="' + escapeHtml(attachment.description || 'attachment') + '"></a>';
      } else if (attachment.type === 'video' || attachment.type === 'gifv') {
        // gifv attachments autoplay muted in a loop, like a GIF.
        html += '<video controls ' + (attachment.type === 'gifv' ? 'autoplay loop muted' : '') + '><source src="' + escapeHtml(attachment.url) + '"></video>';
      }
      // Other attachment types (audio, unknown) are silently skipped.
    });
    html += '</div>';
  }

  // Footer: permalink and (when present) reply count.
  html += '<div class="comment-meta">';
  html += '<a href="' + escapeHtml(toot.url) + '" rel="nofollow" class="comment-link">[VIEW ORIGINAL]</a>';
  if (toot.replies_count > 0) {
    html += ' <span class="replies">[' + toot.replies_count + ' REPLIES]</span>';
  }
  html += '</div>';

  html += '</div>';

  return html;
}
|
||||
|
||||
/**
 * Recursively render a flat list of toots as a threaded HTML string.
 *
 * Direct replies to `parentId` are emitted in chronological order; each
 * reply is immediately followed by its own subtree at depth + 1.
 *
 * @param {Object[]} toots - All descendant statuses of the root post.
 * @param {string} parentId - Status id whose replies should be rendered.
 * @param {number} depth - Current nesting level (0 for top-level replies).
 * @returns {string} Concatenated HTML for the whole subtree.
 */
function renderComments(toots, parentId, depth) {
  // filter() returns a fresh array, so sorting it in place is safe.
  const directReplies = toots.filter(function(toot) {
    return toot.in_reply_to_id === parentId;
  });
  directReplies.sort(function(a, b) {
    return a.created_at.localeCompare(b.created_at);
  });

  let thread = '';
  for (const reply of directReplies) {
    thread += renderComment(reply, depth);
    thread += renderComments(toots, reply.id, depth + 1);
  }
  return thread;
}
|
||||
|
||||
/**
 * Fetch replies to the configured Mastodon post and render them into the
 * `#mastodon-comments-list` element.
 *
 * Reads the file-level globals `mastodonHost` and `mastodonId`
 * (presumably injected by the page template — confirm) and the
 * `commentsLoaded` guard. The assembled markup is passed through
 * DOMPurify before being inserted into the DOM.
 */
function loadMastodonComments() {
  // Already fetched successfully — nothing to do.
  if (commentsLoaded) return;

  var container = document.getElementById('mastodon-comments-list');
  // Retro "terminal" placeholder while the request is in flight.
  container.innerHTML = '<pre class="loading">\n++ ESTABLISHING CONNECTION TO ' + mastodonHost.toUpperCase() + ' ++\n++ PLEASE STAND BY ++\n</pre>';

  // Mastodon context endpoint: returns ancestors + descendants of a status.
  var apiUrl = 'https://' + mastodonHost + '/api/v1/statuses/' + mastodonId + '/context';

  fetch(apiUrl)
    .then(function(response) {
      if (!response.ok) throw new Error('Network response was not ok');
      return response.json();
    })
    .then(function(data) {
      if (data.descendants && data.descendants.length > 0) {
        var html = '<pre class="comments-received">\n++ TRANSMISSION COMPLETE ++\n++ ' + data.descendants.length + ' COMMENT(S) RECEIVED ++\n</pre>';
        html += renderComments(data.descendants, mastodonId, 0);
        // Sanitize the assembled HTML; ADD_ATTR keeps target attributes
        // that DOMPurify would otherwise strip.
        container.innerHTML = DOMPurify.sanitize(html, { ADD_ATTR: ['target'] });
      } else {
        container.innerHTML = '<pre class="no-comments">\n++ NO COMMENTS RECEIVED ++\n++ BE THE FIRST TO RESPOND ++\n</pre>';
      }
      // Set only on success, so a failed fetch can be retried by
      // calling this function again.
      commentsLoaded = true;
    })
    .catch(function(error) {
      // error.message is escaped before being echoed into markup.
      container.innerHTML = '<pre class="comments-error">\n++ TRANSMISSION ERROR ++\n++ FAILED TO LOAD COMMENTS ++\n++ ERROR: ' + escapeHtml(error.message) + ' ++\n</pre>';
    });
}
|
||||