Temp Finish (Lesson 01)

This commit is contained in:
MangoPig 2025-12-09 22:10:27 +00:00
parent 060839a6b7
commit 6c523d019e
38 changed files with 375 additions and 40 deletions

File diff suppressed because one or more lines are too long

Binary file not shown.

After

Width:  |  Height:  |  Size: 22 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 33 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.1 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 4.5 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 7.7 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 12 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 15 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 16 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 22 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 23 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 30 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 5.6 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 6.0 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 7.7 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 8.4 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 33 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 33 KiB

View File

@ -0,0 +1,2 @@
<?xml version="1.0" encoding="utf-8"?>
<browserconfig><msapplication><tile><square70x70logo src="/ms-icon-70x70.png"/><square150x150logo src="/ms-icon-150x150.png"/><square310x310logo src="/ms-icon-310x310.png"/><TileColor>#ffffff</TileColor></tile></msapplication></browserconfig>

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.5 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.6 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 12 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.1 KiB

View File

@ -0,0 +1,41 @@
{
"name": "App",
"icons": [
{
"src": "\/android-icon-36x36.png",
"sizes": "36x36",
"type": "image\/png",
"density": "0.75"
},
{
"src": "\/android-icon-48x48.png",
"sizes": "48x48",
"type": "image\/png",
"density": "1.0"
},
{
"src": "\/android-icon-72x72.png",
"sizes": "72x72",
"type": "image\/png",
"density": "1.5"
},
{
"src": "\/android-icon-96x96.png",
"sizes": "96x96",
"type": "image\/png",
"density": "2.0"
},
{
"src": "\/android-icon-144x144.png",
"sizes": "144x144",
"type": "image\/png",
"density": "3.0"
},
{
"src": "\/android-icon-192x192.png",
"sizes": "192x192",
"type": "image\/png",
"density": "4.0"
}
]
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 22 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 23 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 67 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 7.5 KiB

View File

@ -0,0 +1,21 @@
---
// Path: frontend/src/components/Post/Blockquotes/Homework.astro
// Homework blockquote used inside lesson MDX: renders slotted task content
// with a decorative mascot image pinned to the bottom-right corner
// (positioning lives in Homework.module.scss).
import styles from "./Homework.module.scss";
// toc / tocLevel are read by the table-of-contents generator via the
// data-toc / data-toc-level attributes emitted below.
interface Props {
toc?: string;
tocLevel?: string;
imageAlt?: string;
}
// Defaults make the component usable with no props: <Homework>...</Homework>
const { toc = "Homework", tocLevel = "1", imageAlt = "MangoPig Homework" } = Astro.props;
---
<blockquote class={styles.homework} data-toc={toc} data-toc-level={tocLevel}>
<slot />
<picture>
<img src="https://pic.mangopig.tech/i/ce28cb80-5190-4fb3-b193-8b082cb326d8.webp" alt={imageAlt} />
</picture>
</blockquote>

View File

@ -0,0 +1,41 @@
/* Path: frontend/src/components/Post/Blockquotes/Homework.module.scss */
// Styles for the <Homework> blockquote component: a tinted rounded card
// with a mascot image rotated into the bottom-right corner.
.homework {
background-color: #a95eff1a; // brand purple at ~10% alpha
padding: 30px;
border-radius: 10px;
position: relative; // anchor for the absolutely-positioned picture/span below
min-height: 100px; // keeps the card tall enough for the corner image
picture {
position: absolute;
// negative offsets let the image overhang the card's corner
bottom: -10px;
right: -10px;
margin: 0;
width: 200px;
max-width: 30%; // shrink on narrow cards so the image never dominates
transform: rotate(10deg); // playful tilt
img {
width: 100%;
height: auto;
box-shadow: none; // override the site-wide image shadow
}
}
ul {
list-style-type: disc;
padding-left: 20px;
margin-right: 220px; // reserve space so list text doesn't run under the image
}
span {
// vertically centre a lone inline label within the card
position: absolute;
top: 50%;
left: 30px;
transform: translateY(-50%);
}
}

View File

@ -1,5 +1,5 @@
--- ---
# Path: src/content/lessons/01-intro.mdx # Path: frontend/src/content/lessons/01-intro.mdx
title: "Introduction to Web Dev" title: "Introduction to Web Dev"
description: "Setting up the environment" description: "Setting up the environment"
@ -8,6 +8,7 @@ style: "type-1"
{/* Blockquotes */} {/* Blockquotes */}
import Ganbatte from "../../components/Post/Blockquotes/Ganbatte.astro"; import Ganbatte from "../../components/Post/Blockquotes/Ganbatte.astro";
import Homework from "../../components/Post/Blockquotes/Homework.astro";
import Important from "../../components/Post/Blockquotes/Important.astro"; import Important from "../../components/Post/Blockquotes/Important.astro";
import Info from "../../components/Post/Blockquotes/Info.astro"; import Info from "../../components/Post/Blockquotes/Info.astro";
import QA from "../../components/Post/Blockquotes/QA.astro"; import QA from "../../components/Post/Blockquotes/QA.astro";
@ -35,8 +36,6 @@ import Spoiler from "../../components/Post/Spoiler.tsx";
- Converting the model to GGUF format - Converting the model to GGUF format
- Quantizing the model for better performance - Quantizing the model for better performance
- Hosting a basic LLM model with llama.cpp locally - Hosting a basic LLM model with llama.cpp locally
- (To Be Added) Making a volume mount to persist LLM data across container restarts
- (To Be Added) Tagging the Docker Image for future reuse
</Ganbatte> </Ganbatte>
@ -115,6 +114,7 @@ import Spoiler from "../../components/Post/Spoiler.tsx";
``` ```
</section> </section>
</section> </section>
<section data-toc="Docker Environment Setup" data-toc-level="1"> <section data-toc="Docker Environment Setup" data-toc-level="1">
@ -153,7 +153,7 @@ import Spoiler from "../../components/Post/Spoiler.tsx";
- `--name` llm-container gives the container a name for easier reference. - `--name` llm-container gives the container a name for easier reference.
- `-p 8080:8080` = `-p HOST:CONTAINER` maps port 8080 on your host machine to port 8080 inside the container. This is useful if you plan to run a server inside the container and want to access it from your host machine. - `-p 8080:8080` = `-p HOST:CONTAINER` maps port 8080 on your host machine to port 8080 inside the container. This is useful if you plan to run a server inside the container and want to access it from your host machine.
- `nvidia/cuda:13.0.2-cudnn-runtime-ubuntu24.04` specifies the Docker image to use. - `nvidia/cuda:13.0.2-cudnn-runtime-ubuntu24.04` specifies the Docker image to use.
- `/bin/bash` starts a bash shell inside the container. - `/bin/bash` start point for the container, which opens a bash shell.
</Info> </Info>
Once you are inside the container, you can proceed to setup the environment like we did before in the <a href="#setting-up-developer-environment">WSL section</a>. Once you are inside the container, you can proceed to setup the environment like we did before in the <a href="#setting-up-developer-environment">WSL section</a>.
@ -550,6 +550,7 @@ import Spoiler from "../../components/Post/Spoiler.tsx";
``` ```
Then you have succeeded in converting the model to GGUF format! Then you have succeeded in converting the model to GGUF format!
</section> </section>
<section data-toc="Quantizing the Model" data-toc-level="1"> <section data-toc="Quantizing the Model" data-toc-level="1">
@ -730,3 +731,227 @@ import Spoiler from "../../components/Post/Spoiler.tsx";
</Info> </Info>
</section> </section>
<section data-toc="Docker Volume Mount" data-toc-level="1">
<h2>Docker Volume Mount</h2>
Before we continue, we are going to destroy everything that we have worked on so far:
```zsh frame="none"
exit # As many times as needed to exit the container to your host shell
docker stop llm-container
docker rm llm-container
```
This is to show that, whenever we remove the Docker container, all the data inside the container will be lost. This is bad because we don't want to redownload and reconvert the models every time we restart the container.
To solve this issue, we can use Docker volume mounts to persist our data.
Docker volume maps directories from your host machine to the Docker container.
It's a little bit like plugging in a USB drive to your computer, so that the data on the USB drive is accessible even if you remove the USB drive.
When you run the Docker container, you can use the `-v` option to specify volume mounts.
```zsh frame="none"
docker run \
--gpus all \
-it \
-v ~/Models:/Models \
--name llm-container \
-p 8080:8080 \
nvidia/cuda:13.0.2-cudnn-devel-ubuntu24.04 \
/bin/bash
```
<Info>
- `-v ~/Models:/Models`: This maps the `~/Models` directory on your host machine to the `/Models` directory inside the Docker container.
- The left side (`~/Models`) is the path on your host machine.
- The right side (`/Models`) is the path inside the Docker container.
- With this setup, any models you download to `~/Models` on your host machine will be accessible at `/Models` inside the Docker container, and vice versa.
</Info>
Now, it's your turn to set up everything again inside the Docker container, but this time, when you download and convert the models, make sure to save them to the `/Models` directory inside the container. Try to do it on your own!
<Homework>
<h3>Your Task</h3>
1. Setting up Hugging Face CLI and downloading the model to `~/Models` in your host machine
2. Starting a Docker container and mounting `~/Models` to `/Models` in the container
3. Initializing the container with the scripts provided
- apt update and install dependencies
- delete default user
- provisional script
- log into to your own user account
4. Cloning llama.cpp and building it
5. Converting the model to GGUF and quantizing it (Remember your models are in `/Models` now!)
6. Running the server with the model from `/Models`
</Homework>
The solution is below if you get stuck:
<Spoiler client:idle>
1. Setting up Hugging Face CLI and downloading the model to `~/Models` in your host machine
```zsh frame="none"
mkdir -p ~/Models
cd ~/Models
curl -LsSf https://hf.co/cli/install.sh | bash
git config --global credential.helper store
hf auth login
hf download HuggingFaceTB/SmolLM3-3B --local-dir ~/Models/SmolLM3-3B
```
2. Starting a Docker container and mounting `~/Models` to `/Models` in the container
```zsh frame="none"
docker run \
--gpus all \
-it \
-v ~/Models:/Models \
--name llm-container \
-p 8080:8080 \
nvidia/cuda:13.0.2-cudnn-devel-ubuntu24.04 \
/bin/bash
```
3. Initializing the container with the scripts provided
```zsh frame="none"
apt update && apt install -y git make curl sudo zsh
userdel -r ubuntu
bash <(curl -s https://git.mangopig.tech/mangopig/Dot-Zsh/raw/branch/main/scripts/provision.sh)
su - mangopig
```
```zsh frame="none"
cd ~/Config/Dot-Zsh
make base && \
make python && \
make clean && \
make stow && \
zsh
```
OR you can just run:
```zsh frame="none"
cd ~/Config/Dot-Zsh
make setup && \
zsh
```
4. Cloning llama.cpp and building it
```zsh frame="none"
mkdir -p ~/Projects/llama.cpp
cd ~/Projects/llama.cpp
git clone https://github.com/ggerganov/llama.cpp.git .
cmake -S . -B build -G Ninja -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr/local -DLLAMA_BUILD_TESTS=OFF -DLLAMA_BUILD_EXAMPLES=ON -DLLAMA_BUILD_SERVER=ON
cmake --build build --config Release -j $(nproc)
sudo cmake --install build && \
sudo ldconfig
```
5. Converting the model to GGUF and quantizing it (Remember your models are in `/Models` now!)
```zsh frame="none"
conda create -n llama-cpp python=3.10 -y
conda activate llama-cpp
python -m pip install --upgrade pip wheel setuptools
pip install --upgrade -r ~/Projects/llama.cpp/requirements/requirements-convert_hf_to_gguf.txt
python ~/Projects/llama.cpp/convert_hf_to_gguf.py \
/Models/SmolLM3-3B \
--outfile /Models/SmolLM3-3B/SmolLM3-3B.gguf
llama-quantize \
/Models/SmolLM3-3B/SmolLM3-3B.gguf \
/Models/SmolLM3-3B/SmolLM3-3B.q4.gguf \
q4_0
4
```
6. Running the server with the model from `/Models`
```zsh frame="none"
llama-server \
--model /Models/SmolLM3-3B/SmolLM3-3B.q4.gguf \
--host 0.0.0.0 \
--port 8080
```
</Spoiler>
If you managed it without help — congratulations! You have successfully set up a persistent environment for running llama.cpp with Docker volume mounts!
<h3 data-toc="Conclusion" data-toc-level="1">Wrapping Up</h3>
Your LLM server will still stop when you stop the container, though. In the future, we will cover topics that will help solve these issues:
- Creating Custom Docker Images to Preserve Setup
- Deploying LLM Server to the Cloud
- Hosting Multiple Models and Switching Between Them
- Using docker-compose to Manage Multiple Containers
<h3 data-toc="Tmux Session Persistence" data-toc-level="2">Tmux Session Persistence</h3>
For now, if you want to keep the server running after exiting the terminal, you can use `tmux` or `screen` to create a persistent session inside the Docker container.
1. Enter the Docker container again (if you have exited it):
```zsh frame="none"
docker start llm-container
```
```zsh frame="none"
docker exec -it --user YOUR_USERNAME llm-container /bin/zsh
```
2. Install `tmux` inside the container
```zsh frame="none"
sudo apt install -y tmux
tmux new -s llm-server
```
3. Start the server inside the `tmux` session
```zsh frame="none"
llama-server \
--model /Models/SmolLM3-3B/SmolLM3-3B.q4.gguf \
--host 0.0.0.0 \
--port 8080
```
4. To detach from the `tmux` session and keep it running in the background, press `Ctrl + B`, then `D`.
5. To reattach to the `tmux` session later, use:
```zsh frame="none"
tmux attach -t llm-server
```
<h3 data-toc="Basic Container Management" data-toc-level="2">Basic Container Management</h3>
This session will persist as long as the Docker container is running. Your setup will also persist as long as you don't remove the Docker container. But if you want to free up some resources, you should stop the container when not in use.
You can stop the docker container with:
```zsh frame="none"
docker stop llm-container
```
You can remove the container with:
```zsh frame="none"
docker rm llm-container
```
Start it back up anytime with:
```zsh frame="none"
docker start llm-container
```
Reattach to the container with:
```zsh frame="none"
docker exec -it --user YOUR_USERNAME llm-container /bin/zsh
```
</section>

View File

@ -1,5 +1,5 @@
--- ---
// Path: src/layouts/LandingLayout.astro // Path: 00-Lesson-Site/frontend/src/layouts/LandingLayout.astro
import { ClientRouter } from "astro:transitions"; import { ClientRouter } from "astro:transitions";

View File

@ -1,5 +1,5 @@
--- ---
// Path: src/layouts/LessonLayout.astro // Path: 00-Lesson-Site/frontend/src/layouts/LessonLayout.astro
import { ClientRouter } from "astro:transitions"; import { ClientRouter } from "astro:transitions";

View File

@ -1,12 +1,13 @@
--- ---
// Path: src/pages/changelog.astro // Path: 00-Lesson-Site/frontend/src/pages/changelog.astro
import Layout from "../layouts/LessonLayout.astro"; import Layout from "../layouts/LessonLayout.astro";
--- ---
<Layout> <Layout>
<h1>Welcome to the Lesson Site</h1> <h1>THIS PAGE IS EMPTY</h1>
<p>This is the homepage of the lesson site built with Astro.</p> <p>But you can find the lessons here!</p>
<ul>
<a href="/lessons/01-intro">Lesson 1</a> <li><a href="lessons/01-intro">Lesson 01!</a></li>
</ul>
</Layout> </Layout>

View File

@ -1,12 +1,13 @@
--- ---
// Path: 00-Lesson-Site/src/pages/index.astro // Path: 00-Lesson-Site/frontend/src/pages/index.astro
import Layout from "../layouts/LessonLayout.astro"; import Layout from "../layouts/LessonLayout.astro";
--- ---
<Layout> <Layout>
<h1>Welcome to the Lesson Site</h1> <h1>THIS PAGE IS EMPTY</h1>
<p>This is the homepage of the lesson site built with Astro.</p> <p>But you can find the lessons here!</p>
<ul>
<a href="/lessons/01-intro">Lesson 1</a> <li><a href="lessons/01-intro">Lesson 01!</a></li>
</ul>
</Layout> </Layout>

View File

@ -1,12 +1,13 @@
--- ---
// Path: src/pages/lessons/index.astro // Path: 00-Lesson-Site/frontend/src/pages/lessons/index.astro
import Layout from "../../layouts/LessonLayout.astro"; import Layout from "../../layouts/LessonLayout.astro";
--- ---
<Layout> <Layout>
<h1>Welcome to the Lesson Site</h1> <h1>THIS PAGE IS EMPTY</h1>
<p>This is the homepage of the lesson site built with Astro.</p> <p>But you can find the lessons here!</p>
<ul>
<a href="/lessons/01-intro">Lesson 1</a> <li><a href="lessons/01-intro">Lesson 01!</a></li>
</ul>
</Layout> </Layout>

View File

@ -1,9 +1,10 @@
/* Path: src/pages/lessons/lessonPage.module.scss */ /* Path: frontend/src/pages/lessons/lessonPage.module.scss */
.content { .content {
width: 1000px; width: 1000px;
margin: 0 auto; margin: 0 auto;
padding: 20px; padding: 20px;
margin-bottom: 200px;
} }
html { html {

View File

@ -1,12 +1,13 @@
--- ---
// Path: src/pages/resources.astro // Path: 00-Lesson-Site/frontend/src/pages/resources.astro
import Layout from "../layouts/LessonLayout.astro"; import Layout from "../layouts/LessonLayout.astro";
--- ---
<Layout> <Layout>
<h1>Welcome to the Lesson Site</h1> <h1>THIS PAGE IS EMPTY</h1>
<p>This is the homepage of the lesson site built with Astro.</p> <p>But you can find the lessons here!</p>
<ul>
<a href="/lessons/01-intro">Lesson 1</a> <li><a href="lessons/01-intro">Lesson 01!</a></li>
</ul>
</Layout> </Layout>