Ansible role that deploys the unified LLM stack (Ollama, OpenWebUI, and LiteLLM) on my runner machine.

---
# Runner Services - Unified LLM Stack Deployment
# Single deployment with Ollama, OpenWebUI, and LiteLLM services

- name: Fail if LLM stack is disabled
  fail:
    msg: "LLM stack is disabled. Set llm_stack_enabled: true to enable."
  when: not llm_stack_enabled
  tags: always

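# A minimal sketch of the variables this file consumes, as they might appear
# in defaults/main.yml (the values below are illustrative assumptions, not
# the role's actual defaults):
#
#   llm_stack_enabled: false
#   llm_stack_config_dir: /opt/llm-stack
#   llm_stack_ollama_data_dir: "{{ llm_stack_config_dir }}/ollama"
#   llm_stack_openwebui_data_dir: "{{ llm_stack_config_dir }}/open-webui"
#   llm_stack_litellm_data_dir: "{{ llm_stack_config_dir }}/litellm"
#   llm_stack_ollama_port: 11434
#   llm_stack_openwebui_port: 3000
#   llm_stack_litellm_port: 4000
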
- name: Create LLM stack configuration directory structure
  file:
    path: "{{ item }}"
    state: directory
    owner: "{{ runner_user }}"
    group: "{{ runner_group }}"
    mode: '0775'
  loop:
    - "{{ llm_stack_config_dir }}"
    - "{{ llm_stack_ollama_data_dir }}"
    - "{{ llm_stack_openwebui_data_dir }}"
    - "{{ llm_stack_litellm_data_dir }}"

- name: Set setgid bit on the LLM stack config directory for group inheritance
  file:
    path: "{{ llm_stack_config_dir }}"
    state: directory
    mode: "g+s"

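# g+s is the setgid bit, not the sticky bit: files created under the directory
# inherit its group rather than the creating user's primary group. The task
# above is equivalent to running:
#
#   chmod g+s /opt/llm-stack    # path assumes the defaults sketched above
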
- name: Create unified LLM stack Docker Compose file
  template:
    src: llm-stack-compose.yml.j2
    dest: "{{ llm_stack_config_dir }}/docker-compose.yml"
    owner: "{{ runner_user }}"
    group: "{{ runner_group }}"
    mode: '0664'
  notify: restart llm-stack

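# The compose template lives in templates/ and is not shown here. A minimal
# sketch of what llm-stack-compose.yml.j2 might render to (the image tags and
# service names are assumptions; only the port variables, data directories,
# and the ollama:11434 wiring come from this file):
#
#   services:
#     ollama:
#       image: ollama/ollama
#       ports: ["{{ llm_stack_ollama_port }}:11434"]
#       volumes: ["{{ llm_stack_ollama_data_dir }}:/root/.ollama"]
#     openwebui:
#       image: ghcr.io/open-webui/open-webui:main
#       environment: { OLLAMA_BASE_URL: "http://ollama:11434" }
#       ports: ["{{ llm_stack_openwebui_port }}:8080"]
#       volumes: ["{{ llm_stack_openwebui_data_dir }}:/app/backend/data"]
#     litellm:
#       image: ghcr.io/berriai/litellm:main-latest
#       ports: ["{{ llm_stack_litellm_port }}:4000"]
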
- name: Create unified LLM stack environment file
  template:
    src: llm-stack.env.j2
    dest: "{{ llm_stack_config_dir }}/.env"
    owner: "{{ runner_user }}"
    group: "{{ runner_group }}"
    mode: '0664'
  notify: restart llm-stack

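# llm-stack.env.j2 is likewise not shown; which keys it renders depends on the
# compose template. An illustrative sketch (both keys are assumptions):
#
#   WEBUI_NAME={{ llm_stack_openwebui_name }}
#   OLLAMA_DEFAULT_MODEL={{ llm_stack_ollama_default_model }}
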
- name: Start unified LLM stack service
  community.docker.docker_compose_v2:
    project_src: "{{ llm_stack_config_dir }}"
    state: present
  register: llm_stack_start_result
  check_mode: no

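# Both template tasks notify "restart llm-stack"; the handler lives in
# handlers/main.yml and is not shown. A minimal sketch, assuming it also uses
# docker_compose_v2:
#
#   - name: restart llm-stack
#     community.docker.docker_compose_v2:
#       project_src: "{{ llm_stack_config_dir }}"
#       state: restarted
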
- name: Wait for Ollama to be healthy
  uri:
    url: "http://localhost:{{ llm_stack_ollama_port }}/api/tags"
    method: GET
    status_code: 200
  register: ollama_health
  until: ollama_health.status == 200
  retries: 30
  delay: 10
  when: llm_stack_start_result is changed
  check_mode: no

- name: Wait for OpenWebUI to be healthy
  uri:
    url: "http://localhost:{{ llm_stack_openwebui_port }}/api/health"
    method: GET
    status_code: 200
    timeout: 30
  register: openwebui_health
  until: openwebui_health.status == 200
  retries: 60
  delay: 15
  when: llm_stack_start_result is changed
  check_mode: no

- name: Wait for LiteLLM to be healthy
  uri:
    url: "http://localhost:{{ llm_stack_litellm_port }}/"
    method: GET
    status_code: 200
  register: litellm_health
  until: litellm_health.status == 200
  retries: 30
  delay: 10
  when: llm_stack_start_result is changed
  check_mode: no

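# The same endpoints can be probed by hand when a deploy misbehaves (the
# literal ports assume the illustrative defaults sketched near the top):
#
#   curl -fsS http://localhost:11434/api/tags
#   curl -fsS http://localhost:3000/api/health
#   curl -fsS http://localhost:4000/
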
- name: Display unified LLM stack deployment summary
  debug:
    msg: |
      Unified LLM Stack Deployment:
      - Status: {{ 'Started' if llm_stack_start_result is changed else 'Already running' }}

      Ollama (Local LLM Server):
      - API: http://{{ ansible_default_ipv4.address }}:{{ llm_stack_ollama_port }}
      - Configuration: {{ llm_stack_config_dir }}
      - Data Storage: {{ llm_stack_ollama_data_dir }}
      - GPU Support: {{ 'Enabled' if gpu_enabled else 'Disabled' }}
      - Default Model: {{ llm_stack_ollama_default_model }}

      OpenWebUI (Web Interface):
      - Web UI: http://{{ ansible_default_ipv4.address }}:{{ llm_stack_openwebui_port }}
      - Connected to Ollama: ollama:11434
      - Name: {{ llm_stack_openwebui_name }}

      LiteLLM (Unified Proxy):
      - API Endpoint: http://{{ ansible_default_ipv4.address }}:{{ llm_stack_litellm_port }}
      - Connected to Ollama: ollama:11434
      - Available Models: {{ llm_stack_litellm_model_list }}

      Management Commands:
      - cd {{ llm_stack_config_dir }} && docker compose logs -f
      - cd {{ llm_stack_config_dir }} && docker compose restart
      - cd {{ llm_stack_config_dir }} && docker compose ps
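
# Example play applying this role (the play name, host group, and role name
# are assumptions about the surrounding repo layout):
#
#   - name: Deploy runner services
#     hosts: runner
#     become: true
#     vars:
#       llm_stack_enabled: true
#     roles:
#       - runner-services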