Skip to content

Commit a64c843

Browse files
committed
feat: enhance pod-to-pod communication with network alias and project network support
1 parent 5456323 commit a64c843

4 files changed

Lines changed: 145 additions & 77 deletions

File tree

internal/server/docker/docker.go

Lines changed: 79 additions & 43 deletions
Original file line numberDiff line numberDiff line change
@@ -194,10 +194,6 @@ func (d *DockerService) RunContainer(ctx context.Context, opts RunContainerOptio
194194
//─────────────────────────────────────────────────────────────────────────
195195

196196
labels := map[string]string{
197-
// Enable Traefik for this container
198-
// Without this, Traefik ignores the container completely
199-
"traefik.enable": "true",
200-
201197
// Deeploy metadata for container identification
202198
"deeploy.pod.id": opts.PodID,
203199
}
@@ -209,44 +205,49 @@ func (d *DockerService) RunContainer(ctx context.Context, opts RunContainerOptio
209205
if d.isDevelopment {
210206
entrypoint = "web"
211207
}
208+
port := 8080
209+
if len(opts.Domains) > 0 {
210+
port = opts.Domains[0].Port
211+
}
212+
213+
if opts.EnablePublicAccess {
214+
// Enable Traefik only for public pods.
215+
labels["traefik.enable"] = "true"
212216

213-
// Create a router for each domain
214-
// Each domain gets its own router but shares the same service (load balancer)
215-
for i, domain := range opts.Domains {
216-
routerName := fmt.Sprintf("%s-%d", opts.PodID, i)
217+
// Create a router for each domain
218+
// Each domain gets its own router but shares the same service (load balancer)
219+
for i, domain := range opts.Domains {
220+
routerName := fmt.Sprintf("%s-%d", opts.PodID, i)
217221

218-
// Routing rule: Which domain goes to this container?
219-
// Host(`example.com`) matches requests with that exact Host header
220-
labels["traefik.http.routers."+routerName+".rule"] = fmt.Sprintf("Host(`%s`)", domain.Domain)
222+
// Routing rule: Which domain goes to this container?
223+
// Host(`example.com`) matches requests with that exact Host header
224+
labels["traefik.http.routers."+routerName+".rule"] = fmt.Sprintf("Host(`%s`)", domain.Domain)
221225

222-
// Service: Where to forward the traffic
223-
// @docker suffix is required because Traefik auto-appends it to Docker services
224-
labels["traefik.http.routers."+routerName+".service"] = opts.PodID + "@docker"
226+
// Service: Where to forward the traffic
227+
// @docker suffix is required because Traefik auto-appends it to Docker services
228+
labels["traefik.http.routers."+routerName+".service"] = opts.PodID + "@docker"
225229

226-
// Entrypoint: Which port to listen on (web=80, websecure=443)
227-
labels["traefik.http.routers."+routerName+".entrypoints"] = entrypoint
230+
// Entrypoint: Which port to listen on (web=80, websecure=443)
231+
labels["traefik.http.routers."+routerName+".entrypoints"] = entrypoint
228232

229-
// SSL/TLS: Only in production (not development)
230-
// certresolver=letsencrypt tells Traefik to automatically get a certificate
231-
// from Let's Encrypt using the HTTP challenge
232-
if !d.isDevelopment {
233-
labels["traefik.http.routers."+routerName+".tls.certresolver"] = "letsencrypt"
233+
// SSL/TLS: Only in production (not development)
234+
// certresolver=letsencrypt tells Traefik to automatically get a certificate
235+
// from Let's Encrypt using the HTTP challenge
236+
if !d.isDevelopment {
237+
labels["traefik.http.routers."+routerName+".tls.certresolver"] = "letsencrypt"
238+
}
234239
}
235-
}
236240

237-
// Service configuration: One service for all routers
238-
// All domains for this pod route to the same container/port
239-
port := 8080
240-
if len(opts.Domains) > 0 {
241-
port = opts.Domains[0].Port
242-
}
243-
labels["traefik.http.services."+opts.PodID+".loadbalancer.server.port"] = fmt.Sprintf("%d", port)
241+
// Service configuration: One service for all routers
242+
// All domains for this pod route to the same container/port
243+
labels["traefik.http.services."+opts.PodID+".loadbalancer.server.port"] = fmt.Sprintf("%d", port)
244244

245-
// Health checks: Traefik pings each container every 2 seconds
246-
// Only containers that respond get traffic. This ensures zero-downtime
247-
// during redeploys - new container only gets traffic once it's ready.
248-
labels["traefik.http.services."+opts.PodID+".loadbalancer.healthcheck.path"] = "/"
249-
labels["traefik.http.services."+opts.PodID+".loadbalancer.healthcheck.interval"] = "2s"
245+
// Health checks: Traefik pings each container every 2 seconds
246+
// Only containers that respond get traffic. This ensures zero-downtime
247+
// during redeploys - new container only gets traffic once it's ready.
248+
labels["traefik.http.services."+opts.PodID+".loadbalancer.healthcheck.path"] = "/"
249+
labels["traefik.http.services."+opts.PodID+".loadbalancer.healthcheck.interval"] = "2s"
250+
}
250251

251252
// Container config
252253
config := &container.Config{
@@ -264,12 +265,25 @@ func (d *DockerService) RunContainer(ctx context.Context, opts RunContainerOptio
264265
RestartPolicy: container.RestartPolicy{Name: "unless-stopped"},
265266
}
266267

267-
// Network config - join the deeploy network so Traefik can reach this container
268-
networkConfig := &network.NetworkingConfig{
269-
EndpointsConfig: map[string]*network.EndpointSettings{
270-
NetworkName: {},
268+
if opts.ProjectNetwork == "" {
269+
return "", fmt.Errorf("project network is required")
270+
}
271+
if err := d.EnsureNetwork(ctx, opts.ProjectNetwork); err != nil {
272+
return "", fmt.Errorf("failed to ensure project network: %w", err)
273+
}
274+
275+
endpoints := map[string]*network.EndpointSettings{
276+
opts.ProjectNetwork: {
277+
Aliases: opts.NetworkAliases,
271278
},
272279
}
280+
if opts.EnablePublicAccess {
281+
endpoints[NetworkName] = &network.EndpointSettings{}
282+
}
283+
284+
networkConfig := &network.NetworkingConfig{
285+
EndpointsConfig: endpoints,
286+
}
273287

274288
// Create container
275289
resp, err := d.client.ContainerCreate(ctx, config, hostConfig, networkConfig, nil, opts.ContainerName)
@@ -286,6 +300,25 @@ func (d *DockerService) RunContainer(ctx context.Context, opts RunContainerOptio
286300
return resp.ID, nil
287301
}
288302

303+
// EnsureNetwork creates a Docker network if it does not already exist.
304+
func (d *DockerService) EnsureNetwork(ctx context.Context, name string) error {
305+
_, err := d.client.NetworkInspect(ctx, name, network.InspectOptions{})
306+
if err == nil {
307+
return nil
308+
}
309+
310+
_, err = d.client.NetworkCreate(ctx, name, network.CreateOptions{})
311+
if err != nil {
312+
// Another deploy may have created the network in the meantime.
313+
if _, inspectErr := d.client.NetworkInspect(ctx, name, network.InspectOptions{}); inspectErr == nil {
314+
return nil
315+
}
316+
return err
317+
}
318+
319+
return nil
320+
}
321+
289322
// StopContainer stops a running container.
290323
func (d *DockerService) StopContainer(ctx context.Context, containerID string) error {
291324
timeout := 30
@@ -409,11 +442,14 @@ type DomainConfig struct {
409442

410443
// RunContainerOptions holds options for running a container.
411444
type RunContainerOptions struct {
412-
ImageName string
413-
ContainerName string
414-
PodID string
415-
Domains []DomainConfig
416-
EnvVars map[string]string
445+
ImageName string
446+
ContainerName string
447+
PodID string
448+
Domains []DomainConfig
449+
EnvVars map[string]string
450+
ProjectNetwork string
451+
EnablePublicAccess bool
452+
NetworkAliases []string
417453
}
418454

419455
func mapToEnvSlice(m map[string]string) []string {

0 commit comments

Comments (0)