Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
42 changes: 24 additions & 18 deletions backend/controllers/github.go
Original file line number Diff line number Diff line change
Expand Up @@ -66,32 +66,38 @@ func (d DiggerController) GithubAppWebHook(c *gin.Context) {
"installationId", *event.Installation.ID,
)

if *event.Action == "deleted" {
err := handleInstallationDeletedEvent(event, appId64)
if err != nil {
slog.Error("Failed to handle installation deleted event", "error", err)
c.String(http.StatusAccepted, "Failed to handle webhook event.")
return
}
} else if *event.Action == "created" || *event.Action == "unsuspended" || *event.Action == "new_permissions_accepted" {
if err := handleInstallationUpsertEvent(c.Request.Context(), gh, event, appId64); err != nil {
slog.Error("Failed to handle installation upsert event", "error", err)
c.String(http.StatusAccepted, "Failed to handle webhook event.")
return
// Run in goroutine to avoid webhook timeouts for large installations
go func(ctx context.Context) {
defer logging.InheritRequestLogger(ctx)()
if *event.Action == "deleted" {
if err := handleInstallationDeletedEvent(event, appId64); err != nil {
slog.Error("Failed to handle installation deleted event", "error", err)
}
} else if *event.Action == "created" || *event.Action == "unsuspended" || *event.Action == "new_permissions_accepted" {
if err := handleInstallationUpsertEvent(c.Request.Context(), gh, event, appId64); err != nil {
slog.Error("Failed to handle installation upsert event", "error", err)
c.String(http.StatusAccepted, "Failed to handle webhook event.")
return
}
}
}
}(c.Request.Context())

case *github.InstallationRepositoriesEvent:
slog.Info("Processing InstallationRepositoriesEvent",
"action", event.GetAction(),
"installationId", event.Installation.GetID(),
"added", len(event.RepositoriesAdded),
"removed", len(event.RepositoriesRemoved),
)
if err := handleInstallationRepositoriesEvent(c.Request.Context(), gh, event, appId64); err != nil {
slog.Error("Failed to handle installation repositories event", "error", err)
c.String(http.StatusAccepted, "Failed to handle webhook event.")
return
}

// Run in goroutine to avoid webhook timeouts for large installations
go func(ctx context.Context) {
defer logging.InheritRequestLogger(ctx)()
// Use background context so work continues after HTTP response
if err := handleInstallationRepositoriesEvent(context.Background(), gh, event, appId64); err != nil {
slog.Error("Failed to handle installation repositories event", "error", err)
}
}(c.Request.Context())
case *github.PushEvent:
slog.Info("Processing PushEvent",
"repo", *event.Repo.FullName,
Expand Down
4 changes: 4 additions & 0 deletions backend/controllers/github_callback.go
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,10 @@ func (d DiggerController) GithubAppCallbackPage(c *gin.Context) {
code := ""
if codeExists && len(codeParams) > 0 && len(codeParams[0]) > 0 {
code = codeParams[0]
} else {
slog.Debug("No code parameter found, probably a setup update, going to return success since we are relying on webhooks now")
c.HTML(http.StatusOK, "github_success.tmpl", gin.H{})
return
}

appId := c.Request.URL.Query().Get("state")
Expand Down
2 changes: 1 addition & 1 deletion backend/controllers/github_comment.go
Original file line number Diff line number Diff line change
Expand Up @@ -162,7 +162,7 @@ func handleIssueCommentEvent(gh utils.GithubClientProvider, payload *github.Issu
}

diggerYmlStr, ghService, config, projectsGraph, prSourceBranch, commitSha, changedFiles, err := getDiggerConfigForPR(gh, orgId, prLabelsStr, installationId, repoFullName, repoOwner, repoName, cloneURL, issueNumber)
if err != nil {
if err != nil {
slog.Error("Error getting Digger config for PR",
"issueNumber", issueNumber,
"repoFullName", repoFullName,
Expand Down
4 changes: 2 additions & 2 deletions backend/controllers/github_helpers.go
Original file line number Diff line number Diff line change
Expand Up @@ -830,7 +830,7 @@ func getDiggerConfigForPR(gh utils.GithubClientProvider, orgId uint, prLabels []
"branch", prBranch,
"error", err,
)
return "", nil, nil, nil, nil, nil, nil, fmt.Errorf("error loading digger.yml: %v", err)
return "", nil, nil, nil, nil, nil, nil, fmt.Errorf("error loading digger.yml: %w", err)
}

return diggerYmlStr, ghService, config, dependencyGraph, &prBranch, &prCommitSha, changedFiles, nil
Expand Down Expand Up @@ -893,7 +893,7 @@ func GetDiggerConfigForBranchOrSha(gh utils.GithubClientProvider, installationId
"branch", branch,
"error", err,
)
return "", nil, nil, nil, fmt.Errorf("error cloning and loading config %v", err)
return "", nil, nil, nil, fmt.Errorf("error cloning and loading config: %w", err)
}

projectCount := 0
Expand Down
24 changes: 17 additions & 7 deletions backend/controllers/github_pull_request.go
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,6 @@ import (
"runtime/debug"
"slices"
"strconv"
"strings"

"github.com/diggerhq/digger/backend/ci_backends"
config2 "github.com/diggerhq/digger/backend/config"
Expand Down Expand Up @@ -138,16 +137,27 @@ func handlePullRequestEvent(gh utils.GithubClientProvider, payload *github.PullR
return nil
}

// Silently skip repos without digger.yml - this is expected for org-wide installations
if strings.Contains(err.Error(), "could not find digger.yml") ||
strings.Contains(err.Error(), "could not find digger.yaml") {
slog.Info("No Digger config found, skipping repo",
// Check if the error is due to missing digger config and the app is installed for all repos
if errors.Is(err, digger_config.ErrDiggerConfigNotFound) {
slog.Debug("Digger config not found, checking if app is installed for all repos",
"prNumber", prNumber,
"repoFullName", repoFullName,
)
return nil
isAllRepos, checkErr := utils.IsAllReposInstallation(appId, installationId)
if checkErr != nil {
slog.Warn("Failed to check if installation is for all repos",
"error", checkErr,
)
} else if isAllRepos {
slog.Info("Digger config not found but GitHub App is installed for all repos, skipping error comment",
"prNumber", prNumber,
"repoFullName", repoFullName,
)
return nil
}
}


slog.Error("Error getting Digger config for PR",
"prNumber", prNumber,
"repoFullName", repoFullName,
Expand Down Expand Up @@ -515,7 +525,7 @@ func handlePullRequestEvent(gh utils.GithubClientProvider, payload *github.PullR
commentReporterManager.UpdateComment(fmt.Sprintf(":x: Could not retrieve created batch: %v", err))
return fmt.Errorf("error getting digger batch")
}

if config.CommentRenderMode == digger_config.CommentRenderModeGroupByModule {
slog.Info("Using GroupByModule render mode for comments", "prNumber", prNumber)

Expand Down
2 changes: 0 additions & 2 deletions backend/controllers/projects.go
Original file line number Diff line number Diff line change
Expand Up @@ -1026,7 +1026,6 @@ func (d DiggerController) SetJobStatusForProject(c *gin.Context) {
c.JSON(http.StatusInternalServerError, gin.H{"error": "Error getting refreshed batch"})
return
}
//err = UpdateCheckStatusForBatch(d.GithubClientProvider, refreshedBatch)
slog.Debug("Attempting to update GitHub Check Run for batch",
"batchId", batch.ID,
"checkRunId", refreshedBatch.CheckRunId,
Expand Down Expand Up @@ -1056,7 +1055,6 @@ func (d DiggerController) SetJobStatusForProject(c *gin.Context) {
c.JSON(http.StatusInternalServerError, gin.H{"error": "Error getting refreshed job"})
return
}
//err = UpdateCommitStatusForJob(d.GithubClientProvider, refreshedJob)
slog.Debug("Attempting to update GitHub Check Run for job",
"jobId", jobId,
"checkRunId", refreshedJob.CheckRunId,
Expand Down
19 changes: 13 additions & 6 deletions backend/controllers/projects_helpers.go
Original file line number Diff line number Diff line change
Expand Up @@ -210,9 +210,12 @@ func UpdateCheckRunForBatch(gh utils.GithubClientProvider, batch *models.DiggerB
return fmt.Errorf("error generating realtime comment message: %v", err)
}

summary, err := GenerateChecksSummaryForBatch(batch)
if err != nil {
slog.Warn("Error generating checks summary for batch", "batchId", batch.ID, "error", err)
var summary = ""
if batch.Status == orchestrator_scheduler.BatchJobSucceeded || batch.Status == orchestrator_scheduler.BatchJobFailed {
summary, err = GenerateChecksSummaryForBatch(batch)
if err != nil {
slog.Warn("Error generating checks summary for batch", "batchId", batch.ID, "error", err)
}
}

if isPlanBatch {
Expand Down Expand Up @@ -397,11 +400,15 @@ func UpdateCheckRunForJob(gh utils.GithubClientProvider, job *models.DiggerJob)
"```\n"


summary, err := GenerateChecksSummaryForJob(job)
if err != nil {
slog.Warn("Error generating checks summary for batch", "batchId", batch.ID, "error", err)
var summary = ""
if job.Status == orchestrator_scheduler.DiggerJobSucceeded || job.Status == orchestrator_scheduler.DiggerJobFailed {
summary, err = GenerateChecksSummaryForJob(job)
if err != nil {
slog.Warn("Error generating checks summary for batch", "batchId", batch.ID, "error", err)
}
}


slog.Debug("Updating PR status for job", "jobId", job.DiggerJobID, "status", status, "conclusion", conclusion)
if isPlan {
title := fmt.Sprintf("%v to create %v to update %v to delete", job.DiggerJobSummary.ResourcesCreated, job.DiggerJobSummary.ResourcesUpdated, job.DiggerJobSummary.ResourcesDeleted)
Expand Down
8 changes: 4 additions & 4 deletions backend/models/scheduler.go
Original file line number Diff line number Diff line change
Expand Up @@ -259,9 +259,9 @@ func GetCheckRunConclusionForJob(job *DiggerJob) (string, error) {
return "failure", nil
}
slog.Error("Unknown job status in GetCheckRunConclusionForJob - this will cause GitHub API 422 error",
"jobId", job.DiggerJobID,
"jobStatus", job.Status,
"jobStatusInt", int(job.Status),
"validStatuses", []string{"created", "triggered", "started", "queued_for_run", "succeeded", "failed"})
"jobId", job.DiggerJobID,
"jobStatus", job.Status,
"jobStatusInt", int(job.Status),
"validStatuses", []string{"created", "triggered", "started", "queued_for_run", "succeeded", "failed"})
return "", fmt.Errorf("unknown job status: %v", job.Status)
}
53 changes: 53 additions & 0 deletions backend/utils/github.go
Original file line number Diff line number Diff line change
Expand Up @@ -408,6 +408,59 @@ func GetGithubHostname() string {
return githubHostname
}

// IsAllReposInstallation checks if the GitHub App installation is configured to access all
// repositories (repository_selection == "all"), as opposed to a selected subset.
//
// Note: this requires app-level JWT authentication (signed with the app's private key),
// not an installation access token. The private key is read from
// GITHUB_APP_PRIVATE_KEY_BASE64 (preferred) or GITHUB_APP_PRIVATE_KEY.
//
// Returns true when the installation covers all repositories; returns an error if the
// private key is missing/invalid or the GitHub API call fails.
func IsAllReposInstallation(appId int64, installationId int64) (bool, error) {
	// Resolve the app private key, preferring the base64-encoded variant.
	githubAppPrivateKey := ""
	githubAppPrivateKeyB64 := os.Getenv("GITHUB_APP_PRIVATE_KEY_BASE64")
	if githubAppPrivateKeyB64 != "" {
		decodedBytes, err := base64.StdEncoding.DecodeString(githubAppPrivateKeyB64)
		if err != nil {
			slog.Error("Failed to decode GITHUB_APP_PRIVATE_KEY_BASE64", "error", err)
			// Wrap with %w so callers can inspect the underlying decode error.
			return false, fmt.Errorf("error decoding private key: %w", err)
		}
		githubAppPrivateKey = string(decodedBytes)
	} else {
		githubAppPrivateKey = os.Getenv("GITHUB_APP_PRIVATE_KEY")
		if githubAppPrivateKey == "" {
			return false, fmt.Errorf("missing GitHub app private key")
		}
	}

	// Use app-level transport (JWT) instead of an installation token: the
	// "get installation" endpoint is only available to the app itself.
	atr, err := ghinstallation.NewAppsTransport(net.DefaultTransport, appId, []byte(githubAppPrivateKey))
	if err != nil {
		slog.Error("Failed to create GitHub app transport",
			"appId", appId,
			"error", err,
		)
		return false, fmt.Errorf("error creating app transport: %w", err)
	}

	client := github.NewClient(&net.Client{Transport: atr})

	installation, _, err := client.Apps.GetInstallation(context.Background(), installationId)
	if err != nil {
		slog.Error("Failed to get GitHub installation details",
			"installationId", installationId,
			"error", err,
		)
		return false, fmt.Errorf("error getting installation details: %w", err)
	}

	// "all" means the app was installed for every repo in the account;
	// anything else (e.g. "selected") is a subset.
	repositorySelection := installation.GetRepositorySelection()
	isAllRepos := repositorySelection == "all"

	slog.Debug("Checked installation repository selection",
		"installationId", installationId,
		"repositorySelection", repositorySelection,
		"isAllRepos", isAllRepos,
	)

	return isAllRepos, nil
}

func GetWorkflowIdAndUrlFromDiggerJobId(client *github.Client, repoOwner string, repoName string, diggerJobID string) (int64, string, error) {
slog.Debug("Looking for workflow for job",
"diggerJobId", diggerJobID,
Expand Down
4 changes: 2 additions & 2 deletions docs/ce/local-development/backend.mdx
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
---
title: Orchestrator local setup
title: Backend (orchestrator) local setup
---

The backend serves orchestration APIs, GitHub app endpoints, and internal APIs the UI relies on.
Expand Down Expand Up @@ -51,7 +51,7 @@ The backend serves orchestration APIs, GitHub app endpoints, and internal APIs t

## GitHub app integration

- For a quick install link, set `ORCHESTRATOR_GITHUB_APP_URL` in `ui/.env.local` to your app's install URL (`https://github.com/apps/<app>/installations/new`).
- For a quick install link, set `ORCHESTRATOR_GITHUB_APP_URL` in `ui/.env.local` to your app's install URL (`https://github.com/apps/<app>/installations/new`).
- To create a new app via the backend, open `http://localhost:3000/github/setup` (requires `HOSTNAME` set to a reachable URL for callbacks).

## Troubleshooting
Expand Down
5 changes: 2 additions & 3 deletions docs/docs.json
Original file line number Diff line number Diff line change
Expand Up @@ -22,8 +22,7 @@
"group": "Introduction",
"pages": [
"readme/introduction",
"readme/howitworks",
"readme/feedback"
"readme/howitworks"
]
},
{
Expand Down Expand Up @@ -223,4 +222,4 @@
"linkedin": "https://www.linkedin.com/company/diggerhq/"
}
}
}
}
15 changes: 0 additions & 15 deletions docs/readme/feedback.mdx

This file was deleted.

44 changes: 32 additions & 12 deletions docs/readme/howitworks.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -2,19 +2,39 @@
title: "How it works"
---

Digger has 2 main components:
Before OpenTaco, the project was called Digger and consisted purely of a PR automation engine. This engine was responsible for
integrating with GitHub Actions and running CI jobs with Terraform in response to certain events. We call this the PR automation or "orchestrator" service.

- A CLI agent that runs in your CI and interacts with Terraform CLI
- An orchestrator backend that responds to events from GitHub and triggers CI jobs
The same engine has now become part of the OpenTaco suite and continues to be improved.

When a PR is opened, Digger starts a CI job that runs `terraform plan` and posts plan output as comment. You can then comment "digger apply" to run `terraform apply`. Digger can also be configured to run apply only after the PR has been merged; to check plan output against OPA policies; to run drift detection on schedule; and so on.
The OpenTaco architecture consists of these components:

The orchestrator backend does not have access to your cloud account, or terraform states, or plan output, or tfvars, or any other sensitive data. It just triggers CI jobs; your sensitive data never leaves the high-trust environment of your CI. For this reason, there is little reason to self-host the backend of Digger (although you still can). Much easier to use the managed cloud version of the orchestrator.
- Statesman: Offers state management and remote-run capabilities
- Drift: All drift detection capabilities including detection and remediation
- Token: A token validation service (internal)
- Orchestrator (formerly digger): Responsible for starting and stopping CI Jobs, currently responsible for all of PR automation.
- UI: the UI layer, also acts as a gateway proxy to the other services

<Note>
Digger can also run as a standalone GitHub Action without a backend. In this case:
- comments and status checks will be updated with a delay
- all applies will run sequentially in one job without concurrency
- clashing applies from other jobs will fail as they cannot be queued
- buckets / tables for PR-level locks need to be configured manually in your cloud account
</Note>
```mermaid
flowchart LR
subgraph Clients
dev[Developer / User]
end

subgraph OpenTaco
ui[UI<br/>• Gateway proxy<br/>• Token validation]

subgraph Backend Services
orch[Orchestrator<br/>• Start/stop CI jobs<br/>• PR automation]
states[Statesman<br/>• State mgmt<br/>• Remote runs]
drift[Drift<br/>• Drift detection<br/>• Remediation]
end
end

dev -->|HTTP / Browser| ui

%% Forward validated requests internally
ui --> orch
ui --> states
ui --> drift
```
Loading
Loading