Skip to content

Commit 261d891

Browse files
Teo Mrnjavac (teo)
authored and committed
Reduce logging verbosity
1 parent 1a9334e commit 261d891

11 files changed

Lines changed: 335 additions & 327 deletions

File tree

core/controlcommands/commandqueue.go

Lines changed: 26 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -50,8 +50,8 @@ type empty struct{}
5050
type CommandQueue struct {
5151
sync.Mutex
5252

53-
q chan queueEntry
54-
servent *Servent
53+
q chan queueEntry
54+
servent *Servent
5555
}
5656

5757
func NewCommandQueue(s *Servent) *CommandQueue {
@@ -70,13 +70,13 @@ func (m *CommandQueue) Enqueue(cmd MesosCommand, callback chan<- MesosCommandRes
7070
default: // Buffer full!
7171
err := errors.New("the queue for MESSAGE commands is full")
7272
log.WithField("error", err.Error()).
73-
WithField("queueSize", QUEUE_SIZE).
74-
Error("cannot enqueue control command")
73+
WithField("queueSize", QUEUE_SIZE).
74+
Error("cannot enqueue control command")
7575
return err
7676
}
7777
}
7878

79-
func (m* CommandQueue) Start() {
79+
func (m *CommandQueue) Start() {
8080
m.Lock()
8181
m.q = make(chan queueEntry, QUEUE_SIZE)
8282
m.Unlock()
@@ -86,7 +86,7 @@ func (m* CommandQueue) Start() {
8686
select {
8787
case entry, more := <-m.q:
8888
m.Lock()
89-
if !more { // if the channel is closed, we bail
89+
if !more { // if the channel is closed, we bail
9090
return
9191
}
9292
response, err := m.commit(entry.cmd)
@@ -116,7 +116,7 @@ func (m *CommandQueue) commit(command MesosCommand) (response MesosCommandRespon
116116
}
117117
defer utils.TimeTrack(time.Now(), fmt.Sprintf("cmdq.commit %s to %d targets", command.GetName(), len(command.targets())), log.WithPrefix("cmdq"))
118118

119-
type responseSemaphore struct{
119+
type responseSemaphore struct {
120120
receiver MesosCommandTarget
121121
response MesosCommandResponse
122122
err error
@@ -129,19 +129,19 @@ func (m *CommandQueue) commit(command MesosCommand) (response MesosCommandRespon
129129
responses := make(map[MesosCommandTarget]MesosCommandResponse)
130130

131131
log.WithFields(logrus.Fields{
132-
"name": command.GetName(),
133-
"id": command.GetId(),
134-
}).
132+
"name": command.GetName(),
133+
"id": command.GetId(),
134+
}).
135135
Debug("ready to commit MesosCommand")
136136

137137
for _, rec := range command.targets() {
138138
go func(receiver MesosCommandTarget) {
139139
log.WithFields(logrus.Fields{
140-
"agentId": receiver.AgentId,
141-
"executorId": receiver.ExecutorId,
142-
"name": command.GetName(),
143-
}).
144-
Debug("sending MesosCommand to target")
140+
"agentId": receiver.AgentId,
141+
"executorId": receiver.ExecutorId,
142+
"name": command.GetName(),
143+
}).
144+
Trace("sending MesosCommand to target")
145145
singleCommand := command.MakeSingleTarget(receiver)
146146
res, err := m.servent.RunCommand(singleCommand, receiver)
147147
if err != nil {
@@ -150,36 +150,36 @@ func (m *CommandQueue) commit(command MesosCommand) (response MesosCommandRespon
150150
semaphore <- responseSemaphore{
151151
receiver: receiver,
152152
response: res,
153-
err: err,
153+
err: err,
154154
}
155155
return
156156
}
157157

158158
if res.Err() != nil {
159159
log.WithFields(logrus.Fields{
160-
"commandName": res.GetCommandName(),
161-
"error": res.Err().Error(),
162-
}).
160+
"commandName": res.GetCommandName(),
161+
"error": res.Err().Error(),
162+
}).
163163
Trace("received MesosCommandResponse")
164164
} else {
165165
log.WithFields(logrus.Fields{
166-
"commandName": res.GetCommandName(),
167-
}).
166+
"commandName": res.GetCommandName(),
167+
}).
168168
Trace("received MesosCommandResponse")
169169
}
170170

171171
semaphore <- responseSemaphore{
172-
receiver: receiver,
173-
response: res,
174-
}
172+
receiver: receiver,
173+
response: res,
174+
}
175175
}(rec)
176176
}
177177
// Wait for goroutines to finish
178178
for i := 0; i < len(command.targets()); i++ {
179-
respSemaphore := <- semaphore
179+
respSemaphore := <-semaphore
180180
responses[respSemaphore.receiver] = respSemaphore.response
181181
if respSemaphore.err != nil {
182-
sendErrorList = append(sendErrorList, respSemaphore.err)
182+
sendErrorList = append(sendErrorList, respSemaphore.err)
183183
}
184184
}
185185
close(semaphore)

core/controlcommands/mesoscommandservent.go

Lines changed: 30 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -46,29 +46,29 @@ type Call struct {
4646

4747
func NewCall(cmd MesosCommand) *Call {
4848
return &Call{
49-
Request: cmd,
49+
Request: cmd,
5050
Response: nil,
51-
Done: make(chan empty),
52-
Error: nil,
51+
Done: make(chan empty),
52+
Error: nil,
5353
}
5454
}
5555

5656
type CallId struct {
57-
Id xid.ID
57+
Id xid.ID
5858
Target MesosCommandTarget
5959
}
6060

6161
type Servent struct {
62-
mu sync.Mutex
62+
mu sync.Mutex
6363
pending map[CallId]*Call
6464

6565
SendFunc SendCommandFunc
6666
}
6767

6868
func NewServent(commandFunc SendCommandFunc) *Servent {
6969
return &Servent{
70-
SendFunc: commandFunc,
71-
pending: make(map[CallId]*Call),
70+
SendFunc: commandFunc,
71+
pending: make(map[CallId]*Call),
7272
}
7373
}
7474

@@ -82,43 +82,43 @@ func (s *Servent) RunCommand(cmd MesosCommand, receiver MesosCommandTarget) (Mes
8282
call := NewCall(cmd)
8383

8484
callId := CallId{
85-
Id: cmdId,
85+
Id: cmdId,
8686
Target: receiver,
8787
}
8888

89-
log.Debug("servent mutex locking")
89+
log.Trace("servent mutex locking")
9090
s.mu.Lock()
91-
log.Debug("servent mutex locked")
91+
log.Trace("servent mutex locked")
9292

9393
// We append the new call to the pending map, and send the request
9494
s.pending[callId] = call
9595

9696
s.mu.Unlock()
97-
log.Debug("servent mutex unlocked")
97+
log.Trace("servent mutex unlocked")
9898

9999
log.WithFields(logrus.Fields{
100-
"name": cmd.GetName(),
101-
"id": cmd.GetId(),
102-
"agentId": receiver.AgentId,
103-
"executorId": receiver.ExecutorId,
104-
}).
105-
Debug("calling scheduler SendFunc")
100+
"name": cmd.GetName(),
101+
"id": cmd.GetId(),
102+
"agentId": receiver.AgentId,
103+
"executorId": receiver.ExecutorId,
104+
}).
105+
Trace("calling scheduler SendFunc")
106106

107107
err := s.SendFunc(cmd, receiver)
108108
if err != nil {
109-
log.Debug("servent mutex locking")
109+
log.Trace("servent mutex locking")
110110
s.mu.Lock()
111-
log.Debug("servent mutex locked")
111+
log.Trace("servent mutex locked")
112112

113113
delete(s.pending, callId)
114114

115115
s.mu.Unlock()
116-
log.WithError(err).Debug("servent mutex unlocked")
116+
log.WithError(err).Trace("servent mutex unlocked")
117117

118118
return nil, err
119119
}
120120

121-
log.WithField("timeout", cmd.GetResponseTimeout()).Debug("blocking until response or timeout")
121+
log.WithField("timeout", cmd.GetResponseTimeout()).Trace("blocking until response or timeout")
122122
// Neat, now we block until done||timeout
123123
select {
124124
case <-call.Done:
@@ -127,14 +127,14 @@ func (s *Servent) RunCommand(cmd MesosCommand, receiver MesosCommandTarget) (Mes
127127
case <-time.After(cmd.GetResponseTimeout()):
128128
call.Error = fmt.Errorf("MesosCommand %s timed out for task %s", cmd.GetName(), receiver.TaskId.Value)
129129

130-
log.Debug("servent mutex locking")
130+
log.Trace("servent mutex locking")
131131
s.mu.Lock()
132-
log.Debug("servent mutex locked")
132+
log.Trace("servent mutex locked")
133133

134134
delete(s.pending, callId)
135135

136136
s.mu.Unlock()
137-
log.Debug("servent mutex unlocked")
137+
log.Trace("servent mutex unlocked")
138138
}
139139

140140
if call.Error != nil {
@@ -145,7 +145,7 @@ func (s *Servent) RunCommand(cmd MesosCommand, receiver MesosCommandTarget) (Mes
145145

146146
func (s *Servent) ProcessResponse(res MesosCommandResponse, sender MesosCommandTarget) {
147147
callId := CallId{
148-
Id: res.GetCommandId(),
148+
Id: res.GetCommandId(),
149149
Target: sender,
150150
}
151151

@@ -157,14 +157,14 @@ func (s *Servent) ProcessResponse(res MesosCommandResponse, sender MesosCommandT
157157
if call == nil {
158158
log.WithFields(logrus.Fields{
159159
"commandName": res.GetCommandName(),
160-
"commandId": res.GetCommandId(),
161-
"agentId": sender.AgentId,
162-
"executorId": sender.ExecutorId,
160+
"commandId": res.GetCommandId(),
161+
"agentId": sender.AgentId,
162+
"executorId": sender.ExecutorId,
163163
}).
164-
Warning("no pending request found")
164+
Warning("no pending request found")
165165
return
166166
}
167167

168168
call.Response = res
169169
call.Done <- empty{}
170-
}
170+
}

core/environment/transition_deploy.go

Lines changed: 11 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -41,18 +41,18 @@ import (
4141
func NewDeployTransition(taskman *task.Manager, addRoles []string, removeRoles []string) Transition {
4242
return &DeployTransition{
4343
baseTransition: baseTransition{
44-
name: "DEPLOY",
44+
name: "DEPLOY",
4545
taskman: taskman,
4646
},
47-
addRoles: addRoles,
47+
addRoles: addRoles,
4848
removeRoles: removeRoles,
4949
}
5050
}
5151

5252
type DeployTransition struct {
5353
baseTransition
54-
addRoles []string
55-
removeRoles []string
54+
addRoles []string
55+
removeRoles []string
5656
}
5757

5858
func (t DeployTransition) do(env *Environment) (err error) {
@@ -159,7 +159,7 @@ func (t DeployTransition) do(env *Environment) (err error) {
159159
// We set all callRoles to ACTIVE right now, because there's no task activation for them.
160160
// This is the callRole equivalent of AcquireTasks, which only pushes updates to taskRoles.
161161
allHooks := wf.GetAllHooks()
162-
callHooks := allHooks.FilterCalls() // get the calls
162+
callHooks := allHooks.FilterCalls() // get the calls
163163
if len(callHooks) > 0 {
164164
for _, h := range callHooks {
165165
pr, ok := h.GetParentRole().(workflow.PublicUpdatable)
@@ -173,19 +173,19 @@ func (t DeployTransition) do(env *Environment) (err error) {
173173
deploymentTimeout := 90 * time.Second
174174
wfStatus := wf.GetStatus()
175175
if wfStatus != task.ACTIVE {
176-
WORKFLOW_ACTIVE_LOOP:
176+
log.Debug("waiting for workflow to become active")
177+
WORKFLOW_ACTIVE_LOOP:
177178
for {
178-
log.Debug("waiting for workflow to become active")
179179
select {
180180
case wfStatus = <-notifyStatus:
181181
log.WithField("status", wfStatus.String()).
182-
Debug("workflow status change")
182+
Debug("workflow status change")
183183
if wfStatus == task.ACTIVE {
184184
break WORKFLOW_ACTIVE_LOOP
185185
}
186186
continue
187187
case <-time.After(deploymentTimeout):
188-
err = errors.New(fmt.Sprintf("workflow deployment timed out. timeout: %s",deploymentTimeout.String()))
188+
err = errors.New(fmt.Sprintf("workflow deployment timed out. timeout: %s", deploymentTimeout.String()))
189189
break WORKFLOW_ACTIVE_LOOP
190190
// This is needed for when the workflow fails during the STAGING state(mesos status),mesos responds with the `REASON_COMMAND_EXECUTOR_FAILED`,
191191
// By listening to workflow state ERROR we can break the loop before reaching the timeout (1m30s), we can trigger the cleanup faster
@@ -195,7 +195,7 @@ func (t DeployTransition) do(env *Environment) (err error) {
195195
if wfState == task.ERROR {
196196
failedRoles := make([]string, 0)
197197
workflow.LeafWalk(wf, func(role workflow.Role) {
198-
if st := role.GetState(); st == task.ERROR {
198+
if st := role.GetState(); st == task.ERROR {
199199
log.WithField("state", st).
200200
WithField("role", role.GetPath()).
201201
WithField("environment", role.GetEnvironmentId().String()).
@@ -204,7 +204,7 @@ func (t DeployTransition) do(env *Environment) (err error) {
204204
}
205205
})
206206
log.WithField("state", wfState.String()).
207-
Debug("workflow state change")
207+
Debug("workflow state change")
208208
err = fmt.Errorf("workflow deployment failed, aborting and cleaning up [offending roles: %s]", strings.Join(failedRoles, ", "))
209209
break WORKFLOW_ACTIVE_LOOP
210210
}

core/task/constraint/attributes.go

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,7 @@ import (
2929
"strings"
3030

3131
"github.com/AliceO2Group/Control/common/utils"
32-
"github.com/mesos/mesos-go/api/v1/lib"
32+
mesos "github.com/mesos/mesos-go/api/v1/lib"
3333
)
3434

3535
type Attributes []mesos.Attribute
@@ -68,13 +68,12 @@ func (attrs Attributes) Satisfy(cts Constraints) (ok bool) {
6868
}
6969

7070
for _, constraint := range cts {
71-
log.WithField("constraint", constraint.String()).Trace("processing constraint")
7271
switch constraint.Operator {
7372
case Equals:
7473
var value string
7574
if value, ok = attrs.Get(constraint.Attribute); ok {
7675
if strings.Contains(value, ",") {
77-
values := strings.Split(value,",")
76+
values := strings.Split(value, ",")
7877
if utils.StringSliceContains(values, constraint.Value) {
7978
ok = true
8079
continue

0 commit comments

Comments (0)