
Commit ecd3098

🤖 Format .jl files
1 parent 886408e commit ecd3098

8 files changed

Lines changed: 42 additions & 42 deletions


old/problems/NN_CIFAR10.jl

Lines changed: 0 additions & 1 deletion
@@ -24,4 +24,3 @@ trained_model = train_knetNLPmodel!(
   all_data = false,
   verbose = false,
 )
-

old/testParam.jl

Lines changed: 0 additions & 1 deletion
@@ -64,4 +64,3 @@ println(B.γ1.value)
 
 
 C = R2ParameterSet{R}(eps(R), eps(R), 0.1, 0.01, 0.1, 1.9, zero(R), 0.9) # with issues
-

src/JSOSolver_SR2.jl

Lines changed: 13 additions & 13 deletions
@@ -45,19 +45,19 @@ struct R2ParameterSet{T<:AbstractFloat} <: AbstractParameterSet #TODO change it
     atol >= 0 || throw(DomainError("invalid atol, atol>=0"))
     rtol >= 0 || throw(DomainError("invalid rtol, rtol >=0"))
     0 < η1 <= η2 <= 1 || throw(DomainError("invalid: 0 < η1 <= η2 <= 1"))
-    0 <= β < 1 || throw(DomainError("invalid: β needs to be between [0,1)"))
-    0 < γ1 < 1 <= γ2 || throw(DomainError("invalid 0 < γ1 < 1 <= γ2 "))
-    new(
-      Parameter(T(atol), RealInterval(T(-1000), T(1000)), "atol"), #TODO actual name
-      Parameter(T(rtol), RealInterval(T(-1000), T(1000)), "rtol"),
-      Parameter(T(η1), RealInterval(T(-1000), T(1000)), "η1"),
-      Parameter(T(η2), RealInterval(T(-10000), T(10000)), "η2"),
-      Parameter(T(γ1), RealInterval(T(-10000), T(1000)), "γ1"),
-      Parameter(T(γ2), RealInterval(T(-10000), T(1000)), "γ2"),
-      Parameter(σmin, RealInterval(T(-10000), T(1000)), "σmin"),
-      Parameter(T(β), RealInterval(T(-10000), T(1000)), "β"),
-    )
+    0 <= β < 1 || throw(DomainError("invalid: β needs to be between [0,1)"))
+    0 < γ1 < 1 <= γ2 || throw(DomainError("invalid 0 < γ1 < 1 <= γ2 "))
+    new(
+      Parameter(T(atol), RealInterval(T(-1000), T(1000)), "atol"), #TODO actual name
+      Parameter(T(rtol), RealInterval(T(-1000), T(1000)), "rtol"),
+      Parameter(T(η1), RealInterval(T(-1000), T(1000)), "η1"),
+      Parameter(T(η2), RealInterval(T(-10000), T(10000)), "η2"),
+      Parameter(T(γ1), RealInterval(T(-10000), T(1000)), "γ1"),
+      Parameter(T(γ2), RealInterval(T(-10000), T(1000)), "γ2"),
+      Parameter(σmin, RealInterval(T(-10000), T(1000)), "σmin"),
+      Parameter(T(β), RealInterval(T(-10000), T(1000)), "β"),
+    )
   end
 
 end
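
These checks appear to be what the `C = R2ParameterSet{R}(eps(R), eps(R), 0.1, 0.01, 0.1, 1.9, zero(R), 0.9)` line in old/testParam.jl trips: assuming the constructor takes its arguments in the same order as the `new(...)` call above (atol, rtol, η1, η2, γ1, γ2, σmin, β), that call sets η1 = 0.1 and η2 = 0.01, so 0 < η1 <= η2 <= 1 fails, which would explain its `# with issues` comment. A minimal sketch of a construction that passes all three checks, using hypothetical values not taken from the repository's tests:

# Hypothetical values, assuming the argument order (atol, rtol, η1, η2, γ1, γ2, σmin, β).
T = Float64
params = R2ParameterSet{T}(
  eps(T),   # atol >= 0
  eps(T),   # rtol >= 0
  T(0.01),  # η1: 0 < η1 <= η2 <= 1
  T(0.1),   # η2
  T(0.5),   # γ1: 0 < γ1 < 1
  T(2.0),   # γ2: 1 <= γ2
  zero(T),  # σmin
  T(0.9),   # β: 0 <= β < 1
)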

src/SR2.jl

Lines changed: 17 additions & 17 deletions
@@ -143,9 +143,9 @@ function SolverCore.solve!(
   # d = solver.d
 
   set_iter!(stats, 0)
-  set_objective!(stats, obj(nlp,solver.x ))
+  set_objective!(stats, obj(nlp, solver.x))
 
-  grad!(nlp,solver.x, solver.gx)
+  grad!(nlp, solver.x, solver.gx)
   norm_∇fk = norm(solver.gx)
   set_dual_residual!(stats, norm_∇fk)
 
@@ -161,9 +161,9 @@ function SolverCore.solve!(
   end
 
   if verbose > 0 && mod(stats.iter, verbose) == 0
-    @info @sprintf "%5s %9s %7s %7s %7s %7s " "iter" "f" "‖∇f‖" "σ" "ρk" "ΔTk"
+    @info @sprintf "%5s %9s %7s %7s %7s %7s " "iter" "f" "‖∇f‖" "σ" "ρk" "ΔTk"
     infoline =
-      @sprintf "%5d %9.2e %7.1e %7.1e %7.1e %7.1e " stats.iter stats.objective norm_∇fk σk 0.0 0.0
+      @sprintf "%5d %9.2e %7.1e %7.1e %7.1e %7.1e " stats.iter stats.objective norm_∇fk σk 0.0 0.0
   end
 
   set_status!(
@@ -181,27 +181,27 @@ function SolverCore.solve!(
 
   done = stats.status != :unknown
   while !done
-
+
     #added by Farhad for Deep learning
     # since we are not updating the solver.x, then grad should say the same but we might have noise
 
 
     #TODO objective re-calculate since we need same x, with new minibatch
 
-    set_objective!(stats, obj(nlp,solver.x ))
+    set_objective!(stats, obj(nlp, solver.x))
 
-    grad!(nlp,solver.x, solver.gx)
+    grad!(nlp, solver.x, solver.gx)
     norm_∇fk = norm(solver.gx)
     set_dual_residual!(stats, norm_∇fk)
 
     # σk = 2^round(log2(norm_∇fk + 1))
 
 
     if param.β.value == 0
-      solver.cx .=solver.x .- (solver.gx ./ σk)
+      solver.cx .= solver.x .- (solver.gx ./ σk)
     else
-      solver.d .= solver.gx .* (T(1) - param.β.value) .+ solver.d .* param.β.value
-      solver.cx .=solver.x .- (d ./ σk)
+      solver.d .= solver.gx .* (T(1) - param.β.value) .+ solver.d .* param.β.value
+      solver.cx .= solver.x .- (d ./ σk)
     end
 
 
@@ -228,20 +228,20 @@ function SolverCore.solve!(
 
     # Acceptance of the new candidate
     if ρk >= param.η1.value
-      solver.x .= solver.cx
+      solver.x .= solver.cx
       set_objective!(stats, fck)
-      grad!(nlp,solver.x, solver.gx)
+      grad!(nlp, solver.x, solver.gx)
       norm_∇fk = norm(solver.gx)
     end
 
     set_iter!(stats, stats.iter + 1)
     set_time!(stats, time() - start_time)
     set_dual_residual!(stats, norm_∇fk)
-
-
+
+
     #TODO for now
     # optimal = norm_∇fk ≤ ϵ
-    optimal = false
+    optimal = false
 
     if verbose > 0 && mod(stats.iter, verbose) == 0
       @info infoline
@@ -269,8 +269,8 @@ function SolverCore.solve!(
     done = stats.status != :unknown
   end
 
-  set_solution!(stats,solver.x)
-  if verbose > 0
+  set_solution!(stats, solver.x)
+  if verbose > 0
     @info @sprintf "%s: %s" "stats.status" stats.status
   end
   return stats
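
For orientation, the candidate-step lines reformatted above appear to implement the quadratic-regularization update: with momentum weight β = param.β.value the search direction blends the current gradient with the previous direction, and the candidate is a step of length 1/σk along it. A minimal standalone sketch with assumed names, not the solver's own API:

# Hypothetical helper for illustration; x, g, d are arrays, σ and β are scalars.
function r2_candidate(x, g, d, σ, β)
  dnew = β == 0 ? copy(g) : (1 - β) .* g .+ β .* d  # momentum-averaged direction
  c = x .- dnew ./ σ                                # candidate point, step length 1/σ
  return c, dnew
end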

src/problems/CPU/LeNet_MNIST.jl

Lines changed: 2 additions & 1 deletion
@@ -47,7 +47,8 @@ fig = plot(
 
 println("Training SGD with KNET")
 # Train Knet
-trained_model_knet = train_knet(knetModel, xtrn, ytrn, xtst, ytst;mbatch=m, mepoch = max_epochs) #TODO some reason when mepoch=max_epochs, will give error , maybe Int(max_epochs)
+trained_model_knet =
+  train_knet(knetModel, xtrn, ytrn, xtst, ytst; mbatch = m, mepoch = max_epochs) #TODO some reason when mepoch=max_epochs, will give error , maybe Int(max_epochs)
 res_knet = trained_model_knet[2]
 epochs_knet = res_knet[:, 1]
 acc_knet = res_knet[:, 2]

src/problems/GPU/LeNet_CIFAR10.jl

Lines changed: 3 additions & 3 deletions
@@ -1,9 +1,9 @@
 
 T = Float32
 # Knet.atype() = Array{T}
-if CUDA.functional()
+if CUDA.functional()
   Knet.array_type[] = CUDA.CuArray{T}
-else
+else
   Knet.array_type[] = Array{T}
 end
 
@@ -143,4 +143,4 @@ plot!(
 # linestyle = :dot,
 # )
 
-savefig("run_GPU_LENET_CIFAR10.png")
+savefig("run_GPU_LENET_CIFAR10.png")

src/problems/GPU/LeNet_MNIST.jl

Lines changed: 5 additions & 4 deletions
@@ -1,9 +1,9 @@
 
 T = Float32
 # Knet.atype() = Array{T}
-if CUDA.functional()
+if CUDA.functional()
   Knet.array_type[] = CUDA.CuArray{T}
-else
+else
   Knet.array_type[] = Array{T}
 end
 
@@ -45,7 +45,8 @@ train_acc = res.train_acc_arr
 
 println("Training SGD with KNET")
 # Train Knet
-trained_model_knet = train_knet(knetModel, xtrn, ytrn, xtst, ytst;mbatch=m, mepoch = max_epochs) #TODO some reason when mepoch=max_epochs, will give error , maybe Int(max_epochs)
+trained_model_knet =
+  train_knet(knetModel, xtrn, ytrn, xtst, ytst; mbatch = m, mepoch = max_epochs) #TODO some reason when mepoch=max_epochs, will give error , maybe Int(max_epochs)
 res_knet = trained_model_knet[2]
 epochs_knet = res_knet[:, 1]
 acc_knet = res_knet[:, 2]
@@ -117,4 +118,4 @@ plot!(
   linestyle = :dot,
 )
 
-savefig("run_GPU_LENET_MNIST.png")
+savefig("run_GPU_LENET_MNIST.png")

src/train_tools.jl

Lines changed: 2 additions & 2 deletions
@@ -99,7 +99,7 @@ function train_knetNLPmodel!(
   γ2 = 1 / γ1,
   σmin = zero(T),# change this
   β::T = T(0),
-  max_time = Inf
+  max_time = Inf,
   # max_iter = 1000, # we can play with this and see what happens in R2, 1 means one itration but the relation is not 1-to-1,
   #TODO add max itration
 ) where {T}
@@ -121,7 +121,7 @@ function train_knetNLPmodel!(
     γ2 = γ2,
     σmin = σmin,
     β = β,
-    max_time=max_time,
+    max_time = max_time,
     verbose = verbose,
     callback = (nlp, solver, stats, nlp_param) ->
      cb(nlp, solver, stats, nlp_param, stochastic_data),
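
The two hunks above touch the keyword interface of train_knetNLPmodel! (defaults such as γ2 = 1 / γ1, σmin = zero(T), β = T(0), max_time = Inf) and the place where those keywords are forwarded to the inner solver call. A hedged sketch of a call, using only keyword names that appear in this commit; the positional arguments are placeholders, since they are not shown in the diff:

# `model` and `data` are hypothetical placeholders for the positional arguments.
trained_model = train_knetNLPmodel!(
  model,
  data;
  β = 0.9,            # momentum weight, 0 <= β < 1
  max_time = 3600.0,  # wall-clock budget (assumed to be seconds)
  all_data = false,
  verbose = false,
)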
