-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathlogger.py
More file actions
171 lines (136 loc) · 5.52 KB
/
logger.py
File metadata and controls
171 lines (136 loc) · 5.52 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
import io
import logging
import os.path
import sys
import traceback
from datetime import datetime
from multiprocessing import RLock
## Re-export the standard severity levels so callers can pass them to
## Logger(loglevel=...) and Logger.log() without having to import the
## logging module themselves.
CRITICAL = logging.CRITICAL
ERROR = logging.ERROR
WARNING = logging.WARNING
INFO = logging.INFO
DEBUG = logging.DEBUG
NOTSET = logging.NOTSET
# next bit filched from Lib/logging/__init__.py who filched it from 1.5.2's inspect.py
def currentframe():
"""Return the frame object for the caller's stack frame."""
try:
raise Exception
except:
return sys.exc_info()[2].tb_frame.f_back
if hasattr(sys, '_getframe'): currentframe = lambda: sys._getframe(3)
_srcfile = os.path.normcase(currentframe.__code__.co_filename)
# done filching
## Wrapper around Python logging module to perform routine items like creating
## log directory, formatting, and file/console logging depending on value of
## variable.
class Logger(logging.Logger):
    """Wrapper around the stdlib logging module that handles the routine
    setup chores: creating the log directory, choosing a formatter, and
    selecting file vs. console output based on constructor arguments.
    """

    ## Recursive lock shared class-wide so concurrent users of the logger
    ## serialize their emit calls.
    rlock = RLock()

    ## Thin wrappers that take the shared lock around each emit.
    def log(self, lvl, msg, *args, **kwargs):
        """Log *msg* at level *lvl* while holding the shared lock.

        No-op once close() has been called.
        """
        if self.closed is False:
            ## BUG FIX: acquire/release without try/finally leaked the lock
            ## if a handler raised; the context manager always releases it.
            with self.rlock:
                super(Logger, self).log(lvl, msg, *args, **kwargs)

    def critical(self, msg, *args, **kwargs):
        self.log(CRITICAL, msg, *args, **kwargs)

    def error(self, msg, *args, **kwargs):
        self.log(ERROR, msg, *args, **kwargs)

    def warning(self, msg, *args, **kwargs):
        self.log(WARNING, msg, *args, **kwargs)

    def info(self, msg, *args, **kwargs):
        self.log(INFO, msg, *args, **kwargs)

    def debug(self, msg, *args, **kwargs):
        self.log(DEBUG, msg, *args, **kwargs)

    ## We need to override this or the reported filename is always logger.py,
    ## since that is where the actual log() call happens. Adapted from
    ## Lib/logging/__init__.py.
    def findCaller(self, stack_info=False, stacklevel=1):
        """
        Find the stack frame of the caller so that we can note the source
        file name, line number and function name.

        BUG FIX: accepts ``stacklevel`` (default 1) — Python 3.8+ calls
        findCaller(stack_info, stacklevel), so the old two-argument
        signature raised TypeError on every log call there.
        """
        f = currentframe()
        #On some versions of IronPython, currentframe() returns None if
        #IronPython isn't run with -X:Frames.
        if f is not None:
            f = f.f_back
        ## Walk additional levels up when the caller asked for it, but fall
        ## back to the original frame if we run off the top of the stack.
        orig_f = f
        while f and stacklevel > 1:
            f = f.f_back
            stacklevel -= 1
        if not f:
            f = orig_f
        rv = "(unknown file)", 0, "(unknown function)", None
        while hasattr(f, "f_code"):
            co = f.f_code
            filename = os.path.normcase(co.co_filename)
            ## Skip frames that belong to this wrapper module itself.
            if filename == _srcfile:
                f = f.f_back
                continue
            sinfo = None
            if stack_info:
                sio = io.StringIO()
                sio.write('Stack (most recent call last):\n')
                traceback.print_stack(f, file=sio)
                sinfo = sio.getvalue()
                if sinfo[-1] == '\n':
                    sinfo = sinfo[:-1]
                sio.close()
            rv = (co.co_filename, f.f_lineno, co.co_name, sinfo)
            break
        return rv

    @classmethod
    def _get_name(cls, name):
        """Return *name*, or derive one from the running script's basename."""
        if name is None:
            ## BUG FIX: split('.')[0] truncated dotted names ("my.tool.py"
            ## became "my"); splitext strips only the real extension.
            name = os.path.splitext(os.path.basename(sys.argv[0]))[0]
        return name

    def __init__(self, name=None, logdir=None, loglevel=logging.DEBUG, console=False, stderr=False, logThreads=0, logProcesses=0, logMultiprocessing=0):
        """Configure the logger.

        name     -- logger name; defaults to the running script's basename.
        logdir   -- directory for the log file; None forces console output.
        loglevel -- minimum level to emit (default DEBUG).
        console  -- log to stdout/stderr instead of a file.
        stderr   -- with console, use stderr instead of stdout.
        logThreads/logProcesses/logMultiprocessing -- toggle collection of
            the corresponding record attributes (see the logging module).

        Exits the process (status 1) if the log directory cannot be created.
        """
        logging.Logger.__init__(self, Logger._get_name(name))
        self.closed = False
        ## Assume (and force) console output only
        if logdir is None:
            console = True
        else:
            ## Ensure our logging directory exists.
            try:
                ## BUG FIX: the mode was the decimal literal 775 (== 0o1407),
                ## not the intended octal 0o775; and "already exists" was
                ## detected by string-matching the error text, which is
                ## locale/platform fragile. exist_ok handles it properly.
                os.makedirs(logdir, mode=0o775, exist_ok=True)
            except OSError as e:
                print("Cannot create {}: {}".format(logdir, e))
                sys.exit(1)
            logdir = "{}/".format(logdir.rstrip('/'))
            # File to where we send this script's logs
            logging_output_file = "{}{}-{}.log".format(logdir, self.name, datetime.today().date())
        if console is True:
            if stderr is False:
                logging_output = sys.stdout
            else:
                logging_output = sys.stderr
            self.logfile = 'console'
        else:
            logging_output = open(logging_output_file, 'a')
            self.logfile = logging_output_file
        ## These enable/disable collecting of this log information
        logging.logThreads = logThreads
        logging.logProcesses = logProcesses
        logging.logMultiprocessing = logMultiprocessing
        self.setLevel(loglevel)
        logger_handler = logging.StreamHandler(logging_output)
        if logProcesses:
            logger_handler.setFormatter(logging.Formatter(
                "[%(asctime)s] %(name)s(%(process)d) %(levelname)s (%(filename)s "
                "=> %(lineno)s): %(message)s"
            ))
        else:
            logger_handler.setFormatter(logging.Formatter(
                "[%(asctime)s] %(name)s %(levelname)s (%(filename)s "
                "=> %(lineno)s): %(message)s"
            ))
        self.addHandler(logger_handler)

    ## Close underlying handlers if they support the close() method call.
    ## NOTE(review): StreamHandler.close() flushes but does not close the
    ## underlying stream, so a file opened in __init__ stays open — confirm
    ## whether that is intentional before changing it.
    def close(self):
        self.closed = True
        for handler in self.handlers:
            if hasattr(handler, "close"):
                handler.close()
def main(*args):
    """Smoke test: write a single error record to a log file under /tmp.

    Ignores *args* (accepted so ``main(*sys.argv)`` works); returns 0.
    """
    logger = Logger(logdir="/tmp", loglevel=DEBUG)
    try:
        logger.error("This is an error")
    finally:
        ## BUG FIX: the log file handle was never released before exit.
        logger.close()
    return 0


if __name__ == '__main__':
    sys.exit(main(*sys.argv))