Date: Sat, 25 Aug 2012 18:27:47 +0000
From: tzabal@FreeBSD.org
To: svn-soc-all@FreeBSD.org
Subject: socsvn commit: r240845 - in soc2012/tzabal/server-side/akcrs-handler: . crashreportd
Message-ID: <20120825182747.1BF1B106566B@hub.freebsd.org>
Author: tzabal Date: Sat Aug 25 18:27:45 2012 New Revision: 240845 URL: http://svnweb.FreeBSD.org/socsvn/?view=rev&rev=240845 Log: Add the setup.py file and minor changes in the other files. The setup.py file enables the installation of the crashreportd program. It uses the standard distutils module, not setuptools. Added: soc2012/tzabal/server-side/akcrs-handler/crashreportd/ soc2012/tzabal/server-side/akcrs-handler/crashreportd/__init__.py soc2012/tzabal/server-side/akcrs-handler/crashreportd/confirm_report.wsgi soc2012/tzabal/server-side/akcrs-handler/crashreportd/crashreport.py soc2012/tzabal/server-side/akcrs-handler/crashreportd/crashreportd.py soc2012/tzabal/server-side/akcrs-handler/crashreportd/database.py soc2012/tzabal/server-side/akcrs-handler/crashreportd/settings.py soc2012/tzabal/server-side/akcrs-handler/setup.py Added: soc2012/tzabal/server-side/akcrs-handler/crashreportd/__init__.py ============================================================================== --- /dev/null 00:00:00 1970 (empty, because file is newly added) +++ soc2012/tzabal/server-side/akcrs-handler/crashreportd/__init__.py Sat Aug 25 18:27:45 2012 (r240845) @@ -0,0 +1 @@ + Added: soc2012/tzabal/server-side/akcrs-handler/crashreportd/confirm_report.wsgi ============================================================================== --- /dev/null 00:00:00 1970 (empty, because file is newly added) +++ soc2012/tzabal/server-side/akcrs-handler/crashreportd/confirm_report.wsgi Sat Aug 25 18:27:45 2012 (r240845) @@ -0,0 +1,58 @@ +import urlparse +# Importing the cgi module leads to an error when accessing the web page +#from cgi import escape + +import database + + +def application(environ, start_response): + response_body = 'Invalid confirmation code.' + + db = database.Database() + if not db.connection: + response_body = 'Could not connect to database.' + + if environ['REQUEST_METHOD'] == 'GET': + + parameters = urlparse.parse_qs(environ['QUERY_STRING']) + + if 'id' in parameters and 'code' in parameters: + report_id = parameters['id'][0] + code = parameters['code'][0] + + db.query = ('SELECT bug_id ' + 'FROM reports ' + 'WHERE id = %s AND confirmation_code = %s AND ' + 'confirmed = %s') + db.values = (report_id, code, False) + + if not db.execute_query(): + response_body = 'Could not execute the query.' + + if db.cursor.rowcount == 1: + bug_id = db.cursor.fetchone() + + db.query = 'UPDATE reports SET confirmed = %s WHERE id = %s' + db.values = (True, report_id) + + if not db.execute_query(): + response_body = 'Could not execute the query.' + + db.query = 'UPDATE bugs SET reported = reported + 1 WHERE id = %s' + db.values = (bug_id, ) + + if not db.execute_query(): + response_body = 'Could not execute the query.' + + db.save() + db.cursor.close() + db.connection.close() + + response_body = 'Your report has been confirmed succesfully.' 
+ + status = '200 OK' + response_headers = [('Content-type', 'text/html')] + + start_response(status, response_headers) + + return [response_body] Added: soc2012/tzabal/server-side/akcrs-handler/crashreportd/crashreport.py ============================================================================== --- /dev/null 00:00:00 1970 (empty, because file is newly added) +++ soc2012/tzabal/server-side/akcrs-handler/crashreportd/crashreport.py Sat Aug 25 18:27:45 2012 (r240845) @@ -0,0 +1,161 @@ +import logging +import os +import re +import tarfile +from StringIO import StringIO + +from lxml import etree + + +class CrashReport(object): + """This class represents a crash report.""" + + valid_name = re.compile('^crashreport\.[A-Za-z0-9]{6}\.tar\.gz$') + + def __init__(self, path): + name = os.path.basename(path) + + self.name = name + self.path = path + self.confirmation_code = None + self.data = CrashData() + + + def has_valid_name(self): + """Returns True is the report's name matches the name of a valid crash + report. Otherwise it returns implicit False.""" + match = re.match(self.__class__.valid_name, self.name) + + if not match: + logging.info('Invalid crash report name: %s' % self.name) + return + + return True + + + def has_valid_type(self): + """Returns True if the report's file type matches the file type of a + valid crash report. Otherwise it returns implicit False.""" + if not tarfile.is_tarfile(self.path): + logging.info('The report %s cannot be read from the tarfile module' + % self.path) + return + + try: + tarfileobj = tarfile.open(self.path, 'r:gz') + except tarfile.ReadError: + logging.info('The provided mode is not suitable to open for reading' + ' the report %s' % self.path) + return + except tarfile.CompressionError: + logging.info('The compression method for the report %s is not ' + 'supported' % self.path) + return + finally: + tarfileobj.close() + + return True + + + def has_valid_contents_number(self): + """Returns True is the report contains the same number of files that a + valid crash report has. Othewise it returns implicit False.""" + try: + tarfileobj = tarfile.open(self.path, 'r:gz') + except tarfile.ReadError: + return + except tarfile.CompressionError: + return + else: + contents_list = tarfileobj.getnames() + if not len(contents_list) == 1: + logging.info('The report %s has invalid number of contents' + % self.path) + return + self.data.name = contents_list[0] + finally: + tarfileobj.close() + + return True + + + +class CrashData(object): + """This class represents the crash data that a crash report contains.""" + + valid_name = re.compile('^crashreport\.[A-Za-z0-9]{6}\.xml$') + + def __init__(self): + self.name = None + self.path = None + self.info = {} + self.commands = {'crashtype': None, + 'crashdate': None, + 'hostname': None, + 'ostype': None, + 'osrelease': None, + 'version': None, + 'machine': None, + 'panic': None, + 'backtrace': None, + 'ps_axl': None, + 'vmstat_s': None, + 'vmstat_m': None, + 'vmstat_z': None, + 'vmstat_i': None, + 'pstat_T': None, + 'pstat_s': None, + 'iostat': None, + 'ipcs_a': None, + 'ipcs_T': None, + 'nfsstat': None, + 'netstat_s': None, + 'netstat_m': None, + 'netstat_id': None, + 'netstat_anr': None, + 'netstat_anA': None, + 'netstat_aL': None, + 'fstat': None, + 'dmesg': None, + 'kernelconfig': None, + 'ddbcapturebuffer': None + } + + + def has_valid_name(self): + """Returns True if the report's crash data name matches the name of a + valid crash data. 
Otherwise it returns implicit False.""" + match = re.match(self.__class__.valid_name, self.name) + + if not match: + logging.info('Invalid crash data name: %s' % self.name) + return + + return True + + + def has_valid_crashdata(self): + """Returns True if the crash data is a well formed and valid XML file. + Otherwise implicit False.""" + dtdfile = StringIO("""<!ELEMENT crashreport (header, body)> + <!ELEMENT header (email)> + <!ELEMENT email (#PCDATA)> + <!ELEMENT body (command+)> + <!ELEMENT command (name, result)> + <!ELEMENT name (#PCDATA)> + <!ELEMENT result (#PCDATA)>""") + + try: + elemtree = etree.parse(self.path) + except: + logging.info('%s is not a well formed crash report data.' % + (self.path)) + return + else: + dtd = etree.DTD(dtdfile) + if not dtd.validate(elemtree): + logging.info('%s is not a valid crash report data.' % + (self.path)) + return + + return True Added: soc2012/tzabal/server-side/akcrs-handler/crashreportd/crashreportd.py ============================================================================== --- /dev/null 00:00:00 1970 (empty, because file is newly added) +++ soc2012/tzabal/server-side/akcrs-handler/crashreportd/crashreportd.py Sat Aug 25 18:27:45 2012 (r240845) @@ -0,0 +1,530 @@ +#! /usr/local/bin/python + +import difflib +import hashlib +import logging +import os +import random +import shutil +import smtplib +import string +import tarfile +import time +from email.mime.text import MIMEText + +from lxml import etree + +import crashreport +import database +import settings + + +db = database.Database() + + +def move_invalid_report(report): + if not os.path.isfile(report.path): + return + + if not os.path.isdir(settings.INVALID_REPORTS_DIR): + logging.error('Invalid reports directory does not exist') + return + + # If file with same name exists, remove it in order to avoid shutil.Error + if os.path.exists(settings.INVALID_REPORTS_DIR + '/' + report.name): + os.remove(settings.INVALID_REPORTS_DIR + '/' + report.name) + + shutil.move(report.path, settings.INVALID_REPORTS_DIR) + + +def send_confirmation_email(report): + smtpserver = settings.SMTPSERVER + sender = settings.SENDER + receiver = report.data.info['email'] + subject = settings.SUBJECT + + if not report.user_password: + text = settings.TEXT01 % (report.id, report.confirmation_code) + else: + text = settings.TEXT02 % (report.id, report.confirmation_code, + report.user_password) + + message = MIMEText(text) + message['From'] = sender + message['To'] = receiver + message['Subject'] = subject + + try: + smtpconn = smtplib.SMTP(smtpserver) + smtpconn.sendmail(sender, receiver, message.as_string()) + except smtplib.SMTPException, err: + logging.info(err) + return + finally: + smtpconn.quit() + + return True + + +def generate_random_string(size): + """Generates and returns a random string of the specified size. + + The string is a sequence of characters that are chosen randomly from a set + that contains digits, lowercase and uppercase letters. 
+ """ + chars = string.letters + string.digits + return ''.join(random.choice(chars) for ch in range(size)) + + +def store_report(report): + # Bugs + report.bugs_id = None + if report.bug_id == -1: + db.query = ('INSERT INTO bugs (state, reported) ' + 'VALUES (%s, %s) ' + 'RETURNING id') + db.values = ('Open', 0) + + if not db.execute_query(): + return + + report.bug_id = db.cursor.fetchone() + db.save() + elif type(report.bug_id) == type([]): + report.bugs_id = report.bug_id + report.bug_id = None + + # Users + db.query = 'SELECT id FROM users WHERE email = %s' + db.values = (report.data.info['email'], ) + + if not db.execute_query(): + return + + if db.cursor.rowcount: + report.user_id = db.cursor.fetchone() + report.user_password = None # Part of Hack + else: + password = generate_random_string(8) + hashobj = hashlib.sha256() + hashobj.update(password) + hashpass = hashobj.hexdigest() + + # Hack: send the password of the new submitter along with the + # confirmation email instead of sending two emails + report.user_password = password + + db.query = ('INSERT INTO users (email, password) ' + 'VALUES (%s, %s)' + 'RETURNING id') + db.values = (report.data.info['email'], hashpass) + + if not db.execute_query(): + return + + report.user_id = db.cursor.fetchone() + db.save() + + # Reports + report.confirmation_code = generate_random_string(16) + + db.query = """INSERT INTO reports (bug_id, user_id, confirmation_code, + bugs_id, crashtype, crashdate, hostname, ostype, osrelease, version, + machine, panic, backtrace, top_significant_func, rem_significant_funcs, + ps_axl, vmstat_s, vmstat_m, vmstat_z, vmstat_i, pstat_T, pstat_s, iostat, + ipcs_a, ipcs_T, nfsstat, netstat_s, netstat_m, netstat_id, netstat_anr, + netstat_anA, netstat_aL, fstat, dmesg, kernelconfig, ddbcapturebuffer) + VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, + %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) + RETURNING id""" + + db.values = (report.bug_id, + report.user_id, + report.confirmation_code, + report.bugs_id, + report.data.commands['crashtype'], + report.data.commands['crashdate'], + report.data.commands['hostname'], + report.data.commands['ostype'], + report.data.commands['osrelease'], + report.data.commands['version'], + report.data.commands['machine'], + report.data.commands['panic'], + report.data.commands['backtrace'], + report.top_significant_func, + report.rem_significant_funcs, + report.data.commands['ps_axl'], + report.data.commands['vmstat_s'], + report.data.commands['vmstat_m'], + report.data.commands['vmstat_z'], + report.data.commands['vmstat_i'], + report.data.commands['pstat_T'], + report.data.commands['pstat_s'], + report.data.commands['iostat'], + report.data.commands['ipcs_a'], + report.data.commands['ipcs_T'], + report.data.commands['nfsstat'], + report.data.commands['netstat_s'], + report.data.commands['netstat_m'], + report.data.commands['netstat_id'], + report.data.commands['netstat_anr'], + report.data.commands['netstat_anA'], + report.data.commands['netstat_aL'], + report.data.commands['fstat'], + report.data.commands['dmesg'], + report.data.commands['kernelconfig'], + report.data.commands['ddbcapturebuffer']) + + if not db.execute_query(): + return + + report.id = db.cursor.fetchone()[0] + db.save() + + return True + + +def uniqify_sequence(seq): + """Takes a sequence of elements and returns a sequence with only unique + elements. 
(Taken from http://www.peterbe.com/plog/uniqifiers-benchmark)""" + keys = {} + for e in seq: + keys[e] = 1 + return keys.keys() + + +def allocate_values(length): + """Takes an integer as input and returns a list with length equals to that + integer plus one. The list contains values that are allocated in an + increment and symmetric way, and the first index of the list (i.e zero) is + not used (is None). The values are calculated using the length of the list + and the constant REMAINING_FRAMES_MAX_PERC.""" + # Average value + avg = settings.REMAINING_FRAMES_MAX_PERC / length + + # Divide the list in 2 sets + elems_per_set = length // 2 + + # Initialize the list (need to access the middle element afterwards) + values = [None] * (length + 1) + + # Handle even and odd lengths + if length % 2 == 0: + # Auxiliary variable for swapping between the two sets + aux = 1 + else: + aux = 2 + # If odd, then the average value to the middle element (starting from 1) + values[elems_per_set + 1] = avg + #values[elems_per_set] = avg + + # How the values will be allocated + diff = 1 + # Increment and symmetric allocation of values + for i in range(elems_per_set, 0, -1): + values[i] = avg + diff + values[i+aux] = avg - diff + aux += 2 + diff += 1 + + return values + + +def contains_any(str, set): + """Returns True if the string str contains any of the elements found + in the iterable set.""" + for elem in set: + if elem in str: + return True + + return + + +def get_significant_funcs(backtrace): + """Takes a backtrace as a string and returns a list that + contains only the function names of the most significant stack frames.""" + + # Store every stack frame of the backtrace in a separate line + backtrace = backtrace.splitlines() + + # For every stack frame store only the function name + backtrace[0] = backtrace[0].split()[1] + for index, stackframe in enumerate(backtrace[1:], 1): + backtrace[index] = stackframe.split()[3] + + # Find the most significant stack frames and store only the function names + hit_significant = False + significant_funcs = [] + insignificant_funcs = ('syscall', 'panic', 'trap', 'lock', 'sleep', '??') + for func in reversed(backtrace): + if not contains_any(func, insignificant_funcs): + hit_significant = True + significant_funcs.append(func) + else: + if hit_significant: + break + significant_funcs.reverse() + + return significant_funcs + + +def recognize_report(report): + # The algorithm needs from every report to provide a valid panic message + # and a backtrace. If one of them is not provided, the report is marked + # as invalid. 
+ if not report.data.commands['panic'] or not report.data.commands['backtrace']: + return + + # Calculate the significant functions of the reports + significant_funcs = get_significant_funcs(report.data.commands['backtrace']) + if len(significant_funcs) > 1: + report.top_significant_func = significant_funcs[0] + report.rem_significant_funcs = significant_funcs[1:] + elif len(significant_funcs) == 1: + report.top_significant_func = significant_funcs[0] + report.rem_significant_funcs = [] + else: + report.top_significant_func = report.rem_significant_funcs = [] + + # Retrieve from the database the confirmed reports + db.query = ('SELECT bug_id, panic, top_significant_func, ' + 'rem_significant_funcs FROM Reports') + if not db.execute_query(): + return + loggedreports = db.cursor.fetchall() + + # A list that contains the percentages of similarity of the examined report + # with all the others retrieved from the database + sims = [[None for i in range(2)] for j in range(db.cursor.rowcount)] + + # Check the examined report against all the retrieved reports + for index, loggedreport in enumerate(loggedreports): + # Store the bug_id of the report + sims[index][0] = loggedreport[0] + + # Calculate the percentage of similarity between the panic messages + ratio = difflib.SequenceMatcher(None, + report.data.commands['panic'], + loggedreport[1] + ).ratio() + sims[index][1] = settings.PANIC_MESSAGE_MAX_PERC * ratio + + # Calculate the percentage of similarity between the top significant + # function names + ratio = difflib.SequenceMatcher(None, + report.top_significant_func, + loggedreport[2] + ).ratio() + sims[index][1] += settings.TOP_FRAME_MAX_PERC * ratio + + # Calculate the percentage of similarity between the remaining + # significant function names + + # Firstly, create an increment and symmetric allocation of maximum + # percentages for the remaining significant function names. + # Compare X function names, where X is the length of the report with + # the fewest remaining significant function names + # sigkrisi tou full sign funcs me tou rem sign funcs twn logged! (fixed) + length = min(len(report.rem_significant_funcs), len(loggedreport[3])) + #rem_sig_max_percs = allocate_values(length - 1) + rem_sig_max_percs = allocate_values(length) + + # Then, calculate the percentage of similarity between every remaining + # significant function name based on the previous calculated percentages + for i in range(1, length, 1): + ratio = difflib.SequenceMatcher(None, + report.rem_significant_funcs[i], + loggedreport[3][i] + ).ratio() + sims[index][1] += rem_sig_max_percs[i] * ratio + + # Find with which bugs the examined report is similar based on the value + # of the limit percentage + passlimit = [] + for sim in sims: + if sim[1] >= settings.LIMIT_PERC: + passlimit.append(sim[0]) + + # Finally, check if the examined report concluded to refer to none, only + # one, or more logged bugs. If it refers to more than one bugs, then this is + # an indication that our algorithm is not accurate. 
+ report.bug_id = -1 # new bug + if len(passlimit): + if passlimit.count(passlimit[0]) == len(passlimit): + # Refers to a known bug + report.bug_id = passlimit[0] + else: + # Refers to more than one known bugs + report.bug_id = uniqify_sequence(passlimit) + + return True + + +def parse_crashdata(report): + """Parses the crash data XML file of the given report and store the data in + instance variables of the report.""" + validnames = ['crashtype', 'crashdate', 'hostname', 'ostype', 'osrelease', + 'version', 'machine', 'panic', 'backtrace', 'ps_axl', + 'vmstat_s', 'vmstat_m', 'vmstat_z', 'vmstat_i', 'pstat_T', + 'pstat_s', 'iostat', 'ipcs_a', 'ipcs_T', 'nfsstat', + 'netstat_s', 'netstat_m', 'netstat_id', 'netstat_anr', + 'netstat_anA', 'netstat_aL', 'fstat', 'dmesg', 'kernelconfig', + 'ddbcapturebuffer'] + + if not os.path.isfile(report.data.path): + logging.info('Crash report data %s is not an existing regular file' + % report.data.path) + return + + elemtree = etree.parse(report.data.path) + root = elemtree.getroot() + + report.data.info['email'] = root[0][0].text.strip() + + for elem in elemtree.iter(): + if elem.tag == 'command': + children = list(elem) + name = children[0].text.strip() + result = children[1].text.strip() + if name in validnames: + report.data.commands[name] = result + + return True + + +def discard_report(path): + """Discards a crash report from the system.""" + os.remove(path) + + +def clear_directory(directory): + """Takes the absolute path of a directory, and removes all the files (not + directories) that it contains.""" + for filename in os.listdir(directory): + filepath = directory + '/' + filename + if os.path.isfile(filepath): + os.remove(filepath) + + +def extract_report(report): + """Extracts the given report to the auxiliary directory.""" + if not os.path.isdir(settings.AUXILIARY_DIR): + logging.error('Auxiliary directory does not exist') + return + + clear_directory(settings.AUXILIARY_DIR) + + try: + tarfileobj = tarfile.open(report.path, 'r:gz') + tarfileobj.extractall(settings.AUXILIARY_DIR) + except tarfile.ReadError: + return + except tarfile.CompressionError: + return + else: + report.data.path = settings.AUXILIARY_DIR + '/' + report.data.name + finally: + tarfileobj.close() + + return True + + +def check_report(report): + """Checks a crash report for validity and security. + + It is a function that calls all the methods provided by the CrashReport and + the CrashData objects that are related with the validity of a report. The + methods are called in a stict order because some methods assign values + to the instance variables of the given object and some other methods depend + on them. This is done in order to avoid execution of the same code multiple + times, distinguish the checks easily, and organize the code better. + """ + if not report.has_valid_name(): + return + + if not report.has_valid_type(): + return + + if not report.has_valid_contents_number(): + return + + if not report.data.has_valid_name(): + return + + if not extract_report(report): + return + + if not report.data.has_valid_crashdata(): + return + + return True + + +def create_pid_file(): + """Creates the Process ID file that contains the PID of crashreportd. + + It is used from the rc.d script to stop the program normally. 
+ """ + pid = os.getpid() + try: + pidfile = open(settings.PID_FILE, 'w') + pidfile.write(str(pid)) + except IOError: + logging.error('Could not create the Process ID file') + return + finally: + pidfile.close() + + return True + + +def start_logging(): + """Turns on or off the logging facility.""" + if settings.LOGGING_FILE: + logging.basicConfig(level=logging.DEBUG, filename=settings.LOGGING_FILE, + format='%(asctime)s in %(funcName)s() at ' + '%(lineno)s %(levelname)s: %(message)s', + datefmt='%Y-%m-%d %H:%M:%S') + + +def main(): + start_logging() + + if not create_pid_file(): + return + + if not db.connection: + return + + while True: + dirlist = os.listdir(settings.CRASHREPORTS_DIR) + for filename in dirlist: + path = settings.CRASHREPORTS_DIR + '/' + filename + report = crashreport.CrashReport(path) + print report + if not check_report(report): + move_invalid_report(report) + continue + + if not parse_crashdata(report): + move_invalid_report(report) + continue + + if not recognize_report(report): + move_invalid_report(report) + continue + + if not store_report(report): + move_invalid_report(report) + continue + + if not send_confirmation_email(report): + move_invalid_report(report) + continue + + discard_report(report.path) + time.sleep(settings.INTERVAL_TIME) + + +if __name__ == '__main__': + main() Added: soc2012/tzabal/server-side/akcrs-handler/crashreportd/database.py ============================================================================== --- /dev/null 00:00:00 1970 (empty, because file is newly added) +++ soc2012/tzabal/server-side/akcrs-handler/crashreportd/database.py Sat Aug 25 18:27:45 2012 (r240845) @@ -0,0 +1,42 @@ +import logging + +import psycopg2 + +import settings + + +class Database: + + def __init__(self): + try: + self.connection = psycopg2.connect(database=settings.DBNAME, + host=settings.DBHOST, + user=settings.DBUSER, + password=settings.DBPASS) + except: + self.connection = None + logging.error('Could not connect to the database') + else: + self.cursor = self.connection.cursor() + + self.query = None + self.values = None + + def execute_query(self): + try: + if self.values: + self.cursor.execute(self.query, self.values) + else: + self.cursor.execute(self.query) + except Exception, err: + logging.info('Could not execute the query: %s' % self.query) + logging.info(err.pgerror) + return + + self.query = None + self.values = None + + return True + + def save(self): + self.connection.commit() Added: soc2012/tzabal/server-side/akcrs-handler/crashreportd/settings.py ============================================================================== --- /dev/null 00:00:00 1970 (empty, because file is newly added) +++ soc2012/tzabal/server-side/akcrs-handler/crashreportd/settings.py Sat Aug 25 18:27:45 2012 (r240845) @@ -0,0 +1,80 @@ +# Interval time +INTERVAL_TIME = 10 + +# Process ID file +PID_FILE = '/var/run/crashreportd.pid' + +# Crashreports directory +CRASHREPORTS_DIR = '/var/spool/crashreports' + +# Auxiliary directory +AUXILIARY_DIR = '/tmp/crashreports' + +# Invalid crash reports +INVALID_REPORTS_DIR = AUXILIARY_DIR + '/invalidreports' + +# Logging file +LOGGING_FILE = '/root/crashreportd.log' + +# Database name +DBNAME = 'akcrsdb' + +# Database host +DBHOST = '127.0.0.1' + +# Database user +DBUSER = 'akcrs' + +# Database user password +DBPASS = 'freebsd' + +# SMTP Server +SMTPSERVER = 'smtp.hol.gr' + +# Email address and name of the sender +SENDER = 'Automated Kernel Crash Reporting System <akcrs@freebsd.org>' + +# Confirmation email subject +SUBJECT = 
'Confirm your kernel crash report' + +# Confirmation email text (for a registered user) +TEXT01 = """\ +Hello, + +Please confirm your kernel crash report by clicking the following link: +http://akcrs.dyndns.org/confirm_report?id=%s&code=%s + +Once you confirm, your kernel crash report will be stored in our database as +valid. + +Thank you for your time. +""" + +# Confirmation email text (for a new user) +TEXT02 = """\ +Hello, + +Please confirm your kernel crash report by clicking the following link: +http://akcrs.dyndns.org/confirm_report?id=%s&code=%s + +Once you confirm, your kernel crash report will be stored in our database as +valid. + +Finally, because this is the first time that you report a kernel crash to our +system, we have created for you an account with the following password: %s +Use your email address and the password to login into your account. + +Thank you for your time. +""" + +# Panic message maximum percentage +PANIC_MESSAGE_MAX_PERC = 25 + +# Top significant frame maximum percentage +TOP_FRAME_MAX_PERC = 25 + +# Remaining significant frames maximum percentage +REMAINING_FRAMES_MAX_PERC = 50.0 + +# Limit percentage for similar reports +LIMIT_PERC = 60 Added: soc2012/tzabal/server-side/akcrs-handler/setup.py ============================================================================== --- /dev/null 00:00:00 1970 (empty, because file is newly added) +++ soc2012/tzabal/server-side/akcrs-handler/setup.py Sat Aug 25 18:27:45 2012 (r240845) @@ -0,0 +1,15 @@ +from distutils.core import setup + + +setup(name='crashreportd', + version='0.1', + url='http://wiki.freebsd.org/SummerOfCode2012/AutomatedKernelCrashReportingSystem', + author='Tzanetos Balitsaris', + author_email='tzabal@freebsd.org', + description='The server side program that handles the received crash reports. Part of the Automated Kernel Crash Reporting System.', + license='BSD', + platforms=['FreeBSD'], + py_modules=['crashreportd.crashreport', 'crashreportd.database', 'crashreportd.settings'], + data_files=[('/usr/sbin', ['crashreportd/crashreportd.py']), + ('/usr/local/www/apache22/wsgi-scripts', ['crashreportd/confirm_report.wsgi'])], + )
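
As the log message notes, setup.py relies on the standard distutils module, so installation is the usual distutils sequence. A hedged sketch of the invocation (run from the akcrs-handler directory; root privileges are assumed because the data_files entries point at /usr/sbin and the Apache wsgi-scripts directory):

    python setup.py build
    python setup.py install

distutils copies the listed py_modules into site-packages and installs the data_files at their absolute destinations, so crashreportd.py ends up in /usr/sbin and confirm_report.wsgi under /usr/local/www/apache22/wsgi-scripts.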
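For reference, the duplicate detection in recognize_report() combines three difflib similarity ratios, weighted by the PANIC_MESSAGE_MAX_PERC, TOP_FRAME_MAX_PERC and REMAINING_FRAMES_MAX_PERC settings, and compares the total against LIMIT_PERC. The snippet below is only a simplified sketch of that scoring: it spreads REMAINING_FRAMES_MAX_PERC evenly over the remaining frames instead of calling allocate_values(), and the sample panic message and function names are invented.

    import difflib

    PANIC_MESSAGE_MAX_PERC = 25
    TOP_FRAME_MAX_PERC = 25
    REMAINING_FRAMES_MAX_PERC = 50.0
    LIMIT_PERC = 60

    def similarity(new, logged):
        """new and logged are (panic, top_func, rem_funcs) tuples."""
        # Panic message: contributes up to PANIC_MESSAGE_MAX_PERC
        score = PANIC_MESSAGE_MAX_PERC * difflib.SequenceMatcher(
            None, new[0], logged[0]).ratio()
        # Top significant frame: contributes up to TOP_FRAME_MAX_PERC
        score += TOP_FRAME_MAX_PERC * difflib.SequenceMatcher(
            None, new[1], logged[1]).ratio()
        # Remaining significant frames: the committed code weights frames
        # nearer the top more heavily via allocate_values(); an even split
        # is used here for brevity.
        common = min(len(new[2]), len(logged[2]))
        if common:
            share = REMAINING_FRAMES_MAX_PERC / common
            for a, b in zip(new[2], logged[2]):
                score += share * difflib.SequenceMatcher(None, a, b).ratio()
        return score

    if __name__ == '__main__':
        fresh = ('page fault while in kernel mode', 'vm_fault',
                 ['vm_map_lookup', 'pmap_enter'])
        known = ('page fault while in kernel mode', 'vm_fault',
                 ['vm_map_lookup', 'pmap_remove'])
        print similarity(fresh, known) >= LIMIT_PERC

With the sample data above the score lands well above LIMIT_PERC, so the new report would be treated as referring to the already-logged bug rather than opening a new one.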