Diffstat (limited to 'modules')
514 files changed, 18317 insertions, 2999 deletions
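A note on the apache changes below: every Apache configuration file is now managed through the new apache::config defined type, which writes the file and notifies a refreshonly 'apachectl configtest' exec; only a passing configtest then notifies the httpd service. A minimal calling sketch (illustrative only; the path and template name are placeholders, not taken from this changeset):

    # Sketch only: placeholder path and template name; the real callers are the
    # apache::base, apache::mod::* and apache::vhost::* hunks below.
    apache::config { '/etc/httpd/conf/conf.d/example.conf':
        content => template('mymodule/example.conf'),
    }
    # apache::config notifies Exec['apachectl configtest'] (refreshonly),
    # which in turn notifies Service['apache'], so the reload is skipped
    # when the syntax check fails.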
diff --git a/modules/amavis/manifests/init.pp b/modules/amavis/manifests/init.pp
new file mode 100644
index 00000000..57af5bd9
--- /dev/null
+++ b/modules/amavis/manifests/init.pp
@@ -0,0 +1,13 @@
+class amavis {
+    package { 'amavisd-new': }
+
+    service { 'amavisd':
+        subscribe => Package['amavisd-new'],
+    }
+
+    file { '/etc/amavisd/amavisd.conf':
+        require => Package['amavisd-new'],
+        content => template('amavis/amavisd.conf'),
+        notify  => Service['amavisd'],
+    }
+}
diff --git a/modules/amavis/templates/amavisd.conf b/modules/amavis/templates/amavisd.conf
new file mode 100644
index 00000000..84a44944
--- /dev/null
+++ b/modules/amavis/templates/amavisd.conf
@@ -0,0 +1,782 @@
+use strict;
+
+# a minimalistic configuration file for amavisd-new with all necessary settings
+#
+# see amavisd.conf-default for a list of all variables with their defaults;
+# see amavisd.conf-sample for a traditional-style commented file;
+# for more details see documentation in INSTALL, README_FILES/*
+# and at http://www.ijs.si/software/amavisd/amavisd-new-docs.html
+
+
+# COMMONLY ADJUSTED SETTINGS:
+
+# @bypass_virus_checks_maps = (1);  # controls running of anti-virus code
+# @bypass_spam_checks_maps = (1);   # controls running of anti-spam code
+# $bypass_decode_parts = 1;         # controls running of decoders&dearchivers
+
+$max_servers = 2;            # num of pre-forked children (2..30 is common), -m
+$daemon_user = 'amavis';     # (no default; customary: vscan or amavis), -u
+$daemon_group = 'amavis';    # (no default; customary: vscan or amavis), -g
+
+(my $__hn,$mydomain) = split (/\./, $myhostname, 2); # try to discover domainname,
+  # a convenient default for other settings could be localhost.localdomain
+  # or change this as your needs
+
+$MYHOME = '/run/amavis';     # a convenient default for other settings, -H
+$TEMPBASE = "$MYHOME/tmp";   # working directory, needs to exist, -T
+$ENV{TMPDIR} = $TEMPBASE;    # environment variable TMPDIR, used by SA, etc.
+$QUARANTINEDIR = '/var/spool/amavis/virusmails';  # -Q
+# $quarantine_subdir_levels = 1;  # add level of subdirs to disperse quarantine
+# $release_format = 'resend';     # 'attach', 'plain', 'resend'
+# $report_format = 'arf';         # 'attach', 'plain', 'resend', 'arf'
+
+# $daemon_chroot_dir = $MYHOME;   # chroot directory or undef, -R
+
+# $db_home = "$MYHOME/db";        # dir for bdb nanny/cache/snmp databases, -D
+# $helpers_home = "$MYHOME/var";  # working directory for SpamAssassin, -S
+# $lock_file = "$MYHOME/var/lib/amavisd.lock";  # -L
+# $pid_file = "$MYHOME/var/lib/amavisd.pid";    # -P
+
+#NOTE: create directories $MYHOME/tmp, $MYHOME/var, $MYHOME/db manually
+
+$log_level = 0;              # verbosity 0..5, -d
+$log_recip_templ = undef;    # disable by-recipient level-0 log entries
+$DO_SYSLOG = 1;              # log via syslogd (preferred)
+$syslog_facility = 'mail';   # Syslog facility as a string
+                             # e.g.: mail, daemon, user, local0, ...
local7 +$syslog_priority = 'debug'; # Syslog base (minimal) priority as a string, + # choose from: emerg, alert, crit, err, warning, notice, info, debug + +$enable_db = 1; # enable use of BerkeleyDB/libdb (SNMP and nanny) +$enable_global_cache = 1; # enable use of libdb-based cache if $enable_db=1 +$nanny_details_level = 2; # nanny verbosity: 1: traditional, 2: detailed +$enable_dkim_verification = 1; # enable DKIM signatures verification +$enable_dkim_signing = 1; # load DKIM signing code, keys defined by dkim_key + +@local_domains_maps = ( [".$mydomain"] ); # list of all local domains + +@mynetworks = qw( 127.0.0.0/8 [::1] [FE80::]/10 [FEC0::]/10 + 10.0.0.0/8 172.16.0.0/12 192.168.0.0/16 ); + +$unix_socketname = "$MYHOME/amavisd.sock"; # amavisd-release or amavis-milter + # option(s) -p overrides $inet_socket_port and $unix_socketname + +$inet_socket_port = 10025; # listen on this local TCP port(s) +# $inet_socket_port = [10024,10026]; # listen on multiple TCP ports + +$policy_bank{'MYNETS'} = { # mail originating from @mynetworks + originating => 1, # is true in MYNETS by default, but let's make it explicit + os_fingerprint_method => undef, # don't query p0f for internal clients +}; + +# it is up to MTA to re-route mail from authenticated roaming users or +# from internal hosts to a dedicated TCP port (such as 10026) for filtering +$interface_policy{'10026'} = 'ORIGINATING'; + +$policy_bank{'ORIGINATING'} = { # mail supposedly originating from our users + originating => 1, # declare that mail was submitted by our smtp client + allow_disclaimers => 1, # enables disclaimer insertion if available + # notify administrator of locally originating malware + virus_admin_maps => ["virusalert\@$mydomain"], + spam_admin_maps => ["virusalert\@$mydomain"], + warnbadhsender => 1, + # forward to a smtpd service providing DKIM signing service + forward_method => 'smtp:[127.0.0.1]:10027', + # force MTA conversion to 7-bit (e.g. before DKIM signing) + smtpd_discard_ehlo_keywords => ['8BITMIME'], + bypass_banned_checks_maps => [1], # allow sending any file names and types + terminate_dsn_on_notify_success => 0, # don't remove NOTIFY=SUCCESS option +}; + +$interface_policy{'SOCK'} = 'AM.PDP-SOCK'; # only applies with $unix_socketname + +# Use with amavis-release over a socket or with Petr Rehor's amavis-milter.c +# (with amavis-milter.c from this package or old amavis.c client use 'AM.CL'): +$policy_bank{'AM.PDP-SOCK'} = { + protocol => 'AM.PDP', + auth_required_release => 0, # do not require secret_id for amavisd-release +}; + +$sa_tag_level_deflt = 1.0; # add spam info headers if at, or above that level +$sa_tag2_level_deflt = 4.7; # add 'spam detected' headers at that level +$sa_kill_level_deflt = 4.7; # triggers spam evasive actions (e.g. blocks mail) +$sa_dsn_cutoff_level = 10; # spam level beyond which a DSN is not sent +$sa_crediblefrom_dsn_cutoff_level = 18; # likewise, but for a likely valid From +# $sa_quarantine_cutoff_level = 25; # spam level beyond which quarantine is off +$penpals_bonus_score = 8; # (no effect without a @storage_sql_dsn database) +$penpals_threshold_high = $sa_kill_level_deflt; # don't waste time on hi spam +$bounce_killer_score = 100; # spam score points to add for joe-jobbed bounces + +$sa_mail_body_size_limit = 512*1024; # don't waste time on SA if mail is larger +$sa_local_tests_only = 0; # only tests which do not require internet access? 
+ +# @lookup_sql_dsn = +# ( ['DBI:mysql:database=mail;host=127.0.0.1;port=3306', 'user1', 'passwd1'], +# ['DBI:mysql:database=mail;host=host2', 'username2', 'password2'], +# ["DBI:SQLite:dbname=$MYHOME/sql/mail_prefs.sqlite", '', ''] ); +# @storage_sql_dsn = @lookup_sql_dsn; # none, same, or separate database + +# $timestamp_fmt_mysql = 1; # if using MySQL *and* msgs.time_iso is TIMESTAMP; +# defaults to 0, which is good for non-MySQL or if msgs.time_iso is CHAR(16) + +$virus_admin = ""; # notifications recip. + +$mailfrom_notify_admin = "virusalert\@$mydomain"; # notifications sender +$mailfrom_notify_recip = "virusalert\@$mydomain"; # notifications sender +$mailfrom_notify_spamadmin = "spam.police\@$mydomain"; # notifications sender +$mailfrom_to_quarantine = ''; # null return path; uses original sender if undef + +@addr_extension_virus_maps = ('virus'); +@addr_extension_banned_maps = ('banned'); +@addr_extension_spam_maps = ('spam'); +@addr_extension_bad_header_maps = ('badh'); +# $recipient_delimiter = '+'; # undef disables address extensions altogether +# when enabling addr extensions do also Postfix/main.cf: recipient_delimiter=+ + +$path = '/usr/local/sbin:/usr/local/bin:/usr/sbin:/sbin:/usr/bin:/bin'; +# $dspam = 'dspam'; + +$MAXLEVELS = 14; +$MAXFILES = 1500; +$MIN_EXPANSION_QUOTA = 100*1024; # bytes (default undef, not enforced) +$MAX_EXPANSION_QUOTA = 512*1024*1024; # bytes (default undef, not enforced) + +$sa_spam_subject_tag = '***SPAM*** '; +$defang_virus = 1; # MIME-wrap passed infected mail +$defang_banned = 1; # MIME-wrap passed mail containing banned name +# for defanging bad headers only turn on certain minor contents categories: +$defang_by_ccat{+CC_BADH.",3"} = 1; # NUL or CR character in header +$defang_by_ccat{+CC_BADH.",5"} = 1; # header line longer than 998 characters +$defang_by_ccat{+CC_BADH.",6"} = 1; # header field syntax error + + +# OTHER MORE COMMON SETTINGS (defaults may suffice): + +# $myhostname = 'host.example.com'; # must be a fully-qualified domain name! + +$notify_method = 'smtp:[127.0.0.1]:10026'; +$forward_method = 'smtp:[127.0.0.1]:10026'; # set to undef with milter! 
+ +# $final_virus_destiny = D_DISCARD; +# $final_banned_destiny = D_BOUNCE; +# $final_spam_destiny = D_PASS; +# $final_bad_header_destiny = D_PASS; +# $bad_header_quarantine_method = undef; + +# $os_fingerprint_method = 'p0f:*:2345'; # to query p0f-analyzer.pl + +## hierarchy by which a final setting is chosen: +## policy bank (based on port or IP address) -> *_by_ccat +## *_by_ccat (based on mail contents) -> *_maps +## *_maps (based on recipient address) -> final configuration value + + +# SOME OTHER VARIABLES WORTH CONSIDERING (see amavisd.conf-default for all) + +# $warnbadhsender, +# $warnvirusrecip, $warnbannedrecip, $warnbadhrecip, (or @warn*recip_maps) +# +# @bypass_virus_checks_maps, @bypass_spam_checks_maps, +# @bypass_banned_checks_maps, @bypass_header_checks_maps, +# +# @virus_lovers_maps, @spam_lovers_maps, +# @banned_files_lovers_maps, @bad_header_lovers_maps, +# +# @blacklist_sender_maps, @score_sender_maps, +# +# $clean_quarantine_method, $virus_quarantine_to, $banned_quarantine_to, +# $bad_header_quarantine_to, $spam_quarantine_to, +# +# $defang_bad_header, $defang_undecipherable, $defang_spam + + +# REMAINING IMPORTANT VARIABLES ARE LISTED HERE BECAUSE OF LONGER ASSIGNMENTS + +@keep_decoded_original_maps = (new_RE( + qr'^MAIL$', # retain full original message for virus checking + qr'^MAIL-UNDECIPHERABLE$', # recheck full mail if it contains undecipherables + qr'^(ASCII(?! cpio)|text|uuencoded|xxencoded|binhex)'i, +# qr'^Zip archive data', # don't trust Archive::Zip +)); + + +# for $banned_namepath_re (a new-style of banned table) see amavisd.conf-sample + +$banned_filename_re = new_RE( + +### BLOCKED ANYWHERE +# qr'^UNDECIPHERABLE$', # is or contains any undecipherable components + qr'^\.(exe-ms|dll)$', # banned file(1) types, rudimentary +# qr'^\.(exe|lha|tnef|cab|dll)$', # banned file(1) types + +### BLOCK THE FOLLOWING, EXCEPT WITHIN UNIX ARCHIVES: +# [ qr'^\.(gz|bz2)$' => 0 ], # allow any in gzip or bzip2 + [ qr'^\.(rpm|cpio|tar)$' => 0 ], # allow any in Unix-type archives + + qr'.\.(pif|scr)$'i, # banned extensions - rudimentary +# qr'^\.zip$', # block zip type + +### BLOCK THE FOLLOWING, EXCEPT WITHIN ARCHIVES: +# [ qr'^\.(zip|rar|arc|arj|zoo)$'=> 0 ], # allow any within these archives + + qr'^application/x-msdownload$'i, # block these MIME types + qr'^application/x-msdos-program$'i, + qr'^application/hta$'i, + +# qr'^message/partial$'i, # rfc2046 MIME type +# qr'^message/external-body$'i, # rfc2046 MIME type + +# qr'^(application/x-msmetafile|image/x-wmf)$'i, # Windows Metafile MIME type +# qr'^\.wmf$', # Windows Metafile file(1) type + + # block certain double extensions in filenames + qr'\.[^./]*[A-Za-z][^./]*\.\s*(exe|vbs|pif|scr|bat|cmd|com|cpl|dll)[.\s]*$'i, + +# qr'\{[0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}\}?'i, # Class ID CLSID, strict +# qr'\{[0-9a-z]{4,}(-[0-9a-z]{4,}){0,7}\}?'i, # Class ID extension CLSID, loose + + qr'.\.(exe|vbs|pif|scr|cpl)$'i, # banned extension - basic +# qr'.\.(exe|vbs|pif|scr|cpl|bat|cmd|com)$'i, # banned extension - basic+cmd +# qr'.\.(ade|adp|app|bas|bat|chm|cmd|com|cpl|crt|emf|exe|fxp|grp|hlp|hta| +# inf|ins|isp|js|jse|lnk|mda|mdb|mde|mdw|mdt|mdz|msc|msi|msp|mst| +# ops|pcd|pif|prg|reg|scr|sct|shb|shs|vb|vbe|vbs| +# wmf|wsc|wsf|wsh)$'ix, # banned ext - long +# qr'.\.(ani|cur|ico)$'i, # banned cursors and icons filename +# qr'^\.ani$', # banned animated cursor file(1) type + +# qr'.\.(mim|b64|bhx|hqx|xxe|uu|uue)$'i, # banned extension - WinZip vulnerab. 
+); +# See http://support.microsoft.com/default.aspx?scid=kb;EN-US;q262631 +# and http://www.cknow.com/vtutor/vtextensions.htm + + +# ENVELOPE SENDER SOFT-WHITELISTING / SOFT-BLACKLISTING + +@score_sender_maps = ({ # a by-recipient hash lookup table, + # results from all matching recipient tables are summed + +# ## per-recipient personal tables (NOTE: positive: black, negative: white) +# 'user1@example.com' => [{'bla-mobile.press@example.com' => 10.0}], +# 'user3@example.com' => [{'.ebay.com' => -3.0}], +# 'user4@example.com' => [{'cleargreen@cleargreen.com' => -7.0, +# '.cleargreen.com' => -5.0}], + + ## site-wide opinions about senders (the '.' matches any recipient) + '.' => [ # the _first_ matching sender determines the score boost + + new_RE( # regexp-type lookup table, just happens to be all soft-blacklist + [qr'^(bulkmail|offers|cheapbenefits|earnmoney|foryou)@'i => 5.0], + [qr'^(greatcasino|investments|lose_weight_today|market\.alert)@'i=> 5.0], + [qr'^(money2you|MyGreenCard|new\.tld\.registry|opt-out|opt-in)@'i=> 5.0], + [qr'^(optin|saveonlsmoking2002k|specialoffer|specialoffers)@'i => 5.0], + [qr'^(stockalert|stopsnoring|wantsome|workathome|yesitsfree)@'i => 5.0], + [qr'^(your_friend|greatoffers)@'i => 5.0], + [qr'^(inkjetplanet|marketopt|MakeMoney)\d*@'i => 5.0], + ), + +# read_hash("/var/lib/amavis/sender_scores_sitewide"), + + { # a hash-type lookup table (associative array) + 'nobody@cert.org' => -3.0, + 'cert-advisory@us-cert.gov' => -3.0, + 'owner-alert@iss.net' => -3.0, + 'slashdot@slashdot.org' => -3.0, + 'securityfocus.com' => -3.0, + 'ntbugtraq@listserv.ntbugtraq.com' => -3.0, + 'security-alerts@linuxsecurity.com' => -3.0, + 'mailman-announce-admin@python.org' => -3.0, + 'amavis-user-admin@lists.sourceforge.net'=> -3.0, + 'amavis-user-bounces@lists.sourceforge.net' => -3.0, + 'spamassassin.apache.org' => -3.0, + 'notification-return@lists.sophos.com' => -3.0, + 'owner-postfix-users@postfix.org' => -3.0, + 'owner-postfix-announce@postfix.org' => -3.0, + 'owner-sendmail-announce@lists.sendmail.org' => -3.0, + 'sendmail-announce-request@lists.sendmail.org' => -3.0, + 'donotreply@sendmail.org' => -3.0, + 'ca+envelope@sendmail.org' => -3.0, + 'noreply@freshmeat.net' => -3.0, + 'owner-technews@postel.acm.org' => -3.0, + 'ietf-123-owner@loki.ietf.org' => -3.0, + 'cvs-commits-list-admin@gnome.org' => -3.0, + 'rt-users-admin@lists.fsck.com' => -3.0, + 'clp-request@comp.nus.edu.sg' => -3.0, + 'surveys-errors@lists.nua.ie' => -3.0, + 'emailnews@genomeweb.com' => -5.0, + 'yahoo-dev-null@yahoo-inc.com' => -3.0, + 'returns.groups.yahoo.com' => -3.0, + 'clusternews@linuxnetworx.com' => -3.0, + lc('lvs-users-admin@LinuxVirtualServer.org') => -3.0, + lc('owner-textbreakingnews@CNNIMAIL12.CNN.COM') => -5.0, + + # soft-blacklisting (positive score) + 'sender@example.net' => 3.0, + '.example.net' => 1.0, + + }, + ], # end of site-wide tables +}); + + +@decoders = ( + ['mail', \&do_mime_decode], + ['asc', \&do_ascii], + ['uue', \&do_ascii], + ['hqx', \&do_ascii], + ['ync', \&do_ascii], + ['F', \&do_uncompress, ['unfreeze','freeze -d','melt','fcat'] ], + ['Z', \&do_uncompress, ['uncompress','gzip -d','zcat'] ], + ['gz', \&do_uncompress, 'gzip -d'], + ['gz', \&do_gunzip], + ['bz2', \&do_uncompress, 'bzip2 -d'], + ['lzo', \&do_uncompress, 'lzop -d'], + ['rpm', \&do_uncompress, ['rpm2cpio.pl','rpm2cpio'] ], + ['cpio', \&do_pax_cpio, ['pax','gcpio','cpio'] ], + ['tar', \&do_pax_cpio, ['pax','gcpio','cpio'] ], + ['deb', \&do_ar, 'ar'], +# ['a', \&do_ar, 'ar'], # unpacking .a seems an overkill + 
['zip', \&do_unzip], + ['7z', \&do_7zip, ['7zr','7za','7z'] ], + ['rar', \&do_unrar, ['rar','unrar'] ], + ['arj', \&do_unarj, ['arj','unarj'] ], + ['arc', \&do_arc, ['nomarch','arc'] ], + ['zoo', \&do_zoo, ['zoo','unzoo'] ], + ['lha', \&do_lha, 'lha'], +# ['doc', \&do_ole, 'ripole'], + ['cab', \&do_cabextract, 'cabextract'], + ['tnef', \&do_tnef_ext, 'tnef'], + ['tnef', \&do_tnef], +# ['sit', \&do_unstuff, 'unstuff'], # broken/unsafe decoder + ['exe', \&do_executable, ['rar','unrar'], 'lha', ['arj','unarj'] ], +); + + +@av_scanners = ( + +# ### http://www.clanfield.info/sophie/ (http://www.vanja.com/tools/sophie/) +# ['Sophie', +# \&ask_daemon, ["{}/\n", '/var/run/sophie'], +# qr/(?x)^ 0+ ( : | [\000\r\n]* $)/m, qr/(?x)^ 1 ( : | [\000\r\n]* $)/m, +# qr/(?x)^ [-+]? \d+ : (.*?) [\000\r\n]* $/m ], + +# ### http://www.csupomona.edu/~henson/www/projects/SAVI-Perl/ +# ['Sophos SAVI', \&sophos_savi ], + +# ### http://www.clamav.net/ +# ['ClamAV-clamd', +# \&ask_daemon, ["CONTSCAN {}\n", "/var/lib/clamav/clamd.socket"], +# qr/\bOK$/m, qr/\bFOUND$/m, +# qr/^.*?: (?!Infected Archive)(.*) FOUND$/m ], +# # NOTE: run clamd under the same user as amavisd, or run it under its own +# # uid such as clamav, add user clamav to the amavis group, and then add +# # AllowSupplementaryGroups to clamd.conf; +# # NOTE: match socket name (LocalSocket) in clamav.conf to the socket name in +# # this entry; when running chrooted one may prefer socket "$MYHOME/clamd". + +# ### http://www.clamav.net/ and CPAN (memory-hungry! clamd is preferred) +# # note that Mail::ClamAV requires perl to be build with threading! +# ['Mail::ClamAV', \&ask_clamav, "*", [0], [1], qr/^INFECTED: (.+)/m ], + +# ### http://www.openantivirus.org/ +# ['OpenAntiVirus ScannerDaemon (OAV)', +# \&ask_daemon, ["SCAN {}\n", '127.0.0.1:8127'], +# qr/^OK/m, qr/^FOUND: /m, qr/^FOUND: (.+)/m ], + +# ### http://www.vanja.com/tools/trophie/ +# ['Trophie', +# \&ask_daemon, ["{}/\n", '/var/run/trophie'], +# qr/(?x)^ 0+ ( : | [\000\r\n]* $)/m, qr/(?x)^ 1 ( : | [\000\r\n]* $)/m, +# qr/(?x)^ [-+]? \d+ : (.*?) [\000\r\n]* $/m ], + +# ### http://www.grisoft.com/ +# ['AVG Anti-Virus', +# \&ask_daemon, ["SCAN {}\n", '127.0.0.1:55555'], +# qr/^200/m, qr/^403/m, qr/^403 .*?: ([^\r\n]+)/m ], + +# ### http://www.f-prot.com/ +# ['F-Prot fpscand', # F-PROT Antivirus for BSD/Linux/Solaris, version 6 +# \&ask_daemon, +# ["SCAN FILE {}/*\n", '127.0.0.1:10200'], +# qr/^(0|8|64) /m, +# qr/^([1235679]|1[01345]) |<[^>:]*(?i)(infected|suspicious|unwanted)/m, +# qr/(?i)<[^>:]*(?:infected|suspicious|unwanted)[^>:]*: ([^>]*)>/m ], + +# ### http://www.f-prot.com/ +# ['F-Prot f-protd', # old version +# \&ask_daemon, +# ["GET {}/*?-dumb%20-archive%20-packed HTTP/1.0\r\n\r\n", +# ['127.0.0.1:10200', '127.0.0.1:10201', '127.0.0.1:10202', +# '127.0.0.1:10203', '127.0.0.1:10204'] ], +# qr/(?i)<summary[^>]*>clean<\/summary>/m, +# qr/(?i)<summary[^>]*>infected<\/summary>/m, +# qr/(?i)<name>(.+)<\/name>/m ], + +# ### http://www.sald.com/, http://www.dials.ru/english/, http://www.drweb.ru/ +# ['DrWebD', \&ask_daemon, # DrWebD 4.31 or later +# [pack('N',1). # DRWEBD_SCAN_CMD +# pack('N',0x00280001). # DONT_CHANGEMAIL, IS_MAIL, RETURN_VIRUSES +# pack('N', # path length +# length("$TEMPBASE/amavis-yyyymmddTHHMMSS-xxxxx/parts/pxxx")). +# '{}/*'. # path +# pack('N',0). 
# content size +# pack('N',0), +# '/var/drweb/run/drwebd.sock', +# # '/var/lib/amavis/var/run/drwebd.sock', # suitable for chroot +# # '/usr/local/drweb/run/drwebd.sock', # FreeBSD drweb ports default +# # '127.0.0.1:3000', # or over an inet socket +# ], +# qr/\A\x00[\x10\x11][\x00\x10]\x00/sm, # IS_CLEAN,EVAL_KEY; SKIPPED +# qr/\A\x00[\x00\x01][\x00\x10][\x20\x40\x80]/sm,# KNOWN_V,UNKNOWN_V,V._MODIF +# qr/\A.{12}(?:infected with )?([^\x00]+)\x00/sm, +# ], +# # NOTE: If using amavis-milter, change length to: +# # length("$TEMPBASE/amavis-milter-xxxxxxxxxxxxxx/parts/pxxx"). + + ### http://www.kaspersky.com/ (kav4mailservers) +# ['KasperskyLab AVP - aveclient', +# ['/usr/local/kav/bin/aveclient','/usr/local/share/kav/bin/aveclient', +# '/opt/kav/5.5/kav4mailservers/bin/aveclient','aveclient'], +# '-p /var/run/aveserver -s {}/*', +# [0,3,6,8], qr/\b(INFECTED|SUSPICION|SUSPICIOUS)\b/m, +# qr/(?:INFECTED|WARNING|SUSPICION|SUSPICIOUS) (.+)/m, +# ], + # NOTE: one may prefer [0],[2,3,4,5], depending on how suspicious, + # corrupted or protected archives are to be handled + + ### http://www.kaspersky.com/ +# ['KasperskyLab AntiViral Toolkit Pro (AVP)', ['avp'], +# '-* -P -B -Y -O- {}', [0,3,6,8], [2,4], # any use for -A -K ? +# qr/infected: (.+)/m, +# sub {chdir('/opt/AVP') or die "Can't chdir to AVP: $!"}, +# sub {chdir($TEMPBASE) or die "Can't chdir back to $TEMPBASE $!"}, +# ], + + ### The kavdaemon and AVPDaemonClient have been removed from Kasperky + ### products and replaced by aveserver and aveclient +# ['KasperskyLab AVPDaemonClient', +# [ '/opt/AVP/kavdaemon', 'kavdaemon', +# '/opt/AVP/AvpDaemonClient', 'AvpDaemonClient', +# '/opt/AVP/AvpTeamDream', 'AvpTeamDream', +# '/opt/AVP/avpdc', 'avpdc' ], +# "-f=$TEMPBASE {}", [0,8], [3,4,5,6], qr/infected: ([^\r\n]+)/m ], + # change the startup-script in /etc/init.d/kavd to: + # DPARMS="-* -Y -dl -f=/var/lib/amavis /var/lib/amavis" + # (or perhaps: DPARMS="-I0 -Y -* /var/lib/amavis" ) + # adjusting /var/lib/amavis above to match your $TEMPBASE. + # The '-f=/var/lib/amavis' is needed if not running it as root, so it + # can find, read, and write its pid file, etc., see 'man kavdaemon'. + # defUnix.prf: there must be an entry "*/var/lib/amavis" (or whatever + # directory $TEMPBASE specifies) in the 'Names=' section. + # cd /opt/AVP/DaemonClients; configure; cd Sample; make + # cp AvpDaemonClient /opt/AVP/ + # su - vscan -c "${PREFIX}/kavdaemon ${DPARMS}" + + ### http://www.centralcommand.com/ +# ['CentralCommand Vexira (new) vascan', +# ['vascan','/usr/lib/Vexira/vascan'], +# "-a s --timeout=60 --temp=$TEMPBASE -y $QUARANTINEDIR ". +# "--log=/var/log/vascan.log {}", +# [0,3], [1,2,5], +# qr/(?x)^\s* (?:virus|iworm|macro|mutant|sequence|trojan)\ found:\ ( [^\]\s']+ )\ \.\.\.\ /m ], + # Adjust the path of the binary and the virus database as needed. + # 'vascan' does not allow to have the temp directory to be the same as + # the quarantine directory, and the quarantine option can not be disabled. + # If $QUARANTINEDIR is not used, then another directory must be specified + # to appease 'vascan'. Move status 3 to the second list if password + # protected files are to be considered infected. + + ### http://www.avira.com/ + ### Avira AntiVir (formerly H+BEDV) or (old) CentralCommand Vexira Antivirus +# ['Avira AntiVir', ['antivir','vexira'], +# '--allfiles -noboot -nombr -rs -s -z {}', [0], qr/ALERT:|VIRUS:/m, +# qr/(?x)^\s* (?: ALERT: \s* (?: \[ | [^']* ' ) | +# (?i) VIRUS:\ .*?\ virus\ '?) 
( [^\]\s']+ )/m ], + # NOTE: if you only have a demo version, remove -z and add 214, as in: + # '--allfiles -noboot -nombr -rs -s {}', [0,214], qr/ALERT:|VIRUS:/, + + ### http://www.commandsoftware.com/ +# ['Command AntiVirus for Linux', 'csav', +# '-all -archive -packed {}', [50], [51,52,53], +# qr/Infection: (.+)/m ], + + ### http://www.symantec.com/ +# ['Symantec CarrierScan via Symantec CommandLineScanner', +# 'cscmdline', '-a scan -i 1 -v -s 127.0.0.1:7777 {}', +# qr/^Files Infected:\s+0$/m, qr/^Infected\b/m, +# qr/^(?:Info|Virus Name):\s+(.+)/m ], + + ### http://www.symantec.com/ +# ['Symantec AntiVirus Scan Engine', +# 'savsecls', '-server 127.0.0.1:7777 -mode scanrepair -details -verbose {}', +# [0], qr/^Infected\b/m, +# qr/^(?:Info|Virus Name):\s+(.+)/m ], + # NOTE: check options and patterns to see which entry better applies + +# ### http://www.f-secure.com/products/anti-virus/ version 4.65 +# ['F-Secure Antivirus for Linux servers', +# ['/opt/f-secure/fsav/bin/fsav', 'fsav'], +# '--delete=no --disinf=no --rename=no --archive=yes --auto=yes '. +# '--dumb=yes --list=no --mime=yes {}', [0], [3,6,8], +# qr/(?:infection|Infected|Suspected): (.+)/m ], + + ### http://www.f-secure.com/products/anti-virus/ version 5.52 +# ['F-Secure Antivirus for Linux servers', +# ['/opt/f-secure/fsav/bin/fsav', 'fsav'], +# '--virus-action1=report --archive=yes --auto=yes '. +# '--dumb=yes --list=no --mime=yes {}', [0], [3,4,6,8], +# qr/(?:infection|Infected|Suspected|Riskware): (.+)/m ], + # NOTE: internal archive handling may be switched off by '--archive=no' + # to prevent fsav from exiting with status 9 on broken archives + +# ### http://www.avast.com/ +# ['avast! Antivirus daemon', +# \&ask_daemon, # greets with 220, terminate with QUIT +# ["SCAN {}\015\012QUIT\015\012", '/var/run/avast4/mailscanner.sock'], +# qr/\t\[\+\]/m, qr/\t\[L\]\t/m, qr/\t\[L\]\t([^[ \t\015\012]+)/m ], + +# ### http://www.avast.com/ +# ['avast! Antivirus - Client/Server Version', 'avastlite', +# '-a /var/run/avast4/mailscanner.sock -n {}', [0], [1], +# qr/\t\[L\]\t([^[ \t\015\012]+)/m ], + +# ['CAI InoculateIT', 'inocucmd', # retired product +# '-sec -nex {}', [0], [100], +# qr/was infected by virus (.+)/m ], + # see: http://www.flatmtn.com/computer/Linux-Antivirus_CAI.html + + ### http://www3.ca.com/Solutions/Product.asp?ID=156 (ex InoculateIT) +# ['CAI eTrust Antivirus', 'etrust-wrapper', +# '-arc -nex -spm h {}', [0], [101], +# qr/is infected by virus: (.+)/m ], + # NOTE: requires suid wrapper around inocmd32; consider flag: -mod reviewer + # see http://marc.theaimsgroup.com/?l=amavis-user&m=109229779912783 + + ### http://mks.com.pl/english.html +# ['MkS_Vir for Linux (beta)', ['mks32','mks'], +# '-s {}/*', [0], [1,2], +# qr/--[ \t]*(.+)/m ], + + ### http://mks.com.pl/english.html +# ['MkS_Vir daemon', 'mksscan', +# '-s -q {}', [0], [1..7], +# qr/^... (\S+)/m ], + +# ### http://www.nod32.com/, version v2.52 (old) +# ['ESET NOD32 for Linux Mail servers', +# ['/opt/eset/nod32/bin/nod32cli', 'nod32cli'], +# '--subdir --files -z --sfx --rtp --adware --unsafe --pattern --heur '. +# '-w -a --action-on-infected=accept --action-on-uncleanable=accept '. 
+# '--action-on-notscanned=accept {}', +# [0,3], [1,2], qr/virus="([^"]+)"/m ], + +# ### http://www.eset.com/, version v2.7 (old) +# ['ESET NOD32 Linux Mail Server - command line interface', +# ['/usr/bin/nod32cli', '/opt/eset/nod32/bin/nod32cli', 'nod32cli'], +# '--subdir {}', [0,3], [1,2], qr/virus="([^"]+)"/m ], + +# ### http://www.eset.com/, version 2.71.12 +# ['ESET Software ESETS Command Line Interface', +# ['/usr/bin/esets_cli', 'esets_cli'], +# '--subdir {}', [0], [1,2,3], qr/virus="([^"]+)"/m ], + + ### http://www.eset.com/, version 3.0 +# ['ESET Software ESETS Command Line Interface', +# ['/usr/bin/esets_cli', 'esets_cli'], +# '--subdir {}', [0], [1,2,3], +# qr/:\s*action="(?!accepted)[^"]*"\n.*:\s*virus="([^"]*)"/m ], + + ## http://www.nod32.com/, NOD32LFS version 2.5 and above +# ['ESET NOD32 for Linux File servers', +# ['/opt/eset/nod32/sbin/nod32','nod32'], +# '--files -z --mail --sfx --rtp --adware --unsafe --pattern --heur '. +# '-w -a --action=1 -b {}', +# [0], [1,10], qr/^object=.*, virus="(.*?)",/m ], + +# Experimental, based on posting from Rado Dibarbora (Dibo) on 2002-05-31 +# ['ESET Software NOD32 Client/Server (NOD32SS)', +# \&ask_daemon2, # greets with 200, persistent, terminate with QUIT +# ["SCAN {}/*\r\n", '127.0.0.1:8448' ], +# qr/^200 File OK/m, qr/^201 /m, qr/^201 (.+)/m ], + + ### http://www.norman.com/products_nvc.shtml +# ['Norman Virus Control v5 / Linux', 'nvcc', +# '-c -l:0 -s -u -temp:$TEMPBASE {}', [0,10,11], [1,2,14], +# qr/(?i).* virus in .* -> \'(.+)\'/m ], + + ### http://www.pandasoftware.com/ +# ['Panda CommandLineSecure 9 for Linux', +# ['/opt/pavcl/usr/bin/pavcl','pavcl'], +# '-auto -aex -heu -cmp -nbr -nor -nos -eng -nob {}', +# qr/Number of files infected[ .]*: 0+(?!\d)/m, +# qr/Number of files infected[ .]*: 0*[1-9]/m, +# qr/Found virus :\s*(\S+)/m ], + # NOTE: for efficiency, start the Panda in resident mode with 'pavcl -tsr' + # before starting amavisd - the bases are then loaded only once at startup. + # To reload bases in a signature update script: + # /opt/pavcl/usr/bin/pavcl -tsr -ulr; /opt/pavcl/usr/bin/pavcl -tsr + # Please review other options of pavcl, for example: + # -nomalw, -nojoke, -nodial, -nohackt, -nospyw, -nocookies + +# ### http://www.pandasoftware.com/ +# ['Panda Antivirus for Linux', ['pavcl'], +# '-TSR -aut -aex -heu -cmp -nbr -nor -nso -eng {}', +# [0], [0x10, 0x30, 0x50, 0x70, 0x90, 0xB0, 0xD0, 0xF0], +# qr/Found virus :\s*(\S+)/m ], + +# GeCAD AV technology is acquired by Microsoft; RAV has been discontinued. +# Check your RAV license terms before fiddling with the following two lines! +# ['GeCAD RAV AntiVirus 8', 'ravav', +# '--all --archive --mail {}', [1], [2,3,4,5], qr/Infected: (.+)/m ], +# # NOTE: the command line switches changed with scan engine 8.5 ! +# # (btw, assigning stdin to /dev/null causes RAV to fail) + + ### http://www.nai.com/ +# ['NAI McAfee AntiVirus (uvscan)', 'uvscan', +# '--secure -rv --mime --summary --noboot - {}', [0], [13], +# qr/(?x) Found (?: +# \ the\ (.+)\ (?:virus|trojan) | +# \ (?:virus|trojan)\ or\ variant\ ([^ ]+) | +# :\ (.+)\ NOT\ a\ virus)/m, + # sub {$ENV{LD_PRELOAD}='/lib/libc.so.6'}, + # sub {delete $ENV{LD_PRELOAD}}, +# ], + # NOTE1: with RH9: force the dynamic linker to look at /lib/libc.so.6 before + # anything else by setting environment variable LD_PRELOAD=/lib/libc.so.6 + # and then clear it when finished to avoid confusing anything else. 
+ # NOTE2: to treat encrypted files as viruses replace the [13] with: + # qr/^\s{5,}(Found|is password-protected|.*(virus|trojan))/ + + ### http://www.virusbuster.hu/en/ +# ['VirusBuster', ['vbuster', 'vbengcl'], +# "{} -ss -i '*' -log=$MYHOME/vbuster.log", [0], [1], +# qr/: '(.*)' - Virus/m ], + # VirusBuster Ltd. does not support the daemon version for the workstation + # engine (vbuster-eng-1.12-linux-i386-libc6.tgz) any longer. The names of + # binaries, some parameters AND return codes have changed (from 3 to 1). + # See also the new Vexira entry 'vascan' which is possibly related. + +# ### http://www.virusbuster.hu/en/ +# ['VirusBuster (Client + Daemon)', 'vbengd', +# '-f -log scandir {}', [0], [3], +# qr/Virus found = (.*);/m ], +# # HINT: for an infected file it always returns 3, +# # although the man-page tells a different story + + ### http://www.cyber.com/ +# ['CyberSoft VFind', 'vfind', +# '--vexit {}/*', [0], [23], qr/##==>>>> VIRUS ID: CVDL (.+)/m, + # sub {$ENV{VSTK_HOME}='/usr/lib/vstk'}, +# ], + + ### http://www.avast.com/ +# ['avast! Antivirus', ['/usr/bin/avastcmd','avastcmd'], +# '-a -i -n -t=A {}', [0], [1], qr/\binfected by:\s+([^ \t\n\[\]]+)/m ], + + ### http://www.ikarus-software.com/ +# ['Ikarus AntiVirus for Linux', 'ikarus', +# '{}', [0], [40], qr/Signature (.+) found/m ], + + ### http://www.bitdefender.com/ +# ['BitDefender', 'bdscan', # new version +# '--action=ignore --no-list {}', qr/^Infected files\s*:\s*0+(?!\d)/m, +# qr/^(?:Infected files|Identified viruses|Suspect files)\s*:\s*0*[1-9]/m, +# qr/(?:suspected|infected)\s*:\s*(.*)(?:\033|$)/m ], + + ### http://www.bitdefender.com/ +# ['BitDefender', 'bdc', # old version +# '--arc --mail {}', qr/^Infected files *:0+(?!\d)/m, +# qr/^(?:Infected files|Identified viruses|Suspect files) *:0*[1-9]/m, +# qr/(?:suspected|infected): (.*)(?:\033|$)/m ], + # consider also: --all --nowarn --alev=15 --flev=15. The --all argument may + # not apply to your version of bdc, check documentation and see 'bdc --help' + + ### ArcaVir for Linux and Unix http://www.arcabit.pl/ +# ['ArcaVir for Linux', ['arcacmd','arcacmd.static'], +# '-v 1 -summary 0 -s {}', [0], [1,2], +# qr/(?:VIR|WIR):[ \t]*(.+)/m ], + +# ### a generic SMTP-client interface to a SMTP-based virus scanner +# ['av_smtp', \&ask_av_smtp, +# ['{}', 'smtp:[127.0.0.1]:5525', 'dummy@localhost'], +# qr/^2/, qr/^5/, qr/^\s*(.*?)\s*$/m ], + +# ['File::Scan', sub {Amavis::AV::ask_av(sub{ +# use File::Scan; my($fn)=@_; +# my($f)=File::Scan->new(max_txt_size=>0, max_bin_size=>0); +# my($vname) = $f->scan($fn); +# $f->error ? (2,"Error: ".$f->error) +# : ($vname ne '') ? 
(1,"$vname FOUND") : (0,"Clean")}, @_) }, +# ["{}/*"], [0], [1], qr/^(.*) FOUND$/m ], + +# ### fully-fledged checker for JPEG marker segments of invalid length +# ['check-jpeg', +# sub { use JpegTester (); Amavis::AV::ask_av(\&JpegTester::test_jpeg, @_) }, +# ["{}/*"], undef, [1], qr/^(bad jpeg: .*)$/m ], +# # NOTE: place file JpegTester.pm somewhere where Perl can find it, +# # for example in /usr/local/lib/perl5/site_perl + + ['always-clean', sub {0}], +); + + +@av_scanners_backup = ( + + ### http://www.clamav.net/ - backs up clamd or Mail::ClamAV + ['ClamAV-clamscan', 'clamscan', + "--stdout --no-summary -r --tempdir=$TEMPBASE {}", + [0], qr/:.*\sFOUND$/m, qr/^.*?: (?!Infected Archive)(.*) FOUND$/m ], + + ### http://www.f-prot.com/ - backs up F-Prot Daemon, V6 + ['F-PROT Antivirus for UNIX', ['fpscan'], + '--report --mount --adware {}', # consider: --applications -s 4 -u 3 -z 10 + [0,8,64], [1,2,3, 4+1,4+2,4+3, 8+1,8+2,8+3, 12+1,12+2,12+3], + qr/^\[Found\s+[^\]]*\]\s+<([^ \t(>]*)/m ], + + ### http://www.f-prot.com/ - backs up F-Prot Daemon (old) + ['FRISK F-Prot Antivirus', ['f-prot','f-prot.sh'], + '-dumb -archive -packed {}', [0,8], [3,6], # or: [0], [3,6,8], + qr/(?:Infection:|security risk named) (.+)|\s+contains\s+(.+)$/m ], + + ### http://www.trendmicro.com/ - backs up Trophie + ['Trend Micro FileScanner', ['/etc/iscan/vscan','vscan'], + '-za -a {}', [0], qr/Found virus/m, qr/Found virus (.+) in/m ], + + ### http://www.sald.com/, http://drweb.imshop.de/ - backs up DrWebD + ['drweb - DrWeb Antivirus', # security LHA hole in Dr.Web 4.33 and earlier + ['/usr/local/drweb/drweb', '/opt/drweb/drweb', 'drweb'], + '-path={} -al -go -ot -cn -upn -ok-', + [0,32], [1,9,33], qr' infected (?:with|by)(?: virus)? (.*)$'m ], + + ### http://www.kaspersky.com/ + ['Kaspersky Antivirus v5.5', + ['/opt/kaspersky/kav4fs/bin/kav4fs-kavscanner', + '/opt/kav/5.5/kav4unix/bin/kavscanner', + '/opt/kav/5.5/kav4mailservers/bin/kavscanner', 'kavscanner'], + '-i0 -xn -xp -mn -R -ePASBME {}/*', [0,10,15], [5,20,21,25], + qr/(?:INFECTED|WARNING|SUSPICION|SUSPICIOUS) (.*)/m, +# sub {chdir('/opt/kav/bin') or die "Can't chdir to kav: $!"}, +# sub {chdir($TEMPBASE) or die "Can't chdir back to $TEMPBASE $!"}, + ], + +# Commented out because the name 'sweep' clashes with Debian and FreeBSD +# package/port of an audio editor. Make sure the correct 'sweep' is found +# in the path when enabling. +# +# ### http://www.sophos.com/ - backs up Sophie or SAVI-Perl +# ['Sophos Anti Virus (sweep)', 'sweep', +# '-nb -f -all -rec -ss -sc -archive -cab -mime -oe -tnef '. +# '--no-reset-atime {}', +# [0,2], qr/Virus .*? found/m, +# qr/^>>> Virus(?: fragment)? '?(.*?)'? found/m, +# ], +# # other options to consider: -idedir=/usr/local/sav + +# Always succeeds and considers mail clean. +# Potentially useful when all other scanners fail and it is desirable +# to let mail continue to flow with no virus checking (when uncommented). 
+ ['always-clean', sub {0}], + +); + + +1; # insure a defined return value diff --git a/modules/apache/manifests/base.pp b/modules/apache/manifests/base.pp new file mode 100644 index 00000000..4e1d6ed4 --- /dev/null +++ b/modules/apache/manifests/base.pp @@ -0,0 +1,37 @@ +class apache::base { + include apache::var + + $conf_d = '/etc/httpd/conf/conf.d' + + package { 'apache': + alias => 'apache-server', + } + + service { 'httpd': + alias => 'apache', + subscribe => [ Package['apache-server'] ], + } + + exec { 'apachectl configtest': + refreshonly => true, + notify => Service['apache'], + } + + apache::config { + "${conf_d}/no_hidden_file_dir.conf": + content => template('apache/no_hidden_file_dir.conf'), + require => Package[$apache::var::pkg_conf]; + "${conf_d}/customization.conf": + content => template('apache/customization.conf'), + require => Package[$apache::var::pkg_conf]; + '/etc/httpd/conf/vhosts.d/00_default_vhosts.conf': + content => template('apache/00_default_vhosts.conf'), + require => Package[$apache::var::pkg_conf]; + '/etc/httpd/conf/modules.d/50_mod_deflate.conf': + content => template('apache/50_mod_deflate.conf'); + } + + file { '/etc/logrotate.d/httpd': + content => template('apache/logrotate') + } +} diff --git a/modules/apache/manifests/config.pp b/modules/apache/manifests/config.pp new file mode 100644 index 00000000..0ff0962c --- /dev/null +++ b/modules/apache/manifests/config.pp @@ -0,0 +1,6 @@ +define apache::config($content) { + file { $name: + content => $content, + notify => Exec['apachectl configtest'], + } +} diff --git a/modules/apache/manifests/cve-2011-3192.pp b/modules/apache/manifests/cve-2011-3192.pp new file mode 100644 index 00000000..1e39ac04 --- /dev/null +++ b/modules/apache/manifests/cve-2011-3192.pp @@ -0,0 +1,9 @@ +class apache::cve-2011-3192 { + include apache::base + # temporary protection against CVE-2011-3192 + # https://httpd.apache.org/security/CVE-2011-3192.txt + apache::config { + "${apache::base::conf_d}/CVE-2011-3192.conf": + content => template('apache/CVE-2011-3192.conf'), + } +} diff --git a/modules/apache/manifests/init.pp b/modules/apache/manifests/init.pp index e8f7a575..40779d4d 100644 --- a/modules/apache/manifests/init.pp +++ b/modules/apache/manifests/init.pp @@ -1,156 +1,25 @@ class apache { - - class base { - package { "apache-mpm-prefork": - alias => apache, - ensure => installed - } - - service { httpd: - alias => apache, - ensure => running, - subscribe => [ Package['apache-mpm-prefork'] ], - } - - file { "customization.conf": - ensure => present, - path => "/etc/httpd/conf.d/customization.conf", - content => template("apache/customization.conf"), - require => Package["apache"], - notify => Service["apache"], - owner => root, - group => root, - mode => 644, - } - - file { "00_default_vhosts.conf": - path => "/etc/httpd/conf/vhosts.d/00_default_vhosts.conf", - ensure => "present", - owner => root, - group => root, - mode => 644, - notify => Service['apache'], - content => template("apache/00_default_vhosts.conf") - } - } - - class mod_php inherits base { - package { "apache-mod_php": - ensure => installed - } - } - - class mod_perl inherits base { - package { "apache-mod_perl": - ensure => installed - } - } - - class mod_fcgid inherits base { - package { "apache-mod_fcgid": - ensure => installed - } - } - - class mod_fastcgi inherits base { - package { "apache-mod_fastcgi": - ensure => installed - } - } - - class mod_ssl inherits base { - package { "apache-mod_ssl": - ensure => installed - } - } - - class mod_wsgi 
inherits base { - package { "apache-mod_wsgi": - ensure => installed - } - - file { "/usr/local/lib/wsgi": - ensure => directory, - owner => root, - group => root, - mode => 644, + define vhost_simple($location) { + include apache::base + apache::vhost::base { $name: + location => $location, } - } - - define vhost_redirect_ssl() { - file { "redirect_ssl_$name.conf": - path => "/etc/httpd/conf/vhosts.d/redirect_ssl_$name.conf", - ensure => "present", - owner => root, - group => root, - mode => 644, - notify => Service['apache'], - content => template("apache/vhost_ssl_redirect.conf") + apache::vhost::base { "ssl_${name}": + vhost => $name, + use_ssl => true, + location => $location, } } - define vhost_catalyst_app($script, $location = '', $process = 4, $use_ssl = false) { - - include apache::mod_fastcgi - - file { "$name.conf": - path => "/etc/httpd/conf/vhosts.d/$name.conf", - ensure => "present", - owner => root, - group => root, - mode => 644, - notify => Service['apache'], - content => template("apache/vhost_catalyst_app.conf") + define vhost_redirect($url, + $vhost = false, + $use_ssl = false) { + include apache::base + apache::vhost::base { $name: + use_ssl => $use_ssl, + vhost => $vhost, + content => template("apache/vhost_redirect.conf"), } } - define vhost_django_app($module, $module_path = '/usr/share') { - include apache::mod_wsgi - - file { "$name.conf": - path => "/etc/httpd/conf/vhosts.d/$name.conf", - ensure => "present", - owner => root, - group => root, - mode => 644, - notify => Service['apache'], - content => template("apache/vhost_django_app.conf") - } - - # fichier django wsgi - file { "$name.wsgi": - path => "/usr/local/lib/wsgi/$name.wsgi", - ensure => "present", - owner => root, - group => root, - mode => 755, - notify => Service['apache'], - content => template("apache/django.wsgi") - } - } - - define vhost_other_app($vhost_file) { - file { "$name.conf": - path => "/etc/httpd/conf/vhosts.d/$name.conf", - ensure => "present", - owner => root, - group => root, - mode => 644, - notify => Service['apache'], - content => template($vhost_file) - } - } - - define webapp_other($webapp_file) { - $webappname = $name - file { "webapp_$name.conf": - path => "/etc/httpd/conf/webapps.d/$webappname.conf", - ensure => "present", - owner => root, - group => root, - mode => 644, - notify => Service['apache'], - content => template($webapp_file) - } - } } diff --git a/modules/apache/manifests/mod/fastcgi.pp b/modules/apache/manifests/mod/fastcgi.pp new file mode 100644 index 00000000..2b421291 --- /dev/null +++ b/modules/apache/manifests/mod/fastcgi.pp @@ -0,0 +1,5 @@ +class apache::mod::fastcgi { + include apache::base + package { 'apache-mod_fastcgi': } +} + diff --git a/modules/apache/manifests/mod/fcgid.pp b/modules/apache/manifests/mod/fcgid.pp new file mode 100644 index 00000000..b8186a64 --- /dev/null +++ b/modules/apache/manifests/mod/fcgid.pp @@ -0,0 +1,11 @@ +class apache::mod::fcgid { + include apache::base + package { 'apache-mod_fcgid': } + + file { 'urlescape': + path => '/usr/local/bin/urlescape', + mode => '0755', + notify => Service['apache'], + content => template('apache/urlescape'), + } +} diff --git a/modules/apache/manifests/mod/geoip.pp b/modules/apache/manifests/mod/geoip.pp new file mode 100644 index 00000000..7f5516bc --- /dev/null +++ b/modules/apache/manifests/mod/geoip.pp @@ -0,0 +1,4 @@ +class apache::mod::geoip { + include apache::base + package { 'apache-mod_geoip': } +} diff --git a/modules/apache/manifests/mod/perl.pp 
b/modules/apache/manifests/mod/perl.pp
new file mode 100644
index 00000000..2c52bf50
--- /dev/null
+++ b/modules/apache/manifests/mod/perl.pp
@@ -0,0 +1,4 @@
+class apache::mod::perl {
+    include apache::base
+    package { 'apache-mod_perl': }
+}
diff --git a/modules/apache/manifests/mod/php.pp b/modules/apache/manifests/mod/php.pp
new file mode 100644
index 00000000..2c8d6733
--- /dev/null
+++ b/modules/apache/manifests/mod/php.pp
@@ -0,0 +1,10 @@
+class apache::mod::php {
+    include apache::base
+    $php_date_timezone = 'UTC'
+
+    package { 'apache-mod_php': }
+
+    apache::config { "${apache::base::conf_d}/mod_php.conf":
+        content => template('apache/mod/php.conf'),
+    }
+}
diff --git a/modules/apache/manifests/mod/proxy.pp b/modules/apache/manifests/mod/proxy.pp
new file mode 100644
index 00000000..80180d62
--- /dev/null
+++ b/modules/apache/manifests/mod/proxy.pp
@@ -0,0 +1,4 @@
+class apache::mod::proxy {
+    include apache::base
+    package { 'apache-mod_proxy': }
+}
diff --git a/modules/apache/manifests/mod/public_html.pp b/modules/apache/manifests/mod/public_html.pp
new file mode 100644
index 00000000..b5691b53
--- /dev/null
+++ b/modules/apache/manifests/mod/public_html.pp
@@ -0,0 +1,4 @@
+class apache::mod::public_html {
+    include apache::base
+    package { 'apache-mod_public_html': }
+}
diff --git a/modules/apache/manifests/mod/ssl.pp b/modules/apache/manifests/mod/ssl.pp
new file mode 100644
index 00000000..ab3d24e4
--- /dev/null
+++ b/modules/apache/manifests/mod/ssl.pp
@@ -0,0 +1,20 @@
+class apache::mod::ssl {
+    include apache::base
+    file { '/etc/ssl/apache/':
+        ensure => directory
+    }
+
+    openssl::self_signed_cert{ 'localhost':
+        directory => '/etc/ssl/apache/',
+        before    => Apache::Config['/etc/httpd/conf/vhosts.d/01_default_ssl_vhost.conf'],
+    }
+
+    package { 'apache-mod_ssl': }
+
+    apache::config {
+        '/etc/httpd/conf/vhosts.d/01_default_ssl_vhost.conf':
+            content => template('apache/01_default_ssl_vhost.conf');
+        "${apache::base::conf_d}/ssl_vhost.conf":
+            content => template('apache/mod/ssl_vhost.conf');
+    }
+}
diff --git a/modules/apache/manifests/mod/wsgi.pp b/modules/apache/manifests/mod/wsgi.pp
new file mode 100644
index 00000000..7f4fb719
--- /dev/null
+++ b/modules/apache/manifests/mod/wsgi.pp
@@ -0,0 +1,12 @@
+class apache::mod::wsgi {
+    include apache::base
+    package { 'apache-mod_wsgi': }
+
+    file { '/usr/local/lib/wsgi':
+        ensure => directory,
+    }
+
+    apache::config { "${apache::base::conf_d}/mod_wsgi.conf":
+        content => template('apache/mod/wsgi.conf'),
+    }
+}
diff --git a/modules/apache/manifests/var.pp b/modules/apache/manifests/var.pp
new file mode 100644
index 00000000..4a6d68eb
--- /dev/null
+++ b/modules/apache/manifests/var.pp
@@ -0,0 +1,12 @@
+# $httpdlogs_rotate:
+#   number of time the log file are rotated before being removed
+# $default_vhost_redirect:
+#   URL to redirect to in case of unknown vhost
+class apache::var(
+    $httpdlogs_rotate = '24',
+    $apache_user = 'apache',
+    $apache_group = 'apache',
+    $default_vhost_redirect = ''
+) {
+    $pkg_conf = 'apache'
+}
diff --git a/modules/apache/manifests/vhost/base.pp b/modules/apache/manifests/vhost/base.pp
new file mode 100644
index 00000000..27a19998
--- /dev/null
+++ b/modules/apache/manifests/vhost/base.pp
@@ -0,0 +1,50 @@
+define apache::vhost::base ($content = '',
+                            $location = '/dev/null',
+                            $use_ssl = false,
+                            $vhost = false,
+                            $aliases = {},
+                            $server_aliases = [],
+                            $access_logfile = false,
+                            $error_logfile = false,
+                            $options = [],
+                            $enable_public_html = false,
+                            $enable_location = true) {
+    include apache::base
+    $httpd_logdir = '/var/log/httpd'
+    $filename = "${name}.conf"
+
+    if ! $vhost {
+        $real_vhost = $name
+    } else {
+        $real_vhost = $vhost
+    }
+
+    if ! $access_logfile {
+        $real_access_logfile = "${httpd_logdir}/${real_vhost}-access_log"
+    } else {
+        $real_access_logfile = $access_logfile
+    }
+    if ! $error_logfile {
+        $real_error_logfile = "${httpd_logdir}/${real_vhost}-error_log"
+    } else {
+        $real_error_logfile = $error_logfile
+    }
+
+    if $use_ssl {
+        include apache::mod::ssl
+        if $wildcard_sslcert != true {
+            openssl::self_signed_cert{ $real_vhost:
+                directory => '/etc/ssl/apache/',
+                before    => Apache::Config["/etc/httpd/conf/vhosts.d/${filename}"],
+            }
+        }
+    }
+
+    if $enable_public_html {
+        include apache::mod::public_html
+    }
+
+    apache::config { "/etc/httpd/conf/vhosts.d/${filename}":
+        content => template('apache/vhost_base.conf')
+    }
+}
diff --git a/modules/apache/manifests/vhost/catalyst_app.pp b/modules/apache/manifests/vhost/catalyst_app.pp
new file mode 100644
index 00000000..1ce40747
--- /dev/null
+++ b/modules/apache/manifests/vhost/catalyst_app.pp
@@ -0,0 +1,24 @@
+define apache::vhost::catalyst_app( $script,
+                                    $location = '',
+                                    $process = 4,
+                                    $use_ssl = false,
+                                    $aliases = {},
+                                    $vhost = false) {
+    include apache::mod::fcgid
+    if ($location) {
+        $aliases['/static'] = "${location}/root/static"
+    }
+
+    $script_aliases = {
+        '/' => "$script/",
+    }
+
+    apache::vhost::base { $name:
+        vhost   => $vhost,
+        use_ssl => $use_ssl,
+        content => template('apache/vhost_fcgid.conf'),
+        aliases => $aliases,
+    }
+}
+
+
diff --git a/modules/apache/manifests/vhost/django_app.pp b/modules/apache/manifests/vhost/django_app.pp
new file mode 100644
index 00000000..91974acd
--- /dev/null
+++ b/modules/apache/manifests/vhost/django_app.pp
@@ -0,0 +1,22 @@
+define apache::vhost::django_app ($module = false,
+                                  $module_path = false,
+                                  $use_ssl = false,
+                                  $aliases= {}) {
+    include apache::mod::wsgi
+    apache::vhost::base { $name:
+        use_ssl => $use_ssl,
+        content => template('apache/vhost_django_app.conf'),
+        aliases => $aliases,
+    }
+
+    # module is a ruby reserved keyword, cannot be used in templates
+    $django_module = $module
+    file { "${name}.wsgi":
+        path    => "/usr/local/lib/wsgi/${name}.wsgi",
+        mode    => '0755',
+        notify  => Service['apache'],
+        content => template('apache/django.wsgi'),
+    }
+}
+
+
diff --git a/modules/apache/manifests/vhost/other_app.pp b/modules/apache/manifests/vhost/other_app.pp
new file mode 100644
index 00000000..f5a71574
--- /dev/null
+++ b/modules/apache/manifests/vhost/other_app.pp
@@ -0,0 +1,6 @@
+define apache::vhost::other_app($vhost_file) {
+    include apache::base
+    apache::config { "/etc/httpd/conf/vhosts.d/${name}.conf":
+        content => template($vhost_file),
+    }
+}
diff --git a/modules/apache/manifests/vhost/redirect_ssl.pp b/modules/apache/manifests/vhost/redirect_ssl.pp
new file mode 100644
index 00000000..22a4d4f6
--- /dev/null
+++ b/modules/apache/manifests/vhost/redirect_ssl.pp
@@ -0,0 +1,6 @@
+define apache::vhost::redirect_ssl() {
+    apache::vhost::base { "redirect_ssl_${name}":
+        vhost   => $name,
+        content => template('apache/vhost_ssl_redirect.conf')
+    }
+}
diff --git a/modules/apache/manifests/vhost/reverse_proxy.pp b/modules/apache/manifests/vhost/reverse_proxy.pp
new file mode 100644
index 00000000..a32aaff0
--- /dev/null
+++ b/modules/apache/manifests/vhost/reverse_proxy.pp
@@ -0,0 +1,11 @@
+define apache::vhost::reverse_proxy($url,
+                                    $vhost = false,
+                                    $use_ssl = false,
+                                    $content = '') {
+    include apache::mod::proxy
+    apache::vhost::base { $name:
+
use_ssl => $use_ssl, + vhost => $vhost, + content => template('apache/vhost_reverse_proxy.conf') + } +} diff --git a/modules/apache/manifests/vhost/wsgi.pp b/modules/apache/manifests/vhost/wsgi.pp new file mode 100644 index 00000000..291c6d71 --- /dev/null +++ b/modules/apache/manifests/vhost/wsgi.pp @@ -0,0 +1,10 @@ +define apache::vhost::wsgi ($wsgi_path, + $aliases = {}, + $server_aliases = []) { + include apache::mod::wsgi + apache::vhost::base { $name: + aliases => $aliases, + server_aliases => $server_aliases, + content => template('apache/vhost_wsgi.conf'), + } +} diff --git a/modules/apache/manifests/webapp_other.pp b/modules/apache/manifests/webapp_other.pp new file mode 100644 index 00000000..147a2370 --- /dev/null +++ b/modules/apache/manifests/webapp_other.pp @@ -0,0 +1,7 @@ +define apache::webapp_other($webapp_file) { + include apache::base + $webappname = $name + apache::config { "/etc/httpd/conf/webapps.d/${webappname}.conf": + content => template($webapp_file), + } +} diff --git a/modules/apache/templates/00_default_vhosts.conf b/modules/apache/templates/00_default_vhosts.conf index 25f59b5e..9a5f586c 100644 --- a/modules/apache/templates/00_default_vhosts.conf +++ b/modules/apache/templates/00_default_vhosts.conf @@ -3,5 +3,13 @@ <Location /> Allow from all </Location> - Redirect / http://www.<%= domain %>/ + <%- + default_redirect = scope.lookupvar('apache::var::default_vhost_redirect') + if default_redirect == '' + -%> + Redirect 404 / + ErrorDocument 404 "Page Not Found" + <%- else -%> + Redirect / <%= default_redirect %> + <%- end -%> </VirtualHost> diff --git a/modules/apache/templates/01_default_ssl_vhost.conf b/modules/apache/templates/01_default_ssl_vhost.conf new file mode 100644 index 00000000..323bf145 --- /dev/null +++ b/modules/apache/templates/01_default_ssl_vhost.conf @@ -0,0 +1,169 @@ +<IfDefine HAVE_SSL> + <IfModule !mod_ssl.c> + LoadModule ssl_module modules/mod_ssl.so + </IfModule> +</IfDefine> + +<IfModule mod_ssl.c> + +## +## SSL Virtual Host Context +## + +<VirtualHost _default_:443> + +# General setup for the virtual host +DocumentRoot "/var/www/html" +#ServerName localhost:443 +ServerAdmin root@<%= @domain %> +ErrorLog logs/ssl_error_log + +<IfModule mod_log_config.c> + TransferLog logs/ssl_access_log +</IfModule> + +# SSL Engine Switch: +# Enable/Disable SSL for this virtual host. +SSLEngine on + +# SSL Cipher Suite: +# List the ciphers that the client is permitted to negotiate. +# See the mod_ssl documentation for a complete list. +SSLHonorCipherOrder On +SSLCipherSuite ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+3DES:!aNULL:!MD5:!DSS + + +# SSL Protocol support: +# List the enable protocol levels with which clients will be able to +# connect. 
Disable SSLv2/v3 access by default: +SSLProtocol ALL -SSLv2 -SSLv3 + +<%- if @wildcard_sslcert == 'true' then -%> +SSLCertificateFile /etc/ssl/wildcard.<%= @domain %>.crt +SSLCertificateKeyFile /etc/ssl/wildcard.<%= @domain %>.key +SSLCACertificateFile /etc/ssl/wildcard.<%= @domain %>.pem +SSLVerifyClient None +<%- else -%> +SSLCertificateFile /etc/ssl/apache/localhost.pem +SSLCertificateKeyFile /etc/ssl/apache/localhost.pem +#SSLCertificateChainFile /etc/pki/tls/certs/server-chain.crt +#SSLCACertificateFile /etc/pki/tls/certs/ca-bundle.crt +<%- end -%> + +# Certificate Revocation Lists (CRL): +# Set the CA revocation path where to find CA CRLs for client +# authentication or alternatively one huge file containing all +# of them (file must be PEM encoded) +# Note: Inside SSLCARevocationPath you need hash symlinks +# to point to the certificate files. Use the provided +# Makefile to update the hash symlinks after changes. +#SSLCARevocationPath /etc/pki/tls/certs/ssl.crl +#SSLCARevocationFile /etc/pki/tls/certs/ca-bundle.crl + +# Client Authentication (Type): +# Client certificate verification type and depth. Types are +# none, optional, require and optional_no_ca. Depth is a +# number which specifies how deeply to verify the certificate +# issuer chain before deciding the certificate is not valid. +#SSLVerifyClient require +#SSLVerifyDepth 10 + +# Access Control: +# With SSLRequire you can do per-directory access control based +# on arbitrary complex boolean expressions containing server +# variable checks and other lookup directives. The syntax is a +# mixture between C and Perl. See the mod_ssl documentation +# for more details. +#<Location /> +#SSLRequire ( %{SSL_CIPHER} !~ m/^(EXP|NULL)/ \ +# and %{SSL_CLIENT_S_DN_O} eq "Snake Oil, Ltd." \ +# and %{SSL_CLIENT_S_DN_OU} in {"Staff", "CA", "Dev"} \ +# and %{TIME_WDAY} >= 1 and %{TIME_WDAY} <= 5 \ +# and %{TIME_HOUR} >= 8 and %{TIME_HOUR} <= 20 ) \ +# or %{REMOTE_ADDR} =~ m/^192\.76\.162\.[0-9]+$/ +#</Location> + +# SSL Engine Options: +# Set various options for the SSL engine. +# o FakeBasicAuth: +# Translate the client X.509 into a Basic Authorisation. This means that +# the standard Auth/DBMAuth methods can be used for access control. The +# user name is the `one line' version of the client's X.509 certificate. +# Note that no password is obtained from the user. Every entry in the user +# file needs this password: `xxj31ZMTZzkVA'. +# o ExportCertData: +# This exports two additional environment variables: SSL_CLIENT_CERT and +# SSL_SERVER_CERT. These contain the PEM-encoded certificates of the +# server (always existing) and the client (only existing when client +# authentication is used). This can be used to import the certificates +# into CGI scripts. +# o StdEnvVars: +# This exports the standard SSL/TLS related `SSL_*' environment variables. +# Per default this exportation is switched off for performance reasons, +# because the extraction step is an expensive operation and is usually +# useless for serving static content. So one usually enables the +# exportation for CGI and SSI requests only. +# o StrictRequire: +# This denies access when "SSLRequireSSL" or "SSLRequire" applied even +# under a "Satisfy any" situation, i.e. when it applies access is denied +# and no other module can change it. +# o OptRenegotiate: +# This enables optimized SSL connection renegotiation handling when SSL +# directives are used in per-directory context. 
+#SSLOptions +FakeBasicAuth +ExportCertData +StrictRequire + +<FilesMatch "\.(cgi|shtml|phtml|php)$"> + SSLOptions +StdEnvVars +</FilesMatch> + +<Directory "/var/www/cgi-bin"> + SSLOptions +StdEnvVars +</Directory> + +# SSL Protocol Adjustments: +# The safe and default but still SSL/TLS standard compliant shutdown +# approach is that mod_ssl sends the close notify alert but doesn't wait for +# the close notify alert from client. When you need a different shutdown +# approach you can use one of the following variables: +# o ssl-unclean-shutdown: +# This forces an unclean shutdown when the connection is closed, i.e. no +# SSL close notify alert is send or allowed to received. This violates +# the SSL/TLS standard but is needed for some brain-dead browsers. Use +# this when you receive I/O errors because of the standard approach where +# mod_ssl sends the close notify alert. +# o ssl-accurate-shutdown: +# This forces an accurate shutdown when the connection is closed, i.e. a +# SSL close notify alert is send and mod_ssl waits for the close notify +# alert of the client. This is 100% SSL/TLS standard compliant, but in +# practice often causes hanging connections with brain-dead browsers. Use +# this only for browsers where you know that their SSL implementation +# works correctly. +# Notice: Most problems of broken clients are also related to the HTTP +# keep-alive facility, so you usually additionally want to disable +# keep-alive for those clients, too. Use variable "nokeepalive" for this. +# Similarly, one has to force some clients to use HTTP/1.0 to workaround +# their broken HTTP/1.1 implementation. Use variables "downgrade-1.0" and +# "force-response-1.0" for this. + +<IfModule mod_setenvif.c> + BrowserMatch ".*MSIE.*" nokeepalive ssl-unclean-shutdown \ + downgrade-1.0 force-response-1.0 +</IfModule> + +# Per-Server Logging: +# The home of a custom SSL log file. Use this when you want a +# compact non-error SSL logfile on a virtual host basis. 
+ +<IfModule mod_log_config.c> + CustomLog logs/ssl_request_log \ + "%t %h %{SSL_PROTOCOL}x %{SSL_CIPHER}x \"%r\" %b" +</IfModule> + +<IfModule mod_rewrite.c> + RewriteEngine On + RewriteOptions inherit +</IfModule> + +</VirtualHost> + +</IfModule> diff --git a/modules/apache/templates/50_mod_deflate.conf b/modules/apache/templates/50_mod_deflate.conf new file mode 100644 index 00000000..5192bf6e --- /dev/null +++ b/modules/apache/templates/50_mod_deflate.conf @@ -0,0 +1,36 @@ +<IfModule mod_deflate.c> + # Compress HTML, CSS, JavaScript, JSON, Text, XML and fonts + AddOutputFilterByType DEFLATE application/javascript + AddOutputFilterByType DEFLATE application/json + AddOutputFilterByType DEFLATE application/rss+xml + AddOutputFilterByType DEFLATE application/vnd.ms-fontobject + AddOutputFilterByType DEFLATE application/x-font + AddOutputFilterByType DEFLATE application/x-font-opentype + AddOutputFilterByType DEFLATE application/x-font-otf + AddOutputFilterByType DEFLATE application/x-font-truetype + AddOutputFilterByType DEFLATE application/x-font-ttf + AddOutputFilterByType DEFLATE application/x-javascript + AddOutputFilterByType DEFLATE application/xhtml+xml + AddOutputFilterByType DEFLATE application/xml + AddOutputFilterByType DEFLATE font/opentype + AddOutputFilterByType DEFLATE font/otf + AddOutputFilterByType DEFLATE font/ttf + AddOutputFilterByType DEFLATE image/svg+xml + AddOutputFilterByType DEFLATE image/x-icon + AddOutputFilterByType DEFLATE text/css + AddOutputFilterByType DEFLATE text/html + AddOutputFilterByType DEFLATE text/javascript + AddOutputFilterByType DEFLATE text/plain + AddOutputFilterByType DEFLATE text/xml + + # Level of compression (9=highest compression level) + DeflateCompressionLevel 1 + + # Do not compress certain file types + SetEnvIfNoCase Request_URI \.(?:gif|jpe?g|png|heif|heic|webp|mp4|mov|mpg|webm|avi)$ no-gzip dont-vary + SetEnvIfNoCase Request_URI \.(?:exe|t?gz|zip|bz2|xz|zst|lzo|lzma|sit|rar|cab|rpm)$ no-gzip dont-vary + SetEnvIfNoCase Request_URI \.pdf$ no-gzip dont-vary + + # Make sure proxies don't deliver the wrong content + Header append Vary User-Agent env=!dont-vary +</IfModule> diff --git a/modules/apache/templates/CVE-2011-3192.conf b/modules/apache/templates/CVE-2011-3192.conf new file mode 100644 index 00000000..25751adc --- /dev/null +++ b/modules/apache/templates/CVE-2011-3192.conf @@ -0,0 +1,12 @@ + # Drop the Range header when more than 5 ranges. + # CVE-2011-3192 + SetEnvIf Range (?:,.*?){5,5} bad-range=1 + RequestHeader unset Range env=bad-range + + # We always drop Request-Range; as this is a legacy + # dating back to MSIE3 and Netscape 2 and 3. + # + RequestHeader unset Request-Range + + # optional logging. 
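+ # the log below only records requests where the bad-range variable was set above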
+ CustomLog logs/range-CVE-2011-3192.log common env=bad-range
diff --git a/modules/apache/templates/customization.conf b/modules/apache/templates/customization.conf
index 81424c42..41e15e3a 100644
--- a/modules/apache/templates/customization.conf
+++ b/modules/apache/templates/customization.conf
@@ -1,2 +1 @@
 NameVirtualHost *:80
-NameVirtualHost *:443
diff --git a/modules/apache/templates/django.wsgi b/modules/apache/templates/django.wsgi
index 90521653..2188e1e7 100644
--- a/modules/apache/templates/django.wsgi
+++ b/modules/apache/templates/django.wsgi
@@ -1,7 +1,16 @@
 #!/usr/bin/python
 import os, sys
-sys.path.append('<%= module_path %>')
-os.environ['DJANGO_SETTINGS_MODULE'] = '<%= module %>.settings'
+<%- for m in module_path -%>
+path = '<%= m %>'
+if path not in sys.path:
+    sys.path.append(path)
+<%- end -%>
+
+<%- if @django_module -%>
+os.environ['DJANGO_SETTINGS_MODULE'] = '<%= @django_module %>.settings'
+<%- else -%>
+os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
+<%- end -%>
 import django.core.handlers.wsgi
diff --git a/modules/apache/templates/logrotate b/modules/apache/templates/logrotate
new file mode 100644
index 00000000..823989eb
--- /dev/null
+++ b/modules/apache/templates/logrotate
@@ -0,0 +1,23 @@
+/var/log/httpd/*_log /var/log/httpd/apache_runtime_status /var/log/httpd/ssl_mutex {
+<% if @hostname == 'duvel' %>
+    rotate 60
+    daily
+<% elsif @hostname == 'friteuse' %>
+    # The virtual disk is very small so keep log sizes down
+    rotate 26
+    weekly
+<% elsif @hostname == 'sucuk' %>
+    rotate 52
+    weekly
+<% else %>
+    rotate <%= scope.lookupvar('apache::var::httpdlogs_rotate') %>
+    monthly
+<% end %>
+    missingok
+    notifempty
+    sharedscripts
+    compress
+    postrotate
+        /bin/systemctl restart httpd.service > /dev/null 2>/dev/null || true
+    endscript
+}
diff --git a/modules/apache/templates/mod/php.conf b/modules/apache/templates/mod/php.conf
new file mode 100644
index 00000000..8bc20078
--- /dev/null
+++ b/modules/apache/templates/mod/php.conf
@@ -0,0 +1,5 @@
+# as PHP insists on having this value set, let's
+# look it up on the system
+php_value date.timezone "<%= @php_date_timezone %>"
+php_admin_value sendmail_path "/usr/sbin/sendmail -t -i -f root@<%= @domain %>"
+
diff --git a/modules/apache/templates/mod/ssl_vhost.conf b/modules/apache/templates/mod/ssl_vhost.conf
new file mode 100644
index 00000000..bcfe8201
--- /dev/null
+++ b/modules/apache/templates/mod/ssl_vhost.conf
@@ -0,0 +1 @@
+NameVirtualHost *:443
diff --git a/modules/apache/templates/mod/wsgi.conf b/modules/apache/templates/mod/wsgi.conf
new file mode 100644
index 00000000..18678bc6
--- /dev/null
+++ b/modules/apache/templates/mod/wsgi.conf
@@ -0,0 +1,12 @@
+# https://code.google.com/p/modwsgi/wiki/ApplicationIssues
+# mainly for viewvc at the moment, when doing a diff
+WSGIRestrictStdout Off
+# again for viewvc:
+# mod_wsgi (pid=20083): Callback registration for signal 15 ignored.
+# no bug reported upstream yet :/
+# WSGIRestrictSignal Off
+# re-enabled, as this prevented apache from restarting properly
+
+# make sure the transifex client works fine, as we need wsgi to pass the authorisation
+# header to django (otherwise this just shows error 401)
+WSGIPassAuthorization On
diff --git a/modules/apache/templates/no_hidden_file_dir.conf b/modules/apache/templates/no_hidden_file_dir.conf
new file mode 100644
index 00000000..dce78912
--- /dev/null
+++ b/modules/apache/templates/no_hidden_file_dir.conf
@@ -0,0 +1,4 @@
+#
+# don't serve up any hidden files or dirs like .git*, .svn, ...
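+# note: the pattern below also hides /.well-known/ (e.g. ACME http-01
+# challenges); add an exception there if that is ever needed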
+# +RedirectMatch 404 /\..*$ diff --git a/modules/apache/templates/urlescape b/modules/apache/templates/urlescape new file mode 100644 index 00000000..8feb7fa4 --- /dev/null +++ b/modules/apache/templates/urlescape @@ -0,0 +1,9 @@ +#!/usr/bin/python3 -u +# URL escape each path given on stdin +import sys +import urllib.parse +while True: + l = sys.stdin.readline() + if not l: + break + print(urllib.parse.quote(l.rstrip("\n"))) diff --git a/modules/apache/templates/vhost_base.conf b/modules/apache/templates/vhost_base.conf new file mode 100644 index 00000000..da26b683 --- /dev/null +++ b/modules/apache/templates/vhost_base.conf @@ -0,0 +1,53 @@ +<%- if @use_ssl then + port = 443 +else + port = 80 +end +-%> + +<VirtualHost *:<%= port %>> +<%- if @use_ssl then -%> +<%= scope.function_template(["apache/vhost_ssl.conf"]) %> +<%- end -%> + ServerName <%= @real_vhost %> +<%- @server_aliases.each do |key| -%> + ServerAlias <%= key %> +<%- end -%> + DocumentRoot <%= @location %> + + CustomLog <%= @real_access_logfile %> combined + ErrorLog <%= @real_error_logfile %> + +<%- if @enable_public_html -%> + #TODO add the rest + UserDir public_html +<%- else -%> +<IfModule mod_userdir.c> + UserDir disabled +</IfModule> +<%- end -%> + +<%- @aliases.keys.sort {|a,b| a.size <=> b.size }.reverse.each do |key| -%> + Alias <%= key %> <%= @aliases[key] %> +<%- end -%> + + <%= @content %> + +<%- if @options.length > 0 -%> + <Directory <%= @location %>> + Options <%= @options.join(" ") %> + </Directory> +<%- end -%> + +<%- if @enable_location -%> + <Location /> + <IfModule mod_authz_core.c> + Require all granted + </IfModule> + <IfModule !mod_authz_core.c> + Allow from all + </IfModule> + </Location> +<%- end -%> +</VirtualHost> + diff --git a/modules/apache/templates/vhost_catalyst_app.conf b/modules/apache/templates/vhost_catalyst_app.conf deleted file mode 100644 index 57867fc4..00000000 --- a/modules/apache/templates/vhost_catalyst_app.conf +++ /dev/null @@ -1,30 +0,0 @@ -<% if use_ssl then - port = 443 -else - port = 80 -end -%> - -<VirtualHost *:<%= port %>> -<% if use_ssl then %> - SSLEngine on - #TODO deploy SNI later - SSLCertificateFile /etc/ssl/apache/apache.pem - SSLCertificateKeyFile /etc/ssl/apache/apache.pem -<% end %> - ServerName <%= name %> - # Serve static content directly - DocumentRoot /dev/null -# header - -<% if location then %> - Alias /static <%= location %>/root/static -<% end %> - Alias / <%= script %>/ - FastCgiServer <%= script %> -processes <%= process %> -idle-timeout 30 - - <Location /> - Allow from all - </Location> -</VirtualHost> - diff --git a/modules/apache/templates/vhost_django_app.conf b/modules/apache/templates/vhost_django_app.conf index 9d64865f..d85cf7a9 100644 --- a/modules/apache/templates/vhost_django_app.conf +++ b/modules/apache/templates/vhost_django_app.conf @@ -1,12 +1 @@ -<VirtualHost *:80> - ServerName <%= name %> - # Serve static content directly - DocumentRoot /dev/null - - WSGIScriptAlias / /usr/local/lib/wsgi/<%= name %>.wsgi -#footer - <Location /> - Allow from all - </Location> -</VirtualHost> - +WSGIScriptAlias / /usr/local/lib/wsgi/<%= @name %>.wsgi diff --git a/modules/apache/templates/vhost_fcgid.conf b/modules/apache/templates/vhost_fcgid.conf new file mode 100644 index 00000000..fefa4a49 --- /dev/null +++ b/modules/apache/templates/vhost_fcgid.conf @@ -0,0 +1,6 @@ +AddHandler fcgid-script .pl +<%- @script_aliases.keys.sort {|a,b| a.size <=> b.size }.reverse.each do |key| -%> + ScriptAlias <%= key %> <%= @script_aliases[key] %> +<%- end -%> 
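+# keep a minimum of <%= @process %> fcgid processes per application class
+# and terminate processes that have been idle for more than 30 seconds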
+FcgidMinProcessesPerClass <%= @process %> +FcgidIdleTimeout 30 diff --git a/modules/apache/templates/vhost_fcgid_norobot.conf b/modules/apache/templates/vhost_fcgid_norobot.conf new file mode 100644 index 00000000..0643cac9 --- /dev/null +++ b/modules/apache/templates/vhost_fcgid_norobot.conf @@ -0,0 +1,45 @@ +AddHandler fcgid-script .pl +<%- @script_aliases.keys.sort {|a,b| a.size <=> b.size }.reverse.each do |key| -%> + ScriptAlias <%= key %> <%= @script_aliases[key] %> +<%- end -%> +FcgidMinProcessesPerClass <%= @process %> +FcgidIdleTimeout 30 + +# These robots were scraping the whole of svnweb in 2024-04, causing severe +# load, so they are banned. It's not clear whether they obey robots.txt or +# not (we didn't give them enough of a chance to find out), so we could +# consider giving them a chance to redeem themselves at some point in the +# future. +RewriteEngine on +RewriteCond %{HTTP_USER_AGENT} ClaudeBot|Amazonbot +RewriteRule . - [R=403,L] + +# Block expensive SVN operations on all common robots ("spider" covers a +# bunch). "Expensive" is considered to be most operations other than showing a +# directory or downloading a specific version of a file. +# Note: eliminating view=log and annotate= doesn't make much difference to the +# CPU load when robots are hitting the server in real world operation. +#RewriteCond %{QUERY_STRING} pathrev=|r1= +# Treat anything other than a plain path as "expensive" +RewriteCond %{QUERY_STRING} . +RewriteCond %{HTTP_USER_AGENT} "Googlebot|GoogleOther|bingbot|Yahoo! Slurp|ClaudeBot|Amazonbot|YandexBot|SemrushBot|Barkrowler|DataForSeoBot|PetalBot|facebookexternalhit|GPTBot|ImagesiftBot|spider|Spider|iPod|Trident|Presto" +RewriteRule . - [R=403,L] + +# Only let expensive operations through when a cookie is set. If no cookie is +# set, redirect to a page where it will be set using JavaScript and redirect +# back. This will block requests from user agents that do not support +# JavaScript, which includes many robots. +RewriteMap urlescape prg:/usr/local/bin/urlescape +#RewriteCond %{QUERY_STRING} pathrev=|r1= +# Treat anything other than a plain path as "expensive" +RewriteCond %{QUERY_STRING} . +RewriteCond %{REQUEST_URI} !/_check +RewriteCond %{HTTP_COOKIE} !session=([^;]+) [novary] +RewriteRule . %{REQUEST_SCHEME}://%{SERVER_NAME}:%{SERVER_PORT}/_check?to=%{REQUEST_URI}?${urlescape:%{QUERY_STRING}} [R=302,L] + +# Block abusive spiders by IP address who don't identify themselves in the +# User-Agent: string +RewriteCond expr "-R '47.76.0.0/14' || -R '47.80.0.0/14' || -R '47.208.0.0/16' || -R '47.238.0.0/16' || -R '8.210.0.0/16' || -R '8.218.0.0/16' || -R '188.239.0.0/18' || -R '166.108.192.0/18' || -R '124.243.160.0/19' || -R '101.46.0.0/20'" +RewriteRule . 
- [R=403,L] + +ErrorDocument 403 "<html><body>Impolite robots are not allowed</body></html>" diff --git a/modules/apache/templates/vhost_redirect.conf b/modules/apache/templates/vhost_redirect.conf new file mode 100644 index 00000000..c787311e --- /dev/null +++ b/modules/apache/templates/vhost_redirect.conf @@ -0,0 +1,2 @@ +Redirect / <%= @url %> + diff --git a/modules/apache/templates/vhost_reverse_proxy.conf b/modules/apache/templates/vhost_reverse_proxy.conf new file mode 100644 index 00000000..4859bda3 --- /dev/null +++ b/modules/apache/templates/vhost_reverse_proxy.conf @@ -0,0 +1,15 @@ +<%= @content %> + + ProxyRequests Off + ProxyPreserveHost On + + <Proxy *> + Order deny,allow + Allow from all + </Proxy> +<%- if @url =~ /^https/ -%> + SSLProxyEngine On +<%- end -%> + ProxyPass / <%= @url %> + ProxyPassReverse / <%= @url %> + diff --git a/modules/apache/templates/vhost_simple.conf b/modules/apache/templates/vhost_simple.conf new file mode 100644 index 00000000..77b55287 --- /dev/null +++ b/modules/apache/templates/vhost_simple.conf @@ -0,0 +1,14 @@ +<VirtualHost *:80> + ServerName <%= @name %> + DocumentRoot <%= @location %> + + <Location /> + <IfModule mod_authz_core.c> + Require all granted + </IfModule> + <IfModule !mod_authz_core.c> + Allow from all + </IfModule> + </Location> +</VirtualHost> + diff --git a/modules/apache/templates/vhost_ssl.conf b/modules/apache/templates/vhost_ssl.conf new file mode 100644 index 00000000..0cb52eca --- /dev/null +++ b/modules/apache/templates/vhost_ssl.conf @@ -0,0 +1,13 @@ + SSLEngine on + SSLProtocol ALL -SSLv2 -SSLv3 + SSLHonorCipherOrder On + SSLCipherSuite ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+3DES:!aNULL:!MD5:!DSS + <%- if @wildcard_sslcert == 'true' then -%> + SSLCertificateFile /etc/ssl/wildcard.<%= @domain %>.crt + SSLCertificateKeyFile /etc/ssl/wildcard.<%= @domain %>.key + SSLCACertificateFile /etc/ssl/wildcard.<%= @domain %>.pem + SSLVerifyClient None + <%- else -%> + SSLCertificateFile /etc/ssl/apache/<%= @real_vhost %>.pem + SSLCertificateKeyFile /etc/ssl/apache/<%= @real_vhost %>.pem + <%- end -%> diff --git a/modules/apache/templates/vhost_ssl_redirect.conf b/modules/apache/templates/vhost_ssl_redirect.conf index bb22a2c8..23a7eabe 100644 --- a/modules/apache/templates/vhost_ssl_redirect.conf +++ b/modules/apache/templates/vhost_ssl_redirect.conf @@ -1,4 +1 @@ -<VirtualHost *:80> - ServerName <%= name %> - Redirect / https://<%= name %>/ -</VirtualHost> +Redirect / https://<%= @name %>/ diff --git a/modules/apache/templates/vhost_wsgi.conf b/modules/apache/templates/vhost_wsgi.conf new file mode 100644 index 00000000..2f1ba585 --- /dev/null +++ b/modules/apache/templates/vhost_wsgi.conf @@ -0,0 +1,3 @@ +WSGIScriptAlias / <%= @wsgi_path %> + + diff --git a/modules/auto_installation/manifests/download.rb b/modules/auto_installation/manifests/download.rb new file mode 100644 index 00000000..12cc53bf --- /dev/null +++ b/modules/auto_installation/manifests/download.rb @@ -0,0 +1,21 @@ +define "auto_installation::download::netboot_images", :path, :versions, :archs, :mirror_path, :files do + # example : + # mandriva : + # ftp://ftp.free.fr/pub/Distributions_Linux/MandrivaLinux/devel/%{version}/%{arch}/isolinux/alt0/ + for a in @archs do + for v in @versions do + # uncomment when ruby 1.9 will be stable and used + # mirror_file_path = @mirror_path % { :arch => a, :version => v } + mirror_file_path = @mirror_path.gsub(/%{arch}/, a) + mirror_file_path = 
mirror_file_path.gsub(/%{version}/, v) + for f in @files do + file_name = "#{@path}/#{@name}_#{v}_#{a}_#{f}" + create_resource(:exec, "wget -q #{mirror_file_path}/#{f} -O #{file_name}", + :creates => file_name) + end + end + end +end + + + diff --git a/modules/auto_installation/manifests/init.pp b/modules/auto_installation/manifests/init.pp new file mode 100644 index 00000000..642cddfd --- /dev/null +++ b/modules/auto_installation/manifests/init.pp @@ -0,0 +1,140 @@ +# what should be possible : +# install a base system +# - mandriva +# - mageia +# - others ? ( for testing package ? ) + +# install a server +# - by name, with a valstar clone + +class auto_installation { + class variables { + $pxe_dir = "/var/lib/pxe" + # m/ for menu. There is limitation on the path length so + # while we will likely not hit the limit, it may be easier + $pxe_menu_dir = "${pxe_dir}/pxelinux.cfg/m/" + } + + class download { + import "download.rb" + } + + class pxe_menu inherits variables { + package { 'syslinux': + + } + + file { $pxe_dir: + ensure => directory, + } + + file { "${pxe_dir}/pxelinux.0": + ensure => "/usr/lib/syslinux/pxelinux.0", + } + + file { "${pxe_dir}/menu.c32": + ensure => "/usr/lib/syslinux/menu.c32" + } + + file { "${pxe_dir}/pxelinux.cfg": + ensure => directory, + } + # m for menu, there is some limitation on the path length so I + # prefer to + file { "${pxe_menu_dir}": + ensure => directory, + } + + # TODO make it tag aware + $menu_entries = list_exported_ressources('Auto_installation::Pxe_menu_base') + # default file should have exported resources + file { "${pxe_dir}/pxelinux.cfg/default": + ensure => present, + content => template('auto_installation/default'), + } + Auto_installation::Pxe_menu_base <<| tag == $fqdn |>> + } + + define pxe_menu_base($content) { + include auto_installation::variables + file { "${auto_installation::variables::pxe_menu_dir}/${name}": + ensure => present, + content => $content, + } + } + + define pxe_menu_entry($kernel_path, $append, $label) { + @@auto_installation::pxe_menu_base { $name: + tag => $fqdn, + content => template('auto_installation/menu'), + } + } + + # define pxe_linux_entry + # meant to be exported + # name + # label + # kernel + # append + class netinst_storage { + # to ease the creation of test iso + $netinst_path = "/var/lib/libvirt/netinst" + + file { $netinst_path: + ensure => directory, + require => Package[libvirt-utils], + } + + libvirtd::storage { "netinst": + path => $netinst_path, + require => File[$netinst_path], + } + } + + define download_file($destination_path, $download_url) { + exec { "wget -q -O ${destination_path}/${name} ${download_url}/${name}": + creates => "${destination_path}/${name}", + } + } + + define mandriva_installation_entry($version, $arch = 'x86_64') { + include netinst_storage + $protocol = "ftp" + $server = "ftp.free.fr" + $mirror_url_base = "/pub/Distributions_Linux/MandrivaLinux/" + $mirror_url_middle = $version ? 
{ + "cooker" => "devel/cooker/${arch}/", + default => "official/${version}/${arch}/" + } + $mirror_url = "${mirror_url_base}/${mirror_url_middle}" + + $mirror_url_end = "isolinux/alt0" + + $destination_path = "${netinst_storage::netinst_path}/${name}" + + file { "${destination_path}": + ensure => directory, + } + + $download_url = "${protocol}\\://${server}/${mirror_url}/${mirror_url_end}" + + + download_file { ['all.rdz','vmlinuz']: + destination_path => $destination_path, + download_url => $download_url, + require => File[$destination_path], + } + + pxe_menu_entry { "mandriva_${version}_${arch}": + kernel_path => "${name}/vmlinuz", + label => "Mandriva ${version} ${arch}", + #TODO add autoinst.cfg + append => "${name}/all.rdz useless_thing_accepted=1 lang=fr automatic=int:eth0,netw:dhcp,met:${protocol},ser:${server},dir:${mirror_url} ", + } + } + # + # define a template for autoinst + # - basic installation + # - server installation ( with server name as a parameter ) + +} diff --git a/modules/auto_installation/templates/default b/modules/auto_installation/templates/default new file mode 100644 index 00000000..a9ea8de3 --- /dev/null +++ b/modules/auto_installation/templates/default @@ -0,0 +1,15 @@ +DEFAULT menu.c32 +PROMPT 10 +TIMEOUT 100 +NOESCAPE 1 + +MENU SHIFTKEY 1 +MENU TITLE PXE Boot on <%= fqdn %> + +LABEL local + MENU LABEL Local + localboot 0 + +<% for m in menu_entries %> +INCLUDE pxelinux.cfg/m/<%= m %> +<% end %> diff --git a/modules/auto_installation/templates/menu b/modules/auto_installation/templates/menu new file mode 100644 index 00000000..3d0ce6fa --- /dev/null +++ b/modules/auto_installation/templates/menu @@ -0,0 +1,5 @@ +LABEL <%= name %> + MENU DEFAULT + MENU LABEL Install <%= label %> + kernel <%= kernel_path %> + append <%= append %> diff --git a/modules/bcd/manifests/base.pp b/modules/bcd/manifests/base.pp new file mode 100644 index 00000000..d515f3e1 --- /dev/null +++ b/modules/bcd/manifests/base.pp @@ -0,0 +1,29 @@ +class bcd::base { + include sudo + include bcd + + group { $bcd::login: } + + user { $bcd::login: + home => $bcd::home, + comment => 'User for creating ISOs', + } + + file { [$bcd::public_isos, '/var/lib/bcd']: + ensure => directory, + owner => $bcd::login, + group => $bcd::login, + mode => '0755', + } + + # svn version is used for now + #package { bcd: } + + # needed for qemu-over ssh + package { 'xauth': } + + $isomakers_group = 'mga-iso_makers' + sudo::sudoers_config { 'bcd': + content => template('bcd/sudoers.bcd') + } +} diff --git a/modules/bcd/manifests/init.pp b/modules/bcd/manifests/init.pp new file mode 100644 index 00000000..1ff57144 --- /dev/null +++ b/modules/bcd/manifests/init.pp @@ -0,0 +1,5 @@ +class bcd { + $login = 'bcd' + $home = '/home/bcd' + $public_isos = "${home}/public_html/isos" +} diff --git a/modules/bcd/manifests/rsync.pp b/modules/bcd/manifests/rsync.pp new file mode 100644 index 00000000..0a9ccc34 --- /dev/null +++ b/modules/bcd/manifests/rsync.pp @@ -0,0 +1,7 @@ +class bcd::rsync { + include bcd::base + $public_isos = $bcd::public_isos + class { 'rsyncd': + rsyncd_conf => 'bcd/rsyncd.conf', + } +} diff --git a/modules/bcd/manifests/web.pp b/modules/bcd/manifests/web.pp new file mode 100644 index 00000000..d670cf5d --- /dev/null +++ b/modules/bcd/manifests/web.pp @@ -0,0 +1,9 @@ +class bcd::web { + include bcd::base + $location = "${bcd::home}/public_html" + + apache::vhost::base { "bcd.${::domain}": + location => $location, + content => template('bcd/vhost_bcd.conf'), + } +} diff --git 
a/modules/bcd/templates/rsyncd.conf b/modules/bcd/templates/rsyncd.conf new file mode 100644 index 00000000..75c7d335 --- /dev/null +++ b/modules/bcd/templates/rsyncd.conf @@ -0,0 +1,12 @@ +# $Id: rsyncd.conf 1419 2011-03-29 17:04:07Z nanardon $ + +uid = nobody +gid = nogroup + +[isos] + path = <%= scope.lookupvar("bcd::public_isos") %> + comment = Mageia ISOs + exclude = .htaccess .htpasswd + read only = yes + auth users = isoqa + secrets file = /etc/rsyncd.secrets diff --git a/modules/bcd/templates/sudoers.bcd b/modules/bcd/templates/sudoers.bcd new file mode 100644 index 00000000..c462bffd --- /dev/null +++ b/modules/bcd/templates/sudoers.bcd @@ -0,0 +1,10 @@ +<%= scope.lookupvar('bcd::login') %> ALL=(root) NOPASSWD:/bin/mount, /bin/umount, \ +/usr/sbin/chroot, \ +/usr/sbin/urpmi, \ +/usr/sbin/urpmi.addmedia, \ +/usr/sbin/urpmi.removemedia, \ +/usr/sbin/urpmi.update, \ +/usr/bin/urpmq, \ +/bin/rm + +%<%= @isomakers_group %> ALL=(<%= scope.lookupvar('bcd::login') %>) SETENV: NOPASSWD: ALL diff --git a/modules/bcd/templates/vhost_bcd.conf b/modules/bcd/templates/vhost_bcd.conf new file mode 100644 index 00000000..c89955e2 --- /dev/null +++ b/modules/bcd/templates/vhost_bcd.conf @@ -0,0 +1,12 @@ +<Directory <%= @location %>> + AuthUserFile <%= scope.lookupvar('bcd::home') %>/htpasswd + AuthGroupFile /dev/null + AuthName "QA test isos, restricted access" + ErrorDocument 403 "For the password, please contact the QA team ( https://wiki.<%= @domain %>/en/QA_Team )" + + AuthType Basic + require valid-user + + Options FollowSymlinks + Options Indexes +</Directory> diff --git a/modules/bind/manifests/init.pp b/modules/bind/manifests/init.pp index 60ef7a04..a5d20c09 100644 --- a/modules/bind/manifests/init.pp +++ b/modules/bind/manifests/init.pp @@ -1,56 +1,25 @@ class bind { - class bind_base { - package { bind: - ensure => installed - } + package { 'bind': } - service { named: - ensure => running, - path => "/etc/init.d/named", - subscribe => [ Package["bind"]] - } - - file { '/etc/named.conf': - ensure => "/var/lib/named/etc/named.conf", - owner => root, - group => root, - mode => 644 - } + service { 'named': + restart => 'service named restart', + subscribe => Package['bind'], } - - file { '/var/lib/named/etc/named.conf': - ensure => present, - owner => root, - group => root, - mode => 644, - require => Package["bind"], - content => "", - notify => [Service['named']] + file { '/etc/named.conf': + ensure => link, + target => '/var/lib/named/etc/named.conf', + require => Package['bind'], } - define zone_master { - file { "/var/lib/named/var/named/master/$name.zone": - ensure => present, - owner => root, - group => root, - mode => 644, - content => template("bind/zones/$name.zone"), - require => Package[bind], - notify => Service[named] - } + exec { 'named_reload': + command => 'service named reload', + refreshonly => true, } - class bind_master inherits bind_base { - file { '/var/lib/named/etc/named.conf': - content => template("bind/named_base.conf", "bind/named_master.conf"), - } - } - - class bind_slave inherits bind_base { - file { '/var/lib/named/etc/named.conf': - content => template("bind/named_base.conf", "bind/named_slave.conf"), - } + file { '/var/lib/named/etc/named.conf': + require => Package['bind'], + content => '', + notify => Service['named'], } - } diff --git a/modules/bind/manifests/master.pp b/modules/bind/manifests/master.pp new file mode 100644 index 00000000..a82d4757 --- /dev/null +++ b/modules/bind/manifests/master.pp @@ -0,0 +1,17 @@ +class bind::master inherits bind { + 
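+    # collect the Tld_redirections::Domain resources exported by other nodes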
Tld_redirections::Domain <<| |>> + + $managed_tlds = list_exported_ressources('Tld_redirections::Domain') + + file { "/var/lib/named/var/named/master": + ensure => directory + } + + file { "/var/lib/named/var/named/reverse": + ensure => directory + } + + File['/var/lib/named/etc/named.conf'] { + content => template('bind/named_base.conf', 'bind/named_master.conf'), + } +} diff --git a/modules/bind/manifests/slave.pp b/modules/bind/manifests/slave.pp new file mode 100644 index 00000000..e446b57a --- /dev/null +++ b/modules/bind/manifests/slave.pp @@ -0,0 +1,6 @@ +class bind::slave inherits bind { + $managed_tlds = list_exported_ressources('Tld_redirections::Domain') + File['/var/lib/named/etc/named.conf'] { + content => template('bind/named_base.conf', 'bind/named_slave.conf'), + } +} diff --git a/modules/bind/manifests/zone.pp b/modules/bind/manifests/zone.pp new file mode 100644 index 00000000..17f2075e --- /dev/null +++ b/modules/bind/manifests/zone.pp @@ -0,0 +1,13 @@ +define bind::zone($type, $content = false) { + if ! $content { + $zone_content = template("bind/zones/${name}.zone") + } else { + $zone_content = $content + } + + file { "/var/named/${type}/${name}.zone": + content => $zone_content, + require => Package['bind'], + notify => Exec['named_reload'] + } +} diff --git a/modules/bind/manifests/zone/master.pp b/modules/bind/manifests/zone/master.pp new file mode 100644 index 00000000..460f52c6 --- /dev/null +++ b/modules/bind/manifests/zone/master.pp @@ -0,0 +1,6 @@ +define bind::zone::master($content = false) { + bind::zone { $name : + type => 'master', + content => $content, + } +} diff --git a/modules/bind/manifests/zone/reverse.pp b/modules/bind/manifests/zone/reverse.pp new file mode 100644 index 00000000..400e77f9 --- /dev/null +++ b/modules/bind/manifests/zone/reverse.pp @@ -0,0 +1,6 @@ +define bind::zone::reverse($content = false) { + bind::zone { $name : + type => 'reverse', + content => $content, + } +} diff --git a/modules/bind/templates/named_base.conf b/modules/bind/templates/named_base.conf index 3eb30478..5adba9f3 100644 --- a/modules/bind/templates/named_base.conf +++ b/modules/bind/templates/named_base.conf @@ -18,6 +18,10 @@ logging { acl "trusted_networks" { 127.0.0.1; 212.85.158.144/28; + # used for various virtual machines + 192.168.0.0/16; + 10.0.0.0/8; + 172.16.0.0/12; }; // Enable statistics at http://127.0.0.1:5380/ statistics-channels { @@ -28,7 +32,6 @@ options { version ""; directory "/var/named"; dump-file "/var/tmp/named_dump.db"; - pid-file "/var/run/named.pid"; statistics-file "/var/tmp/named.stats"; zone-statistics yes; // datasize 256M; @@ -97,37 +100,31 @@ zone "." 
IN {
 zone "localdomain" IN {
 	type master;
-	file "master/localdomain.zone";
+	file "named.localhost";
 	allow-update { none; };
 };
 zone "localhost" IN {
 	type master;
-	file "master/localhost.zone";
+	file "named.localhost";
 	allow-update { none; };
 };
-zone "0.0.127.in-addr.arpa" IN {
+zone "1.0.0.127.in-addr.arpa" IN {
 	type master;
-	file "reverse/named.local";
+	file "named.loopback";
 	allow-update { none; };
 };
-zone "0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa" IN {
+zone "1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa" IN {
 	type master;
-	file "reverse/named.ip6.local";
-	allow-update { none; };
-};
-
-zone "255.in-addr.arpa" IN {
-	type master;
-	file "reverse/named.broadcast";
+	file "named.loopback";
 	allow-update { none; };
 };
 zone "0.in-addr.arpa" IN {
 	type master;
-	file "reverse/named.zero";
+	file "named.empty";
 	allow-update { none; };
 };
diff --git a/modules/bind/templates/named_master.conf b/modules/bind/templates/named_master.conf
index f5219e94..30b3418f 100644
--- a/modules/bind/templates/named_master.conf
+++ b/modules/bind/templates/named_master.conf
@@ -4,11 +4,26 @@ zone "mageia.org" IN {
 	allow-update { none; };
 };
-zone "mageia.fr" IN {
+<%
+for tld in managed_tlds
+%>
+
+zone "mageia.<%= tld %>" IN {
 	type master;
-	file "master/mageia.fr.zone";
+	file "master/mageia.<%= tld %>.zone";
 	allow-update { none; };
 };
+<% end %>
+zone "7.0.0.0.2.0.0.0.8.7.1.2.2.0.a.2.ip6.arpa" IN {
+	type master;
+	file "reverse/7.0.0.0.2.0.0.0.8.7.1.2.2.0.a.2.ip6.arpa.zone";
+	allow-update { none; };
+};
+zone "2.1.0.0.0.0.0.1.b.0.e.0.1.0.a.2.ip6.arpa" IN {
+	type master;
+	file "reverse/2.1.0.0.0.0.0.1.b.0.e.0.1.0.a.2.ip6.arpa.zone";
+	allow-update { none; };
+};
diff --git a/modules/bind/templates/named_slave.conf b/modules/bind/templates/named_slave.conf
index 2a3a2fad..b59db37f 100644
--- a/modules/bind/templates/named_slave.conf
+++ b/modules/bind/templates/named_slave.conf
@@ -1,14 +1,31 @@
 zone "mageia.org" IN {
 	type slave;
 	file "slave/mageia.org";
-	allow-update { 212.85.158.146; };
+	allow-update { 212.85.158.151; };
 };
-zone "mageia.fr" IN {
+<%
+for tld in managed_tlds
+%>
+
+zone "mageia.<%= tld %>" IN {
 	type master;
-	file "master/mageia.fr";
-	allow-update { 212.85.158.146; };
+	file "master/mageia.<%= tld %>";
+	allow-update { 212.85.158.151; };
 };
+<%
+end
+%>
+zone "7.0.0.0.2.0.0.0.8.7.1.2.2.0.a.2.ip6.arpa" IN {
+	type slave;
+	file "slave/7.0.0.0.2.0.0.0.8.7.1.2.2.0.a.2.ip6.arpa.zone";
+	allow-update { 212.85.158.151; };
+};
+zone "2.1.0.0.0.0.0.1.b.0.e.0.1.0.a.2.ip6.arpa" IN {
+	type slave;
+	file "slave/2.1.0.0.0.0.0.1.b.0.e.0.1.0.a.2.ip6.arpa.zone";
+	allow-update { 212.85.158.151; };
+};
diff --git a/modules/bind/templates/zones/mageia.fr.zone b/modules/bind/templates/zones/mageia.fr.zone
deleted file mode 100644
index 70ecc840..00000000
--- a/modules/bind/templates/zones/mageia.fr.zone
+++ /dev/null
@@ -1,27 +0,0 @@
-; cfengine-distributed file
-; local modifications will be lost
-; $Id$
-$TTL 3D
-@ IN SOA ns0.mageia.org. mageia.fr. (
-        2010110200 ; Serial
-        21600 ; Refresh
-        3600 ; Retry
-        2419200 ; Expire
-        86400 ; Minmun TTL
-        )
-
-; nameservers
-@ IN NS ns0.mageia.org.
-@ IN NS ns1.mageia.org.
-
-@ IN MX 10 mx0.zarb.org.
-@ IN MX 20 mx1.zarb.org.
-
-; MX
-;@ IN MX 10 mx0.zarb.org.
-
-; machines
-mageia.fr. IN A 212.85.158.22
-
-; aliases
-www IN CNAME mageia.fr.
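For illustration only, a minimal sketch (not part of this change) of how the bind::zone wrappers defined above might be used; the zone names and the template path in the second resource are hypothetical examples.

# master zone: with no content argument, bind::zone falls back to
# template('bind/zones/example.org.zone'), writes /var/named/master/example.org.zone
# and notifies Exec['named_reload']
bind::zone::master { 'example.org': }

# reverse zone with an explicitly rendered template
bind::zone::reverse { '0.168.192.in-addr.arpa':
  content => template('mymodule/0.168.192.in-addr.arpa.zone'),
}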
diff --git a/modules/bind/templates/zones/mageia.org.zone b/modules/bind/templates/zones/mageia.org.zone deleted file mode 100644 index 1a9de019..00000000 --- a/modules/bind/templates/zones/mageia.org.zone +++ /dev/null @@ -1,87 +0,0 @@ -; puppet-distributed file -; local modifications will be lost -; $Id$ -$TTL 3D -@ IN SOA ns0.mageia.org. root.mageia.org. ( - 2010112201 ; Serial - 21600 ; Refresh - 3600 ; Retry - 2419200 ; Expire - 86400 ; Minmun TTL - ) - -; nameservers -@ IN NS ns0.mageia.org. -@ IN NS ns1.mageia.org. - -@ IN MX 10 mx0.zarb.org. -@ IN MX 20 mx1.zarb.org. - -ml IN MX 10 alamut.mageia.org. -ml IN MX 20 krampouezh.mageia.org. - -; MX -;@ IN MX 10 mx0.zarb.org. - -; machines -mageia.org. IN A 212.85.158.22 -www-zarb IN A 212.85.158.22 -; gandi vm 1 -vm-gandi IN A 95.142.164.207 -kouign-amann IN A 95.142.164.207 -krampouezh IN A 95.142.164.207 -champagne IN A 217.70.188.116 - -www-aufml IN A 91.121.11.63 -forum IN A 88.191.127.89 - -; lost oasis -alamut IN A 212.85.158.146 -alamut IN AAAA 2a02:2178:2:7::2 -; since we have a subdomain, we cannot use a CNAME -ml IN A 212.85.158.146 -ml IN AAAA 2a02:2178:2:7::2 - -valstar IN A 212.85.158.147 -valstar IN AAAA 2a02:2178:2:7::3 -ecosse IN A 212.85.158.148 -ecosse IN AAAA 2a02:2178:2:7::4 -jonund IN A 212.85.158.149 -jonund IN AAAA 2a02:2178:2:7::5 -fiona IN A 212.85.158.150 -fiona IN AAAA 2a02:2178:2:7::6 - -; alamut -ns0 IN A 212.85.158.146 -; krampouezh -ns1 IN A 95.142.164.207 - -; aliases -www IN CNAME www-zarb -www-test IN CNAME champagne -blog IN CNAME www-zarb -blog-test IN CNAME champagne -rsync IN CNAME www-zarb - -ldap IN CNAME valstar - -svn IN CNAME valstar -meetbot IN CNAME krampouezh - -donate IN CNAME www-aufml -donation IN CNAME www-aufml - -puppetmaster IN CNAME valstar -pkgsubmit IN CNAME valstar -repository IN CNAME valstar -ldap IN CNAME valstar - -identity IN CNAME alamut -mirrors IN CNAME alamut -epoll IN CNAME alamut -pgsql IN CNAME alamut -bugs IN CNAME alamut -lists IN CNAME alamut -; temporary -;forum IN A 140.211.167.148 -;wiki IN A 88.191.83.84 diff --git a/modules/blog/manifests/init.pp b/modules/blog/manifests/init.pp index ab7f9ec0..c89a8168 100644 --- a/modules/blog/manifests/init.pp +++ b/modules/blog/manifests/init.pp @@ -1,41 +1,97 @@ -#TODO: -# - add the creation of the user 'blog' in puppet class blog { - package { 'mysql': - ensure => installed - } + class base { + $blog_domain = "blog.${::domain}" + $blog_location = "/var/www/vhosts/${blog_domain}" + $blog_db_backupdir = '/var/lib/backups/blog_db' + $blog_files_backupdir = '/var/lib/backups/blog_files' + $blog_newpost_email_to = "i18n-reports@ml.${::domain}" + $blog_newpost_email_from = "Mageia Blog bot <blog@${::domain}>" - package { 'wget': - ensure => installed + user { 'blog': + groups => apache, + comment => 'Mageia Blog bot', + home => '/var/lib/blog', } + } - include apache::mod_php + class files_bots inherits base { +if versioncmp($::lsbdistrelease, '9') < 0 { + package { ['php-mysqlnd', + 'php-ldap', + 'unzip', + 'nail']: } +} else { + package { ['php-mysqlnd', + 'php-ldap', + 'unzip', + 's-nail']: } +} + + mga_common::local_script { 'check_new-blog-post.sh': + content => template('blog/check_new-blog-post.sh'), + } + + cron { 'Blog bot': + user => 'blog', + minute => '*/15', + command => '/usr/local/bin/check_new-blog-post.sh', + require => Mga_common::Local_script['check_new-blog-post.sh'], + } + + include apache::mod::php + + apache::vhost::base { "${blog_domain}": + location => $blog_location, + content => 
template('blog/blogs_vhosts.conf'), + } + + apache::vhost::base { "ssl_${blog_domain}": + use_ssl => true, + vhost => $blog_domain, + location => $blog_location, + content => template('blog/blogs_vhosts.conf'), + } - package { 'php-mysql': - ensure => installed - } + file { $blog_location: + ensure => directory, + owner => apache, + group => apache, + } + } + + class db_backup inherits base { + file { $blog_db_backupdir: + ensure => directory, + } + mga_common::local_script { 'backup_blog-db.sh': + content => template('blog/backup_blog-db.sh'), + } - file { "check_new-blog-post": - path => "/usr/local/bin/check_new-blog-post.sh", - ensure => present, - owner => blog, - group => blog, - mode => 755, - content => template("blog/check_new-blog-post.sh") - } + cron { "Backup DB (blog)": + user => root, + hour => '23', + minute => '42', + command => '/usr/local/bin/backup_blog-db.sh', + require => Mga_common::Local_script['backup_blog-db.sh'], + } + } - file { "/var/lib/blog": + class files_backup inherits base { + file { $blog_files_backupdir: ensure => directory, - owner => blog, - group => blog, - mode => 644, } - cron { blog: - user => blog, - minute => '*/15', - command => "/usr/local/bin/check_new-blog-post.sh", - require => File["check_new-blog-post"] - } + mga_common::local_script { 'backup_blog-files.sh': + content => template('blog/backup_blog-files.sh'), + } + + cron { 'Backup files (blog)': + user => root, + hour => '23', + minute => '42', + command => '/usr/local/bin/backup_blog-files.sh', + require => Mga_common::Local_script['backup_blog-files.sh'], + } + } } diff --git a/modules/blog/templates/.htaccess b/modules/blog/templates/.htaccess new file mode 100644 index 00000000..19bee3bd --- /dev/null +++ b/modules/blog/templates/.htaccess @@ -0,0 +1,10 @@ +# BEGIN WordPress +<IfModule mod_rewrite.c> +RewriteEngine On +RewriteBase / +RewriteCond %{REQUEST_FILENAME} !-f +RewriteCond %{REQUEST_FILENAME} !-d +RewriteRule . /index.php [L] +</IfModule> + +# END WordPress diff --git a/modules/blog/templates/backup_blog-db.sh b/modules/blog/templates/backup_blog-db.sh new file mode 100755 index 00000000..c497cb8f --- /dev/null +++ b/modules/blog/templates/backup_blog-db.sh @@ -0,0 +1,23 @@ +#!/bin/sh + +# Initialization +PATH_TO_FILE=${PATH_TO_FILE:-<%= blog_db_backupdir %>} +[ ! -f $PATH_TO_FILE/count ] && echo 0 > $PATH_TO_FILE/count +COUNT=$(cat "$PATH_TO_FILE/count") +# Backup each locale DB +for locale in de el en es fr it nl pl pt ro ru tr uk +do + if [ ! -d $PATH_TO_FILE/$locale ] + then + /bin/mkdir $PATH_TO_FILE/$locale + fi + /usr/bin/mysqldump --add-drop-table -h localhost blog_$locale | bzip2 -c > $PATH_TO_FILE/$locale/mageia_$locale-$COUNT.bak.sql.bz2 +done +# Check count file to have a week of backup in the directory +if [ $COUNT -ne 6 ] +then + COUNT=$(expr $COUNT + 1) +else + COUNT="0" +fi +echo $COUNT > $PATH_TO_FILE/count diff --git a/modules/blog/templates/backup_blog-files.sh b/modules/blog/templates/backup_blog-files.sh new file mode 100755 index 00000000..e268ad2b --- /dev/null +++ b/modules/blog/templates/backup_blog-files.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +# Initialization +PATH_TO_FILE=${PATH_TO_FILE:-<%= blog_files_backupdir %>} +[ ! -f $PATH_TO_FILE/count ] && echo 0 > $PATH_TO_FILE/count +COUNT=$(cat "$PATH_TO_FILE/count") +# Backup each locale +for locale in de el en es fr it nl pl pt ro ru sv tr uk +do + if [ ! 
-d $PATH_TO_FILE/$locale ] + then + /bin/mkdir $PATH_TO_FILE/$locale + fi + # use relative paths to avoid "Removing leading `/' from member names'" warning + tar -C / -Jcf "$PATH_TO_FILE/$locale/$locale-$COUNT.tar.xz" "$(sed s,^/,, <<< "<%= blog_location %>/$locale")" +done +# Check count file to have a week of backup in the directory +if [ $COUNT -ne 6 ] +then + COUNT=$(expr $COUNT + 1) +else + COUNT="0" +fi +echo $COUNT > $PATH_TO_FILE/count diff --git a/modules/blog/templates/blogs_vhosts.conf b/modules/blog/templates/blogs_vhosts.conf new file mode 100644 index 00000000..ff3c792f --- /dev/null +++ b/modules/blog/templates/blogs_vhosts.conf @@ -0,0 +1,16 @@ +<Directory <%= blog_location %> > + Order deny,allow + Allow from All + AllowOverride All + Options FollowSymlinks + Options +Indexes +</Directory> +# Add a permanent redirection for 'pt' as it was 'pt-br' before +# Add a permanent redirection for '/*' as it's now '/en/' for english blog +# TO BE REMOVE in May, 1st (?) +<IfModule mod_alias.c> + Redirect permanent /pt-br/ /pt/ + Redirect permanent /wp-content/uploads/ /en/wp-content/uploads/ + Redirect permanent /wp-includes/images/ /en/wp-includes/images/ + RedirectMatch permanent ^/?$ /en/ +</IfModule> diff --git a/modules/blog/templates/check_new-blog-post.sh b/modules/blog/templates/check_new-blog-post.sh index c3183375..f2089a52 100755 --- a/modules/blog/templates/check_new-blog-post.sh +++ b/modules/blog/templates/check_new-blog-post.sh @@ -2,36 +2,49 @@ # Initialization PATH_TO_FILE=${PATH_TO_FILE:-/var/lib/blog} -/usr/bin/wget -qO $PATH_TO_FILE"/RSS_new" http://blog.mageia.org/?feed=rss2 -if [ -n $? ] +/usr/bin/wget -qO $PATH_TO_FILE"/last_tmp" https://blog.mageia.org/en/?feed=rss2 +if [ $? -ne 0 ] then - exit 2 + exit 2 fi -# Check if RSS_old exists -if [ ! -f $PATH_TO_FILE"/RSS_old" ] +last_title=$(grep "title" $PATH_TO_FILE"/last_tmp" | head -n 2 | sed '1d' | sed 's/<title>//' | sed 's/<\/title>//' | sed 's/^[ \t]*//') +last_pub=$(grep "pubDate" $PATH_TO_FILE"/last_tmp" | head -n 1 | sed 's/<pubDate>//' | sed 's/<\/pubDate>//' | sed 's/^[ \t]*//') +last_creator=$(grep "creator" $PATH_TO_FILE"/last_tmp" | head -n 1 | sed 's/<dc:creator>//' | sed 's/<\/dc:creator>//' | sed 's/^[ \t]*//') +echo -e "$last_title\n$last_pub\n$last_creator" > $PATH_TO_FILE"/last_tmp" + +# Check if 'last_entry' exists +if [ ! 
-f $PATH_TO_FILE"/last_entry" ] then - /bin/mv -f $PATH_TO_FILE"/RSS_new" $PATH_TO_FILE"/RSS_old" + /bin/mv -f $PATH_TO_FILE"/last_tmp" $PATH_TO_FILE"/last_entry" exit 1 fi +# Add a date file for log /bin/date +"%d:%m:%Y %H:%M" > $PATH_TO_FILE"/last_check" # Check if a new blog post on EN needs to be translated on other blogs -tmp_new=$(/bin/grep 'lastBuildDate' $PATH_TO_FILE"/RSS_new") -tmp_old=$(/bin/grep 'lastBuildDate' $PATH_TO_FILE"/RSS_old") +tmp_new=$(cat $PATH_TO_FILE"/last_tmp" | sed -n '1p') +tmp_old=$(cat $PATH_TO_FILE"/last_entry" | sed -n '1p') if [ "$tmp_old" = "$tmp_new" ] then # Nothing new - echo "NO" >> $PATH_TO_FILE"/last_check" + tmp_new=$(cat $PATH_TO_FILE"/last_tmp" | sed -n '2p') + tmp_old=$(cat $PATH_TO_FILE"/last_entry" | sed -n '2p') + if [ "$tmp_old" != "$tmp_new" ] + then + # Modification on latest post + echo "YES - Modification" >> $PATH_TO_FILE"/last_check" + echo -e "The latest blog post has been modified and needs to be checked!\n\nTitle:\t$last_title\nAuthor:\t$last_creator\n-- \nMail sent by the script '$0' on `hostname`" | /bin/mail -r '<%= blog_newpost_email_from %>' -s "Modification of the latest entry on English Blog" <%= blog_newpost_email_to %> + echo $DATE + else + echo "NO" >> $PATH_TO_FILE"/last_check" + fi else # New post to translate - cat $PATH_TO_FILE"/last_check" > $PATH_TO_FILE"/last_need_translation" - new_post=$(grep "title" $PATH_TO_FILE"/RSS_new" | head -n 2 | sed '1d' | sed 's/<title>//' | sed 's/<\/title>//' | sed 's/^[ \t]*//') - echo $new_post >> $PATH_TO_FILE"/last_need_translation" - echo "YES" >> $PATH_TO_FILE"/last_check" - echo -e "A new blog post is waiting for translation\n\"$new_post\"" | /bin/mail -s "New entry on English Blog" mageia-blogteam@mageia.org + echo "YES - New entry" >> $PATH_TO_FILE"/last_check" + echo -e "A new blog post is waiting for translation:\n\nTitle:\t$last_title\nAuthor:\t$last_creator\n-- \nMail sent by the script '$0' on `hostname`" | /bin/mail -r '<%= blog_newpost_email_from %>' -s "New entry on English Blog" <%= blog_newpost_email_to %> echo $DATE fi # Clean tmp files and copy RSS_new to RSS_old -/bin/mv -f $PATH_TO_FILE"/RSS_new" $PATH_TO_FILE"/RSS_old" +/bin/mv -f $PATH_TO_FILE"/last_tmp" $PATH_TO_FILE"/last_entry" diff --git a/modules/bugzilla-dev/manifests/init.pp b/modules/bugzilla-dev/manifests/init.pp new file mode 100755 index 00000000..c6623872 --- /dev/null +++ b/modules/bugzilla-dev/manifests/init.pp @@ -0,0 +1,81 @@ +class bugzilla-dev { + + $bugzilla_dev_location = '/usr/share/bugzilla/' + + package {['graphviz', + 'perl-Template-GD', # needed for graphical_report support + 'perl-Test-Taint', + 'perl-JSON-RPC', + 'perl-Email-MIME', + 'perl-Email-Sender', + 'Math-Random-ISAAC', + 'perl-Chart', + 'perl-PatchReader', + 'perl-ldap', + 'perl-SOAP-Lite', + 'perl-XMLRPC-Lite', + 'perl-CGI']: } + + $pgsql_password = extlookup('bugzilla_pgsql','x') + $ldap_password = extlookup('bugzilla_ldap','x') + + postgresql::remote_db_and_user { 'bugs': + description => 'Bugzilla database', + password => $pgsql_password, + } + + file { '/usr/share/bugzilla/localconfig': + group => 'apache', + mode => '0640', + content => template('bugzilla-dev/localconfig') + } + + + file { '/usr/share/bugzilla/data/params.json': + group => 'apache', + mode => '0640', + content => template('bugzilla-dev/params.json') + } + + apache::webapp_other { 'bugzilla-dev': + webapp_file => 'bugzilla-dev/webapp_bugzilla.conf', + } + + $bugs_vhost = "bugs-dev.${::domain}" + $vhost_root = '/usr/share/bugzilla' + + 
apache::vhost::redirect_ssl { $bugs_vhost: } + + apache::vhost::base { $bugs_vhost: + content => template('bugzilla-dev/vhost.conf'), + aliases => { '/bugzilla/' => $vhost_root }, + use_ssl => true, + location => $vhost_root, + vhost => $bugs_vhost, + } + + git::snapshot { $bugzilla_dev_location: + source => "git://git.${::domain}/web/bugs" + } + + file { 'Mageia': + ensure => directory, + path => '/usr/share/bugzilla', + group => 'apache', + recurse => true, + require => Git::Snapshot[$bugzilla_dev_location], + } + + file { '/usr/share/bugzilla/robots.txt': + group => 'apache', + mode => '0640', + content => template('bugzilla-dev/robots.txt') + } + + cron { 'collectstats': + command => '/usr/share/bugzilla/bin/collectstats.pl', + user => 'apache', + hour => 2, + minute => 30, + } +} diff --git a/modules/bugzilla-dev/templates/localconfig b/modules/bugzilla-dev/templates/localconfig new file mode 100755 index 00000000..2b7d6035 --- /dev/null +++ b/modules/bugzilla-dev/templates/localconfig @@ -0,0 +1,121 @@ +# If you are using Apache as your web server, Bugzilla can create .htaccess +# files for you, which will keep this file (localconfig) and other +# confidential files from being read over the web. +# +# If this is set to 1, checksetup.pl will create .htaccess files if +# they don't exist. +# +# If this is set to 0, checksetup.pl will not create .htaccess files. +$create_htaccess = 0; + +# The name of the group that your web server runs as. On Red Hat +# distributions, this is usually "apache". On Debian/Ubuntu, it is +# usually "www-data". +# +# If you have use_suexec turned on below, then this is instead the name +# of the group that your web server switches to to run cgi files. +# +# If this is a Windows machine, ignore this setting, as it does nothing. +# +# If you do not have access to the group your scripts will run under, +# set this to "". If you do set this to "", then your Bugzilla installation +# will be _VERY_ insecure, because some files will be world readable/writable, +# and so anyone who can get local access to your machine can do whatever they +# want. You should only have this set to "" if this is a testing installation +# and you cannot set this up any other way. YOU HAVE BEEN WARNED! +# +# If you set this to anything other than "", you will need to run checksetup.pl +# as root or as a user who is a member of the specified group. +$webservergroup = 'apache'; + +# Set this to 1 if Bugzilla runs in an Apache SuexecUserGroup environment. +# +# If your web server runs control panel software (cPanel, Plesk or similar), +# or if your Bugzilla is to run in a shared hosting environment, then you are +# almost certainly in an Apache SuexecUserGroup environment. +# +# If this is a Windows box, ignore this setting, as it does nothing. +# +# If set to 0, checksetup.pl will set file permissions appropriately for +# a normal webserver environment. +# +# If set to 1, checksetup.pl will set file permissions so that Bugzilla +# works in a SuexecUserGroup environment. +$use_suexec = 0; + +# What SQL database to use. Default is mysql. List of supported databases +# can be obtained by listing Bugzilla/DB directory - every module corresponds +# to one supported database and the name of the module (before ".pm") +# corresponds to a valid value for this variable. +$db_driver = 'pg'; + +# The DNS name or IP address of the host that the database server runs on. +$db_host = 'pg.mageia.org'; + +# The name of the database. For Oracle, this is the database's SID. 
For +# SQLite, this is a name (or path) for the DB file. +$db_name = 'bugs'; + +# Who we connect to the database as. +$db_user = 'bugs'; + +# Enter your database password here. It's normally advisable to specify +# a password for your bugzilla database user. +# If you use apostrophe (') or a backslash (\) in your password, you'll +# need to escape it by preceding it with a '\' character. (\') or (\) +# (It is far simpler to just not use those characters.) +$db_pass = '<%= pgsql_password %>'; + +# Sometimes the database server is running on a non-standard port. If that's +# the case for your database server, set this to the port number that your +# database server is running on. Setting this to 0 means "use the default +# port for my database server." +$db_port = 0; + +# MySQL Only: Enter a path to the unix socket for MySQL. If this is +# blank, then MySQL's compiled-in default will be used. You probably +# want that. +$db_sock = ''; + +# Should checksetup.pl try to verify that your database setup is correct? +# With some combinations of database servers/Perl modules/moonphase this +# doesn't work, and so you can try setting this to 0 to make checksetup.pl +# run. +$db_check = 1; + +# Path to a PEM file with a list of trusted SSL CA certificates. +# The file must be readable by web server user. +$db_mysql_ssl_ca_file = ''; + +# Path to a directory containing trusted SSL CA certificates in PEM format. +# Directory and files inside must be readable by the web server user. +$db_mysql_ssl_ca_path = ''; + +# Full path to the client SSL certificate in PEM format we will present to the DB server. +# The file must be readable by web server user. +$db_mysql_ssl_client_cert = ''; + +# Full path to the private key corresponding to the client SSL certificate. +# The file must not be password-protected and must be readable by web server user. +$db_mysql_ssl_client_key = ''; + +# Most web servers will allow you to use index.cgi as a directory +# index, and many come preconfigured that way, but if yours doesn't +# then you'll need an index.html file that provides redirection +# to index.cgi. Setting $index_html to 1 below will allow +# checksetup.pl to create an index.html for you if it doesn't exist. +# NOTE: checksetup.pl will not replace an existing file, so if you +# wish to have checksetup.pl create one for you, you must +# make sure that index.html doesn't already exist. +$index_html = 0; + +# If you want to use the "Difference Between Two Patches" feature of the +# Patch Viewer, please specify the full path to the "interdiff" executable +# here. +$interdiffbin = '/usr/bin/interdiff'; + +# For the "Difference Between Two Patches" feature to work, we need to know +# what directory the "diff" bin is in. (You only need to set this if you +# are using that feature of the Patch Viewer.) 
+$diffpath = '/usr/bin'; + diff --git a/modules/bugzilla-dev/templates/params.json b/modules/bugzilla-dev/templates/params.json new file mode 100644 index 00000000..b51b4c00 --- /dev/null +++ b/modules/bugzilla-dev/templates/params.json @@ -0,0 +1,104 @@ +{ + "LDAPBaseDN" : "ou=People,<%= dc_suffix %>", + "LDAPbinddn" : "cn=bugzilla-alamut,ou=System Accounts,<%= dc_suffix %>:<%= ldap_password %>", + "LDAPfilter" : "", + "LDAPmailattribute" : "mail", + "LDAPserver" : "ldap.<%= domain %>", + "LDAPstarttls" : "1", + "LDAPuidattribute" : "uid", + "RADIUS_NAS_IP" : "", + "RADIUS_email_suffix" : "", + "RADIUS_secret" : "", + "RADIUS_server" : "", + "ajax_user_autocompletion" : "1", + "allow_attachment_deletion" : "0", + "allow_attachment_display" : "1", + "allowbugdeletion" : "0", + "allowemailchange" : "0", + "allowuserdeletion" : "0", + "announcehtml" : "", + "attachment_base" : "", + "auth_env_email" : "", + "auth_env_id" : "", + "auth_env_realname" : "", + "chartgroup" : "editbugs", + "collapsed_comment_tags" : "obsolete, spam", + "comment_taggers_group" : "editbugs", + "commentonchange_resolution" : "1", + "commentonduplicate" : "0", + "confirmuniqueusermatch" : "1", + "cookiedomain" : "", + "cookiepath" : "/", + "createemailregexp" : ".*", + "debug_group" : "editbugs", + "default_search_limit" : "500", + "defaultopsys" : "Linux", + "defaultplatform" : "All", + "defaultpriority" : "Normal", + "defaultquery" : "bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&emailassigned_to1=1&emailassigned_to2=1&emailreporter2=1&emailcc2=1&emailqa_contact2=1&order=Importance&long_desc_type=substring", + "defaultseverity" : "normal", + "duplicate_or_move_bug_status" : "RESOLVED", + "emailregexp" : "^[\\w\\.\\+\\-=]+@[\\w\\.\\-]+\\.[\\w\\-]+$", + "emailregexpdesc" : "A legal address must contain exactly one '@', and at least one '.' 
after the @.", + "emailsuffix" : "", + "font_file" : "", + "globalwatchers" : "bugs-dev@ml.mageia.org", + "inbound_proxies" : "", + "insidergroup" : "secteam", + "last_visit_keep_days" : "10", + "letsubmitterchoosemilestone" : "1", + "letsubmitterchoosepriority" : "1", + "mail_delivery_method" : "SMTP", + "mailfrom" : "bugzilla-daemon@<%= domain %>", + "maintainer" : "sysadmin@group.<%= domain %>", + "makeproductgroups" : "0", + "max_search_results" : "10000", + "maxattachmentsize" : "1000", + "maxlocalattachment" : "0", + "maxusermatches" : "1000", + "memcached_namespace" : "bugzilla:", + "memcached_servers" : "", + "musthavemilestoneonaccept" : "0", + "mybugstemplate" : "buglist.cgi?bug_status=UNCONFIRMED&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&emailassigned_to1=1&emailreporter1=1&emailtype1=exact&email1=%userid%&field0-0-0=bug_status&type0-0-0=notequals&value0-0-0=UNCONFIRMED&field0-0-1=reporter&type0-0-1=equals&value0-0-1=%userid%", + "noresolveonopenblockers" : "0", + "or_groups" : "0", + "password_check_on_login" : "1", + "password_complexity" : "no_constraints", + "proxy_url" : "", + "querysharegroup" : "editbugs", + "quip_list_entry_control" : "open", + "rememberlogin" : "on", + "requirelogin" : "0", + "search_allow_no_criteria" : "0", + "shadowdb" : "", + "shadowdbhost" : "", + "shadowdbport" : "3306", + "shadowdbsock" : "", + "shutdownhtml" : "", + "smtp_debug" : "0", + "smtp_password" : "", + "smtp_ssl" : "0", + "smtp_username" : "", + "smtpserver" : "localhost", + "ssl_redirect" : "1", + "sslbase" : "https://bugs-dev.<%= domain %>/", + "strict_isolation" : "0", + "strict_transport_security" : "off", + "timetrackinggroup" : "", + "upgrade_notification" : "latest_stable_release", + "urlbase" : "http://bugs-dev.<%= domain %>/", + "use_mailer_queue" : "0", + "use_see_also" : "1", + "useclassification" : "0", + "usemenuforusers" : "0", + "useqacontact" : "1", + "user_info_class" : "CGI", + "user_verify_class" : "LDAP", + "usestatuswhiteboard" : "1", + "usetargetmilestone" : "1", + "usevisibilitygroups" : "0", + "utf8" : "1", + "webdotbase" : "/usr/bin/dot", + "webservice_email_filter" : "0", + "whinedays" : "0" +} diff --git a/modules/bugzilla-dev/templates/robots.txt b/modules/bugzilla-dev/templates/robots.txt new file mode 100755 index 00000000..63639f02 --- /dev/null +++ b/modules/bugzilla-dev/templates/robots.txt @@ -0,0 +1,10 @@ +User-agent: * +Disallow: / +Allow: /*index.cgi +Allow: /*page.cgi +Allow: /*show_bug.cgi +Allow: /*describecomponents.cgi +Disallow: /*show_bug.cgi*ctype=* +Disallow: /*show_bug.cgi*format=multiple* +Disallow: /*page.cgi*id=voting* +Sitemap: https://bugs.mageia.org/page.cgi?id=sitemap/sitemap.xml diff --git a/modules/bugzilla-dev/templates/vhost.conf b/modules/bugzilla-dev/templates/vhost.conf new file mode 100755 index 00000000..79eab9fb --- /dev/null +++ b/modules/bugzilla-dev/templates/vhost.conf @@ -0,0 +1,2 @@ +RewriteEngine On +RewriteRule ^/([0-9]+)$ /show_bug.cgi?id=$1 [R=301,L] diff --git a/modules/bugzilla-dev/templates/webapp_bugzilla.conf b/modules/bugzilla-dev/templates/webapp_bugzilla.conf new file mode 100755 index 00000000..a8f37a00 --- /dev/null +++ b/modules/bugzilla-dev/templates/webapp_bugzilla.conf @@ -0,0 +1,73 @@ +<% +path_data_directory = "/var/lib/bugzilla" +%> + +<Directory /usr/share/bugzilla/> + AddHandler cgi-script .cgi + Options +ExecCGI +FollowSymLinks + DirectoryIndex index.cgi index.html + AllowOverride All +</Directory> + +# The duplicates.rdf must be accessible, as it is used by +# duplicates.xul 
+<Directory <%= path_data_directory %>> + <Files duplicates.rdf> + <IfModule mod_authz_core.c> + # Apache 2.4 + Require all granted + </IfModule> + <IfModule !mod_authz_core.c> + # Apache 2.2 + Order allow,deny + Allow from all + </IfModule> + </Files> +</Directory> + +# The png files locally created locally must be accessible +<Directory <%= path_data_directory %>/webdot> + <FilesMatch \.png$> + <IfModule mod_authz_core.c> + # Apache 2.4 + Require all granted + </IfModule> + <IfModule !mod_authz_core.c> + # Apache 2.2 + Order allow,deny + Allow from all + </IfModule> + </FilesMatch> +</Directory> + +Alias /graphs/ <%= path_data_directory %>/graphs/ +<Directory <%= path_data_directory %>/graphs> + <FilesMatch \.png$> + <IfModule mod_authz_core.c> + # Apache 2.4 + Require all granted + </IfModule> + <IfModule !mod_authz_core.c> + # Apache 2.2 + Order allow,deny + Allow from all + </IfModule> + </FilesMatch> +</Directory> + +# This should work automatically, but perhaps something +# in our Bugzilla packaging breaks this? +Alias /extensions/Mageia/web/ <%= scope.lookupvar("bugzilla::extension_location") %>/web/ +<Directory <%= scope.lookupvar("bugzilla::extension_location") %>/web/> + <FilesMatch \.png$> + <IfModule mod_authz_core.c> + # Apache 2.4 + Require all granted + </IfModule> + <IfModule !mod_authz_core.c> + # Apache 2.2 + Order allow,deny + Allow from all + </IfModule> + </FilesMatch> +</Directory> diff --git a/modules/bugzilla/manifests/init.pp b/modules/bugzilla/manifests/init.pp index 23a24f1e..e66ddf0e 100644..100755 --- a/modules/bugzilla/manifests/init.pp +++ b/modules/bugzilla/manifests/init.pp @@ -1,36 +1,202 @@ class bugzilla { - package { 'bugzilla': - ensure => installed; + $bugzilla_location = '/usr/share/bugzilla' + + package {['graphviz', + 'perl-Template-GD', # needed for graphical_report support + 'perl-Test-Taint', + 'perl-JSON-RPC', + 'perl-JSON-XS', + 'perl-Email-MIME', + 'perl-Email-Sender', + 'perl-Math-Random-ISAAC', + 'perl-Chart', + 'perl-PatchReader', + 'perl-ldap', + 'perl-SOAP-Lite', + 'perl-XMLRPC-Lite', + 'perl-CGI', + 'perl-HTML-Scrubber', + 'perl-Encode-Detect', + 'perl-File-MimeInfo', + 'perl-Email-Reply', + 'perl-HTML-FormatText-WithLinks', + 'perl-Cache-Memcached', + 'perl-File-Copy-Recursive', + 'perl-Daemon-Generic']: } + + $pgsql_password = extlookup('bugzilla_pgsql','x') + $ldap_password = extlookup('bugzilla_ldap','x') + $bugzilla_secret_key = extlookup('bugzilla_secret_key','x') + + postgresql::remote_db_and_user { 'bugs': + description => 'Bugzilla database', + password => $pgsql_password, } - $password = extlookup("bugzilla_password") - $passwordLdap = extlookup("bugzilla_ldap") + file { "$bugzilla_location/localconfig": + group => 'apache', + mode => '0640', + content => template('bugzilla/localconfig') + } - file { '/etc/bugzilla/localconfig': - ensure => present, - owner => root, - group => apache, - mode => 640, - content => template("bugzilla/localconfig") + + file { "$bugzilla_location/data/params.json": + group => 'apache', + mode => '0640', + content => template('bugzilla/params.json') + } + file { "$bugzilla_location/graphs": + ensure => directory, + owner => 'apache', + group => 'apache', + mode => '0770' } + apache::webapp_other { 'bugzilla': + webapp_file => 'bugzilla/webapp_bugzilla.conf', + } - file { '/var/lib/bugzilla/params': - ensure => present, - owner => root, - group => apache, - mode => 640, - content => template("bugzilla/params") + $bugs_vhost = "bugs.${::domain}" + $vhost_root = $bugzilla_location + + 
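+    # send plain-HTTP traffic for this vhost to https:// (cf. apache/templates/vhost_ssl_redirect.conf)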
apache::vhost::redirect_ssl { $bugs_vhost: } + + apache::vhost::base { $bugs_vhost: + content => template('bugzilla/vhost.conf'), + aliases => { '/bugzilla/' => $vhost_root }, + use_ssl => true, + location => $vhost_root, + vhost => $bugs_vhost, + enable_location => false, + } + + git::snapshot { $bugzilla_location: + source => "git://git.${::domain}/web/bugs" } - include apache::mod_fcgid - apache::webapp_other{"bugzilla": - webapp_file => "bugzilla/webapp_bugzilla.conf", - } + file { 'Mageia': + ensure => directory, + path => $bugzilla_location, + group => 'apache', + recurse => true, + require => Git::Snapshot[$bugzilla_location], + } - apache::vhost_other_app { "bugs.$domain": - vhost_file => "bugzilla/vhost_bugs.conf", + file { ["$bugzilla_location/data", + "$bugzilla_location/data/mining"]: + ensure => directory, + owner => 'apache', + group => 'apache', + mode => '0770' } -} + file { "$bugzilla_location/data/assets": + ensure => directory, + owner => 'apache', + group => 'apache', + mode => '0770' + } + + file { "$bugzilla_location/robots.txt": + group => 'apache', + mode => '0640' + } + + file { "$bugzilla_location/data/bugzilla-update.xml": + owner => 'apache', + group => 'apache', + mode => '0640' + } + + file { [ + "$bugzilla_location/admin.cgi", + "$bugzilla_location/attachment.cgi", + "$bugzilla_location/buglist.cgi", + "$bugzilla_location/chart.cgi", + "$bugzilla_location/colchange.cgi", + "$bugzilla_location/config.cgi", + "$bugzilla_location/createaccount.cgi", + "$bugzilla_location/describecomponents.cgi", + "$bugzilla_location/describekeywords.cgi", + "$bugzilla_location/duplicates.cgi", + "$bugzilla_location/editclassifications.cgi", + "$bugzilla_location/editfields.cgi", + "$bugzilla_location/editgroups.cgi", + "$bugzilla_location/editmilestones.cgi", + "$bugzilla_location/editproducts.cgi", + "$bugzilla_location/editusers.cgi", + "$bugzilla_location/editversions.cgi", + "$bugzilla_location/editworkflow.cgi", + "$bugzilla_location/editcomponents.cgi", + "$bugzilla_location/editflagtypes.cgi", + "$bugzilla_location/editkeywords.cgi", + "$bugzilla_location/editparams.cgi", + "$bugzilla_location/editsettings.cgi", + "$bugzilla_location/editvalues.cgi", + "$bugzilla_location/editwhines.cgi", + "$bugzilla_location/enter_bug.cgi", + "$bugzilla_location/index.cgi", + "$bugzilla_location/jsonrpc.cgi", + "$bugzilla_location/page.cgi", + "$bugzilla_location/post_bug.cgi", + "$bugzilla_location/process_bug.cgi", + "$bugzilla_location/query.cgi", + "$bugzilla_location/quips.cgi", + "$bugzilla_location/relogin.cgi", + "$bugzilla_location/reports.cgi", + "$bugzilla_location/rest.cgi", + "$bugzilla_location/search_plugin.cgi", + "$bugzilla_location/show_bug.cgi", + "$bugzilla_location/showdependencytree.cgi", + "$bugzilla_location/testagent.cgi", + "$bugzilla_location/userprefs.cgi", + "$bugzilla_location/xmlrpc.cgi", + "$bugzilla_location/report.cgi", + "$bugzilla_location/request.cgi", + "$bugzilla_location/sanitycheck.cgi", + "$bugzilla_location/show_activity.cgi", + "$bugzilla_location/showdependencygraph.cgi", + "$bugzilla_location/summarize_time.cgi", + "$bugzilla_location/token.cgi", + "$bugzilla_location/votes.cgi", + "$bugzilla_location/checksetup.pl", + "$bugzilla_location/clean-bug-user-last-visit.pl", + "$bugzilla_location/collectstats.pl", + "$bugzilla_location/email_in.pl", + "$bugzilla_location/importxml.pl", + "$bugzilla_location/install-module.pl", + "$bugzilla_location/jobqueue.pl", + "$bugzilla_location/migrate.pl", + "$bugzilla_location/runtests.pl", + 
"$bugzilla_location/sanitycheck.pl", + "$bugzilla_location/testserver.pl", + "$bugzilla_location/whineatnews.pl", + "$bugzilla_location/whine.pl", + ]: + group => 'apache', + mode => '0750', + } + +# Improper file permissions makes this fail, and nobody seems to care +# cron { 'collectstats': +# command => "cd $bugzilla_location && ./collectstats.pl", +# user => 'apache', +# hour => 2, +# minute => 30, +# } + + cron { 'clean-bug-user-last-visit': + command => "cd $bugzilla_location && ./clean-bug-user-last-visit.pl", + user => 'apache', + hour => 3, + minute => 0, + } + cron { 'sanitycheck': + command => "cd $bugzilla_location && $bugzilla_location/sanitycheck.pl --login LpSolit@gmail.com", + user => 'apache', + hour => 21, + minute => 0, + } + +} diff --git a/modules/bugzilla/templates/localconfig b/modules/bugzilla/templates/localconfig index 23089510..61935552 100644 --- a/modules/bugzilla/templates/localconfig +++ b/modules/bugzilla/templates/localconfig @@ -1,51 +1,59 @@ - # If you are using Apache as your web server, Bugzilla can create .htaccess -# files for you that will instruct Apache not to serve files that shouldn't -# be accessed from the web browser (like your local configuration data and non-cgi -# executable files). For this to work, the directory your Bugzilla -# installation is in must be within the jurisdiction of a <Directory> block -# in the httpd.conf file that has 'AllowOverride Limit' in it. If it has -# 'AllowOverride All' or other options with Limit, that's fine. -# (Older Apache installations may use an access.conf file to store these -# <Directory> blocks.) -# If this is set to 1, Bugzilla will create these files if they don't exist. -# If this is set to 0, Bugzilla will not create these files. -$create_htaccess = 0; - -# Usually, this is the group your web server runs as. -# If you have a Windows box, ignore this setting. -# If you have use_suexec switched on below, this is the group Apache switches -# to in order to run Bugzilla scripts. +# files for you, which will keep this file (localconfig) and other +# confidential files from being read over the web. +# +# If this is set to 1, checksetup.pl will create .htaccess files if +# they don't exist. +# +# If this is set to 0, checksetup.pl will not create .htaccess files. +$create_htaccess = 1; + +# The name of the group that your web server runs as. On Red Hat +# distributions, this is usually "apache". On Debian/Ubuntu, it is +# usually "www-data". +# +# If you have use_suexec turned on below, then this is instead the name +# of the group that your web server switches to to run cgi files. +# +# If this is a Windows machine, ignore this setting, as it does nothing. +# # If you do not have access to the group your scripts will run under, # set this to "". If you do set this to "", then your Bugzilla installation # will be _VERY_ insecure, because some files will be world readable/writable, # and so anyone who can get local access to your machine can do whatever they # want. You should only have this set to "" if this is a testing installation # and you cannot set this up any other way. YOU HAVE BEEN WARNED! +# # If you set this to anything other than "", you will need to run checksetup.pl -# asroot, or as a user who is a member of the specified group. +# as root or as a user who is a member of the specified group. $webservergroup = 'apache'; -# Set this if Bugzilla runs in an Apache SuexecUserGroup environment. 
-# (If your web server runs control panel software (cPanel, Plesk or similar), +# Set this to 1 if Bugzilla runs in an Apache SuexecUserGroup environment. +# +# If your web server runs control panel software (cPanel, Plesk or similar), # or if your Bugzilla is to run in a shared hosting environment, then you are -# almost certainly in an Apache SuexecUserGroup environment.) -# If you have a Windows box, ignore this setting. -# If set to 0, Bugzilla will set file permissions as tightly as possible. -# If set to 1, Bugzilla will set file permissions so that it may work in an -# SuexecUserGroup environment. The difference is that static files (CSS, -# JavaScript and so on) will receive world read permissions. +# almost certainly in an Apache SuexecUserGroup environment. +# +# If this is a Windows box, ignore this setting, as it does nothing. +# +# If set to 0, checksetup.pl will set file permissions appropriately for +# a normal webserver environment. +# +# If set to 1, checksetup.pl will set file permissions so that Bugzilla +# works in a SuexecUserGroup environment. $use_suexec = 0; # What SQL database to use. Default is mysql. List of supported databases # can be obtained by listing Bugzilla/DB directory - every module corresponds -# to one supported database and the name corresponds to a driver name. +# to one supported database and the name of the module (before ".pm") +# corresponds to a valid value for this variable. $db_driver = 'pg'; -# The DNS name of the host that the database server runs on. -$db_host = 'pgsql.<%= domain %>'; +# The DNS name or IP address of the host that the database server runs on. +$db_host = 'pg.mageia.org'; -# The name of the database +# The name of the database. For Oracle, this is the database's SID. For +# SQLite, this is a name (or path) for the DB file. $db_name = 'bugs'; # Who we connect to the database as. @@ -55,8 +63,8 @@ $db_user = 'bugs'; # a password for your bugzilla database user. # If you use apostrophe (') or a backslash (\) in your password, you'll # need to escape it by preceding it with a '\' character. (\') or (\) -# (Far simpler just not to use those characters.) -$db_pass = '<%= password %>'; +# (It is far simpler to just not use those characters.) +$db_pass = '<%= pgsql_password %>'; # Sometimes the database server is running on a non-standard port. If that's # the case for your database server, set this to the port number that your @@ -70,35 +78,50 @@ $db_port = 0; $db_sock = ''; # Should checksetup.pl try to verify that your database setup is correct? -# (with some combinations of database servers/Perl modules/moonphase this -# doesn't work) +# With some combinations of database servers/Perl modules/moonphase this +# doesn't work, and so you can try setting this to 0 to make checksetup.pl +# run. $db_check = 1; -# With the introduction of a configurable index page using the -# template toolkit, Bugzilla's main index page is now index.cgi. +# Path to a PEM file with a list of trusted SSL CA certificates. +# The file must be readable by web server user. +$db_mysql_ssl_ca_file = ''; + +# Path to a directory containing trusted SSL CA certificates in PEM format. +# Directory and files inside must be readable by the web server user. +$db_mysql_ssl_ca_path = ''; + +# Full path to the client SSL certificate in PEM format we will present to the DB server. +# The file must be readable by web server user. +$db_mysql_ssl_client_cert = ''; + +# Full path to the private key corresponding to the client SSL certificate. 
+# The file must not be password-protected and must be readable by web server user. +$db_mysql_ssl_client_key = ''; + # Most web servers will allow you to use index.cgi as a directory # index, and many come preconfigured that way, but if yours doesn't # then you'll need an index.html file that provides redirection # to index.cgi. Setting $index_html to 1 below will allow -# checksetup.pl to create one for you if it doesn't exist. +# checksetup.pl to create an index.html for you if it doesn't exist. # NOTE: checksetup.pl will not replace an existing file, so if you # wish to have checksetup.pl create one for you, you must -# make sure that index.html doesn't already exist +# make sure that index.html doesn't already exist. $index_html = 0; -# For some optional functions of Bugzilla (such as the pretty-print patch -# viewer), we need the cvs binary to access files and revisions. -# Because it's possible that this program is not in your path, you can specify -# its location here. Please specify the full path to the executable. -$cvsbin = '/usr/bin/cvs'; - -# For some optional functions of Bugzilla (such as the pretty-print patch -# viewer), we need the interdiff binary to make diffs between two patches. -# Because it's possible that this program is not in your path, you can specify -# its location here. Please specify the full path to the executable. +# If you want to use the "Difference Between Two Patches" feature of the +# Patch Viewer, please specify the full path to the "interdiff" executable +# here. $interdiffbin = '/usr/bin/interdiff'; -# The interdiff feature needs diff, so we have to have that path. -# Please specify the directory name only; do not use trailing slash. +# For the "Difference Between Two Patches" feature to work, we need to know +# what directory the "diff" bin is in. (You only need to set this if you +# are using that feature of the Patch Viewer.) $diffpath = '/usr/bin'; +# This secret key is used by your installation for the creation and +# validation of encrypted tokens. These tokens are used to implement +# security features in Bugzilla, to protect against certain types of attacks. +# A random string is generated by default. It's very important that this key +# is kept secret. It also must be very long. 
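[Editorial note, not part of the recorded commit] The value substituted on the next line comes from extlookup('bugzilla_secret_key') in modules/bugzilla/manifests/init.pp. Purely as an illustration of the "long random secret" requirement described above (the command is not taken from this repository), a value of the required shape can be generated with:

# print a long random string suitable for the bugzilla_secret_key extlookup value
openssl rand -base64 64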
+$site_wide_secret = '<%= bugzilla_secret_key %>'; diff --git a/modules/bugzilla/templates/params b/modules/bugzilla/templates/params index df5c98a2..2e71a39d 100644 --- a/modules/bugzilla/templates/params +++ b/modules/bugzilla/templates/params @@ -1,6 +1,6 @@ %param = ( 'LDAPBaseDN' => 'ou=People,<%= dc_suffix %>', - 'LDAPbinddn' => 'cn=bugzilla-alamut,ou=System Accounts,<%= dc_suffix %>:<%= passwordLdap %>', + 'LDAPbinddn' => 'cn=bugzilla-alamut,ou=System Accounts,<%= dc_suffix %>:<%= ldap_password %>', 'LDAPfilter' => '', 'LDAPmailattribute' => 'mail', 'LDAPserver' => 'ldap.<%= domain %>', @@ -10,11 +10,12 @@ 'RADIUS_email_suffix' => '', 'RADIUS_secret' => '', 'RADIUS_server' => '', + 'ajax_user_autocompletion' => '1', 'allow_attach_url' => 0, 'allow_attachment_deletion' => 0, - 'allow_attachment_display' => 0, + 'allow_attachment_display' => 1, 'allowbugdeletion' => 0, - 'allowemailchange' => 1, + 'allowemailchange' => 0, 'allowloginid' => '0', 'allowuserdeletion' => 0, 'announcehtml' => '', @@ -24,33 +25,36 @@ 'auth_env_realname' => '', 'bonsai_url' => '', 'chartgroup' => 'editbugs', - 'commentonchange_resolution' => 0, + 'commentonchange_resolution' => 1, 'commentonduplicate' => 0, 'confirmuniqueusermatch' => 1, 'cookiedomain' => '', 'cookiepath' => '/', - 'createemailregexp' => '.*', - 'cvsroot' => '', - 'cvsroot_get' => '', - 'defaultopsys' => '', - 'defaultplatform' => '', - 'defaultpriority' => '---', - 'defaultquery' => 'bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&emailassigned_to1=1&emailassigned_to2=1&emailreporter2=1&emailcc2=1&emailqa_contact2=1&order=Importance&long_desc_type=substring', - 'defaultseverity' => 'enhancement', - 'docs_urlbase' => 'docs/%lang%/html/', + 'createemailregexp' => '.*', + 'cvsroot' => '', + 'cvsroot_get' => '', + 'debug_group' => 'editbugs', + 'default_search_limit' => '500', + 'defaultopsys' => 'Linux', + 'defaultplatform' => 'All', + 'defaultpriority' => 'Normal', + 'defaultquery' => 'bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&emailassigned_to1=1&emailassigned_to2=1&emailreporter2=1&emailcc2=1&emailqa_contact2=1&order=Importance&long_desc_type=substring', + 'defaultseverity' => 'normal', + 'docs_urlbase' => ' https://www.bugzilla.org/docs/4.4/en/html/', 'duplicate_or_move_bug_status' => 'RESOLVED', 'emailregexp' => '^[\\w\\.\\+\\-=]+@[\\w\\.\\-]+\\.[\\w\\-]+$', 'emailregexpdesc' => 'A legal address must contain exactly one \'@\', and at least one \'.\' after the @.', 'emailsuffix' => '', - 'globalwatchers' => '', + 'globalwatchers' => 'bugs@ml.<%= domain %>', 'inbound_proxies' => '', - 'insidergroup' => '', + 'insidergroup' => 'secteam', 'letsubmitterchoosemilestone' => 1, 'letsubmitterchoosepriority' => 1, 'lxr_root' => '', 'lxr_url' => '', - 'mail_delivery_method' => 'Sendmail', - 'mailfrom' => 'bugzilla-daemon', + 'mail_delivery_method' => 'SMTP', + 'mailfrom' => 'bugzilla_noreply@ml.<%= domain %>', + 'maintainer' => 'sysadmin@group.<%= domain %>', 'makeproductgroups' => 0, 'maxattachmentsize' => '1000', 'maxlocalattachment' => '0', @@ -72,36 +76,36 @@ 'quip_list_entry_control' => 'open', 'rememberlogin' => 'on', 'requirelogin' => '0', + 'search_allow_no_criteria' => '0', 'sendmailnow' => 1, 'shadowdb' => '', 'shadowdbhost' => '', 'shadowdbport' => '3306', 'shadowdbsock' => '', - 'shutdownhtml' => '', 'smtp_debug' => 0, 'smtp_password' => '', 'smtp_username' => '', 'smtpserver' => 'localhost', 'specific_search_allow_empty_words' => 1, - 'ssl_redirect' => 0, - 'sslbase' => '', + 'ssl_redirect' => 1, + 'sslbase' => 
'https://bugs.<%= domain %>/', 'strict_isolation' => 0, - 'timetrackinggroup' => 'editbugs', + 'timetrackinggroup' => '', 'upgrade_notification' => 'latest_stable_release', - 'urlbase' => 'http://bugs.<%= domain %>/', + 'urlbase' => 'https://bugs.<%= domain %>/', 'use_mailer_queue' => 0, 'use_see_also' => 1, 'usebugaliases' => 0, 'useclassification' => 0, 'usemenuforusers' => '0', - 'useqacontact' => 0, + 'useqacontact' => 1, 'user_info_class' => 'CGI', 'user_verify_class' => 'LDAP', - 'usestatuswhiteboard' => 0, - 'usetargetmilestone' => 0, + 'usestatuswhiteboard' => 1, + 'usetargetmilestone' => 1, 'usevisibilitygroups' => 0, 'usevotes' => 0, 'utf8' => 1, - 'webdotbase' => 'http://www.research.att.com/~north/cgi-bin/webdot.cgi/%urlbase%', - 'whinedays' => 7 + 'webdotbase' => '/usr/bin/dot', + 'whinedays' => 0 ); diff --git a/modules/bugzilla/templates/params.json b/modules/bugzilla/templates/params.json new file mode 100644 index 00000000..05325bc7 --- /dev/null +++ b/modules/bugzilla/templates/params.json @@ -0,0 +1,104 @@ +{ + "LDAPBaseDN" : "ou=People,<%= dc_suffix %>", + "LDAPbinddn" : "cn=bugzilla-alamut,ou=System Accounts,<%= dc_suffix %>:<%= ldap_password %>", + "LDAPfilter" : "", + "LDAPmailattribute" : "mail", + "LDAPserver" : "ldap.<%= domain %>", + "LDAPstarttls" : "1", + "LDAPuidattribute" : "uid", + "RADIUS_NAS_IP" : "", + "RADIUS_email_suffix" : "", + "RADIUS_secret" : "", + "RADIUS_server" : "", + "ajax_user_autocompletion" : "1", + "allow_attachment_deletion" : "0", + "allow_attachment_display" : "1", + "allowbugdeletion" : "0", + "allowemailchange" : "0", + "allowuserdeletion" : "0", + "announcehtml" : "", + "attachment_base" : "", + "auth_env_email" : "", + "auth_env_id" : "", + "auth_env_realname" : "", + "chartgroup" : "editbugs", + "collapsed_comment_tags" : "obsolete, spam, off-topic", + "comment_taggers_group" : "editbugs", + "commentonchange_resolution" : "1", + "commentonduplicate" : "0", + "confirmuniqueusermatch" : "1", + "cookiedomain" : "", + "cookiepath" : "/", + "createemailregexp" : ".*", + "debug_group" : "admin", + "default_search_limit" : "500", + "defaultopsys" : "Linux", + "defaultplatform" : "All", + "defaultpriority" : "Normal", + "defaultquery" : "resolution=---&emailassigned_to1=1&emailassigned_to2=1&emailreporter2=1&emailcc2=1&emailqa_contact2=1&emaillongdesc3=1&order=Importance&long_desc_type=substring", + "defaultseverity" : "normal", + "duplicate_or_move_bug_status" : "RESOLVED", + "emailregexp" : "^[\\w\\.\\+\\-=]+@[\\w\\.\\-]+\\.[\\w\\-]+$", + "emailregexpdesc" : "A legal address must contain exactly one '@', and at least one '.' 
after the @.", + "emailsuffix" : "", + "font_file" : "", + "globalwatchers" : "bugs@ml.mageia.org", + "inbound_proxies" : "", + "insidergroup" : "secteam", + "last_visit_keep_days" : "60", + "letsubmitterchoosemilestone" : "1", + "letsubmitterchoosepriority" : "1", + "mail_delivery_method" : "SMTP", + "mailfrom" : "bugzilla_noreply@ml.<%= domain %>", + "maintainer" : "sysadmin@group.<%= domain %>", + "makeproductgroups" : "0", + "max_search_results" : "10000", + "maxattachmentsize" : "1000", + "maxlocalattachment" : "0", + "maxusermatches" : "1000", + "memcached_namespace" : "bugzilla:", + "memcached_servers" : "", + "musthavemilestoneonaccept" : "0", + "mybugstemplate" : "buglist.cgi?resolution=---&emailassigned_to1=1&emailreporter1=1&emailtype1=exact&email1=%userid%", + "noresolveonopenblockers" : "0", + "or_groups" : "0", + "password_check_on_login" : "1", + "password_complexity" : "no_constraints", + "proxy_url" : "", + "querysharegroup" : "editbugs", + "quip_list_entry_control" : "open", + "rememberlogin" : "on", + "requirelogin" : "0", + "search_allow_no_criteria" : "0", + "shadowdb" : "", + "shadowdbhost" : "", + "shadowdbport" : "3306", + "shadowdbsock" : "", + "shutdownhtml" : "", + "smtp_debug" : "0", + "smtp_password" : "", + "smtp_ssl" : "0", + "smtp_username" : "", + "smtpserver" : "localhost", + "ssl_redirect" : "1", + "sslbase" : "https://bugs.<%= domain %>/", + "strict_isolation" : "0", + "strict_transport_security" : "off", + "timetrackinggroup" : "", + "upgrade_notification" : "latest_stable_release", + "urlbase" : "https://bugs.<%= domain %>/", + "use_mailer_queue" : "0", + "use_see_also" : "1", + "useclassification" : "0", + "usemenuforusers" : "0", + "useqacontact" : "1", + "user_info_class" : "CGI", + "user_verify_class" : "LDAP", + "usestatuswhiteboard" : "1", + "usetargetmilestone" : "1", + "usevisibilitygroups" : "0", + "utf8" : "1", + "webdotbase" : "/usr/bin/dot", + "webservice_email_filter" : "0", + "whinedays" : "0" +} diff --git a/modules/bugzilla/templates/vhost.conf b/modules/bugzilla/templates/vhost.conf new file mode 100644 index 00000000..fd55e5f2 --- /dev/null +++ b/modules/bugzilla/templates/vhost.conf @@ -0,0 +1,14 @@ +RewriteEngine On +RewriteRule ^/([0-9]+)$ /show_bug.cgi?id=$1 [R=301,L] + + <Directory /usr/share/bugzilla> + Require all granted + </Directory> + + + <Directory /usr/share/bugzilla> + AllowOverride all + AddHandler cgi-script .cgi + Options +ExecCGI +FollowSymLinks + DirectoryIndex index.cgi index.html + </Directory> diff --git a/modules/bugzilla/templates/vhost_bugs.conf b/modules/bugzilla/templates/vhost_bugs.conf deleted file mode 100644 index 25306b1e..00000000 --- a/modules/bugzilla/templates/vhost_bugs.conf +++ /dev/null @@ -1,13 +0,0 @@ -<% -path_data_directory = lib_dir + "/bugzilla" -%> - -<VirtualHost *:80> - ServerName bugs.<%= domain %> - DocumentRoot /usr/share/bugzilla/www - Alias /bugzilla/data <%= path_data_directory %> - Alias /bugzilla /usr/share/bugzilla/www - <Location /> - Allow from all - </Location> -</VirtualHost> diff --git a/modules/bugzilla/templates/webapp_bugzilla.conf b/modules/bugzilla/templates/webapp_bugzilla.conf index a37760d8..d2e3f395 100644 --- a/modules/bugzilla/templates/webapp_bugzilla.conf +++ b/modules/bugzilla/templates/webapp_bugzilla.conf @@ -1,35 +1,11 @@ <% -path_data_directory = lib_dir + "/bugzilla" +path_data_directory = "/usr/share/bugzilla/" %> -<Directory /usr/share/bugzilla/www> - Order allow,deny - Allow from all - - Options ExecCGI - DirectoryIndex index.cgi -</Directory> - -# The 
duplicates.rdf must be accessible, as it is used by -# duplicates.xul -<Directory <%= path_data_directory %>> - <Files duplicates.rdf> - Order allow,deny - Allow from all - </Files> +<Directory <%= path_data_directory %> > + AddHandler cgi-script .cgi + Options +ExecCGI +FollowSymLinks + DirectoryIndex index.cgi index.html + AllowOverride All </Directory> -# The dot files must be accessible to the public webdot server -# The png files locally created locally must be accessible -<Directory <%= path_data_directory %>/webdot> - <FilesMatch \.dot$> - Order deny,allow - Deny from all - Allow from research.att.com - </FilesMatch> - - <FilesMatch \.png$> - Order allow,deny - Allow from all - </FilesMatch> -</Directory> diff --git a/modules/buildsystem/files/Mageia.pm b/modules/buildsystem/files/Mageia.pm new file mode 100644 index 00000000..443f6cb7 --- /dev/null +++ b/modules/buildsystem/files/Mageia.pm @@ -0,0 +1,509 @@ +package Youri::Repository::Mageia; + +=head1 NAME + +Youri::Repository::Mageia - Mageia repository implementation + +=head1 DESCRIPTION + +This module implements Mageia repository + +=cut + +use warnings; +use strict; +use Carp; +use Memoize; +use File::Find 'find'; +use base qw/Youri::Repository/; +use MDV::Distribconf::Build; +use SVN::Client; +use Sys::Hostname; + +use constant { + PACKAGE_CLASS => 'Youri::Package::RPM::URPM', + PACKAGE_CHARSET => 'utf8' +}; + +memoize('_get_media_config'); + +my %translate_arch = ( + i386 => 'i586', + sparc64 => 'sparcv9', +); + +sub _init { + my $self = shift; + my %options = ( + noarch => 'i586', # noarch packages policy + src => 'i586', + install_root => '', + test => 0, # test mode + verbose => 0, # verbose mode + queue => '', + rejected => '', + @_ + ); + foreach my $var ('upload_state') { + $self->{"_$var"} = []; + foreach my $value (split ' ', $options{$var}) { + push @{$self->{"_$var"}}, $value + } + } + print "Initializing repository\n"; + foreach my $v ('rejected', 'svn', 'queue', 'noarch', 'install_root', 'upload_root', 'verbose') { + $self->{"_$v"} = $options{$v} + } + foreach my $target (@{$options{targets}}) { + $self->{$target} = []; + print "Adding $target ($options{$target}{arch})\n" if $self->{_verbose}; + foreach my $value (split ' ', $options{$target}{arch}) { + push @{$self->{_arch}{$target}}, $value; + push @{$self->{_extra_arches}}, $value + } + } + $self +} + +sub get_group_id { + my ($user) = @_; + my ($sec,$min,$hour,$mday,$mon,$year,$wday,$yday,$isdst) = gmtime(time); + $year+=1900; + $mon++; + my ($host) = hostname =~ /([^.]*)/; + sprintf "$year%02d%02d%02d%02d%02d.$user.$host.${$}_", $mon, $mday, $hour, $min, $sec; +} + +sub get_target_arch { + my ($self, $target) = $_; + return $self->{_arch}{$target} +} + +sub set_arch_changed { + my ($self, $target, $arch) = @_; + if ($arch eq 'noarch') { + $self->{_arch_changed}{$_} = 1 foreach @{$self->{_arch}{$target}} + } elsif ($arch eq 'src') { + $self->{_arch_changed} = $self->{_src} + } else { + $self->{_arch_changed}{$arch} = 1 + } +} + +sub get_arch_changed { + my ($self, $target) = @_; + return [ keys %{$self->{_arch_changed}} ] +} + +sub set_install_dir_changed { + my ($self, $install_dir) = @_; + $self->{_install_dir_changed}{$install_dir} = 1; +} + +sub get_install_dir_changed { + my ($self) = @_; + return [ keys %{$self->{_install_dir_changed}} ]; +} + +sub _get_media_config { + my ($self, $target) = @_; + my %media; + my $real_target = $target; + $real_target =~ s/_force//; + foreach my $arch (@{$self->{_arch}{$target}}) { + my $root = 
"$self->{_install_root}/$real_target/$arch"; + my $distrib = MDV::Distribconf::Build->new($root); + print "Getting media config from $root\n" if $self->{_verbose}; + $self->{distrib}{$arch} = $distrib; + $distrib->loadtree or die "$root does not seem to be a distribution tree\n"; + $distrib->parse_mediacfg; + foreach my $media ($distrib->listmedia) { + my $rpms = $distrib->getvalue($media, 'rpms'); + my $debug_for = $distrib->getvalue($media, 'debug_for'); + my $srpms = $distrib->getvalue($media, 'srpms'); + my $path = $distrib->getfullpath($media, 'path'); + if (!$rpms) { + if (-d $path) { + print "MEDIA defining $media in $path\n" if $self->{_verbose} > 1; + $media{$arch}{$media} = $path + } else { + print "ERROR $path does not exist for media $media on $arch\n" + } + } else { + my ($media) = split ' ', $rpms; + if (-d $path) { + print "MEDIA defining SOURCE media for $media in $path\n" if $self->{_verbose} > 1; + $media{src}{$media} = $path + } else { + print "ERROR $path does not exist for source media $media on $arch\n" + } + } + } + } + \%media +} + +sub get_package_class { + return PACKAGE_CLASS; +} + +sub get_package_charset { + return PACKAGE_CHARSET; +} + +sub get_upload_dir { + my ($self, $package, $target, $user_context, $app_context) = @_; + croak "Not a class method" unless ref $self; + my $arch = $package->get_arch(); + return + $self->{_upload_root} . + "/$self->{_queue}/$target/" . + _get_section($self, $package, $target, $user_context, $app_context) . + '/' . + ($user_context->{prefix} ? '' : get_group_id($user_context->{user})) +} + +sub get_install_path { + my ($self, $package, $target, $user_context, $app_context) = @_; + + return $self->_get_path($package, $target, $user_context, $app_context); +} + + +sub get_distribution_paths { + my ($self, $package, $target) = @_; + + return $self->_get_distribution_paths($package, $target); +} + +=head2 get_distribution_roots() + +Returns distribution roots (ie install_root + target + arch) +(it returns a list in case of noarch) + +=cut + +sub get_distribution_roots { + my ($self, $package, $target) = @_; + croak "Not a class method" unless ref $self; + + map { + $self->_get_dir($self->{_install_root}, $_); + } $self->_get_distribution_paths($package, $target); +} + +sub get_archive_path { + my ($self, $package, $target, $user_context, $app_context) = @_; + + return $self->_get_path($package, $target, $user_context, $app_context); +} + +sub get_reject_path { + my ($self, $package, $target, $user_context, $app_context) = @_; + + return $self->{_rejected}; +} + + +sub _get_path { + my ($self, $package, $target, $user_context, $app_context) = @_; + + my $section = $self->_get_section($package, $target, $user_context, $app_context); + my $arch = $app_context->{arch} || $package->get_arch(); + $arch = $translate_arch{$arch} || $arch; + if ($arch eq 'noarch') { + $arch = $self->{_noarch} + } elsif ($arch eq 'src') { + return "$target/SRPMS/$section" + } + "$target/$arch/media/$section" +} + +sub _get_distribution_paths { + my ($self, $package, $target) = @_; + + my $arch = $package->get_arch(); + $arch = $translate_arch{$arch} || $arch; + if ($arch eq 'noarch') { + map { "$target/$_" } $self->get_target_arches($target); + } elsif ($arch eq 'src') { + die "no way to get distribution path using a $arch package"; + } else { + "$target/$arch"; + } +} + +sub get_arch { + my ($self, $package, $target, $user_context, $app_context) = @_; + my $arch = $package->get_arch(); + $arch = $translate_arch{$arch} || $arch; + if ($arch eq 'noarch') { + 
$arch = $self->{_noarch} + } + $arch +} + +sub get_version_path { + my ($self, $package, $target, $user_context, $app_context) = @_; + + my $section = $self->_get_section($package, $target, $user_context, $app_context); + + return "$self->{_module}/$section"; +} + +=head2 get_replaced_packages($package, $target, $user_context, $app_context) + +Overrides parent method to add libified packages. + +=cut + +sub get_replaced_packages { + my ($self, $package, $target, $user_context, $app_context) = @_; + croak "Not a class method" unless ref $self; + + my @replaced_packages = + $self->SUPER::get_replaced_packages($package, $target, $user_context, $app_context); + + my $name = $package->get_name(); + + # kernel packages have the version in the name + # binary dkms built for old kernels have to be removed too + if ($name =~ /^kernel-([^\d]*-)?([\d.]*)-(.*)$/) { # "desktop", "2.6.28", "2mnb" + push(@replaced_packages, + map { PACKAGE_CLASS->new(file => $_) } + $self->get_files( + $self->{_install_root}, + $self->get_install_path($package, $target, $user_context, $app_context), + PACKAGE_CLASS->get_pattern( + '(kernel-' . $1 . '\d.*|.*-kernel-[\d.]*-' . $1 . '\d.*)', + undef, + undef, + $package->get_arch() + ), + ) + ); + } + + return @replaced_packages; + +} + +sub _get_main_section { + my ($self, $package, $target, $user_context, $app_context) = @_; + + my $section = $self->_get_section($package, $target, $user_context, $app_context); + my ($main_section) = $section =~ m,^([^/]+),; + $main_section +} + +sub _get_section { + my ($self, $package, $target, $user_context, $app_context) = @_; + + my $name = $package->get_name(); + my $cname = $package->get_canonical_name(); + my $version = $package->get_version(); + my $release = $package->get_release(); + my $section = $user_context->{section}; + my $media = $self->_get_media_config($target); + my $arch = $package->get_arch(); + my $file = $package->as_file(); + $file =~ s,/+,/,g; # unneeded? 
+ # FIXME: use $self->get_arch() + $arch = $self->{_noarch} if $arch eq 'noarch'; + $arch = $translate_arch{$arch} || $arch; + + if (!$section) { + $section = $self->{packages}{$file}{section}; + print "Section undefined, repository says it is '$section' for '$file'\n" if $self->{_verbose}; + } + # FIXME: use debug_for info + if ($section && $section !~ m|debug/| && $package->is_debug()) { + $section = "debug/$section" + } + + # if have section already, check if it exists, and may return immediately + if ($section) { + print "Using requested section $section\n" if $self->{_verbose}; + if ($media->{$arch}{$section}) { + return $section + } else { + die "FATAL youri: unknown section $section for target $target for arch $arch\n" + } + } + # else, try to find section automatically + + # pattern for search of src package with specific version-release, + # should be searched first, because we prefer to find the precise + # section a package is already in + my $specific_source_pattern = PACKAGE_CLASS->get_pattern( + $cname, + $version, + $release, + 'src' + ); + + my $source_pattern = PACKAGE_CLASS->get_pattern( + $cname, + undef, + undef, + 'src' + ); + + # if a media has no source media configured, or if it is a debug + # package, we search in binary media + + # pattern for search when a binary media has no src media configured + my $specific_binary_pattern = PACKAGE_CLASS->get_pattern( + $name, + $version, + $release, + $arch + ); + + # last resort pattern: previous existing binary packages + my $binary_pattern = PACKAGE_CLASS->get_pattern( + $name, + undef, + undef, + $arch + ); + + # first try to find section for the specific version, as it is possibly already there; + # this is the case for when called in Youri::Submit::Action::Archive, to find the + # section the package got installed + print "Looking for package $name with version $version-$release\n" if $self->{_verbose}; + foreach my $m (keys %{$media->{$arch}}) { + print " .. section '$m' path '".$media->{$arch}{$m}."'\n" if $self->{_verbose}; + # - prefer source for non-debug packages, use binary if there is no source media configured + # - debug packages must be searched in binary medias, due to their + # src section != binary section; NOTE: should/need we search in + # src medias and add the 'debug_' prefix? + if (!$package->is_debug() && $media->{src}{$m}) { + next unless $self->get_files('', $media->{src}{$m}, $specific_source_pattern); + } else { + next unless $self->get_files('', $media->{$arch}{$m}, $specific_binary_pattern); + } + $section = $m; + last; + } + + # if still not found, try finding any version of the package in a + # /release subsection (safe default: /release is default for cooker, + # should be locked for released distros, and we don't risk wrongly + # choosing /backports, /testing, or /updates); + # this is the case for when called at submit, to find the section where + # the package already resides + if (!$section) { + # debug packages should be found by previous specific version search + # NOTE: as above, should/need we search here and add the 'debug_' prefix? + # ... probably... as at least mga-youri-submit-force will process debug packages + if ($package->is_debug() && $self->{_verbose}) { + print "Warning: debug package $name with version $version-$release not found.\n"; + } + + print "Warning: Looking for any section with a package $name of any version\n"; + foreach my $m (keys %{$media->{$arch}}) { + print " .. 
section '$m' path '".$media->{$arch}{$m}."'\n" if $self->{_verbose}; + # NOTE: !$package->is_debug() test is here to prevent when above FATAL error is removed + next if $m !~ /release/ || ($m =~ /debug/ && !$package->is_debug()); + # - prefer source + if ($media->{src}{$m}) { + next unless $self->get_files('', $media->{src}{$m}, $source_pattern); + } else { + next unless $self->get_files('', $media->{$arch}{$m}, $binary_pattern); + } + $section = $m; + last; + } + } + + # FIXME: doing this here is wrong; this way the caller can never know if + # a section was actually found or not; should return undef and let the + # caller set a default (Note: IIRC PLF|Zarb has this right, see there) -spuk + print STDERR "Warning: Can't guess destination: section missing, defaulting to core/release\n" unless $section; + $section ||= 'core/release'; + + # next time we don't need to search everything again + $self->{packages}{$file}{section} = $section; + + print "Section is '$section'.\n" if $self->{_verbose}; + + return $section; +} + +sub get_upload_newer_revisions { + my ($self, $package, $target, $user_context, $app_context) = @_; + croak "Not a class method" unless ref $self; + my $arch = $package->get_arch(); + my $name = $package->as_string(); + $name =~ s/^\@\d+://; + my $pattern = $self->get_package_class()->get_pattern($package->get_name(), undef, undef, $arch); + my $media = $self->_get_media_config($target); + my @packages; + foreach my $state (@{$self->{_upload_state}}) { + foreach my $m (keys %{$media->{$arch}}) { + next if defined($user_context->{section}) and $user_context->{section} ne $m; + my $path = "$self->{_upload_root}/$state/$target/$m"; + print "Looking for package $package revisions for $target in $path (pattern $pattern)\n" if $self->{_verbose}; + find( + sub { + s/\d{14}\.[^.]*\.[^.]*\.\d+_//; + s/^\@\d+://; + return if ! /^$pattern/; + return if /\.info$/; + print "Find $_\n" if $self->{_verbose} > 1; + push @packages, $File::Find::name if $package->check_ranges_compatibility("== $name", "< $_") + }, $path); + } + } + return + @packages; +} + +sub package_in_svn { + my ($self, $srpm_name) = @_; + my $ctx = new SVN::Client( + auth => [SVN::Client::get_simple_provider(), + SVN::Client::get_simple_prompt_provider(\&simple_prompt,2), + SVN::Client::get_username_provider()] + ); + + my $svn_entry = $ctx->ls("$self->{_svn}/$srpm_name", 'HEAD', 0); + if ($svn_entry) { + print "Package $srpm_name is in the SVN\n" if $self->{_verbose}; + return 1 + } +} + +sub get_svn_url { + my ($self) = @_; + $self->{_svn} +} + +sub reject { + my ($self, $package, $target, $user_context, $app_context) = @_; + croak "Not a class method" unless ref $self; + + +} + +sub get_archive_dir { + my ($self, $package, $target, $user_context, $app_context) = @_; + croak "Not a class method" unless ref $self; + + return + $self->{_archive_root} +} + + +=head1 COPYRIGHT AND LICENSE + +Copyright (C) 2002-2006, YOURI project +Copyright (C) 2006,2007,2009 Mandriva +Copyright (C) 2011 Nicolas Vigier, Michael Scherer, Pascal Terjan + +This program is free software; you can redistribute it and/or modify it under the same terms as Perl itself. 
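[Editorial note, not part of the recorded commit] The package_in_svn() method above only checks whether a directory for the given source package exists under the configured SVN URL (the repository's "svn" option). Roughly the same check, expressed as a hedged shell sketch with placeholder variables:

# SVN_ROOT and SRPM_NAME are placeholders for the configured SVN URL and a source package name
svn ls "$SVN_ROOT/$SRPM_NAME" > /dev/null 2>&1 && echo "$SRPM_NAME is in the SVN"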
+ +=cut + +1; diff --git a/modules/buildsystem/files/signbot/mga-signpackage b/modules/buildsystem/files/signbot/mga-signpackage new file mode 100755 index 00000000..199dbe0e --- /dev/null +++ b/modules/buildsystem/files/signbot/mga-signpackage @@ -0,0 +1,31 @@ +#!/usr/bin/perl -w + +use strict; +use warnings; +use RPM4::Sign; +use File::Spec; + +sub signpackage { + my ($file, $name, $path) = @_; + + # check if parent directory is writable + my $parent = (File::Spec->splitpath($file))[1]; + die "Unsignable package, parent directory is read-only" + unless -w $parent; + + my $sign = RPM4::Sign->new( + name => $name, + path => $path, + passphrase => '', + ); + + $sign->rpmssign($file) +} + +if (@ARGV != 3) { + exit 1; +} + +signpackage(@ARGV); +exit 0 + diff --git a/modules/buildsystem/files/signbot/sign-check-package b/modules/buildsystem/files/signbot/sign-check-package new file mode 100644 index 00000000..fc9704fd --- /dev/null +++ b/modules/buildsystem/files/signbot/sign-check-package @@ -0,0 +1,37 @@ +#!/bin/sh + +if [ $# != 3 ] ; then + echo "missing arguments" + echo "usage : $0 file key_number key_directory" + exit 1 +fi + +file="$1" +key="$2" +keydir="$3" + +tmpdir=`mktemp -d ${TMPDIR:-/tmp}/signbot-XXXXX` +tmpfile="$tmpdir/$(basename $file)" +cp -pf "$file" "$tmpfile" +rpm --delsign "$tmpfile" +/usr/local/bin/mga-signpackage "$tmpfile" "$key" "$keydir" +nbtry=0 +while rpmsign -Kv "$tmpfile" 2>&1 | grep BAD +do + nbtry=$(($nbtry + 1)) + if [ $nbtry -ge 30 ] + then + exit 1 + fi + + # Archive failed file for further analysis + mkdir -p "/tmp/failed-sign/" + failedfile="/tmp/failed-sign/$(basename "$file").$(date +%Y%m%d%H%M%S)" + cp -pf "$file" "$failedfile" + + cp -pf "$file" "$tmpfile" + rpm --delsign "$tmpfile" + /usr/local/bin/mga-signpackage "$tmpfile" "$key" "$keydir" +done +mv -f "$tmpfile" "$file" +rmdir "$tmpdir" diff --git a/modules/buildsystem/files/signbot/signbot-rpmmacros b/modules/buildsystem/files/signbot/signbot-rpmmacros new file mode 100644 index 00000000..aab7e389 --- /dev/null +++ b/modules/buildsystem/files/signbot/signbot-rpmmacros @@ -0,0 +1,3 @@ +%__gpg_sign_cmd %{__gpg} \ + gpg --batch --force-v3-sigs --no-verbose --no-armor --passphrase-fd 3 --no-secmem-warning \ + -u "%{_gpg_name}" -sbo %{__signature_filename} %{__plaintext_filename} diff --git a/modules/buildsystem/manifests/binrepo.pp b/modules/buildsystem/manifests/binrepo.pp new file mode 100644 index 00000000..5bf16b53 --- /dev/null +++ b/modules/buildsystem/manifests/binrepo.pp @@ -0,0 +1,48 @@ +class buildsystem::binrepo { + include buildsystem::var::binrepo + include buildsystem::var::groups + include sudo + + # upload-bin script uses the mailx command provided by nail +if versioncmp($::lsbdistrelease, '9') < 0 { + package { 'nail': + ensure => installed, + } +} else { + package { 's-nail': + ensure => installed, + } +} + + user { $buildsystem::var::binrepo::login: + home => $buildsystem::var::binrepo::homedir, + } + + file { [$buildsystem::var::binrepo::repodir, $buildsystem::var::binrepo::uploadinfosdir]: + ensure => directory, + owner => $buildsystem::var::binrepo::login, + } + + mga_common::local_script { + 'upload-bin': + content => template('buildsystem/binrepo/upload-bin'); + 'wrapper.upload-bin': + content => template('buildsystem/binrepo/wrapper.upload-bin'); + } + + sudo::sudoers_config { 'binrepo': + content => template('buildsystem/binrepo/sudoers.binrepo') + } + + apache::vhost::base { $buildsystem::var::binrepo::hostname: + location => $buildsystem::var::binrepo::repodir, + content => 
template('buildsystem/binrepo/vhost_binrepo.conf'), + } + + apache::vhost::base { "ssl_${buildsystem::var::binrepo::hostname}": + use_ssl => true, + vhost => $buildsystem::var::binrepo::hostname, + location => $buildsystem::var::binrepo::repodir, + content => template('buildsystem/binrepo/vhost_binrepo.conf'), + } +} diff --git a/modules/buildsystem/manifests/buildnode.pp b/modules/buildsystem/manifests/buildnode.pp new file mode 100644 index 00000000..1573c093 --- /dev/null +++ b/modules/buildsystem/manifests/buildnode.pp @@ -0,0 +1,12 @@ +class buildsystem::buildnode { + include buildsystem::iurt + include buildsystem::var::scheduler + include buildsystem::var::iurt + include buildsystem::sshkeys + + sshkeys::set_authorized_keys { 'iurt-allow-scheduler': + keyname => $buildsystem::var::scheduler::login, + home => $buildsystem::var::iurt::homedir, + user => $buildsystem::var::iurt::login, + } +} diff --git a/modules/buildsystem/manifests/create_upload_dir.rb b/modules/buildsystem/manifests/create_upload_dir.rb new file mode 100644 index 00000000..8023ab5d --- /dev/null +++ b/modules/buildsystem/manifests/create_upload_dir.rb @@ -0,0 +1,28 @@ +hostclass "buildsystem::create_upload_dir" do + states = ["todo","done","failure","queue","rejected"] + owner = scope.lookupvar('buildsystem::var::scheduler::login') + group = owner + uploads_dir = scope.lookupvar('buildsystem::var::scheduler::homedir') + '/uploads' + + file uploads_dir, :ensure => 'directory', :owner => owner, :group => group + + for st in states do + file [uploads_dir, st].join('/'), :ensure => 'directory', :owner => owner, :group => group + + scope.lookupvar('buildsystem::var::distros::distros').each{|rel, distro| + file [uploads_dir, st, rel].join('/'), :ensure => 'directory', :owner => owner, :group => group + medias = distro['medias'] + medias.each{|media, m| + file [uploads_dir, st, rel, media].join('/'), :ensure => 'directory', :owner => owner, :group => group + + for repo in m['repos'].keys do + if st == 'done' + file [uploads_dir, st, rel, media, repo].join('/'), :ensure => 'directory', :owner => owner, :group => group, :mode => 0775 + else + file [uploads_dir, st, rel, media, repo].join('/'), :ensure => 'directory', :owner => owner, :group => group + end + end + } + } + end +end diff --git a/modules/buildsystem/manifests/distros.rb b/modules/buildsystem/manifests/distros.rb new file mode 100644 index 00000000..a298c0a8 --- /dev/null +++ b/modules/buildsystem/manifests/distros.rb @@ -0,0 +1,97 @@ +hostclass "buildsystem::distros" do + mirror_user = 'root' + schedbot_user = scope.lookupvar('buildsystem::var::scheduler::login') + bootstrap_reporoot = scope.lookupvar('buildsystem::var::repository::bootstrap_reporoot') + scope.lookupvar('buildsystem::var::distros::distros').each{|rel, distro| + file [ bootstrap_reporoot, rel ].join('/'), :ensure => 'directory', + :owner => mirror_user, :group => mirror_user + for arch in distro['arch'] do + # As ruby dsl cannot use defined resources, we have to use a + # workaround with 'find_resource_type' as described in this + # puppet issue: http://projects.puppetlabs.com/issues/11912 + scope.find_resource_type 'buildsystem::media_cfg' + media_cfg_args = { + :distro_name => rel, + :arch => arch, + } + if distro['tmpl_media.cfg'] != nil + media_cfg_args['templatefile'] = distro['tmpl_media.cfg'] + end + if ! 
distro['no_media_cfg_update'] + create_resource 'buildsystem::media_cfg', + [ rel, ' ', arch ].join('/'), media_cfg_args + end + file [ bootstrap_reporoot, rel, arch ].join('/'), + :ensure => 'directory', :owner => schedbot_user, + :group => schedbot_user + mediadir = [ bootstrap_reporoot, rel, arch, 'media' ].join('/') + file mediadir, :ensure => 'directory', :owner => schedbot_user, + :group => schedbot_user + file [ mediadir, 'media_info' ].join('/'), :ensure => 'directory', + :owner => schedbot_user, :group => schedbot_user + file [ mediadir, 'debug' ].join('/'), :ensure => 'directory', + :owner => schedbot_user, :group => schedbot_user + distro['medias'].each{|media, m| + file [ mediadir, media ].join('/'), :ensure => 'directory', + :owner => schedbot_user, :group => schedbot_user + file [ mediadir, 'debug', media ].join('/'), + :ensure => 'directory', :owner => schedbot_user, + :group => schedbot_user + for repo in m['repos'].keys do + file [ mediadir, media, repo ].join('/'), + :ensure => 'directory', :owner => schedbot_user, + :group => schedbot_user + file [ mediadir, media, repo, 'media_info' ].join('/'), + :ensure => 'directory', :owner => schedbot_user, + :group => schedbot_user + file [ mediadir, media, repo, 'repodata' ].join('/'), + :ensure => 'directory', :owner => schedbot_user, + :group => schedbot_user + file [ mediadir, 'debug', media, repo ].join('/'), + :ensure => 'directory', :owner => schedbot_user, + :group => schedbot_user + file [ mediadir, 'debug', media, repo, 'media_info' ].join('/'), + :ensure => 'directory', :owner => schedbot_user, + :group => schedbot_user + file [ mediadir, 'debug', media, repo, 'repodata' ].join('/'), + :ensure => 'directory', :owner => schedbot_user, + :group => schedbot_user + end + } + if distro['based_on'] != nil + distro['based_on'].each{|bdistroname, medias| + file [ mediadir, bdistroname ].join('/'), + :ensure => 'directory', :owner => mirror_user, + :group => mirror_user + medias.each{|medianame, media| + mdir = [ mediadir, bdistroname, medianame ].join('/') + file mdir, :ensure => 'directory', + :owner => mirror_user, :group => mirror_user + for reponame in media + file [ mdir, reponame ].join('/'), + :ensure => 'link', + :target => [ + '../../../../..', bdistroname, arch, + 'media', medianame, reponame ].join('/'), + :owner => mirror_user, :group => mirror_user + end + } + } + end + end + # SRPMS + srpmsdir = [ bootstrap_reporoot, rel, 'SRPMS' ].join('/') + file srpmsdir, + :ensure => 'directory', :owner => schedbot_user, + :group => schedbot_user + distro['medias'].each{|media, m| + file [ srpmsdir, media ].join('/'), :ensure => 'directory', + :owner => schedbot_user, :group => schedbot_user + for repo in m['repos'].keys do + file [ srpmsdir, media, repo ].join('/'), + :ensure => 'directory', :owner => schedbot_user, + :group => schedbot_user + end + } + } +end diff --git a/modules/buildsystem/manifests/gatherer.pp b/modules/buildsystem/manifests/gatherer.pp new file mode 100644 index 00000000..eebfd97e --- /dev/null +++ b/modules/buildsystem/manifests/gatherer.pp @@ -0,0 +1,5 @@ +class buildsystem::gatherer { + # emi is in main iurt rpm, should be moved out + include iurt::packages + include iurt::upload +} diff --git a/modules/buildsystem/manifests/init.pp b/modules/buildsystem/manifests/init.pp index e78468f2..f15b5dbf 100644 --- a/modules/buildsystem/manifests/init.pp +++ b/modules/buildsystem/manifests/init.pp @@ -1,83 +1,2 @@ class buildsystem { - - class base { - $build_login = "iurt" - $build_home_dir = "/home/iurt/" - 
- include ssh::auth - ssh::auth::key { $build_login: } # declare a key for build bot: RSA, 2048 bits - } - - class mainnode inherits base { - include iurtuser - ssh::auth::server { $build_login: } - - package { "task-bs-cluster-main": - ensure => "installed" - } - } - - class buildnode inherits base { - include iurt - } - - class scheduler { - # ulri - } - - class dispatcher { - # emi - } - - class repsys { - package { 'repsys': - - } - - - } - - class iurtuser { - group {"$build_login": - ensure => present, - } - - user {"$build_login": - ensure => present, - comment => "System user used to run build bots", - managehome => true, - gid => $build_login, - shell => "/bin/bash", - } - } - - class iurt { - include sudo - include iurtuser - ssh::auth::client { $build_login: } - - # build node common settings - # we could have the following skip list to use less space: - # '/(drakx-installer-binaries|drakx-installer-advertising|gfxboot|drakx-installer-stage2|mandriva-theme)/' - $package_list = ['task-bs-cluster-chroot', 'iurt'] - package { $package_list: - ensure => installed; - } - - file { "$build_home_dir/.iurt.cauldron.conf": - ensure => present, - owner => $build_login, - group => $build_login, - mode => 644, - content => template("buildsystem/iurt.cauldron.conf") - } - - file { "/etc/sudoers.d/iurt": - ensure => present, - owner => root, - group => root, - mode => 440, - content => template("buildsystem/sudoers.iurt") - } - } } diff --git a/modules/buildsystem/manifests/iurt.pp b/modules/buildsystem/manifests/iurt.pp new file mode 100644 index 00000000..231c5373 --- /dev/null +++ b/modules/buildsystem/manifests/iurt.pp @@ -0,0 +1,26 @@ +class buildsystem::iurt { + include sudo + include buildsystem::iurt::user + include buildsystem::iurt::packages + include buildsystem::var::iurt + include buildsystem::var::distros + + # remove old build directory + tidy { "${buildsystem::var::iurt::homedir}/iurt": + age => '8w', + recurse => true, + matches => ['[0-9][0-9].*\..*\..*\.[0-9]*','log','*.rpm','*.log','*.mga[0-9]+'], + rmdirs => true, + } + + file { '/etc/iurt/build': + ensure => directory, + } + + $distros_list = hash_keys($buildsystem::var::distros::distros) + buildsystem::iurt::config { $distros_list: } + + sudo::sudoers_config { 'iurt': + content => template('buildsystem/sudoers.iurt') + } +} diff --git a/modules/buildsystem/manifests/iurt/config.pp b/modules/buildsystem/manifests/iurt/config.pp new file mode 100644 index 00000000..02f5be63 --- /dev/null +++ b/modules/buildsystem/manifests/iurt/config.pp @@ -0,0 +1,50 @@ +define buildsystem::iurt::config() { + include buildsystem::var::iurt + include buildsystem::var::webstatus + include buildsystem::var::repository + $distribution = $name + # TODO rename the variable too in template + $build_login = $buildsystem::var::iurt::login + + $build_timeout = { + 'default' => 36000, + 'atlas' => 57600, + 'blender' => 57600, + 'chromium-browser-stable' => 172800, + 'clang' => 172800, + 'cross-gcc' => 115200, + 'gcc' => 115200, + 'itk' => 115200, + 'java-1.8.0-openjdk' => 172800, + 'java-17-openjdk' => 172800, + 'java-21-openjdk' => 172800, + 'java-latest-openjdk' => 172800, + 'kernel' => 115200, + 'libreoffice' => 432000, + 'llvm' => 115200, + 'llvm17-suite' => 115200, + 'llvm19-suite' => 115200, + 'openfoam' => 115200, + 'paraview' => 115200, + 'qgis' => 57600, + 'qtwebengine5' => 115200, + 'qtwebengine6' => 172800, + 'rust' => 180000, + 'salome' => 57600, + 'vtk' => 57600, + 'webkit' => 57600, + 'webkit2' => 115200, + 'wrapitk' => 115200, + 
'rocm-llvm' => 70000, + } + + $allow_network_access = [ + 'libguestfs', # Needs access to the configured mirrors + ] + + file { "/etc/iurt/build/${distribution}.conf": + owner => $build_login, + group => $build_login, + content => template("buildsystem/iurt.conf") + } +} diff --git a/modules/buildsystem/manifests/iurt/packages.pp b/modules/buildsystem/manifests/iurt/packages.pp new file mode 100644 index 00000000..e814b7c2 --- /dev/null +++ b/modules/buildsystem/manifests/iurt/packages.pp @@ -0,0 +1,3 @@ +class buildsystem::iurt::packages { + package { 'iurt': } +} diff --git a/modules/buildsystem/manifests/iurt/upload.pp b/modules/buildsystem/manifests/iurt/upload.pp new file mode 100644 index 00000000..5417d36e --- /dev/null +++ b/modules/buildsystem/manifests/iurt/upload.pp @@ -0,0 +1,16 @@ +class buildsystem::iurt::upload { + include buildsystem::var::iurt + include buildsystem::var::webstatus + include buildsystem::var::repository + file { '/etc/iurt/upload.conf': + require => File['/etc/iurt'], + content => template('buildsystem/upload.conf'), + notify => Exec['check iurt config'], + } + + exec { 'check iurt config': + refreshonly => true, + command => 'perl -cw /etc/iurt/upload.conf', + logoutput => 'on_failure', + } +} diff --git a/modules/buildsystem/manifests/iurt/user.pp b/modules/buildsystem/manifests/iurt/user.pp new file mode 100644 index 00000000..a93ac7e7 --- /dev/null +++ b/modules/buildsystem/manifests/iurt/user.pp @@ -0,0 +1,11 @@ +class buildsystem::iurt::user { + include buildsystem::var::iurt + + buildsystem::sshuser { $buildsystem::var::iurt::login: + homedir => $buildsystem::var::iurt::homedir, + } + + file { '/etc/iurt': + ensure => directory, + } +} diff --git a/modules/buildsystem/manifests/mainnode.pp b/modules/buildsystem/manifests/mainnode.pp new file mode 100644 index 00000000..01de764f --- /dev/null +++ b/modules/buildsystem/manifests/mainnode.pp @@ -0,0 +1,23 @@ +class buildsystem::mainnode { + include buildsystem::var::repository + include buildsystem::var::scheduler + include buildsystem::var::distros + include buildsystem::iurt::user + include buildsystem::scheduler + include buildsystem::gatherer + include buildsystem::mgarepo + include buildsystem::signbot + include buildsystem::youri_submit + include buildsystem::sshkeys + include buildsystem::distros + + sshkeys::set_client_key_pair { $buildsystem::var::scheduler::login: + home => $buildsystem::var::scheduler::homedir, + user => $buildsystem::var::scheduler::login, + } + sshkeys::set_authorized_keys { 'scheduler-allow-scheduler': + keyname => $buildsystem::var::scheduler::login, + home => $buildsystem::var::scheduler::homedir, + user => $buildsystem::var::scheduler::login, + } +} diff --git a/modules/buildsystem/manifests/maintdb.pp b/modules/buildsystem/manifests/maintdb.pp new file mode 100644 index 00000000..5a961b63 --- /dev/null +++ b/modules/buildsystem/manifests/maintdb.pp @@ -0,0 +1,58 @@ +class buildsystem::maintdb { + include buildsystem::var::maintdb + include buildsystem::var::groups + include buildsystem::var::webstatus + include sudo + + user { $buildsystem::var::maintdb::login: + home => $buildsystem::var::maintdb::homedir, + } + + file { [$buildsystem::var::maintdb::homedir,$buildsystem::var::maintdb::dbdir]: + ensure => directory, + owner => $buildsystem::var::maintdb::login, + group => $buildsystem::var::maintdb::login, + mode => '0711', + require => User[$buildsystem::var::maintdb::login], + } + + file { $buildsystem::var::maintdb::binpath: + mode => '0755', + content => 
template('buildsystem/maintdb/maintdb.bin') + } + + mga_common::local_script { 'wrapper.maintdb': + content => template('buildsystem/maintdb/wrapper.maintdb') + } + + sudo::sudoers_config { 'maintdb': + content => template('buildsystem/maintdb/sudoers.maintdb') + } + + file { [$buildsystem::var::maintdb::dump, + "${buildsystem::var::maintdb::dump}.new", + $buildsystem::var::maintdb::unmaintained, + "${buildsystem::var::maintdb::unmaintained}.new"]: + owner => $buildsystem::var::maintdb::login, + require => File["${buildsystem::var::webstatus::location}/data"], + } + + cron { 'update maintdb export': + user => $buildsystem::var::maintdb::login, + command => "${buildsystem::var::maintdb::binpath} root get > ${buildsystem::var::maintdb::dump}.new; cp -f ${buildsystem::var::maintdb::dump}.new ${buildsystem::var::maintdb::dump}; grep ' nobody\$' ${buildsystem::var::maintdb::dump} | sed 's/ nobody\$//' > ${buildsystem::var::maintdb::unmaintained}.new; cp -f ${buildsystem::var::maintdb::unmaintained}.new ${buildsystem::var::maintdb::unmaintained}", + minute => '*/30', + require => User[$buildsystem::var::maintdb::login], + } + + apache::vhost::base { $buildsystem::var::maintdb::hostname: + location => $buildsystem::var::maintdb::dbdir, + content => template('buildsystem/maintdb/vhost_maintdb.conf'), + } + + apache::vhost::base { "ssl_${buildsystem::var::maintdb::hostname}": + use_ssl => true, + vhost => $buildsystem::var::maintdb::hostname, + location => $buildsystem::var::maintdb::dbdir, + content => template('buildsystem/maintdb/vhost_maintdb.conf'), + } +} diff --git a/modules/buildsystem/manifests/media_cfg.pp b/modules/buildsystem/manifests/media_cfg.pp new file mode 100644 index 00000000..77fcc8fd --- /dev/null +++ b/modules/buildsystem/manifests/media_cfg.pp @@ -0,0 +1,11 @@ +define buildsystem::media_cfg($distro_name, $arch, $templatefile = 'buildsystem/media.cfg') { + include buildsystem::var::repository + include buildsystem::var::scheduler + include buildsystem::repository + + file { "${buildsystem::var::repository::bootstrap_reporoot}/${distro_name}/${arch}/media/media_info/media.cfg": + owner => $buildsystem::var::scheduler::login, + group => $buildsystem::var::scheduler::login, + content => template($templatefile), + } +} diff --git a/modules/buildsystem/manifests/mgarepo.pp b/modules/buildsystem/manifests/mgarepo.pp new file mode 100644 index 00000000..14e11e1a --- /dev/null +++ b/modules/buildsystem/manifests/mgarepo.pp @@ -0,0 +1,36 @@ +class buildsystem::mgarepo { + include buildsystem::var::scheduler + include buildsystem::var::distros + include buildsystem::var::groups + include buildsystem::var::binrepo + include buildsystem::create_upload_dir + $sched_login = $buildsystem::var::scheduler::login + $sched_home_dir = $buildsystem::var::scheduler::homedir + + package { ['mgarepo','rpm-build']: } + + file { '/etc/mgarepo.conf': + content => template('buildsystem/mgarepo.conf'), + } + + file { "${sched_home_dir}/repsys": + ensure => 'directory', + owner => $sched_login, + require => File[$sched_home_dir], + } + + file { ["${sched_home_dir}/repsys/tmp", "${sched_home_dir}/repsys/srpms"]: + ensure => 'directory', + owner => $sched_login, + group => $buildsystem::var::groups::packagers, + mode => '1775', + require => File["${sched_home_dir}/repsys"], + } + + # FIXME: disabled temporarily as upload dir is a symlink to /var/lib/repsys/uploads + #file { "${sched_home_dir}/uploads": + # ensure => "directory", + # owner => $sched_login, + # require => File[$sched_home_dir], + #} +} diff 
--git a/modules/buildsystem/manifests/release.pp b/modules/buildsystem/manifests/release.pp new file mode 100644 index 00000000..d9feac8e --- /dev/null +++ b/modules/buildsystem/manifests/release.pp @@ -0,0 +1,5 @@ +class buildsystem::release { + git::snapshot { '/root/release': + source => "git://git.${::domain}/software/infrastructure/release", + } +} diff --git a/modules/buildsystem/manifests/repoctl.pp b/modules/buildsystem/manifests/repoctl.pp new file mode 100644 index 00000000..8d44e52c --- /dev/null +++ b/modules/buildsystem/manifests/repoctl.pp @@ -0,0 +1,11 @@ +class buildsystem::repoctl { + include buildsystem::var::distros + include buildsystem::var::repository + + package{ 'repoctl': } + + file { '/etc/repoctl.conf': + content => template('buildsystem/repoctl.conf'), + require => Package['repoctl'], + } +} diff --git a/modules/buildsystem/manifests/repository.pp b/modules/buildsystem/manifests/repository.pp new file mode 100644 index 00000000..dda90eb2 --- /dev/null +++ b/modules/buildsystem/manifests/repository.pp @@ -0,0 +1,11 @@ +class buildsystem::repository { + include buildsystem::var::repository + file { [ $buildsystem::var::repository::bootstrap_root, + $buildsystem::var::repository::bootstrap_reporoot ] : + ensure => directory, + } + + apache::vhost::other_app { $buildsystem::var::repository::hostname: + vhost_file => 'buildsystem/vhost_repository.conf', + } +} diff --git a/modules/buildsystem/manifests/rpmlint.pp b/modules/buildsystem/manifests/rpmlint.pp new file mode 100644 index 00000000..388d0bee --- /dev/null +++ b/modules/buildsystem/manifests/rpmlint.pp @@ -0,0 +1,3 @@ +class buildsystem::rpmlint { + package { 'rpmlint': } +} diff --git a/modules/buildsystem/manifests/scheduler.pp b/modules/buildsystem/manifests/scheduler.pp new file mode 100644 index 00000000..53b248fc --- /dev/null +++ b/modules/buildsystem/manifests/scheduler.pp @@ -0,0 +1,57 @@ +class buildsystem::scheduler { + # until ulri is split from main iurt rpm + include buildsystem::iurt::packages + include buildsystem::iurt::upload + include buildsystem::var::scheduler + + $login = $buildsystem::var::scheduler::login + $homedir = $buildsystem::var::scheduler::homedir + $logdir = $buildsystem::var::scheduler::logdir + + buildsystem::sshuser { $login: + homedir => $homedir, + } + + file { $logdir: + ensure => directory, + mode => '0755', + owner => $login, + } + + cron { 'dispatch jobs': + user => $login, + command => "EMI_LOG_FILE=${logdir}/emi.log ULRI_LOG_FILE=${logdir}/ulri.log ulri; EMI_LOG_FILE=${logdir}/emi.log emi", + minute => '*', + } + + if ($buildsystem::var::scheduler::clean_uploads_logs_age != 0) { + cron { 'clean uploads logs': + user => $login, + # Delete old upload logs + command => sprintf("/usr/bin/find %s/uploads -ignore_readdir_race -xdev -depth -type f -ctime +%d -delete", shellquote($homedir), shellquote($buildsystem::var::scheduler::clean_uploads_logs_age)), + hour => '*/4', + minute => '51', + } + cron { 'clean uploads dirs': + user => $login, + # Remove old empty uploads directories. This will take several + # passes (over several weeks) to delete a directory hierarchy + # because it is looking at ctime instead of mtime, which resets + # every time a file/directory underneath it is deleted. + # Directories don't take much space, so this shouldn't be a + # real issue. 
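+      # For example, with the defaults from buildsystem::var::scheduler
+      # ($login = 'schedbot', so $homedir = '/var/lib/schedbot', and
+      # $clean_uploads_logs_age = 14), this expands to roughly:
+      #   /usr/bin/find /var/lib/schedbot/uploads -ignore_readdir_race -mindepth 5 -xdev -depth -type d -ctime +14 -empty -delete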
+      command => sprintf("/usr/bin/find %s/uploads -ignore_readdir_race -mindepth 5 -xdev -depth -type d -ctime +%d -empty -delete", shellquote($homedir), shellquote($buildsystem::var::scheduler::clean_uploads_logs_age)),
+      hour => '*/4',
+      minute => '53',
+    }
+  }
+  if ($buildsystem::var::scheduler::clean_uploads_packages_age != 0) {
+    cron { 'clean uploads packages':
+      user => $login,
+      # Delete old upload RPMs
+      command => sprintf("/usr/bin/find %s/uploads -ignore_readdir_race -xdev -depth -type f -name '*.rpm' -ctime +%d -delete", shellquote($homedir), shellquote($buildsystem::var::scheduler::clean_uploads_packages_age)),
+      hour => '*/4',
+      minute => '52',
+    }
+  }
+}
diff --git a/modules/buildsystem/manifests/signbot.pp b/modules/buildsystem/manifests/signbot.pp
new file mode 100644
index 00000000..60c7c318
--- /dev/null
+++ b/modules/buildsystem/manifests/signbot.pp
@@ -0,0 +1,31 @@
+class buildsystem::signbot {
+  include buildsystem::var::scheduler
+  include buildsystem::var::signbot
+  $sched_login = $buildsystem::var::scheduler::login
+
+  buildsystem::sshuser { $buildsystem::var::signbot::login:
+    homedir => $buildsystem::var::signbot::home_dir,
+    groups => [$sched_login],
+  }
+
+  gnupg::keys{ 'packages':
+    email => $buildsystem::var::signbot::keyemail,
+    key_name => $buildsystem::var::signbot::keyname,
+    login => $buildsystem::var::signbot::login,
+    batchdir => "${buildsystem::var::signbot::home_dir}/batches",
+    keydir => $buildsystem::var::signbot::sign_keydir,
+  }
+
+  sudo::sudoers_config { 'signpackage':
+    content => template('buildsystem/signbot/sudoers.signpackage')
+  }
+
+  file { "${buildsystem::var::signbot::home_dir}/.rpmmacros":
+    source => 'puppet:///modules/buildsystem/signbot/signbot-rpmmacros',
+  }
+
+  mga_common::local_script {
+    'sign-check-package': source => 'puppet:///modules/buildsystem/signbot/sign-check-package';
+    'mga-signpackage': source => 'puppet:///modules/buildsystem/signbot/mga-signpackage';
+  }
+}
diff --git a/modules/buildsystem/manifests/sshkeys.pp b/modules/buildsystem/manifests/sshkeys.pp
new file mode 100644
index 00000000..5a1b2900
--- /dev/null
+++ b/modules/buildsystem/manifests/sshkeys.pp
@@ -0,0 +1,5 @@
+class buildsystem::sshkeys {
+  include buildsystem::var::scheduler
+
+  sshkeys::create_key { $buildsystem::var::scheduler::login: }
+}
diff --git a/modules/buildsystem/manifests/sshuser.pp b/modules/buildsystem/manifests/sshuser.pp
new file mode 100644
index 00000000..5cad97ad
--- /dev/null
+++ b/modules/buildsystem/manifests/sshuser.pp
@@ -0,0 +1,36 @@
+# $groups: array of secondary groups (only local groups, no ldap)
+define buildsystem::sshuser($homedir, $comment = undef, $groups = []) {
+  group { $name: }
+
+  user { $name:
+    comment => $comment,
+    managehome => true,
+    home => $homedir,
+    gid => $name,
+    groups => $groups,
+    shell => '/bin/bash',
+    notify => Exec["unlock ${name}"],
+    require => Group[$title],
+  }
+
+  # set the password to '*' to unlock the account while still forbidding password logins
+  exec { "unlock ${name}":
+    command => "usermod -p '*' ${name}",
+    refreshonly => true,
+  }
+
+  file { $homedir:
+    ensure => directory,
+    owner => $name,
+    group => $name,
+    require => User[$name],
+  }
+
+  file { "${homedir}/.ssh":
+    ensure => directory,
+    mode => '0600',
+    owner => $name,
+    group => $name,
+    require => File[$homedir],
+  }
+}
diff --git a/modules/buildsystem/manifests/var/binrepo.pp b/modules/buildsystem/manifests/var/binrepo.pp
new file mode 100644
index 00000000..1431ed25
--- /dev/null
+++ b/modules/buildsystem/manifests/var/binrepo.pp
@@ -0,0 +1,15 @@
+# $uploadmail_from:
+#   the address from which the binrepo upload email notifications are sent
+# $uploadmail_to:
+#   where binrepo email notifications are sent
+class buildsystem::var::binrepo(
+  $hostname = "binrepo.${::domain}",
+  $login = 'binrepo',
+  $homedir = '/var/lib/binrepo',
+  $uploadmail_from,
+  $uploadmail_to
+) {
+  $repodir = "${homedir}/data"
+  $uploadinfosdir = "${homedir}/infos"
+  $uploadbinpath = '/usr/local/bin/upload-bin'
+}
diff --git a/modules/buildsystem/manifests/var/distros.pp b/modules/buildsystem/manifests/var/distros.pp
new file mode 100644
index 00000000..9e45e2c2
--- /dev/null
+++ b/modules/buildsystem/manifests/var/distros.pp
@@ -0,0 +1,126 @@
+# $default_distro:
+#   the name of the default distribution
+# $repo_allow_from_ips:
+# $repo_allow_from_domains:
+#   list of IPs or domains allowed to access the repository. If you don't
+#   want to filter allowed IPs, don't set those values.
+# $distros:
+#   a hash containing distribution information indexed by distribution
+#   name. Each distribution is itself a hash containing the following
+#   info:
+# {
+#   # the 'cauldron' distribution
+#   'cauldron' => {
+#     # list of arch supported by 'cauldron'
+#     'arch' => [ 'i586', 'x86_64' ],
+#     # Set this if you don't want media.cfg to be generated
+#     'no_media_cfg_update' => true,
+#     'medias' => {
+#       # the 'core' media
+#       'core' => {
+#         'repos' => {
+#           # the 'release' repo in the 'core' media
+#           'release' => {
+#             'media_type' => [ 'release' ],
+#             'noauto' => '1',
+#             # the 'release' repo should be listed first in media.cfg
+#             'order' => 0,
+#           },
+#           # the 'updates' repo
+#           'updates' => {
+#             'media_type' => [ 'updates' ],
+#             'noauto' => '1',
+#             # the 'updates' repo requires the 'release' repo
+#             'requires' => [ 'release' ],
+#             # the 'updates' repo should be listed after 'release' in media.cfg
+#             'order' => 1,
+#           },
+#         },
+#         # media_type for media.cfg
+#         'media_type' => [ 'official', 'free' ],
+#         # if noauto is set to '1' either in medias or repos,
+#         # the option will be added to media.cfg
+#         'noauto' => '1',
+#         # list 'core' first in media.cfg
+#         'order' => 0,
+#       },
+#       # the 'non-free' media
+#       'non-free' => {
+#         'repos' => {
+#           ...
+#         },
+#         'media_type' => [ 'official', 'non-free' ],
+#         # the 'non-free' media requires the 'core' media
+#         'requires' => [ 'core' ],
+#         # list 'non-free' second
+#         'order' => 1,
+#       }
+#     },
+#     # the list of media used by iurt to build the chroots
+#     'base_medias' => [ 'core/release' ],
+#     # optionally, a media.cfg template file can be specified, if
+#     # the default one should not be used
+#     'tmpl_media.cfg' => 'buildsystem/something',
+#     # branch is Devel or Official. Used in media.cfg.
+#     'branch' => 'Devel',
+#     # Version of the distribution
+#     'version' => '3',
+#     # SVN Urls allowed to submit
+#     'submit_allowed' => 'svn://svn.something/svn/packages/cauldron',
+#     # rpm macros to set when building the source package
+#     'macros' => {
+#       'distsuffix' => '.mga',
+#       'distribution' => 'Mageia',
+#       'vendor' => 'Mageia.Org',
+#     },
+#     # set this if the distro is not mirrored. This is used to add
+#     # an Alias in the vhost.
+#     'no_mirror' => true,
+#     # Optionally, the distribution can be based on the repos from another
+#     # distribution. In this example we're saying that the distribution is
+#     # based on 2/core/release and 2/core/updates.
+#     'based_on' => {
+#       '2' => {
+#         'core' => [ 'release', 'updates' ],
+#       },
+#     },
+#     'youri' => {
+#       # Configuration for youri-upload
+#       'upload' => {
+#         # list of enabled checks, actions and posts
+#         'targets' => {
+#           'checks' => [
+#             ...
+#           ],
+#           'actions' => [
+#             ...
+#           ],
+#           'posts' => [
+#             ...
+#           ],
+#         },
+#         'checks' => {
+#           # rpmlint checks options
+#           'rpmlint' => {
+#             'config' => '/usr/share/rpmlint/config',
+#             'path' => '/usr/bin/rpmlint',
+#           },
+#         },
+#         # options for actions
+#         'actions' => {
+#           ...
+#         },
+#       },
+#       # Configuration for youri-todo
+#       'todo' => {
+#         ...
+#       },
+#     },
+#   },
+# }
+class buildsystem::var::distros(
+  $default_distro,
+  $repo_allow_from_ips,
+  $repo_allow_from_domains,
+  $distros,
+) { }
diff --git a/modules/buildsystem/manifests/var/groups.pp b/modules/buildsystem/manifests/var/groups.pp
new file mode 100644
index 00000000..c0b2c917
--- /dev/null
+++ b/modules/buildsystem/manifests/var/groups.pp
@@ -0,0 +1,9 @@
+# $packagers:
+#   name of the packagers group, whose members are allowed to submit packages
+# $packagers_committers:
+#   name of the group of users who are allowed to commit on packages
+class buildsystem::var::groups(
+  $packagers,
+  $packagers_committers
+) {
+}
diff --git a/modules/buildsystem/manifests/var/iurt.pp b/modules/buildsystem/manifests/var/iurt.pp
new file mode 100644
index 00000000..fb65a160
--- /dev/null
+++ b/modules/buildsystem/manifests/var/iurt.pp
@@ -0,0 +1,5 @@
+class buildsystem::var::iurt(
+  $login = 'iurt',
+  $homedir = '/home/iurt',
+  $timeout_multiplier = 1,
+) { }
diff --git a/modules/buildsystem/manifests/var/maintdb.pp b/modules/buildsystem/manifests/var/maintdb.pp
new file mode 100644
index 00000000..e0079e40
--- /dev/null
+++ b/modules/buildsystem/manifests/var/maintdb.pp
@@ -0,0 +1,11 @@
+class buildsystem::var::maintdb(
+  $hostname = "maintdb.${::domain}",
+  $login = 'maintdb',
+  $homedir = '/var/lib/maintdb'
+) {
+  include buildsystem::var::webstatus
+  $dbdir = "${homedir}/db"
+  $binpath = '/usr/local/sbin/maintdb'
+  $dump = "${buildsystem::var::webstatus::location}/data/maintdb.txt"
+  $unmaintained = "${buildsystem::var::webstatus::location}/data/unmaintained.txt"
+}
diff --git a/modules/buildsystem/manifests/var/mgarepo.pp b/modules/buildsystem/manifests/var/mgarepo.pp
new file mode 100644
index 00000000..9099c7ee
--- /dev/null
+++ b/modules/buildsystem/manifests/var/mgarepo.pp
@@ -0,0 +1,22 @@
+# $submit_host:
+#   hostname used to submit packages
+# $svn_hostname:
+#   hostname of the svn server used for packages
+# $svn_root_packages:
+#   svn root url of the svn repository for packages
+# $svn_root_packages_ssh:
+#   svn+ssh root url of the svn repository for packages
+# $oldurl:
+#   svn url where the import logs of the rpms are stored
+# $conf:
+#   $conf{'global'} is a hash table of values used in mgarepo.conf in
+#   the [global] section
+class buildsystem::var::mgarepo(
+  $submit_host,
+  $svn_hostname,
+  $svn_root_packages,
+  $svn_root_packages_ssh,
+  $oldurl,
+  $conf
+) {
+}
diff --git a/modules/buildsystem/manifests/var/repository.pp b/modules/buildsystem/manifests/var/repository.pp
new file mode 100644
index 00000000..0ea1058c
--- /dev/null
+++ b/modules/buildsystem/manifests/var/repository.pp
@@ -0,0 +1,9 @@
+class buildsystem::var::repository(
+  $hostname = "repository.${::domain}",
+  $bootstrap_root = '/distrib/bootstrap',
+  $mirror_root = '/distrib/mirror',
+  $distribdir = 'distrib'
+) {
+  $bootstrap_reporoot = "${bootstrap_root}/${distribdir}"
+  $mirror_reporoot = "${mirror_root}/${distribdir}"
+}
diff --git a/modules/buildsystem/manifests/var/scheduler.pp b/modules/buildsystem/manifests/var/scheduler.pp
new file mode 100644
index 00000000..b431594c
--- /dev/null
+++ b/modules/buildsystem/manifests/var/scheduler.pp
@@ -0,0 +1,31 @@
+# $admin_mail:
+#   the
email address from which the build failure notifications +# will be sent +# $pkg_uphost: +# hostname of the server where submitted packages are uploaded +# $build_nodes: +# a hash containing available build nodes indexed by architecture +# $build_nodes_aliases: +# a hash containing build nodes indexed by their alias +# $build_src_node: +# hostname of the server building the initial src.rpm +# $clean_uploads_logs_age: +# old logs are cleaned when they are older than some amount of days. +# You can define this amount of time using this variable. Set it to +# 14 for two weeks, 2 for two days, or 0 if you don't want to +# clean old logs at all +# $clean_uploads_packages_age: +# same as $clean_uploads_logs_age but for old RPMs +class buildsystem::var::scheduler( + $admin_mail = "root@${::domain}", + $pkg_uphost = "pkgsubmit.${::domain}", + $build_nodes, + $build_nodes_aliases = {}, + $build_src_node, + $clean_uploads_logs_age = 14, + $clean_uploads_packages_age = 7 +){ + $login = 'schedbot' + $homedir = "/var/lib/${login}" + $logdir = "/var/log/${login}" +} diff --git a/modules/buildsystem/manifests/var/signbot.pp b/modules/buildsystem/manifests/var/signbot.pp new file mode 100644 index 00000000..7d92a324 --- /dev/null +++ b/modules/buildsystem/manifests/var/signbot.pp @@ -0,0 +1,15 @@ +# $keyid: +# the key id of the gnupg key used to sign packages +# $keyemail: +# email address of the key used to sign packages +# $keyname: +# name of the key used to sign packages +class buildsystem::var::signbot( + $keyid, + $keyemail, + $keyname +) { + $login = 'signbot' + $home_dir = "/var/lib/${login}" + $sign_keydir = "${home_dir}/keys" +} diff --git a/modules/buildsystem/manifests/var/webstatus.pp b/modules/buildsystem/manifests/var/webstatus.pp new file mode 100644 index 00000000..21f8d59f --- /dev/null +++ b/modules/buildsystem/manifests/var/webstatus.pp @@ -0,0 +1,25 @@ +# $git_url: +# git url where the sources of webstatus are located +# $hostname: +# vhost name of the webstatus page +# $location: +# path of the directory where the webstatus files are located +# $package_commit_url: +# url to view a commit on a package. %d is replaced by the commit id. +# $max_modified: +# how much history should we display, in days +# $theme_name: +# name of the webstatus theme +# $themes_dir: +# path of the directory where the themes are located. If you want +# to use a theme not included in webstatus, you need to change this. +class buildsystem::var::webstatus( + $git_url = "git://git.${::domain}/web/pkgsubmit", + $hostname = "pkgsubmit.${::domain}", + $location = '/var/www/bs', + $package_commit_url, + $max_modified = '2', + $theme_name = 'mageia', + $themes_dir = '/var/www/bs/themes/' +) { +} diff --git a/modules/buildsystem/manifests/var/youri.pp b/modules/buildsystem/manifests/var/youri.pp new file mode 100644 index 00000000..f20b6c7b --- /dev/null +++ b/modules/buildsystem/manifests/var/youri.pp @@ -0,0 +1,401 @@ +# The youri configuration files are created using information from 3 +# different hash variables : +# - the $youri_conf_default variable defined in this class, containing +# the default configuration for youri. It contains the repository +# configuration, and the definitions of the checks, actions and posts. +# - the $youri_conf parameter passed to this class. The values defined +# in this hash override the values defined in the default configuration. 
+# - for each distribution defined in the hash variable $distros from +# var::buildsystem::distros the hash defined in index 'youri' contains +# some distro specific options for youri checks, actions or posts. It +# also contains for each distribution the list of active checks, +# actions and posts. +# +# Each of those variables contain the configuration for youri submit-todo +# (in index 'todo') and youri submit-upload (in index 'upload') +# +# +# Parameters : +# $tmpl_youri_upload_conf: +# template file for youri submit-upload.conf +# $tmpl_youri_todo_conf: +# template file for youri submit-todo.conf +# $packages_archivedir: +# the directory where youri will archive old packages when they are +# replaced by a new version +# $youri_conf: +# a hash containing the youri configuration +class buildsystem::var::youri( + $tmpl_youri_upload_conf = 'buildsystem/youri/submit.conf', + $tmpl_youri_todo_conf = 'buildsystem/youri/submit.conf', + $packages_archivedir, + $youri_conf = {} +) { + include buildsystem::var::repository + include buildsystem::var::mgarepo + include buildsystem::var::distros + include buildsystem::var::signbot + include buildsystem::var::scheduler + + $check_tag = { 'class' => 'Youri::Submit::Check::Tag', } + $check_recency = { 'class' => 'Youri::Submit::Check::Recency', } + $check_queue_recency = { 'class' => 'Youri::Submit::Check::Queue_recency', } + $check_host = { + 'class' => 'Youri::Submit::Check::Host', + 'options' => { + 'host_file' => '/etc/youri/host.conf', + }, + } + $check_rpmlint = { 'class' => 'Youri::Submit::Check::Rpmlint', } + $check_acl = { + 'class' => 'Youri::Submit::Check::ACL', + 'options' => { + 'acl_file' => '/etc/youri/acl.conf', + }, + } + $check_source = { 'class' => 'Youri::Submit::Check::Source', } + $check_version = { + 'class' => 'Youri::Submit::Check::Version', + 'options' => {}, + } + + $youri_conf_default = { + 'upload' => { + 'repository' => { + 'class' => 'Youri::Repository::Mageia', + 'options' => { + 'install_root' => $buildsystem::var::repository::bootstrap_reporoot, + 'upload_root' => '$home/uploads/', + 'archive_root' => $packages_archivedir, + 'upload_state' => 'queue', + 'queue' => 'queue', + 'noarch' => 'x86_64', + 'svn' => "${buildsystem::var::mgarepo::svn_root_packages_ssh}/${buildsystem::var::distros::default_distro}", + }, + }, + 'checks' => { + 'tag' => $check_tag, + 'recency' => $check_recency, + 'queue_recency' => $check_queue_recency, + 'host' => $check_host, + 'section' => { + 'class' => 'Youri::Submit::Check::Section', + }, + 'rpmlint' => $check_rpmlint, + 'svn' => { + 'class' => 'Youri::Submit::Check::SVN', + }, + 'acl' => $check_acl, + 'history' => { + 'class' => 'Youri::Submit::Check::History', + }, + 'source' => $check_source, + 'precedence' => { + 'class' => 'Youri::Submit::Check::Precedence', + 'options' => { + 'target' => $buildsystem::var::distros::default_distro, + }, + }, + 'version' => $check_version, + }, + 'actions' => { + 'install' => { + 'class' => 'Youri::Submit::Action::Install', + }, + 'markrelease' => { + 'class' => 'Youri::Submit::Action::Markrelease', + }, + 'link' => { + 'class' => 'Youri::Submit::Action::Link', + }, + 'archive' => { + 'class' => 'Youri::Submit::Action::Archive', + }, + 'clean' => { + 'class' => 'Youri::Submit::Action::Clean', + }, + 'sign' => { + 'class' => 'Youri::Submit::Action::Sign', + 'options' => { + 'signuser' => $buildsystem::var::signbot::login, + 'path' => $buildsystem::var::signbot::sign_keydir, + 'name' => $buildsystem::var::signbot::keyid, + 'signscript' => 
'/usr/local/bin/sign-check-package', + }, + }, + 'unpack_gfxboot_theme' => { + 'class' => 'Youri::Submit::Action::Unpack', + 'options' => { + 'name' => 'mageia-gfxboot-theme', + 'source_subdir' => '/usr/share/gfxboot/themes/Mageia/install/', + 'dest_directory' => 'isolinux', + 'unpack_inside_distribution_root' => '1', + }, + }, + 'unpack_meta_task' => { + 'class' => 'Youri::Submit::Action::Unpack', + 'options' => { + 'name' => 'meta-task', + 'source_subdir' => '/usr/share/meta-task', + 'dest_directory' => 'media/media_info', + 'unpack_inside_distribution_root' => '1', + }, + }, + 'unpack_installer_images' => { + 'class' => 'Youri::Submit::Action::Unpack', + 'options' => { + 'name' => 'drakx-installer-images', + 'source_subdir' => '/usr/lib*/drakx-installer-images', + 'dest_directory' => '.', + 'preclean_directory' => 'install/images/alternatives', + 'unpack_inside_distribution_root' => '1', + }, + }, + 'unpack_installer_images_nonfree' => { + 'class' => 'Youri::Submit::Action::Unpack', + 'options' => { + 'name' => 'drakx-installer-images-nonfree', + 'source_subdir' => '/usr/lib*/drakx-installer-images', + 'dest_directory' => '.', + 'unpack_inside_distribution_root' => '1', + }, + }, + 'unpack_installer_stage2' => { + 'class' => 'Youri::Submit::Action::Unpack', + 'options' => { + 'name' => 'drakx-installer-stage2', + 'source_subdir' => '/usr/lib*/drakx-installer-stage2', + 'dest_directory' => '.', + 'unpack_inside_distribution_root' => '1', + }, + }, + 'unpack_installer_advertising' => { + 'class' => 'Youri::Submit::Action::Unpack', + 'options' => { + 'name' => 'drakx-installer-advertising', + 'source_subdir' => '/usr/share/drakx-installer-advertising', + 'dest_directory' => '.', + 'unpack_inside_distribution_root' => '1', + }, + }, + 'unpack_installer_rescue' => { + 'class' => 'Youri::Submit::Action::Unpack', + 'options' => { + 'name' => 'drakx-installer-rescue', + 'source_subdir' => '/usr/lib*/drakx-installer-rescue', + 'dest_directory' => 'install/stage2', + 'unpack_inside_distribution_root' => '1', + }, + }, + 'unpack_release_notes' => { + 'class' => 'Youri::Submit::Action::Unpack', + 'options' => { + 'name' => 'mageia-release-common', + 'source_subdir' => '/usr/share/doc/mageia-release-common', + 'grep_files' => 'release-notes.*', + 'dest_directory' => '.', + 'unpack_inside_distribution_root' => '1', + }, + }, + 'unpack_syslinux' => { + 'class' => 'Youri::Submit::Action::Unpack', + 'options' => { + 'name' => 'syslinux', + 'source_subdir' => '/usr/lib/syslinux/', + 'grep_files' => '\\(hdt\\|ifcpu\\|ldlinux\\|libcom32\\|libgpl\\|libmenu\\|libutil\\).c32', + 'dest_directory' => 'isolinux', + 'unpack_inside_distribution_root' => '1', + }, + }, + 'unpack_pci_usb_ids' => { + 'class' => 'Youri::Submit::Action::Unpack', + 'options' => { + 'name' => 'ldetect-lst', + 'source_subdir' => '/usr/share/', + 'grep_files' => '\\(pci\\|usb\\).ids', + 'dest_directory' => 'isolinux', + 'unpack_inside_distribution_root' => '1', + }, + }, + 'mail' => { + 'class' => 'Youri::Submit::Action::Mail', + 'options' => { + 'mta' => '/usr/sbin/sendmail', + }, + }, + 'maintdb' => { + 'class' => 'Youri::Submit::Action::UpdateMaintDb', + }, + 'rebuild' => { + 'class' => 'Youri::Submit::Action::RebuildPackage', + 'options' => { + 'rules' => { + 'drakx-installer-binaries' => ['drakx-installer-images'], + 'drakx-kbd-mouse-x11' => ['drakx-installer-stage2'], + 'drakx-net' => ['drakx-installer-stage2'], + 'kernel-desktop-latest' => ['drakx-installer-images', 'kmod-virtualbox', 'kmod-xtables-addons'], + 
'kernel-desktop586-latest' => ['drakx-installer-images', 'kmod-virtualbox', 'kmod-xtables-addons'], + 'kernel-server-latest' => ['kmod-virtualbox', 'kmod-xtables-addons'], + 'ldetect-lst' => ['drakx-installer-stage2'], + 'meta-task' => ['drakx-installer-stage2'], + 'perl' => ['drakx-installer-stage2'], + 'perl-URPM' => ['drakx-installer-stage2'], + 'rpm' => ['drakx-installer-stage2'], + 'rpm-mageia-setup' => ['drakx-installer-stage2'], + 'urpmi' => ['drakx-installer-stage2'], + }, + }, + }, + }, + 'posts' => { + 'genhdlist2' => { + 'class' => 'Youri::Submit::Post::Genhdlist2', + 'options' => { + 'command' => '/usr/bin/genhdlist2 --xml-info-filter ".lzma:xz -T4" --synthesis-filter ".cz:xz -7 -T8"', + }, + }, + 'genhdlist2_zstd' => { + 'class' => 'Youri::Submit::Post::Genhdlist2', + 'options' => { + 'command' => '/usr/bin/genhdlist2 --xml-info-filter ".lzma:xz -T4" --synthesis-filter ".cz:zstd -19 -T8"', + }, + }, + 'createrepo_mga6' => { + 'class' => 'Youri::Submit::Post::RunOnModifiedMedia', + 'options' => { + 'command' => 'createrepo_c --no-database --update --workers=10', + }, + }, + 'createrepo_mga7' => { + 'class' => 'Youri::Submit::Post::RunOnModifiedMedia', + 'options' => { + 'command' => 'createrepo_c --no-database --update --workers=10 --zck --zck-dict-dir /usr/share/mageia-repo-zdicts/mga7/', + }, + }, + 'createrepo_mga8' => { + 'class' => 'Youri::Submit::Post::RunOnModifiedMedia', + 'options' => { + 'command' => 'createrepo_c --no-database --update --workers=10 --zck --zck-dict-dir /usr/share/mageia-repo-zdicts/mga7/', + }, + }, + 'createrepo_mga9' => { + 'class' => 'Youri::Submit::Post::RunOnModifiedMedia', + 'options' => { + 'command' => 'createrepo_c --no-database --update --workers=10 --zck --zck-dict-dir /usr/share/mageia-repo-zdicts/mga7/', + }, + }, + 'createrepo_cauldron' => { + 'class' => 'Youri::Submit::Post::RunOnModifiedMedia', + 'options' => { + 'command' => 'createrepo_c --no-database --update --workers=10', + }, + }, + 'appstream_mga6' => { + 'class' => 'Youri::Submit::Post::RunOnModifiedMedia', + 'options' => { + 'command' => '/distrib/appstream/appstream-6-modifyrepo.sh', + }, + }, + 'appstream_mga7' => { + 'class' => 'Youri::Submit::Post::RunOnModifiedMedia', + 'options' => { + 'command' => '/distrib/appstream/appstream-7-modifyrepo.sh', + }, + }, + 'appstream_mga8' => { + 'class' => 'Youri::Submit::Post::RunOnModifiedMedia', + 'options' => { + 'command' => '/distrib/appstream/appstream-8-modifyrepo.sh', + }, + }, + 'appstream_mga9' => { + 'class' => 'Youri::Submit::Post::RunOnModifiedMedia', + 'options' => { + 'command' => '/distrib/appstream/appstream-9-modifyrepo.sh', + }, + }, + 'appstream_cauldron' => { + 'class' => 'Youri::Submit::Post::RunOnModifiedMedia', + 'options' => { + 'command' => '/distrib/appstream/appstream-cauldron-modifyrepo.sh', + }, + }, + 'clean_rpmsrate' => { + 'class' => 'Youri::Submit::Post::CleanRpmsrate', + }, + 'mirror' => { + 'class' => 'Youri::Submit::Post::Mirror', + 'options' => { + 'destination' => $buildsystem::var::repository::mirror_reporoot, + }, + }, + }, + }, + 'todo' => { + 'repository' => { + 'class' => 'Youri::Repository::Mageia', + 'options' => { + 'install_root' => $buildsystem::var::repository::bootstrap_reporoot, + 'upload_root' => '$home/uploads/', + 'upload_state' => 'todo done queue', + 'queue' => 'todo', + 'noarch' => 'x86_64', + 'svn' => "${buildsystem::var::mgarepo::svn_root_packages_ssh}/${buildsystem::var::distros::default_distro}", + }, + }, + 'checks' => { + 'tag' => $check_tag, + 'recency' => 
$check_recency, + 'queue_recency' => $check_queue_recency, + 'host' => $check_host, + 'rpmlint' => $check_rpmlint, + 'acl' => $check_acl, + 'source' => $check_source, + 'version' => $check_version, + 'deps' => { + 'class' => 'Youri::Submit::Check::Deps', + }, + }, + 'actions' => { + 'send' => { + 'class' => 'Youri::Submit::Action::Send', + 'options' => { + 'user' => $buildsystem::var::scheduler::login, + 'keep_svn_release' => 'yes', + 'uphost' => $buildsystem::var::scheduler::pkg_uphost, + 'root' => '$home/uploads', + 'ssh_key' => '$home/.ssh/id_rsa', + }, + }, + 'dependencies' => { + 'class' => 'Youri::Submit::Action::Dependencies', + 'options' => { + 'user' => $buildsystem::var::scheduler::login, + 'uphost' => $buildsystem::var::scheduler::pkg_uphost, + 'root' => '$home/uploads', + 'ssh_key' => '$home/.ssh/id_rsa', + }, + }, + 'rpminfo' => { + 'class' => 'Youri::Submit::Action::Rpminfo', + 'options' => { + 'user' => $buildsystem::var::scheduler::login, + 'uphost' => $buildsystem::var::scheduler::pkg_uphost, + 'root' => '$home/uploads', + 'ssh_key' => '$home/.ssh/id_rsa', + }, + }, + 'ulri' => { + 'class' => 'Youri::Submit::Action::Ulri', + 'options' => { + 'user' => $buildsystem::var::scheduler::login, + 'uphost' => $buildsystem::var::scheduler::pkg_uphost, + 'ssh_key' => '$home/.ssh/id_rsa', + }, + }, + }, + 'posts' => { + }, + }, + } +} diff --git a/modules/buildsystem/manifests/webstatus.pp b/modules/buildsystem/manifests/webstatus.pp new file mode 100644 index 00000000..49346dbc --- /dev/null +++ b/modules/buildsystem/manifests/webstatus.pp @@ -0,0 +1,44 @@ +class buildsystem::webstatus { + include buildsystem::var::webstatus + include buildsystem::var::scheduler + include apache::mod::php + + file { [ $buildsystem::var::webstatus::location, "${buildsystem::var::webstatus::location}/data" ]: + ensure => directory, + } + + $vhost = $buildsystem::var::webstatus::hostname + apache::vhost::base { $vhost: + aliases => { + '/uploads' => "${buildsystem::var::scheduler::homedir}/uploads", + '/autobuild/cauldron/x86_64/core/log/status.core.log' => "${buildsystem::var::webstatus::location}/autobuild/broken.php", + '/themes' => $buildsystem::var::webstatus::themes_dir, + }, + location => $buildsystem::var::webstatus::location, + content => template('buildsystem/vhost_webstatus.conf'), + } + + apache::vhost::base { "ssl_${vhost}": + vhost => $vhost, + use_ssl => true, + aliases => { + '/uploads' => "${buildsystem::var::scheduler::homedir}/uploads", + '/autobuild/cauldron/x86_64/core/log/status.core.log' => "${buildsystem::var::webstatus::location}/autobuild/broken.php", + '/themes' => $buildsystem::var::webstatus::themes_dir, + }, + location => $buildsystem::var::webstatus::location, + content => template('buildsystem/vhost_webstatus.conf'), + } + + git::snapshot { $buildsystem::var::webstatus::location: + source => $buildsystem::var::webstatus::git_url, + } + + file { '/etc/bs-webstatus.conf': + ensure => present, + content => template('buildsystem/bs-webstatus.conf'), + mode => '0644', + owner => root, + group => root, + } +} diff --git a/modules/buildsystem/manifests/youri_submit.pp b/modules/buildsystem/manifests/youri_submit.pp new file mode 100644 index 00000000..6b4d7dc2 --- /dev/null +++ b/modules/buildsystem/manifests/youri_submit.pp @@ -0,0 +1,83 @@ +class buildsystem::youri_submit { + include sudo + include buildsystem::rpmlint + include buildsystem::repository + include buildsystem::var::scheduler + include buildsystem::var::youri + + mga_common::local_script { + 'mga-youri-submit': 
+ content => template('buildsystem/mga-youri-submit'); + 'mga-youri-submit.wrapper': + content => template('buildsystem/mga-youri-submit.wrapper'); + 'submit_package': + content => template('buildsystem/submit_package.pl'); + 'mga-clean-distrib': + content => template('buildsystem/cleaner.rb'); + } + + sudo::sudoers_config { 'mga-youri-submit': + content => template('buildsystem/sudoers.youri') + } + $release_managers = group_members('mga-release_managers') + # ordering is automatic : + # https://docs.puppetlabs.com/learning/ordering.html#autorequire + file { + '/etc/youri/': + ensure => 'directory'; + '/etc/youri/acl.conf': + content => template('buildsystem/youri/acl.conf'); + '/etc/youri/host.conf': + content => template('buildsystem/youri/host.conf'); + } + + buildsystem::youri_submit_conf{ 'upload': + tmpl_file => $buildsystem::var::youri::tmpl_youri_upload_conf, + } + buildsystem::youri_submit_conf{ 'todo': + tmpl_file => $buildsystem::var::youri::tmpl_youri_todo_conf, + } + + cron { 'Archive orphan packages from cauldron': + command => "/usr/local/bin/mga-clean-distrib --auto -v cauldron -d ${buildsystem::var::youri::packages_archivedir} -l ${buildsystem::var::scheduler::homedir}/tmp/upload", + hour => 5, + minute => 30, + user => $buildsystem::var::scheduler::login, + } + + file { $buildsystem::var::youri::packages_archivedir: + ensure => 'directory', + owner => $buildsystem::var::scheduler::login, + require => File[$buildsystem::var::scheduler::homedir], + } + + tidy { $buildsystem::var::youri::packages_archivedir: + type => 'ctime', + recurse => true, + age => '1w', + matches => '*.rpm', + } + + include mga_common::var::perl + file { [ "${mga_common::var::perl::site_perl_dir}/Youri", + "${mga_common::var::perl::site_perl_dir}/Youri/Repository"]: + ensure => directory, + mode => '0755', + owner => root, + group => root, + } + file { "${mga_common::var::perl::site_perl_dir}/Youri/Repository/Mageia.pm": + source => 'puppet:///modules/buildsystem/Mageia.pm', + } + + $package_list= ['perl-SVN', + 'mdv-distrib-tools', + 'perl-Youri-Media', + 'perl-Youri-Package', + 'perl-Youri-Repository', + 'perl-Youri-Utils', + 'perl-Youri-Config', + 'mga-youri-submit'] + + package { $package_list: } +} diff --git a/modules/buildsystem/manifests/youri_submit_conf.pp b/modules/buildsystem/manifests/youri_submit_conf.pp new file mode 100644 index 00000000..28b911d9 --- /dev/null +++ b/modules/buildsystem/manifests/youri_submit_conf.pp @@ -0,0 +1,6 @@ +define buildsystem::youri_submit_conf($tmpl_file) { + $conf_name = $name + file { "/etc/youri/submit-${conf_name}.conf": + content => template($tmpl_file), + } +} diff --git a/modules/buildsystem/templates/binrepo/sudoers.binrepo b/modules/buildsystem/templates/binrepo/sudoers.binrepo new file mode 100644 index 00000000..c20810cf --- /dev/null +++ b/modules/buildsystem/templates/binrepo/sudoers.binrepo @@ -0,0 +1 @@ +%<%= scope.lookupvar('buildsystem::var::groups::packagers_committers') %> ALL =(<%= scope.lookupvar('buildsystem::var::binrepo::login') %>) NOPASSWD: <%= scope.lookupvar('buildsystem::var::binrepo::uploadbinpath') %> diff --git a/modules/buildsystem/templates/binrepo/upload-bin b/modules/buildsystem/templates/binrepo/upload-bin new file mode 100755 index 00000000..7cad5838 --- /dev/null +++ b/modules/buildsystem/templates/binrepo/upload-bin @@ -0,0 +1,32 @@ +#!/bin/sh +set -e + +binrepodir=<%= scope.lookupvar('buildsystem::var::binrepo::repodir') %> +uploadinfosdir=<%= scope.lookupvar('buildsystem::var::binrepo::uploadinfosdir') %> 
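+
+# upload-bin is invoked (via sudo) by the wrapper with the uploader's login and
+# a comment as arguments; it reads the uploaded file on stdin, stores it in
+# $binrepodir under its sha1sum, records the uploader and comment in
+# $uploadinfosdir, and sends a notification mail.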
+tmpfile=$(mktemp) +mail_from="<%= scope.lookupvar('buildsystem::var::binrepo::uploadmail_from') %>" +mail_dest="<%= scope.lookupvar('buildsystem::var::binrepo::uploadmail_to') %>" + +test $# = 2 || exit 3 +username="$1" +comment="$2" + +/bin/cat > "$tmpfile" +sha1sum=$(/usr/bin/sha1sum "$tmpfile" | sed 's/ .*$//') +test -n "$sha1sum" +if [ -f "$binrepodir/$sha1sum" ] +then + echo "File $sha1sum already exists." >&2 + /bin/rm -f "$tmpfile" + exit 2 +fi +/bin/mv "$tmpfile" "$binrepodir/$sha1sum" +/bin/chmod 644 "$binrepodir/$sha1sum" +echo "$username:$comment" > "$uploadinfosdir/$sha1sum" +echo "User $username uploaded file $sha1sum: $comment" + +echo "User $username uploaded file $sha1sum: $comment" | \ + /usr/bin/mailx -s "New file uploaded: $sha1sum - $comment" -S "from=$username <$mail_from>" "$mail_dest" + +exit 0 + diff --git a/modules/buildsystem/templates/binrepo/vhost_binrepo.conf b/modules/buildsystem/templates/binrepo/vhost_binrepo.conf new file mode 100644 index 00000000..f411c07a --- /dev/null +++ b/modules/buildsystem/templates/binrepo/vhost_binrepo.conf @@ -0,0 +1,3 @@ +<Directory <%= scope.lookupvar('buildsystem::var::binrepo::repodir') %>> + Options None +</Directory> diff --git a/modules/buildsystem/templates/binrepo/wrapper.upload-bin b/modules/buildsystem/templates/binrepo/wrapper.upload-bin new file mode 100644 index 00000000..3def84a0 --- /dev/null +++ b/modules/buildsystem/templates/binrepo/wrapper.upload-bin @@ -0,0 +1,26 @@ +#!/bin/sh + +binrepouser="<%= scope.lookupvar('buildsystem::var::binrepo::login') %>" +uploadbinpath="<%= scope.lookupvar('buildsystem::var::binrepo::uploadbinpath') %>" +packagerscommittersgroup="<%= scope.lookupvar('buildsystem::var::groups::packagers_committers') %>" + +function isingroup() +{ + grp="$1" + for group in `groups` + do if [ "$grp" = "$group" ] + then + return 0 + fi + done + return 1 +} + +if ! isingroup "$packagerscommittersgroup" +then + echo "You are not in $packagerscommittersgroup group." + exit 1 +fi + +sudo -u "$binrepouser" "$uploadbinpath" $(whoami) $@ + diff --git a/modules/buildsystem/templates/bs-webstatus.conf b/modules/buildsystem/templates/bs-webstatus.conf new file mode 100644 index 00000000..9f37a990 --- /dev/null +++ b/modules/buildsystem/templates/bs-webstatus.conf @@ -0,0 +1,32 @@ +<?php + +/** Where is the current app located. */ +$g_webapp_dir = '<%= scope.lookupvar('buildsystem::var::webstatus::location') %>'; + +/** Full system path where packages are uploaded. */ +$upload_dir = '<%= scope.lookupvar('buildsystem::var::scheduler::homedir') %>/uploads'; + +/** How long a history should we keep, in days. */ +$max_modified = <%= scope.lookupvar('buildsystem::var::webstatus::max_modified') %>; + +/** How many nodes are available. */ +$g_nodes_count = 2; + +/** html > body > h1 title */ +$title = 'Build system status'; + +/** Should crawlers index this page or not? meta[robots] tag.*/ +$robots = 'index,nofollow,nosnippet,noarchive'; + +/** */ +$g_root_url = 'https://<%= scope.lookupvar('buildsystem::var::webstatus::hostname') %>/'; + +/** URL to view a package svn revision. 
%d is replaced by the revision */
+$package_commit_url = '<%= scope.lookupvar('buildsystem::var::webstatus::package_commit_url') %>';
+
+/** name of the theme */
+$theme_name = '<%= scope.lookupvar('buildsystem::var::webstatus::theme_name') %>';
+
+/** themes directory */
+$themes_dir = '<%= scope.lookupvar('buildsystem::var::webstatus::themes_dir') %>';
+
diff --git a/modules/buildsystem/templates/cleaner.rb b/modules/buildsystem/templates/cleaner.rb
new file mode 100755
index 00000000..fa0d08ca
--- /dev/null
+++ b/modules/buildsystem/templates/cleaner.rb
@@ -0,0 +1,235 @@
+#!/usr/bin/ruby
+
+def usage
+  puts "Usage: #{$0} [options]"
+  puts "Moves obsolete packages"
+  puts
+  puts "-h, --help                         show this help"
+  puts "-a, --archs <arch1>,<arch2>,...    list of architectures to clean"
+  puts "-A, --auto                         do not ask for confirmation"
+  puts "-p, --base <path>                  base path to the repository"
+  puts "-m, --media <media1>,<media2>,...  list of media to clean (default: core/release,tainted/release,nonfree/release)"
+  puts "-d, --destination <path>           path to the old packages storage"
+  puts "-v, --version <version>            version to clean (default: cauldron)"
+  puts "-l, --lockfile <path>              upload lock file to acquire before cleaning"
+end
+
+require 'fileutils'
+require 'getoptlong'
+require 'readline'
+
+def process
+  opts = GetoptLong.new(
+    [ '--help', '-h', GetoptLong::NO_ARGUMENT ],
+    [ '--archs', '-a', GetoptLong::REQUIRED_ARGUMENT ],
+    [ '--auto', '-A', GetoptLong::NO_ARGUMENT ],
+    [ '--base', '-p', GetoptLong::REQUIRED_ARGUMENT ],
+    [ '--media', '-m', GetoptLong::REQUIRED_ARGUMENT ],
+    [ '--destination', '-d', GetoptLong::REQUIRED_ARGUMENT ],
+    [ '--version', '-v', GetoptLong::REQUIRED_ARGUMENT ],
+    [ '--lockfile', '-l', GetoptLong::REQUIRED_ARGUMENT ],
+  )
+
+  base_path = "<%= scope.lookupvar('buildsystem::var::repository::bootstrap_root') %>/distrib"
+  archs = [ "x86_64", "i686", "aarch64", "armv7hl" ]
+  medias = ["core/release", "tainted/release", "nonfree/release"]
+  old_path = "<%= scope.lookupvar('buildsystem::var::youri::packages_archivedir') %>"
+  version = "cauldron"
+  auto = false
+  lockfile = nil
+
+  opts.each do |opt, arg|
+    case opt
+    when '--help'
+      usage
+      exit 0
+    when '--destination'
+      old_path = arg
+    when '--media'
+      medias = arg.split(",")
+    when '--archs'
+      archs = arg.split(",")
+    when '--auto'
+      auto = true
+    when '--base'
+      base_path = arg
+    when '--version'
+      version = arg
+    when '--lockfile'
+      lockfile = arg
+    end
+  end
+
+  take_upload_lock(lockfile) if lockfile
+
+  medias.each{|media|
+    src_path = "#{base_path}/#{version}/SRPMS/#{media}"
+
+    $used_srcs = {}
+    $old_srcs = {}
+    $srcs = {}
+    $srcages = {}
+    $noarch = {}
+
+    # Get a list of all src.rpm and their build time
+    `urpmf --synthesis "#{src_path}/media_info/synthesis.hdlist.cz" --qf '%filename:%buildtime:%buildarchs' "."`.each_line{|l|
+      l2 = l.split(':')
+      filename = l2[0]
+      buildtime = l2[1].to_i
+      buildarch = l2[2].rstrip
+      name = name_from_filename(filename)
+      if $srcages[name] then
+        if buildtime < $srcages[name][1] then
+          # This src.rpm is older, ignore it and store it in the list to be deleted
+          $old_srcs[filename] = true
+          next
+        else
+          # This src.rpm has an older version, ignore that version and store it in the list to be deleted
+          old_filename = $srcages[name][0]
+          $old_srcs[old_filename] = true
+          $srcs.delete(old_filename)
+        end
+      end
+      $srcages[name] = [ filename, buildtime ]
+      $srcs[filename] = true
+      $noarch[name] = true if buildarch == 'noarch'
+    }
+    archs.each{|arch|
+      bin_path = "#{base_path}/#{version}/#{arch}/media/#{media}"
+      debug_path = bin_path.sub("/media/", "/media/debug/")
+      
old_packages = check_binaries(arch, $srcs, $srcages, src_path, bin_path, $used_srcs) + old_debug_packages = check_binaries(arch, $srcs, {}, src_path, debug_path, nil) + move_packages(bin_path, old_path, old_packages, auto) + move_packages(debug_path, old_path, old_debug_packages, auto) + } + $used_srcs.keys.each{|s| $srcs.delete(s)} + + move_packages(src_path, old_path, $srcs.keys + $old_srcs.keys, auto) + } +end + +def take_upload_lock(path) + start_time = Time.new + has_lock = false + at_exit { + if File.exists?(path) + if File.readlines(path)[0].to_i == Process.pid + File.delete(path) + end + end + } + until has_lock + while File.exists?(path) + if Time.new - start_time > 2*3600.0 + puts "Could not acquire upload lock for more than 2h, giving up" + end + sleep(5) + end + File.write(path, Process.pid) + if File.readlines(path)[0].to_i == Process.pid + has_lock = true + end + end +end + +def move_packages(src, dst, list, auto) + list.reject!{|f| !File.exist?(src + "/" + f)} + return if list.empty? + list.each{|b| + puts b + } + puts "The #{list.length} listed packages will be moved from #{src} to #{dst}." + line = Readline::readline('Are you sure [Yn]? ') unless auto + if auto || line =~ /^y?$/i + list.each{|s| + oldfile = src + "/" + s + newfile = dst + "/" + s + next unless File.exist?(oldfile) + if (File.exist?(newfile)) + File.unlink(oldfile) + else + FileUtils.mv(oldfile, newfile) + end + } + end +end + +# For each binary media: +# - Check if we have the src.rpm (else the binary package is obsolete) +# * If we don't have the src.rpm, check if we have a newer version +# - If there is a new version: +# * check if this architecture has packages from it to avoid deleting armv7hl packages before the new one get rebuilt +# * check if the new version is old enough to allow rebuilding everything (7d?) +# - Mark used src.rpm (if one is never marked, the src.rpm is obsolete) + +def packages(path) + `urpmf --synthesis "#{path}/media_info/synthesis.hdlist.cz" --qf '%sourcerpm:%filename:%buildtime' ":"`.each_line{|l| + l2 = l.split(':') + sourcerpm = l2[0] + filename = l2[1] + buildtime = l2[2].to_i + yield(sourcerpm, filename, buildtime) + } +end + +def name_from_filename(filename) + filename.sub(/-[^-]*-[^-]*$/, '') +end + +def arch_wanted(src, arch) + exclusive_arch = `rpmquery -p #{src} --qf '[%{EXCLUSIVEARCH} ]'`.rstrip + if exclusive_arch != "" then + if !exclusive_arch.split(/ /).include?(arch) then + return false + end + end + exclude_arch = `rpmquery -p #{src} --qf '[%{EXCLUDEARCH} ]'`.rstrip + if exclude_arch != "" then + if exclude_arch.split(/ /).include?(arch) then + return false + end + end + + return true +end + +def check_binaries(arch, srcs, srcages, src_path, path, used_srcs) + used_here_srcs = {} + all_versions = {} + packages(path) {|src, filename, buildtime| + used_srcs[src] = true if used_srcs != nil + if filename =~ /noarch.rpm$/ then + # We need to mark the src.rpm present on this arch only for full noarch packages + used_here_srcs[src] = true if $noarch[name_from_filename(src)] + else + used_here_srcs[src] = true + end + name = name_from_filename(filename) + if all_versions[name] then + all_versions[name] << src + else + all_versions[name] = [src] + end + } + old_binaries = [] + packages(path) {|src, filename, buildtime| + if ! 
srcs[src] then + srcname = name_from_filename(src) + if srcages[srcname] then + # The src.rpm is gone but there is a different version of it + latestsrc = srcages[srcname][0] + # Only delete old binaries after 7d or if there is a new version + name = name_from_filename(filename) + next unless (srcages[srcname][1] < Time.now.to_i - 24*60*60*7 || all_versions[name].include?(latestsrc)) + # Do not delete if the new version of the package hasn't been built for this arch yet + # but still delete it if it is no longer expected to be built. + next unless (used_here_srcs[latestsrc] || !arch_wanted("#{src_path}/#{latestsrc}", arch)) + end + old_binaries << filename + end + } + old_binaries +end + +if __FILE__ == $0 then + process +end diff --git a/modules/buildsystem/templates/cleaner_test.rb b/modules/buildsystem/templates/cleaner_test.rb new file mode 100644 index 00000000..804bd1b5 --- /dev/null +++ b/modules/buildsystem/templates/cleaner_test.rb @@ -0,0 +1,83 @@ +require 'cleaner' +require "test/unit" + +class TestCleaner < Test::Unit::TestCase + + @pkgs = [] + + def setpackages(pkgs) + @pkgs = pkgs + end + + def packages(path) + @pkgs.map{|p| + l2 = p.split(':') + sourcerpm = l2[0] + filename = l2[1] + buildtime = l2[2].to_i + yield(sourcerpm, filename, buildtime) + } + end + + def test_old + # Package was built on this arch and src.rpm for new version is 15d old + setpackages(['foo-43-1.src.rpm:libfoo2-43-1.armv7hl.rpm:43', 'foo-42-1.src.rpm:libfoo1-42-1.armv7hl.rpm:42']) + srcages = {} + srcages['foo'] = [ 'foo-43-1.src.rpm', Time.now.to_i - 15*24*3600 ] + srcs = {} + srcs['foo-43-1.src.rpm'] = true + assert_equal(['libfoo1-42-1.armv7hl.rpm'], check_binaries('armv7hl', srcs, srcages, '', '', nil)) + end + + def test_recent + # Package was built on this arch but src.rpm for new version is only 1d old + setpackages(['foo-43-1.src.rpm:foo-43-1.armv7hl.rpm:43', 'foo-42-1.src.rpm:foo-42-1.armv7hl.rpm:42']) + srcages = {} + srcages['foo'] = [ 'foo-43.src.rpm', Time.now.to_i - 24*3600 ] + srcs = {} + srcs['foo-43-1.src.rpm'] = true + assert_equal([], check_binaries('armv7hl', srcs, srcages, '', '', nil)) + end + + def test_arm_late + # Package was not yet built on this arch + setpackages(['foo-42-1.src.rpm:foo-42-1.armv7hl.rpm:42']) + srcages = {} + srcages['foo'] = [ 'foo-43.src.rpm', Time.now.to_i - 24*3600 ] + srcs = {} + srcs['foo-43-1.src.rpm'] = true + assert_equal([], check_binaries('armv7hl', srcs, srcages, '', '', nil)) + end + + def test_multiple_versions + # Old package remains (usually happens to noarch due to youri bug) + $noarch = { 'foo' => true } + setpackages(['foo-42-1.src.rpm:foo-42-1.noarch.rpm:42', 'foo-42-2.src.rpm:foo-42-2.noarch.rpm:43']) + srcages = {} + srcages['foo'] = [ 'foo-42-2.src.rpm', Time.now.to_i - 24*3600 ] + srcs = {} + srcs['foo-42-2.src.rpm'] = true + assert_equal(['foo-42-1.noarch.rpm'], check_binaries('i586', srcs, srcages, '', '', nil)) + end + + def test_icu + $noarch = {} + now = Time.now.to_i + srctime = now - 3600 + oldbintime = now - 10*24*3600 + newbintime = now - 3200 + setpackages([ + "icu-71.1-2.mga9.src.rpm:icu71-data-71.1-2.mga9.noarch.rpm:#{oldbintime}", + "icu-71.1-2.mga9.src.rpm:lib64icu71-71.1-2.mga9.aarch64.rpm:#{oldbintime}", + "icu-72.1-1.mga9.src.rpm:icu72-data-72.1-1.mga9.noarch.rpm:#{newbintime}", + "icu-72.1-1.mga9.src.rpm:lib64icu-devel-72.1-1.mga9.aarch64.rpm:#{newbintime}", + "icu-72.1-1.mga9.src.rpm:lib64icu72-72.1-1.mga9.aarch64.rpm:#{newbintime}" + ]) + srcages = {} + srcages['icu'] = [ 'icu-71.1-2.mga9.src.rpm', srctime ] + srcs 
= {} + srcs['icu-71.1-2.mga9.src.rpm'] = true + assert_equal([], check_binaries('aarch64', srcs, srcages, '', '', nil)) + end + +end diff --git a/modules/buildsystem/templates/iurt.cauldron.conf b/modules/buildsystem/templates/iurt.cauldron.conf deleted file mode 100644 index 5a7f047e..00000000 --- a/modules/buildsystem/templates/iurt.cauldron.conf +++ /dev/null @@ -1,30 +0,0 @@ -{ - supported_arch => [ 'i586', 'x86_64' ], - all_media => { 'main' => [ 'release' ], 'contrib' => [ 'release' ] }, - upload => 'schedbot@pkgsubmit:~/uploads/', - upload_queue => 'schedbot@pkgsubmit:~/uploads/queue/', - unwanted_packages => '^monotone-', - repository => '/mnt/BIG/dis/', - rsync_to => 'schedbot@pkgsubmit:/mnt/BIG/dis/uploads/build/', - log_url => 'http://pkgsubmit.mageia.org/queue/build/', - admin => 'mageia-sysadm@mageia.org', - iurt_root_command => '/home/buildbot/iurt-trunk/iurt_root_command', - packager => 'Iurt the rebuild bot <mageia-sysadm@mageia.org>', - sendmail => 0, - build_timeout => { - 'default' => 18000, - 'gcc' => 57600, - 'paraview' => 115200, - 'salome' => 57600, - 'itk' => 115200, - 'wrapitk' => 115200, - 'kernel-rt' => 57600, - 'kernel-xen' => 57600, - 'kernel-tmb' => 57600, - 'openoffice.org' => 345600, - 'openoffice.org64' => 345600, - 'openoffice.org-go-ooo' => 345600, - 'openoffice.org64-go-ooo' => 345600 - }, -} - diff --git a/modules/buildsystem/templates/iurt.conf b/modules/buildsystem/templates/iurt.conf new file mode 100644 index 00000000..2dd8bf0e --- /dev/null +++ b/modules/buildsystem/templates/iurt.conf @@ -0,0 +1,37 @@ +<%- distro = scope.lookupvar('buildsystem::var::distros::distros')[@distribution] -%> +{ + supported_arch => [ '<%= distro['arch'].join("', '") %>' ], + all_media =>{ +<%- distro['medias'].keys.sort.each{|media| -%> + '<%= media %>' => [ '<%= + distro['medias'][media]['repos'].keys.sort.join("', '") %>' ], +<%- +} -%> + }, + distribution => '<%= distro['macros']['distribution'] %>', + vendor => '<%= distro['macros']['vendor'] %>', + base_media => [ '<%= distro['base_media'].join("', '") %>' ], + upload => '<%= build_login %>@pkgsubmit:~/uploads/', + upload_queue => '<%= build_login %>@pkgsubmit:~/uploads/queue/', + unwanted_packages => '^monotone-', + repository => 'http://<%= scope.lookupvar('buildsystem::var::repository::hostname') %>/<%= scope.lookupvar('buildsystem::var::repository::distribdir') %>/', + log_url => 'https://<%= scope.lookupvar('buildsystem::var::webstatus::hostname') %>/queue/build/', + admin => 'mageia-sysadm@mageia.org', + packager => 'Iurt the rebuild bot <mageia-sysadm@mageia.org>', + sendmail => 0, + log_size_limit => '600M', + build_timeout => { +<%- build_timeout.keys.sort.each{|package| -%> + '<%= package %>' => <%= (build_timeout[package].to_f * scope.lookupvar('buildsystem::var::iurt::timeout_multiplier').to_f).to_i %>, +<%- +} -%> + }, + use_netns => { + 'default' => 1, +<%- allow_network_access.sort.each{|package| -%> + '<%= package %>' => 0, +<%- +} -%> + }, +} + diff --git a/modules/buildsystem/templates/maintdb/maintdb.bin b/modules/buildsystem/templates/maintdb/maintdb.bin new file mode 100755 index 00000000..903ee009 --- /dev/null +++ b/modules/buildsystem/templates/maintdb/maintdb.bin @@ -0,0 +1,98 @@ +#!/bin/bash + +MAINTDBDIR="<%= scope.lookupvar('buildsystem::var::maintdb::dbdir') %>" + +function checkname() +{ + if [ -z "$1" ] || + echo "$1" | grep -q '[/*{}%]' || + echo "$1" | fgrep -q '..' + then + echo "Error: invalid package name." 
>&2 + exit 1 + fi +} + +function maintnew() +{ + if [ "$user" != "root" ]; then + echo "Error: new is only allowed to root." >&2 + exit 1 + fi + checkname "$1" + maintfile="$MAINTDBDIR/$1" + if [ -f "$maintfile" ]; then + exit 0 + fi + echo "$2" > "$maintfile" +} + +function maintset() +{ + checkname "$1" + maintfile="$MAINTDBDIR/$1" + newmaint="$2" + if [ ! -f "$maintfile" ]; then + echo "Error: package $1 does not exist in maintdb." >&2 + exit 1 + fi + curmaint=$(cat "$maintfile") + if [ "$newmaint" = "nobody" ] || [[ "$newmaint" = *-team ]]; then + if [ "$curmaint" = "$user" ]; then + echo "$newmaint" > "$maintfile" + exit 0 + else + echo "Error: cannot set maintainer for $1." >&2 + exit 1 + fi + elif [ "$newmaint" = "$user" ]; then + if [ "$curmaint" = "nobody" ] || [[ "$curmaint" = *-team ]]; then + echo "$newmaint" > "$maintfile" + exit 0 + else + echo "Error: cannot set maintainer for $1." >&2 + exit 1 + fi + else + echo "Error: cannot set someone else as maintainer." >&2 + exit 1 + fi +} + +function maintgetall() +{ + cd "$MAINTDBDIR" + for file in *; do + echo "$file $(cat $file)" + done + exit 0 +} + +function maintget() +{ + if [ -z "$1" ]; then + maintgetall + fi + checkname "$1" + maintfile="$MAINTDBDIR/$1" + if [ -f "$maintfile" ]; then + cat "$maintfile" + else + echo "Error: package $1 does not exist in maintdb." >&2 + exit 1 + fi +} + +user="$1" +action="$2" + +if [ "$action" = "new" ]; then + maintnew "$3" "$4" +elif [ "$action" = "set" ]; then + maintset "$3" "$4" +elif [ "$action" = "get" ]; then + maintget "$3" +else + echo "Error: unknown command." >&2 + exit 2 +fi diff --git a/modules/buildsystem/templates/maintdb/sudoers.maintdb b/modules/buildsystem/templates/maintdb/sudoers.maintdb new file mode 100644 index 00000000..91c88e47 --- /dev/null +++ b/modules/buildsystem/templates/maintdb/sudoers.maintdb @@ -0,0 +1,4 @@ +%<%= scope.lookupvar('buildsystem::var::groups::packagers') %> ALL =(<%= scope.lookupvar('buildsystem::var::maintdb::login') %>) NOPASSWD: <%= scope.lookupvar('buildsystem::var::maintdb::binpath') %> [a-z]* get +%<%= scope.lookupvar('buildsystem::var::groups::packagers') %> ALL =(<%= scope.lookupvar('buildsystem::var::maintdb::login') %>) NOPASSWD: <%= scope.lookupvar('buildsystem::var::maintdb::binpath') %> [a-z]* [gs]et [a-zA-Z0-9]* +%<%= scope.lookupvar('buildsystem::var::groups::packagers') %> ALL =(<%= scope.lookupvar('buildsystem::var::maintdb::login') %>) NOPASSWD: <%= scope.lookupvar('buildsystem::var::maintdb::binpath') %> [a-z]* set [a-zA-Z0-9]* [a-z]* +<%= scope.lookupvar('buildsystem::var::scheduler::login') %> ALL =(<%= scope.lookupvar('buildsystem::var::maintdb::login') %>) NOPASSWD: <%= scope.lookupvar('buildsystem::var::maintdb::binpath') %> [a-z]* new [a-zA-Z0-9]* [a-z]* diff --git a/modules/buildsystem/templates/maintdb/vhost_maintdb.conf b/modules/buildsystem/templates/maintdb/vhost_maintdb.conf new file mode 100644 index 00000000..146413a7 --- /dev/null +++ b/modules/buildsystem/templates/maintdb/vhost_maintdb.conf @@ -0,0 +1,3 @@ +<Directory <%= scope.lookupvar('buildsystem::var::maintdb::dbdir') %>> + Options None +</Directory> diff --git a/modules/buildsystem/templates/maintdb/wrapper.maintdb b/modules/buildsystem/templates/maintdb/wrapper.maintdb new file mode 100644 index 00000000..fcf69dab --- /dev/null +++ b/modules/buildsystem/templates/maintdb/wrapper.maintdb @@ -0,0 +1,25 @@ +#!/bin/sh + +maintdbuser="<%= scope.lookupvar('buildsystem::var::maintdb::login') %>" +maintdbpath="<%= 
scope.lookupvar('buildsystem::var::maintdb::binpath') %>" +packagersgroup="<%= scope.lookupvar('buildsystem::var::groups::packagers') %>" + +function isingroup() +{ + grp="$1" + for group in `groups` + do if [ "$grp" = "$group" ] + then + return 0 + fi + done + return 1 +} + +if ! isingroup "$packagersgroup" +then + echo "You are not in $packagersgroup group." + exit 1 +fi + +sudo -u "$maintdbuser" "$maintdbpath" $(whoami) "$@" diff --git a/modules/buildsystem/templates/media.cfg b/modules/buildsystem/templates/media.cfg new file mode 100644 index 00000000..64757a2b --- /dev/null +++ b/modules/buildsystem/templates/media.cfg @@ -0,0 +1,142 @@ +<%- +def media_name(media, repo, type, archname) + name = [ media.capitalize ] + if archname != nil + name += [ archname ] + end + for r in repo.split('_') do + name += [ r.capitalize ] + end + if type != nil + name += [ type.capitalize ] + end + return name.join(' ') +end + +def media_out(name, media_hash) + media_out = "[%s]\n" % name + media_hash.keys.sort.each{|key| + value = media_hash[key] + if value != nil + media_out += "%s=%s\n" % [ key, value ] + end + } + return media_out +end +distro = scope.lookupvar('buildsystem::var::distros::distros')[@distro_name] +-%> +[media_info] +version=<%= distro['version'] %> +mediacfg_version=2 +branch=<%= distro['branch'] %> +<%- +if @arch != 'armv7hl' +-%> +arch=<%= @arch %> +<%- +end +-%> +xml-info=1 + +<%- +distro['medias'].keys.sort { |x,y| distro['medias'][x]['order'] <=> distro['medias'][y]['order'] }.each{|medianame| + media = distro['medias'][medianame] + media['repos'].keys.sort { |x,y| media['repos'][x]['order'] <=> media['repos'][y]['order'] }.each{|reponame| + repo = media['repos'][reponame] + media_type = [] + if media['media_type'] != nil + media_type += media['media_type'] + end + if repo['media_type'] != nil + media_type += repo['media_type'] + end + noauto=nil + if (media['noauto'] == '1') or (repo['noauto'] == '1') + noauto='1' + end + updates_for = nil + if repo['updates_for'] != nil + updates_for = [ medianame, repo['updates_for'] ].join('/') + end + -%><%= + media_out [ medianame, reponame ].join('/'), + :hdlist => [ 'hdlist', medianame, reponame ].join('_') + '.cz', + :name => media_name(medianame, reponame, nil, nil), + :srpms => [ '../../SRPMS', medianame, reponame ].join('/'), + :media_type => media_type.join(':'), + :updates_for => updates_for, + :noauto => noauto + + %> + <%-# debug -%> + <%- + debug_media_type = media_type + [ 'debug' ] + -%><%= + media_out [ 'debug', medianame, reponame ].join('/'), + :hdlist => [ 'hdlist_debug', medianame, reponame ].join('_') + '.cz', + :name => media_name(medianame, reponame, 'debug', nil), + :srpms => [ '../../SRPMS', medianame, reponame ].join('/'), + :media_type => debug_media_type.join(':'), + :noauto => '1' + + %> + <%-# source -%> + <%- + source_media_type = media_type + [ 'source' ] + -%><%= + media_out [ '../../SRPMS', medianame, reponame ].join('/'), + :hdlist => [ 'hdlist', medianame, reponame ].join('_') + '.src.cz', + :name => media_name(medianame, reponame, 'sources', nil), + :rpms => [ medianame, reponame ].join('/'), + :media_type => source_media_type.join(':'), + :noauto => '1' + + %> + <%-# we add 32bit media if arch is x86_64 -%> + <%- + if @arch == 'x86_64' and distro['arch'].include?('i586') + medianame32 = [ medianame, '32' ].join('') + -%><%= + media_out [ '../../i586/media', medianame, reponame ].join('/'), + :hdlist => [ 'hdlist', medianame32, reponame ].join('_') + '.src.cz', + :name => media_name(medianame, reponame, 
nil, '32bit'), + :media_type => media_type.join(':'), + :noauto => noauto + + %> + <%- + end + if @arch == 'x86_64' and distro['arch'].include?('i686') + medianame32 = [ medianame, '32' ].join('') + -%><%= + media_out [ '../../i686/media', medianame, reponame ].join('/'), + :hdlist => [ 'hdlist', medianame32, reponame ].join('_') + '.src.cz', + :name => media_name(medianame, reponame, nil, '32bit'), + :media_type => media_type.join(':'), + :noauto => noauto + + %> + <%- + end + } +} +if distro['based_on'] != nil + distro['based_on'].keys.sort.each{|bdistroname| + bdistro = distro['based_on'][bdistroname] + bdistro.keys.sort.each{|medianame| + media = bdistro[medianame] + for reponame in media + -%><%= + media_out [ bdistroname, medianame, reponame ].join('/'), + :hdlist => [ 'hdlist', bdistroname, medianame, + reponame ].join('_'), + :name => media_name([ medianame, bdistroname].join(''), reponame, nil, nil), + :media_type => 'base_distro', + :noauto => 1 + %> + <%- + end + } + } +end +-%> diff --git a/modules/buildsystem/templates/mga-youri-submit b/modules/buildsystem/templates/mga-youri-submit new file mode 100755 index 00000000..0d29d462 --- /dev/null +++ b/modules/buildsystem/templates/mga-youri-submit @@ -0,0 +1,2 @@ +#!/bin/sh +exec sudo /usr/local/bin/mga-youri-submit.wrapper "$@" diff --git a/modules/buildsystem/templates/mga-youri-submit.wrapper b/modules/buildsystem/templates/mga-youri-submit.wrapper new file mode 100755 index 00000000..66fc59bc --- /dev/null +++ b/modules/buildsystem/templates/mga-youri-submit.wrapper @@ -0,0 +1,36 @@ +#!/usr/bin/perl +# youri-submit wrapper + +use strict; +use warnings; +use Fcntl ':mode'; +use File::Basename; +use MDK::Common; + +my $log_dir = "$ENV{HOME}/submit-logs"; + +my $sudo_user = $ENV{SUDO_USER} or die "should be run through sudo"; +my @prog = ('perl', '-I/usr/share/mga-youri-submit/lib', '/usr/share/mga-youri-submit/bin/youri-submit'); + +my @options; +foreach my $arg (@ARGV) { + if ($arg =~ /^-?-(\S+)/) { + # drop prohibited options + if ($arg =~ /-c/ || $arg =~ /-s/) { + print STDERR "prohibited option $arg, skipping\n"; + next; + } + } + push(@options, $arg); +} + +# logging for bug #30315 -spuk, 2007-05-29 +mkdir_p($log_dir); +open(STDERR, "| tee -a $log_dir/$sudo_user.err >&2"); +open(STDOUT, "| tee -a $log_dir/$sudo_user.out"); + +# call wrapped program +print "Executing @prog --config /etc/youri/submit-todo.conf --define user=$sudo_user @options (sudo_user $sudo_user)\n"; +my $err = system(@prog, "--config", "/etc/youri/submit-todo.conf", "--define", "user=$sudo_user", @options) && ($? 
>> 8 || 1); + +exit $err diff --git a/modules/buildsystem/templates/mgarepo.conf b/modules/buildsystem/templates/mgarepo.conf new file mode 100644 index 00000000..fbe5109c --- /dev/null +++ b/modules/buildsystem/templates/mgarepo.conf @@ -0,0 +1,88 @@ +<%- + default_distro = scope.lookupvar('buildsystem::var::distros::default_distro') + distros = scope.lookupvar('buildsystem::var::distros::distros') +-%> +[global] +verbose = no +default_parent = <%= scope.lookupvar('buildsystem::var::mgarepo::svn_root_packages') %>/<%= default_distro %> +#url-map = svn\+ssh://svn\.mageia\.org/(.*) file:///\1 +tempdir = <%= sched_home_dir %>/repsys/tmp +trunk-dir = <%= default_distro %> +<%- +conf = scope.lookupvar('buildsystem::var::mgarepo::conf') +if conf['global'] != nil + conf['global'].keys.sort.each{|key| + value = conf['global'][key] + -%><%= key %> = <%= value %> +<%- + } +end +-%> + + +[log] +oldurl = <%= scope.lookupvar('buildsystem::var::mgarepo::oldurl') %> +# controls up to which revision the rpm changelog +# will be constructed (default zero, i.e., oldest +# commit) +# revision-offset = 0 +# commit lines containing this string won't be shown in the changelog: +ignore-string = SILENT + +[template] +path = /usr/share/mgarepo/default.chlog + +[users] +iurt = Mageia build bot <mageia-sysadm@<%= domain %>> + +[submit] +default = <%= default_distro %> +host = <%= scope.lookupvar('buildsystem::var::mgarepo::submit_host') %> + +<%- + distros.keys.sort.each{|d| + distro = distros[d] +-%> +[submit <%= d %>] +target = <%= sched_home_dir %>/repsys/srpms +allowed = <%= distro['submit_allowed'] %> <%= distro['backports_allowed'] %> +rpm-macros = global <%= d %> + +<%- + } +-%> + +[macros global] +# mkrel definition to be removed when rpm-setup is updated on main build node +mkrel(c:) = %{-c: 0.%{-c*}.}%{1}%{?subrel:.%subrel}%{?distsuffix:%distsuffix}%{?!distsuffix:.mga}%{?distro_release:%distro_release} +dist = %{?distsuffix:%distsuffix}%{?!distsuffix:.mga}%{?distro_release:%distro_release} + +<%- + distros.keys.sort.each{|d| + distro = distros[d] +-%> +[macros <%= d %>] +distro_release = <%= distro['version'] %> +<%- + distro['macros'].keys.sort.each{|macro| + value = distro['macros'][macro] + -%><%= macro %> = <%= value %> + <%- } %> +<%- } +%> + +[helper] +create-srpm = /usr/share/repsys/create-srpm +upload-srpm = /usr/local/bin/mga-youri-submit +# needed by mdvsys 2.0 +install-buildrequires = sudo rurpmi --auto --no-suggests +upload-bin = /usr/local/bin/wrapper.upload-bin + +[srpm] +run-prep = yes + +[binrepo] +<%- binrepo_hostname = scope.lookupvar('buildsystem::var::binrepo::hostname') -%> +download_url = http://<%= binrepo_hostname %>/ +upload_host = <%= binrepo_hostname %> + diff --git a/modules/buildsystem/templates/repoctl.conf b/modules/buildsystem/templates/repoctl.conf new file mode 100644 index 00000000..14506a25 --- /dev/null +++ b/modules/buildsystem/templates/repoctl.conf @@ -0,0 +1,40 @@ +<%- +distribdir = scope.lookupvar('buildsystem::var::repository::distribdir') +distros = scope.lookupvar('buildsystem::var::distros::distros') +arches = {} +distrosections = {} +sectionsrepos = {} +distros.each{|distroname, distro| + distro['medias'].each{|medianame, media| + distrosections[medianame] = 1 + media['repos'].each{|reponame, repo| + sectionsrepos[reponame] = 1 + } + } + distro['arch'].each{|arch| + arches[arch] = 1 + } +} +-%> +dryrun=echo +if [ -z $SUDO_USER ] +then + requestuser="$USER" +else + requestuser="$SUDO_USER" +fi +lockdir=/var/lib/repoctl/locks 
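The repoctl.conf template that begins above renders a plain shell key=value file: the ERB preamble flattens the Puppet distros hash into space-separated lists such as distroreleases, distrosections, sectionsrepos and arches (written out just below this point), dryrun=echo keeps privileged commands inert until it is cleared, and requestuser records who invoked the tool even under sudo. A minimal sketch of how a repoctl script might consume the rendered file, assuming it is sourced from /etc/repoctl.conf; the install path and the rsync invocation are illustrative assumptions, not part of this commit:

#!/bin/sh
# Hedged illustration only: push every release/arch tree from the bootstrap
# distrib dir to the public mirror dir, using variables set in repoctl.conf.
. /etc/repoctl.conf            # path assumed for the rendered template

for release in $distroreleases; do
    for arch in $arches; do
        # dryrun=echo (set above) turns this into a harmless echo until cleared
        $dryrun rsync $mirror_rsync_options \
            "$distribdir/$release/$arch/" "$finaldistribdir/$release/$arch/"
    done
done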
+hdlistsdir=/var/lib/repoctl/hdlists +rootdir=<%= scope.lookupvar('buildsystem::var::repository::bootstrap_root') %> +finalrootdir=<%= scope.lookupvar('buildsystem::var::repository::mirror_root') %> +distribdir=$rootdir/<%= distribdir %> +finaldistribdir=$finalrootdir/<%= distribdir %> +distroreleases='<%= distros.keys.sort.join(' ') -%>' +distrosections='<%= distrosections.keys.sort.join(' ') -%>' +sectionsrepos='<%= sectionsrepos.keys.sort.join(' ') -%>' +arches='<%= arches.keys.sort.join(' ') -%>' +mirror_rsync_options="-v --delete -alH" +timestampfile="mageia_timestamp" +sha1sumfile="mageia_sha1sum" +sha1sumsigfile="mageia_sha1sum.gpg" +sign_mirror_sha1sum=/bin/true diff --git a/modules/buildsystem/templates/rpmlint.conf b/modules/buildsystem/templates/rpmlint.conf new file mode 100644 index 00000000..b81f169b --- /dev/null +++ b/modules/buildsystem/templates/rpmlint.conf @@ -0,0 +1,7 @@ +from Config import * +execfile('/etc/rpmlint/extracted.d/distribution.exceptions.conf') + +for i in open('/etc/rpmlint/extracted.d/distribution.error.list').readlines(): + setBadness(i, 10) + + diff --git a/modules/buildsystem/templates/signbot/sudoers.signpackage b/modules/buildsystem/templates/signbot/sudoers.signpackage new file mode 100644 index 00000000..4ea30238 --- /dev/null +++ b/modules/buildsystem/templates/signbot/sudoers.signpackage @@ -0,0 +1,2 @@ +<%= sched_login %> ALL =(<%= scope.lookupvar('buildsystem::var::signbot::login') %>) NOPASSWD: /usr/local/bin/mga-signpackage +<%= sched_login %> ALL =(<%= scope.lookupvar('buildsystem::var::signbot::login') %>) NOPASSWD: /usr/local/bin/sign-check-package diff --git a/modules/buildsystem/templates/submit_package.pl b/modules/buildsystem/templates/submit_package.pl new file mode 100755 index 00000000..1fdf7749 --- /dev/null +++ b/modules/buildsystem/templates/submit_package.pl @@ -0,0 +1,18 @@ +#!/usr/bin/perl +use strict; +use warnings; + +my $svn_server = '<%= scope.lookupvar('buildsystem::var::mgarepo::svn_hostname') %>'; +my $packagersgroup="<%= scope.lookupvar('buildsystem::var::groups::packagers') %>"; + +my $login = getpwuid($<); +my (undef, undef, undef, $members) = getgrnam $packagersgroup; +if (not $members =~ /\b$login\b/) { + print "You are not in $packagersgroup group\n"; + exit 1; +} + +# for bug 914 +# https://bugs.mageia.org/show_bug.cgi?id=914 +map { $_ =~ s|^svn\+ssh://$svn_server/|svn://$svn_server/| } @ARGV; +exec "/usr/share/mgarepo/create-srpm", @ARGV; diff --git a/modules/buildsystem/templates/sudoers.iurt b/modules/buildsystem/templates/sudoers.iurt index 266f301c..21e81e87 100644 --- a/modules/buildsystem/templates/sudoers.iurt +++ b/modules/buildsystem/templates/sudoers.iurt @@ -1 +1 @@ -<%= build_login %> ALL = NOPASSWD: /usr/sbin/iurt_root_command +<%= scope.lookupvar('buildsystem::var::iurt::login') %> ALL = NOPASSWD: /usr/sbin/iurt_root_command diff --git a/modules/buildsystem/templates/sudoers.youri b/modules/buildsystem/templates/sudoers.youri new file mode 100644 index 00000000..3bc7cc2d --- /dev/null +++ b/modules/buildsystem/templates/sudoers.youri @@ -0,0 +1,6 @@ +<%- sched_login = scope.lookupvar('buildsystem::var::scheduler::login') -%> +Cmnd_Alias YOURI = /usr/local/bin/mga-youri-submit.wrapper +Defaults!YOURI always_set_home +Defaults!YOURI runas_default = <%= sched_login %> +Defaults!YOURI !requiretty +%<%= scope.lookupvar('buildsystem::var::groups::packagers') -%> ALL = (<%= sched_login %>) NOPASSWD: YOURI diff --git a/modules/buildsystem/templates/upload.conf b/modules/buildsystem/templates/upload.conf 
new file mode 100644 index 00000000..af610c92 --- /dev/null +++ b/modules/buildsystem/templates/upload.conf @@ -0,0 +1,131 @@ +### +# +# Do not disable the host without appropriate warning +# to somebody able to fix the machine +# +# Please run 'perl -cw .upload.conf' in order to check the file is OK. +# +### + +<%- + build_nodes = scope.lookupvar('buildsystem::var::scheduler::build_nodes') +-%> +my %nodes = ( +<%- + build_nodes.keys.sort.each{|arch| +-%> + <%= arch -%> => [ '<%= build_nodes[arch].join("', '") -%>' ], +<%- + } +-%> +); +my $repository = "http://<%= scope.lookupvar('buildsystem::var::repository::hostname') %>/<%= scope.lookupvar('buildsystem::var::repository::distribdir') %>/"; +my $homedir = "<%= scope.lookupvar('buildsystem::var::iurt::homedir') %>"; + +{ + bot => { + (map { + my $arch = $_; + $arch => { + map { + my $node = $_; + ($node => { + iurt => { + user => '<%= scope.lookupvar('buildsystem::var::iurt::login') %>', +# (spuk, 2007-08-16) disabled iurt_cache additional media, locks trying to mount -o bind +# command => "iurt --copy-srpm --group -v 6 --config local_spool $homedir/iurt/__DIR__ --no_rsync --chrooted-urpmi -m __MEDIA__ -- $repository --additional-media -m __MEDIA__ -- file://$homedir/cache/ -p \"__PACKAGER__\" -r __TARGET__ __ARCH__", + command => "iurt --copy_srpm --group --rpmmacros \"%distro_section __SECTION__\" --config local_spool $homedir/iurt/__DIR__ --no_rsync --chrooted-urpmi -m __MEDIA__ -- $repository -p \"__PACKAGER__\" -r __TARGET__ __ARCH__", + packages => "$homedir/iurt/", + }, + }); + } @{$nodes{$arch}}, + }; + } keys %nodes), + }, + media => { + <%- + def repo_deps(distros, dname, mname, rname) + deps = {} + distro = distros[dname] + if distro['based_on'] != nil + distro['based_on'].each{|bdistro, bmedias| + if bmedias[mname] != nil and \ + bmedias[mname].include?(rname) then + deps[ [ bdistro, mname, rname ].join('/') ] = 1 + end + } + end + if distro['medias'][mname] != nil \ + and distro['medias'][mname]['repos'][rname] != nil + then + deps[ [ mname, rname ].join('/') ] = 1 + else + return deps + end + mlist = distro['medias'][mname]['requires'] + mlist = mlist == nil ? 
[ mname ] : [ mname ] + mlist + mlist.each{|mreq| + rlist = distro['medias'][mname]['repos'][rname]['requires'] + rlist = [] if rlist == nil + rlist += [ rname ] if mreq != mname + rlist.each{|rreq| + deps.merge!(repo_deps(distros, dname, mreq, rreq)) + } + } + return deps + end + distros = scope.lookupvar('buildsystem::var::distros::distros') + distros.keys.sort.each{|distroname| + -%> + '<%= distroname -%>' => { + <%- + distro = distros[distroname] + distro['medias'].keys.sort.each{|medianame| + media = distro['medias'][medianame] + media['repos'].keys.sort.each{|reponame| + deps = repo_deps(distros, distroname, medianame, reponame) + -%> + "<%= [ medianame, reponame ].join('/') %>" => [ "<%= + deps.keys.sort.join('", "') + %>" ], + <%- + } + } + -%> + }, + <%- + } + -%> + }, + admin => '<%= scope.lookupvar('buildsystem::var::scheduler::admin_mail') %>', + http_queue => 'https://<%= scope.lookupvar('buildsystem::var::webstatus::hostname') %>/uploads', + upload_user => '<%= scope.lookupvar('buildsystem::var::scheduler::login') %>', + email_domain => '<%= domain %>', + arch => { + <%- + distros.keys.sort.each{|distroname| + -%> + <%= distroname -%> => [ '<%= distros[distroname]['arch'].join("', '") %>' ], + <%- + } + -%> + default => [ 'i586', 'x86_64' ], + }, + mandatory_arch => { + <%- + distros.keys.sort.each{|distroname| + if distros[distroname]['mandatory_arch'] != nil + march = distros[distroname]['mandatory_arch'] + else + march = distros[distroname]['arch'] + end + -%> + <%= distroname -%> => [ '<%= march.join("', '") %>' ], + <%- + } + -%> + default => [ 'i586', 'x86_64' ], + }, + ssh_options => "-o ServerAliveInterval=10 -o ConnectTimeout=20 -o BatchMode=yes", + faildelay => 360000, +} diff --git a/modules/buildsystem/templates/vhost_repository.conf b/modules/buildsystem/templates/vhost_repository.conf new file mode 100644 index 00000000..e082ffca --- /dev/null +++ b/modules/buildsystem/templates/vhost_repository.conf @@ -0,0 +1,73 @@ +<%- +mirror_root = scope.lookupvar('buildsystem::var::repository::mirror_root') +mirror_reporoot = scope.lookupvar('buildsystem::var::repository::mirror_reporoot') +bootstrap_reporoot = scope.lookupvar('buildsystem::var::repository::bootstrap_reporoot') +distribdir = scope.lookupvar('buildsystem::var::repository::distribdir') +repo_allow_from_ips = scope.lookupvar('buildsystem::var::distros::repo_allow_from_ips') +repo_allow_from_domains = scope.lookupvar('buildsystem::var::distros::repo_allow_from_ips') +distros = scope.lookupvar('buildsystem::var::distros::distros') +-%> +<VirtualHost *:80> + ServerName <%= scope.lookupvar('buildsystem::var::repository::hostname') %> + DocumentRoot <%= mirror_root %> + + # Some simple API to check existence of SRPMs for QA + RewriteEngine On + + RewriteCond /distrib/bootstrap/distrib/$2/SRPMS/$3/$1s_testing/$4.src.rpm -f + RewriteRule ^/qa/checksrpm/(update|backport)/([1-9][0-9]*)/([a-z_]+)/([^/]+)$ http://repository.mageia.org/qa/checksrpm/found [L,R=302] + + RewriteRule ^/qa/checksrpm/ - [L,G] + +<%- + if repo_allow_from_ips != nil || repo_allow_from_domains != nil then + access_requires = [ 'all denied' ] + if repo_allow_from_ips != nil then + for allow in repo_allow_from_ips do + access_requires << 'ip ' + allow + end + end + if repo_allow_from_domains != nil then + for allow in repo_allow_from_domains do + access_requires << 'host ' + allow + end + end + else + access_requires = [ 'all granted' ] + end +%> + Alias /bootstrap/ "<%= bootstrap_reporoot %>/" +<%- + distros.keys.sort.each{|distroname| + distro = 
distros[distroname] + if distro['no_mirror'] -%> + Alias /<%= distribdir %>/<%= distroname %>/ "<%= bootstrap_reporoot %>/<%= distroname %>/" +<%- + end + } +-%> + + <Directory <%= bootstrap_reporoot %>> + Header append Cache-Control "public, must-revalidate" +<%- + for req in access_requires do +-%> + Require <%= req %> +<%- + end +-%> + Options Indexes FollowSymLinks + </Directory> + + <Directory <%= mirror_root %>> + Header append Cache-Control "public, must-revalidate" +<%- + for req in access_requires do +-%> + Require <%= req %> +<%- + end +-%> + Options Indexes FollowSymLinks + </Directory> +</VirtualHost> diff --git a/modules/buildsystem/templates/vhost_webstatus.conf b/modules/buildsystem/templates/vhost_webstatus.conf new file mode 100644 index 00000000..3b0e6246 --- /dev/null +++ b/modules/buildsystem/templates/vhost_webstatus.conf @@ -0,0 +1,13 @@ +<Location /uploads> + Require all granted + Options Indexes + IndexOptions NameWidth=* +</Location> +<Location /autobuild> + Require all granted + Options Indexes + IndexOptions NameWidth=* +</Location> +<Directory /var/www/bs/autobuild> + Options FollowSymlinks FollowSymLinks +</Directory> diff --git a/modules/buildsystem/templates/youri/acl.conf b/modules/buildsystem/templates/youri/acl.conf new file mode 100644 index 00000000..f0949f8a --- /dev/null +++ b/modules/buildsystem/templates/youri/acl.conf @@ -0,0 +1 @@ +.* .* .* ^glib$ ^blacklisted$ diff --git a/modules/buildsystem/templates/youri/host.conf b/modules/buildsystem/templates/youri/host.conf new file mode 100644 index 00000000..bf4fa086 --- /dev/null +++ b/modules/buildsystem/templates/youri/host.conf @@ -0,0 +1,23 @@ +<%- + aliases = scope.lookupvar('buildsystem::var::scheduler::build_nodes_aliases') + nodes = {} + nodes['src'] = [ scope.lookupvar('buildsystem::var::scheduler::build_src_node') ] + scope.lookupvar('buildsystem::var::scheduler::build_nodes').each{|arch,n| + a = arch + '|noarch|src' + nodes[a] = [] + n.each{|node| + if aliases[node] != nil + nodes[a] += [ aliases[node] ] + else + nodes[a] += [ node ] + end + } + } + str = '' + nodes.keys.sort.each{|arch| + nodes[arch].sort.uniq.each{|node| + str += node + ' ' + arch + "\n" + } + } +-%> +<%= str -%> diff --git a/modules/buildsystem/templates/youri/submit.conf b/modules/buildsystem/templates/youri/submit.conf new file mode 100644 index 00000000..0d7cf927 --- /dev/null +++ b/modules/buildsystem/templates/youri/submit.conf @@ -0,0 +1,140 @@ +<% +Puppet::Parser::Functions.autoloader.loadall +sched_home_dir = scope.lookupvar('buildsystem::var::scheduler::homedir') + +conf = scope.lookupvar('buildsystem::var::youri::youri_conf') +conf_default = scope.lookupvar('buildsystem::var::youri::youri_conf_default') +distros = scope.lookupvar('buildsystem::var::distros::distros') + +def line(text, indent) + res = '' + i = 0 + while i < indent + res += ' ' + i += 1 + end + res += text + "\n" +end + +def array_text(array, indent) + res = '' + array.each{|a| + res += line('- ' + a, indent) + } + return res +end + +def hash_text(hash, indent) + res = '' + curindent = indent + hash.keys.sort.each{|key| + if hash[key].instance_of? Hash + res += line(key + ':', indent) + res += hash_text(hash[key], indent + 4) + elsif hash[key].instance_of? Array + res += line(key + ':', indent) + res += array_text(hash[key], indent + 4) + elsif hash[key].instance_of? String + res += line(key + ': ' + hash[key], indent) + end + } + return res +end + +def class_hash(conf, conf_default) + res = {} + res['class'] = get_conf(conf, ['class']) == nil ? 
+ conf_default['class'] : conf['class'] + res['options'] = get_conf(conf_default, ['options']) == nil ? {} : + conf_default['options'].dup + if get_conf(conf, ['options']) != nil + res['options'].merge!(conf['options']) + end + return res +end + +def get_conf(conf, path) + res = conf + path.each{|p| + if res == nil + return nil + end + res = res[p] + } + return res +end + +def get_distros_conf(distros, conf_name, path) + res = {} + distros.keys.each{|distro| + t = get_conf(distros[distro], [ 'youri', conf_name ] + path) + if t != nil + res[distro] = t.dup + end + } + return res +end + +def get_definitions(def_name, conf_name, conf, conf_default, distros) + res = {} + res[def_name] = {} + def_list = conf_default[conf_name][def_name].keys + def_list += get_conf(conf, [ conf_name, def_name ]) != nil ? \ + conf[conf_name][def_name].keys : [] + def_list.uniq.each{|d| + res[def_name][d] = class_hash( + get_conf(conf, [ conf_name, def_name, d ]), + get_conf(conf_default, [ conf_name, def_name, d ]) + ) + res[def_name][d]['options'].merge!(get_distros_conf(distros, + conf_name, [ def_name, d ])) + } + return res +end + +%> +home: <%= sched_home_dir %> + +<%- + repository = { + 'repository' => class_hash(get_conf(conf[conf_name], + ['repository']), + conf_default[conf_name]['repository']), + } + distros.keys.each{|distro| + repository['repository']['options'][distro] = { + 'arch' => distros[distro]['arch'].join(' '), + } + } +-%> +# repository declaration +<%= hash_text(repository, 0) %> + +<%- + targets = { + 'targets' => get_distros_conf(distros, conf_name, [ 'targets' ]), + } +-%> +# targets definition +<%= hash_text(targets, 0) %> + +<%- + checks = get_definitions('checks', conf_name, conf, conf_default, distros) +-%> +# checks definition +<%= hash_text(checks, 0) -%> + +<%- + actions = get_definitions('actions', conf_name, conf, conf_default, distros) +-%> +# actions definitions +<%= hash_text(actions, 0) -%> + +<%- + posts = get_definitions('posts', conf_name, conf, conf_default, distros) +-%> + +# posts definitions +<%= hash_text(posts, 0) -%> + +# vim:ft=yaml:et:sw=4 diff --git a/modules/catdap/manifests/init.pp b/modules/catdap/manifests/init.pp index 018b6ed5..f7172208 100644 --- a/modules/catdap/manifests/init.pp +++ b/modules/catdap/manifests/init.pp @@ -1,42 +1,47 @@ class catdap { - $catdap_location = "/var/www/identity" - $catdap_vhost = "identity.$domain" + $upstream_git = "git://git.${::domain}/web/identity" # TODO switch to a proper rpm packaging - $rpm_requirement = ['perl-Catalyst-Runtime',"perl-FCGI", 'perl-Catalyst-Plugin-Authorization-Roles', -"perl-Catalyst-Action-RenderView", "perl-Catalyst-Model-LDAP-FromAuthentication", "perl-Catalyst-P-A-Store-LDAP", "perl-Catalyst-Plugin-Authentication", "perl-Catalyst-Plugin-Captcha", -"perl-Catalyst-Plugin-ConfigLoader", "perl-Catalyst-Plugin-I18N", "perl-Catalyst-Plugin-Session-Store-File", "perl-Catalyst-Plugin-Static-Simple", -"perl-Catalyst-P-S-State-Cookie", "perl-Catalyst-P-S-Store-File", "perl-Catalyst-View-Email", -"perl-Catalyst-View-TT", "perl-Config-General", "perl-Crypt-CBC", "perl-Data-UUID", -"perl-Email-Valid", "perl-Moose", "perl-namespace-autoclean", "perl-Test-Simple", -"perl-Crypt-Blowfish", "perl-Email-Date-Format", "perl-YAML-LibYAML", -] + $rpm_requirement = ['perl-Catalyst-Runtime', + 'perl-FCGI', + 'perl-Catalyst-Plugin-Authorization-Roles', + 'perl-Catalyst-Action-RenderView', + 'perl-Catalyst-Model-LDAP-FromAuthentication', + 'perl-Catalyst-P-A-Store-LDAP', + 'perl-Catalyst-Plugin-Authentication', + 
'perl-Catalyst-Plugin-Captcha', + 'perl-Catalyst-Plugin-ConfigLoader', + 'perl-Catalyst-Plugin-I18N', + 'perl-Catalyst-Plugin-Session-Store-File', + 'perl-Catalyst-Plugin-Static-Simple', + 'perl-Catalyst-P-S-State-Cookie', + 'perl-Catalyst-View-Email', + 'perl-Catalyst-View-TT', + 'perl-Config-General', + 'perl-Crypt-CBC', + 'perl-Data-UUID', + 'perl-Email-Valid', + 'perl-Moose', + 'perl-namespace-autoclean', + 'perl-Test-Simple', + 'perl-Crypt-Blowfish', + 'perl-Email-Date-Format', + 'perl-YAML-LibYAML', + 'perl-IO-Socket-INET6' ] - package { $rpm_requirement: - ensure => installed - } + package { $rpm_requirement: } - subversion::snapshot { $catdap_location: - source => "svn://svn.mageia.org/soft/identity/CatDap/branches/live" - } + $ldap_password = extlookup('catdap_ldap','x') - $catdap_password = extlookup('catdap_password') - - file { "$catdap_location/catdap_local.yml": - ensure => present, - owner => root, - group => apache, - mode => 640, - content => template("catdap/catdap_local.yml"), - require => Subversion::Snapshot[$catdap_location] + catdap::snapshot { "identity.${::domain}": + location => '/var/www/identity', + git_location => $upstream_git, + git_branch => 'topic/production', } - apache::vhost_catalyst_app { $catdap_vhost: - script => "$catdap_location/script/catdap_fastcgi.pl", - location => $catdap_location, - use_ssl => true, + catdap::snapshot { "identity-trunk.${::domain}": + location => '/var/www/identity-trunk', + git_location => $upstream_git, } - - apache::vhost_redirect_ssl { $catdap_vhost: } } diff --git a/modules/catdap/manifests/snapshot.pp b/modules/catdap/manifests/snapshot.pp new file mode 100644 index 00000000..35ca692e --- /dev/null +++ b/modules/catdap/manifests/snapshot.pp @@ -0,0 +1,21 @@ +define catdap::snapshot($location, $git_location, $git_branch = 'master') { + file { "${location}/catdap_local.yml": + group => apache, + mode => '0640', + content => template('catdap/catdap_local.yml'), + require => Git::Snapshot[$location], + } + + git::snapshot { $location: + source => $git_location, + branch => $git_branch, + } + + apache::vhost::catalyst_app { $name: + script => "${location}/script/catdap_fastcgi.pl", + location => $location, + use_ssl => true, + } + + apache::vhost::redirect_ssl { $name: } +} diff --git a/modules/catdap/templates/catdap_local.yml b/modules/catdap/templates/catdap_local.yml index 50f43601..d982b40b 100644 --- a/modules/catdap/templates/catdap_local.yml +++ b/modules/catdap/templates/catdap_local.yml @@ -1,22 +1,20 @@ <% -ldap_server = 'ldap.' 
+ domain +ldap_server = "ldap-master.#{domain}" -ldap_password = catdap_password - -ldap_account = 'cn=catdap-valstar,ou=System Accounts,' + dc_suffix +ldap_account = "cn=catdap-#{hostname},ou=System Accounts,#{dc_suffix}" %> organisation: Mageia apptitle: Mageia Identity Management -emailfrom: noreply@<%= domain %> +emailfrom: noreply@<%= @domain %> Model::Proxy: - base: ou=People,<%= dc_suffix %> + base: ou=People,<%= @dc_suffix %> dn: <%= ldap_account %> - password: <%= ldap_password %> + password: <%= scope.lookupvar("catdap::ldap_password") %> Model::User: - base: <%= dc_suffix %> + base: <%= @dc_suffix %> host: <%= ldap_server %> start_tls: 1 @@ -27,7 +25,98 @@ authentication: store: ldap_server: <%= ldap_server %> binddn: <%= ldap_account %> - bindpw: <%= ldap_password %> - user_basedn: ou=People,<%= dc_suffix %> - role_basedn: <%= dc_suffix %> + bindpw: <%= scope.lookupvar("catdap::ldap_password") %> + user_basedn: ou=People,<%= @dc_suffix %> + role_basedn: <%= @dc_suffix %> + +register: + login_regex: ^[a-z][a-z0-9]*$ + login_blacklist: + - abuse + - apache + - bcd + - hostmaster + - iurt + - listmaster + - MAILER-DAEMON + - mirror + - noc + - postmaster + - president + - schedbot + - secretary + - security + - signbot + - treasurer + - webmaster + - www + + email_domain_blacklist: + - armyspy.com + - bitmessage.ch + - codehot.co.uk + - crazymailing.com + - dayrep.com + - group.mageia.org + - grr.la + - guerrillamail.biz + - guerrillamail.com + - guerrillamail.de + - guerrillamail.info + - guerrillamail.net + - guerrillamail.org + - guerrillamailblock.com + - jourrapide.com + - ml.mageia.org + - namecheap.com + - pokemail.net + - rhyta.com + - runbox.com + - sharklasers.com + - spam4.me + - vmani.com + - wowring.ru + - yopmail.com + - zasod.com +Controller::User: + editable_attrs: + - cn + - sn + - givenName + - mobile + - mailForwardingAddress + - preferredLanguage + uneditable_attrs: + - uid + - uidNumber + - gidNumber + - homeDirectory + - mail + - sshPublicKey + - loginShell + skip_attrs: + - objectClass + - krb5Key + - sambaMungedDial + - sambaPasswordHistory + - userPassword + - sambaLMPassword + - sambaNTPassword + - sambaPwdMustChange + - sambaSID + - sambaPrimaryGroupSID + - sambaAcctFlags + - sambaPwdCanChange + - sambaPwdLastSet + - sambaKickOffTime + - sambaUserWorkstations + - sambaLogonTime + - krb5KeyVersionNumber + - krb5PasswordEnd + - krb5MaxLife + - krb5MaxRenew + - krb5KDCFlags + - shadowLastChange + - roomNumber + - secretary diff --git a/modules/cgit/manifests/init.pp b/modules/cgit/manifests/init.pp new file mode 100644 index 00000000..60dc9bad --- /dev/null +++ b/modules/cgit/manifests/init.pp @@ -0,0 +1,27 @@ +class cgit { + package { 'cgit': } + + file { '/etc/cgitrc': + content => template('cgit/cgitrc'), + notify => Service['apache'], + require => Package['cgit'], + } + + apache::webapp_other { 'cgit': + webapp_file => 'cgit/webapp.conf', + } + + mga_common::local_script { 'cgit.filter.commit-links.sh': + content => template('cgit/filter.commit-links.sh'), + } + + apache::vhost::base { "gitweb.${::domain}": + content => template('cgit/vhost.conf') + } + + apache::vhost::base { "ssl_gitweb.${::domain}": + use_ssl => true, + vhost => "gitweb.${::domain}", + content => template('cgit/vhost.conf') + } +} diff --git a/modules/cgit/templates/cgitrc b/modules/cgit/templates/cgitrc new file mode 100644 index 00000000..1e1a399c --- /dev/null +++ b/modules/cgit/templates/cgitrc @@ -0,0 +1,137 @@ +# +# See cgitrc(5) or /usr/share/doc/cgit-*/cgitrc.5.html for 
details +# + +# Enable caching of up to 1000 output entries +cache-size=1000 + + +# Specify some default clone urls using macro expansion +clone-url=git://git.mageia.org/$CGIT_REPO_URL ssh://git@git.mageia.org/$CGIT_REPO_URL + +# Specify the css url +css=/cgit-data/cgit.css + + +# Show owner on index page +enable-index-owner=1 + + +# Allow http transport git clone +enable-git-clone=1 + + +# Show extra links for each repository on the index page +enable-index-links=1 + + +# Enable ASCII art commit history graph on the log pages +enable-commit-graph=1 + + +# Show number of affected files per commit on the log pages +enable-log-filecount=1 + + +# Show number of added/removed lines per commit on the log pages +enable-log-linecount=1 + + +# Sort branches by date +branch-sort=age + + +# Add a cgit favicon +#favicon=/favicon.ico + + +# Use a custom logo +logo=//nav.mageia.org/css/mageia-logo-nav-3.png + +# Try to avoid pagination on the mail page (until we have too many software repos) +max-repo-count=200 + +# Enable statistics per week, month and quarter +max-stats=quarter + + +# Set the title and heading of the repository index page +root-title=Mageia git Repositories + + +# Set a subheading for the repository index page +root-desc=A web frontend to the git repositories of the Mageia project + + +# Include some more info about example.com on the index page +#root-readme=/var/www/htdocs/about.html + + +# Allow download of tar.gz, tar.bz2 and zip-files +#snapshots=tar.gz tar.bz2 zip +snapshots=all + + +## +## List of common mimetypes +## + +mimetype.gif=image/gif +mimetype.html=text/html +mimetype.jpg=image/jpeg +mimetype.jpeg=image/jpeg +mimetype.pdf=application/pdf +mimetype.png=image/png +mimetype.svg=image/svg+xml + + +# Highlight source code with python pygments-based highlighter +source-filter=/usr/libexec/cgit/filters/syntax-highlighting.sh + +# Format markdown, restructuredtext, manpages, text files, and html files +# through the right converters +about-filter=/usr/libexec/cgit/filters/about-formatting.sh + +## +## Search for these files in the root of the default branch of repositories +## for coming up with the about page: +## +readme=:README.mga.md +readme=:README.md +readme=:README.rst +readme=:README.html +readme=:README.txt +readme=:README +readme=:INSTALL.md +readme=:INSTALL.rst +readme=:INSTALL.html +readme=:INSTALL.txt +readme=:INSTALL + +# Special Case mainly for initscripts git repo where we cannot write to master +readme=distro/mga:README.md + +## +## List of repositories. +## PS: Any repositories listed when section is unset will not be +## displayed under a section heading +## PPS: This list could be kept in a different file (e.g. '/etc/cgitrepos') +## and included like this: +## include=/etc/cgitrepos +## + +#repo.url=foo +#repo.path=/var/lib/git/foo.git +#repo.desc=the master foo repository +#repo.owner=fooman@example.com +#repo.readme=info/web/about.html + + +commit-filter=/usr/local/bin/cgit.filter.commit-links.sh + +enable-git-config=1 +section-from-path=-1 +case-sensitive-sort=0 +remove-suffix=1 +scan-path=/git +enable-http-clone=0 diff --git a/modules/cgit/templates/filter.commit-links.sh b/modules/cgit/templates/filter.commit-links.sh new file mode 100755 index 00000000..f0f7ee14 --- /dev/null +++ b/modules/cgit/templates/filter.commit-links.sh @@ -0,0 +1,44 @@ +#!/bin/sh +# This script can be used to generate links in commit messages. +# +# To use this script, refer to this file with either the commit-filter or the +# repo.commit-filter options in cgitrc. 
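Since the cgitrc above sets commit-filter=/usr/local/bin/cgit.filter.commit-links.sh, cgit pipes each commit message through this script and embeds whatever it prints when rendering the log and commit pages. A quick way to exercise the rewriting by hand, with an invented sample message (a hedged illustration, not part of the commit):

# Hypothetical smoke test; the commit message below is made up.
echo 'Fix crash on upgrade (mga#12345), see rhbz#678901 and commit deadbeef1' \
    | /usr/local/bin/cgit.filter.commit-links.sh
# Expected shape of the output: deadbeef1 (7+ hex characters) is wrapped in
# <a href="./?id=deadbeef1">...</a>, mga#12345 links to
# https://bugs.mageia.org/12345, and rhbz#678901 links to the Red Hat
# Bugzilla; the bug numbers are kept shorter than 7 digits so the SHA1
# rule does not also match them.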
+# +# The following environment variables can be used to retrieve the configuration +# of the repository for which this script is called: +# CGIT_REPO_URL ( = repo.url setting ) +# CGIT_REPO_NAME ( = repo.name setting ) +# CGIT_REPO_PATH ( = repo.path setting ) +# CGIT_REPO_OWNER ( = repo.owner setting ) +# CGIT_REPO_DEFBRANCH ( = repo.defbranch setting ) +# CGIT_REPO_SECTION ( = section setting ) +# CGIT_REPO_CLONE_URL ( = repo.clone-url setting ) +# + +regex='' + +# This expression generates links to commits referenced by their SHA1. +regex=$regex' +s|\b([0-9a-fA-F]{7,40})\b|<a href="./?id=\1">\1</a>|g' + +# This expression generates links various common bugtrackers. +# When editing this list, remember to edit the same list in +# deployment/mgagit/templates/git-post-receive-hook +regex=$regex' +s|mga#([0-9]+)\b|<a href="https://bugs.mageia.org/\1">mga#\1</a>|g' +regex=$regex' +s|rhbz#([0-9]+)\b|<a href="https://bugzilla.redhat.com/show_bug.cgi?id=\1">rhbz#\1</a>|g' +regex=$regex' +s|fdo#([0-9]+)\b|<a href="https://bugs.freedesktop.org/show_bug.cgi?id=\1">fdo#\1</a>|g' +regex=$regex' +s|bko#([0-9]+)\b|<a href="https://bugs.kde.org/show_bug.cgi?id=\1">bko#\1</a>|g' +regex=$regex' +s|kde#([0-9]+)\b|<a href="https://bugs.kde.org/show_bug.cgi?id=\1">kde#\1</a>|g' +regex=$regex' +s|bgo#([0-9]+)\b|<a href="https://bugzilla.gnome.org/show_bug.cgi?id=\1">bgo#\1</a>|g' +regex=$regex' +s|gnome#([0-9]+)\b|<a href="https://bugzilla.gnome.org/show_bug.cgi?id=\1">gnome#\1</a>|g' +regex=$regex' +s|lp#([0-9]+)\b|<a href="https://launchpad.net/bugs/\1">lp#\1</a>|g' + +sed -re "$regex" diff --git a/modules/cgit/templates/vhost.conf b/modules/cgit/templates/vhost.conf new file mode 100644 index 00000000..5c1d99e7 --- /dev/null +++ b/modules/cgit/templates/vhost.conf @@ -0,0 +1,8 @@ +Alias /cgit-data /usr/share/cgit +Alias /robots.txt /usr/share/cgit/robots.txt +ScriptAliasMatch ^(.*) /var/www/cgi-bin/cgit$1 + +<Directory /usr/share/cgit> + Order allow,deny + Allow from all +</Directory> diff --git a/modules/cgit/templates/webapp.conf b/modules/cgit/templates/webapp.conf new file mode 100644 index 00000000..4e1d8289 --- /dev/null +++ b/modules/cgit/templates/webapp.conf @@ -0,0 +1,3 @@ +# Disable standard cgit configuration +#Alias /cgit-data /usr/share/cgit +#ScriptAlias /cgit /var/www/cgi-bin/cgit diff --git a/modules/concat/CHANGELOG b/modules/concat/CHANGELOG deleted file mode 100644 index 2f8aecc3..00000000 --- a/modules/concat/CHANGELOG +++ /dev/null @@ -1,27 +0,0 @@ -KNOWN ISSUES: -- In 0.24.8 you will see inintended notifies, if you build a file - in a run, the next run will also see it as changed. This is due - to how 0.24.8 does the purging of unhandled files, this is improved - in 0.25.x and we cannot work around it in our code. - -CHANGELOG: -- 2010/02/19 - initial release -- 2010/03/12 - add support for 0.24.8 and newer - - make the location of sort configurable - - add the ability to add shell comment based warnings to - top of files - - add the ablity to create empty files -- 2010/04/05 - fix parsing of WARN and change code style to match rest - of the code - - Better and safer boolean handling for warn and force - - Don't use hard coded paths in the shell script, set PATH - top of the script - - Use file{} to copy the result and make all fragments owned - by root. This means we can chnage the ownership/group of the - resulting file at any time. - - You can specify ensure => "/some/other/file" in concat::fragment - to include the contents of a symlink into the final file. 
-- 2010/04/16 - Add more cleaning of the fragment name - removing / from the $name -- 2010/05/22 - Improve documentation and show the use of ensure => -- 2010/07/14 - Add support for setting the filebucket behavior of files -- 2010/10/04 - Make the warning message configurable diff --git a/modules/concat/README.markdown b/modules/concat/README.markdown deleted file mode 100644 index 3f325097..00000000 --- a/modules/concat/README.markdown +++ /dev/null @@ -1,103 +0,0 @@ -What is it? -=========== - -A Puppet module that can construct files from fragments. - -Please see the comments in the various .pp files for details -as well as posts on my blog at http://www.devco.net/ - -Released under the Apache 2.0 licence - -Usage: ------- - -If you wanted a /etc/motd file that listed all the major modules -on the machine. And that would be maintained automatically even -if you just remove the include lines for other modules you could -use code like below, a sample /etc/motd would be: - -<pre> -Puppet modules on this server: - - -- Apache - -- MySQL -</pre> - -Local sysadmins can also append to the file by just editing /etc/motd.local -their changes will be incorporated into the puppet managed motd. - -<pre> -# class to setup basic motd, include on all nodes -class motd { - include concat::setup - $motd = "/etc/motd" - - concat{$motd, - owner => root, - group => root, - mode => 644 - } - - concat::fragment{"motd_header": - target => $motd, - content => "\nPuppet modules on this server:\n\n", - order => 01, - } - - # local users on the machine can append to motd by just creating - # /etc/motd.local - concat::fragment{"motd_local": - target => $motd, - ensure => "/etc/motd.local", - order => 15 - } -} - -# used by other modules to register themselves in the motd -define motd::register($content="", $order=10) { - if $content == "" { - $body = $name - } else { - $body = $content - } - - concat::fragment{"motd_fragment_$name": - target => "/etc/motd", - content => " -- $body\n" - } -} - -# a sample apache module -class apache { - include apache::install, apache::config, apache::service - - motd::register{"Apache": } -} -</pre> - -Known Issues: -------------- -* In 0.24.8 you will see inintended notifies, if you build a file - in a run, the next run will also see it as changed. This is due - to how 0.24.8 does the purging of unhandled files, this is improved - in 0.25.x and we cannot work around it in our code. - -Contributors: -------------- -**Paul Elliot** - - * Provided 0.24.8 support, shell warnings and empty file creation support. - -**Chad Netzer** - - * Various patches to improve safety of file operations - * Symlink support - -**David Schmitt** - - * Patch to remove hard coded paths relying on OS path - * Patch to use file{} to copy the resulting file to the final destination. This means Puppet client will show diffs and that hopefully we can change file ownerships now - -Contact: --------- -You can contact me on rip@devco.net or follow my blog at http://www.devco.net I am also on twitter as ripienaar diff --git a/modules/concat/files/concatfragments.sh b/modules/concat/files/concatfragments.sh deleted file mode 100755 index b486047d..00000000 --- a/modules/concat/files/concatfragments.sh +++ /dev/null @@ -1,123 +0,0 @@ -#!/bin/bash - -# Script to concat files to a config file. 
-# -# Given a directory like this: -# /path/to/conf.d -# |-- fragments -# | |-- 00_named.conf -# | |-- 10_domain.net -# | `-- zz_footer -# -# The script supports a test option that will build the concat file to a temp location and -# use /usr/bin/cmp to verify if it should be run or not. This would result in the concat happening -# twice on each run but gives you the option to have an unless option in your execs to inhibit rebuilds. -# -# Without the test option and the unless combo your services that depend on the final file would end up -# restarting on each run, or in other manifest models some changes might get missed. -# -# OPTIONS: -# -o The file to create from the sources -# -d The directory where the fragments are kept -# -t Test to find out if a build is needed, basically concats the files to a temp -# location and compare with what's in the final location, return codes are designed -# for use with unless on an exec resource -# -w Add a shell style comment at the top of the created file to warn users that it -# is generated by puppet -# -f Enables the creation of empty output files when no fragments are found -# -n Sort the output numerically rather than the default alpha sort -# -# the command: -# -# concatfragments.sh -o /path/to/conffile.cfg -d /path/to/conf.d -# -# creates /path/to/conf.d/fragments.concat and copies the resulting -# file to /path/to/conffile.cfg. The files will be sorted alphabetically -# pass the -n switch to sort numerically. -# -# The script does error checking on the various dirs and files to make -# sure things don't fail. - -OUTFILE="" -WORKDIR="" -TEST="" -FORCE="" -WARN="" -SORTARG="-z" - -PATH=/sbin:/usr/sbin:/bin:/usr/bin - -while getopts "o:s:d:tnw:f" options; do - case $options in - o ) OUTFILE=$OPTARG;; - d ) WORKDIR=$OPTARG;; - n ) SORTARG="-zn";; - w ) WARNMSG="$OPTARG";; - f ) FORCE="true";; - t ) TEST="true";; - * ) echo "Specify output file with -o and fragments directory with -d" - exit 1;; - esac -done - -# do we have -o? -if [ x${OUTFILE} = "x" ]; then - echo "Please specify an output file with -o" - exit 1 -fi - -# do we have -d? -if [ x${WORKDIR} = "x" ]; then - echo "Please fragments directory with -d" - exit 1 -fi - -# can we write to -o? -if [ -a ${OUTFILE} ]; then - if [ ! -w ${OUTFILE} ]; then - echo "Cannot write to ${OUTFILE}" - exit 1 - fi -else - if [ ! -w `dirname ${OUTFILE}` ]; then - echo "Cannot write to `dirname ${OUTFILE}` to create ${OUTFILE}" - exit 1 - fi -fi - -# do we have a fragments subdir inside the work dir? -if [ ! -d "${WORKDIR}/fragments" ] && [ ! -x "${WORKDIR}/fragments" ]; then - echo "Cannot access the fragments directory" - exit 1 -fi - -# are there actually any fragments? -if [ ! "$(ls -A ${WORKDIR}/fragments)" ]; then - if [ x${FORCE} = "x" ]; then - echo "The fragments directory is empty, cowardly refusing to make empty config files" - exit 1 - fi -fi - -cd ${WORKDIR} - -if [ x${WARNMSG} = "x" ]; then - : > "fragments.concat" -else - echo -e "$WARNMSG" > "fragments.concat" -fi - -# find all the files in the fragments directory, sort them numerically and concat to fragments.concat in the working dir -find fragments/ -type f -follow -print0 |sort ${SORTARG}|xargs -0 cat >>"fragments.concat" - -if [ x${TEST} = "x" ]; then - # This is a real run, copy the file to outfile - cp fragments.concat ${OUTFILE} - RETVAL=$? -else - # Just compare the result to outfile to help the exec decide - cmp ${OUTFILE} fragments.concat - RETVAL=$? 
-fi - -exit $RETVAL diff --git a/modules/concat/manifests/fragment.pp b/modules/concat/manifests/fragment.pp deleted file mode 100644 index 890d43a4..00000000 --- a/modules/concat/manifests/fragment.pp +++ /dev/null @@ -1,51 +0,0 @@ -# Puts a file fragment into a directory previous setup using concat -# -# OPTIONS: -# - target The file that these fragments belong to -# - content If present puts the content into the file -# - source If content was not specified, use the source -# - order By default all files gets a 10_ prefix in the directory -# you can set it to anything else using this to influence the -# order of the content in the file -# - ensure Present/Absent or destination to a file to include another file -# - mode Mode for the file -# - owner Owner of the file -# - group Owner of the file -# - backup Controls the filebucketing behavior of the final file and -# see File type reference for its use. Defaults to 'puppet' -define concat::fragment($target, $content='', $source='', $order=10, $ensure = "present", $mode = 0644, $owner = root, $group = root, $backup = "puppet") { - $safe_name = regsubst($name, '/', '_', 'G') - $safe_target_name = regsubst($target, '/', '_', 'G') - $concatdir = $concat::setup::concatdir - $fragdir = "${concatdir}/${safe_target_name}" - - # if content is passed, use that, else if source is passed use that - # if neither passed, but $ensure is in symlink form, make a symlink - case $content { - "": { - case $source { - "": { - case $ensure { - "", "absent", "present", "file", "directory": { - crit("No content, source or symlink specified") - } - } - } - default: { File{ source => $source } } - } - } - default: { File{ content => $content } } - } - - file{"${fragdir}/fragments/${order}_${safe_name}": - mode => $mode, - owner => $owner, - group => $group, - ensure => $ensure, - backup => $backup, - alias => "concat_fragment_${name}", - notify => Exec["concat_${target}"] - } -} - -# vi:tabstop=4:expandtab:ai diff --git a/modules/concat/manifests/init.pp b/modules/concat/manifests/init.pp deleted file mode 100644 index b94411c2..00000000 --- a/modules/concat/manifests/init.pp +++ /dev/null @@ -1,164 +0,0 @@ -# A system to construct files using fragments from other files or templates. -# -# This requires at least puppet 0.25 to work correctly as we use some -# enhancements in recursive directory management and regular expressions -# to do the work here. -# -# USAGE: -# The basic use case is as below: -# -# concat{"/etc/named.conf": -# notify => Service["named"] -# } -# -# concat::fragment{"foo.com_config": -# target => "/etc/named.conf", -# order => 10, -# content => template("named_conf_zone.erb") -# } -# -# # add a fragment not managed by puppet so local users -# # can add content to managed file -# concat::fragment{"foo.com_user_config": -# target => "/etc/named.conf", -# order => 12, -# ensure => "/etc/named.conf.local" -# } -# -# This will use the template named_conf_zone.erb to build a single -# bit of config up and put it into the fragments dir. The file -# will have an number prefix of 10, you can use the order option -# to control that and thus control the order the final file gets built in. -# -# SETUP: -# The class concat::setup defines a variable $concatdir - you should set this -# to a directory where you want all the temporary files and fragments to be -# stored. Avoid placing this somewhere like /tmp since you should never -# delete files here, puppet will manage them. 
-# -# There's some regular expression magic to figure out the puppet version but -# if you're on an older 0.24 version just set $puppetversion = 24 -# -# Before you can use any of the concat features you should include the -# class concat::setup somewhere on your node first. -# -# DETAIL: -# We use a helper shell script called concatfragments.sh that gets placed -# in /usr/local/bin to do the concatenation. While this might seem more -# complex than some of the one-liner alternatives you might find on the net -# we do a lot of error checking and safety checks in the script to avoid -# problems that might be caused by complex escaping errors etc. -# -# LICENSE: -# Apache Version 2 -# -# LATEST: -# http://github.com/ripienaar/puppet-concat/ -# -# CONTACT: -# R.I.Pienaar <rip@devco.net> -# Volcane on freenode -# @ripienaar on twitter -# www.devco.net - - -# Sets up so that you can use fragments to build a final config file, -# -# OPTIONS: -# - mode The mode of the final file -# - owner Who will own the file -# - group Who will own the file -# - force Enables creating empty files if no fragments are present -# - warn Adds a normal shell style comment top of the file indicating -# that it is built by puppet -# - backup Controls the filebucketing behavior of the final file and -# see File type reference for its use. Defaults to 'puppet' -# -# ACTIONS: -# - Creates fragment directories if it didn't exist already -# - Executes the concatfragments.sh script to build the final file, this script will create -# directory/fragments.concat. Execution happens only when: -# * The directory changes -# * fragments.concat != final destination, this means rebuilds will happen whenever -# someone changes or deletes the final file. Checking is done using /usr/bin/cmp. -# * The Exec gets notified by something else - like the concat::fragment define -# - Copies the file over to the final destination using a file resource -# -# ALIASES: -# - The exec can notified using Exec["concat_/path/to/file"] or Exec["concat_/path/to/directory"] -# - The final file can be referened as File["/path/to/file"] or File["concat_/path/to/file"] -define concat($mode = 0644, $owner = "root", $group = "root", $warn = "false", $force = "false", $backup = "puppet") { - $safe_name = regsubst($name, '/', '_', 'G') - $concatdir = $concat::setup::concatdir - $version = $concat::setup::majorversion - $fragdir = "${concatdir}/${safe_name}" - $concat_name = "fragments.concat.out" - $default_warn_message = '# This file is managed by Puppet. DO NOT EDIT.' - - case $warn { - 'true',true,yes,on: { $warnmsg = "$default_warn_message" } - 'false',false,no,off: { $warnmsg = "" } - default: { $warnmsg = "$warn" } - } - - $warnmsg_escaped = regsubst($warnmsg, "'", "'\\\\''", 'G') - $warnflag = $warnmsg_escaped ? { - '' => '', - default => "-w '$warnmsg_escaped'" - } - - case $force { - 'true',true,yes,on: { $forceflag = "-f" } - 'false',false,no,off: { $forceflag = "" } - default: { fail("Improper 'force' value given to concat: $force") } - } - - File{ - owner => root, - group => root, - mode => $mode, - backup => $backup - } - - file{$fragdir: - ensure => directory; - - "${fragdir}/fragments": - ensure => directory, - recurse => true, - purge => true, - force => true, - ignore => [".svn", ".git", ".gitignore"], - source => $version ? 
{ - 24 => "puppet:///concat/null", - default => undef, - }, - notify => Exec["concat_${name}"]; - - "${fragdir}/fragments.concat": - ensure => present; - - "${fragdir}/${concat_name}": - ensure => present; - - $name: - source => "${fragdir}/${concat_name}", - owner => $owner, - group => $group, - checksum => md5, - mode => $mode, - ensure => present, - alias => "concat_${name}"; - } - - exec{"concat_${name}": - user => root, - group => root, - notify => File[$name], - subscribe => File[$fragdir], - alias => "concat_${fragdir}", - require => [ File["/usr/local/bin/concatfragments.sh"], File[$fragdir], File["${fragdir}/fragments"], File["${fragdir}/fragments.concat"] ], - unless => "/usr/local/bin/concatfragments.sh -o ${fragdir}/${concat_name} -d ${fragdir} -t ${warnflag} ${forceflag}", - command => "/usr/local/bin/concatfragments.sh -o ${fragdir}/${concat_name} -d ${fragdir} ${warnflag} ${forceflag}", - } -} diff --git a/modules/concat/manifests/setup.pp b/modules/concat/manifests/setup.pp deleted file mode 100644 index 9676fb66..00000000 --- a/modules/concat/manifests/setup.pp +++ /dev/null @@ -1,36 +0,0 @@ -# Sets up the concat system. -# -# $concatdir should point to a place where you wish the fragments to -# live. This should not be somewhere like /tmp since ideally these files -# should not be deleted ever, puppet should always manage them -# -# $puppetversion should be either 24 or 25 to enable a 24 compatible -# mode, in 24 mode you might see phantom notifies this is a side effect -# of the method we use to clear the fragments directory. -# -# The regular expression below will try to figure out your puppet version -# but this code will only work in 0.24.8 and newer. -# -# It also copies out the concatfragments.sh file to /usr/local/bin -class concat::setup { - $concatdir = "/var/lib/puppet/concat" - $majorversion = regsubst($puppetversion, '^[0-9]+[.]([0-9]+)[.][0-9]+$', '\1') - - file{"/usr/local/bin/concatfragments.sh": - owner => root, - group => root, - mode => 755, - source => $majorversion ? 
{ - 24 => "puppet:///concat/concatfragments.sh", - default => "puppet:///modules/concat/concatfragments.sh" - }; - - $concatdir: - ensure => directory, - owner => root, - group => root, - mode => 755; - } -} - -# vi:tabstop=4:expandtab:ai diff --git a/modules/cron/manifests/init.pp b/modules/cron/manifests/init.pp new file mode 100644 index 00000000..6dd0ea44 --- /dev/null +++ b/modules/cron/manifests/init.pp @@ -0,0 +1,7 @@ +class cron { + package { 'cronie': } + + service { 'crond': + subscribe => Package['cronie'], + } +} diff --git a/modules/dashboard/manifests/init.pp b/modules/dashboard/manifests/init.pp new file mode 100644 index 00000000..34ef41b3 --- /dev/null +++ b/modules/dashboard/manifests/init.pp @@ -0,0 +1,44 @@ +class dashboard { + $dashboard_login = 'dashboard' + $dashboard_home_dir = "/var/lib/${dashboard_login}" + $dashboard_dir = "${dashboard_home_dir}/dashboard" + $dashboard_bindir = "${dashboard_home_dir}/bin" + $dashboard_wwwdir = "/var/www/vhosts/dashboard.${::domain}" + + user { $dashboard_login: + comment => 'dashboard system user', + home => $dashboard_home_dir, + } + + git::snapshot { $dashboard_dir: + source => "git://git.${::domain}/web/generators/dashboard", + } + + package { 'php-cli': } + + file { $dashboard_wwwdir: + ensure => directory, + owner => $dashboard_login, + group => $dashboard_login, + } + + file { $dashboard_bindir: + ensure => directory, + } + + file { "${dashboard_bindir}/make_report": + mode => '0755', + content => template('dashboard/make_report'), + } + + apache::vhost::base { "dashboard.${::domain}": + location => $dashboard_wwwdir, + } + + cron { 'update dashboard': + command => "${dashboard_bindir}/make_report", + user => $dashboard_login, + hour => '*/2', + minute => '15', + } +} diff --git a/modules/dashboard/templates/make_report b/modules/dashboard/templates/make_report new file mode 100644 index 00000000..5da59617 --- /dev/null +++ b/modules/dashboard/templates/make_report @@ -0,0 +1,8 @@ +#!/bin/sh + +dashboard_dir='<%= @dashboard_dir %>' +dashboard_wwwdir='<%= @dashboard_wwwdir %>' + +cd "$dashboard_dir" +/usr/bin/php ./make_report.php > "$dashboard_wwwdir/index.html" + diff --git a/modules/django_application/files/custom_backend.py b/modules/django_application/files/custom_backend.py new file mode 100644 index 00000000..5ab35385 --- /dev/null +++ b/modules/django_application/files/custom_backend.py @@ -0,0 +1,7 @@ + +from django_auth_ldap.backend import LDAPBackend,_LDAPUser + +class ForceUidLDAPBackend(LDAPBackend): + def ldap_to_django_username(self, username): + # force uid if someone give a email + return _LDAPUser(self, username=username).attrs['uid'][0] diff --git a/modules/django_application/files/django_add_permission_to_group.py b/modules/django_application/files/django_add_permission_to_group.py new file mode 100644 index 00000000..69ac7be5 --- /dev/null +++ b/modules/django_application/files/django_add_permission_to_group.py @@ -0,0 +1,27 @@ +#!/usr/bin/python +import sys +group_name = sys.argv[1] +permission = sys.argv[2] + +# as codename is not unique, we need to give the application name +app = '' +if len(sys.argv) > 3: + app = sys.argv[3] + +from django.contrib.auth.models import Group, Permission +group = Group.objects.get(name=group_name) + +permissions = Permission.objects.filter(codename=permission) +if app: + permissions = permissions.filter(content_type__app_label__exact=app) + +if len(permissions) > 1: + print "Error, result not unique, please give the application among :" + print ' 
'.join([p.content_type.app_label for p in permissions]) + sys.exit(1) +elif len(permissions) < 1: + print "Error, wrong codename" + sys.exit(1) + +group.permissions.add(permissions[0]) +group.save() diff --git a/modules/django_application/files/django_create_group.py b/modules/django_application/files/django_create_group.py new file mode 100644 index 00000000..b5052217 --- /dev/null +++ b/modules/django_application/files/django_create_group.py @@ -0,0 +1,10 @@ +#!/usr/bin/python +import sys +group_name = sys.argv[1] + +from django.contrib.auth.models import Group +try: + group = Group.objects.get(name=group_name) +except Group.DoesNotExist: + group = Group.objects.create(name=group_name) + group.save() diff --git a/modules/django_application/manifests/add_permission_to_group.pp b/modules/django_application/manifests/add_permission_to_group.pp new file mode 100644 index 00000000..6e0663ed --- /dev/null +++ b/modules/django_application/manifests/add_permission_to_group.pp @@ -0,0 +1,11 @@ +define django_application::add_permission_to_group( $path, + $module, + $group, + $app='') { + exec { "/usr/local/bin/django_add_permission_to_group.py ${group} ${name} ${app}": + user => 'root', + environment => ["DJANGO_SETTINGS_MODULE=${module}.settings", + "PYTHONPATH=${path}" ], + require => Django_application::Script['django_add_permission_to_group.py'] + } +} diff --git a/modules/django_application/manifests/create_group.pp b/modules/django_application/manifests/create_group.pp new file mode 100644 index 00000000..1931205f --- /dev/null +++ b/modules/django_application/manifests/create_group.pp @@ -0,0 +1,10 @@ +define django_application::create_group($path, $module) { + exec { "/usr/local/bin/django_create_group.py ${name}": + user => 'root', + environment => ["DJANGO_SETTINGS_MODULE=${module}.settings", + "PYTHONPATH=${path}" ], + require => Django_application::Script['django_create_group.py'] + } +} + + diff --git a/modules/django_application/manifests/init.pp b/modules/django_application/manifests/init.pp new file mode 100644 index 00000000..f56f73ef --- /dev/null +++ b/modules/django_application/manifests/init.pp @@ -0,0 +1,18 @@ +# this class hold the common stuff for all django applications +# as we cannot declare the same resource twice ( ie, +# python-psycopg2 for example ) +# it is required to place this in a common class +class django_application { + package {['python-django', + 'python-psycopg2', + 'python-django-auth-ldap']: } + + file { '/usr/local/lib/custom_backend.py': + source => 'puppet:///modules/django_application/custom_backend.py', + notify => Service['apache'] + } + + django_application::script { ['django_create_group.py', + 'django_add_permission_to_group.py']: } + +} diff --git a/modules/django_application/manifests/script.pp b/modules/django_application/manifests/script.pp new file mode 100644 index 00000000..f414d864 --- /dev/null +++ b/modules/django_application/manifests/script.pp @@ -0,0 +1,9 @@ +define django_application::script() { + file { $name: + path => "/usr/local/bin/${name}", + mode => '0755', + source => "puppet:///modules/django_application/${name}", + } +} + + diff --git a/modules/draklive/files/clean-live.sh b/modules/draklive/files/clean-live.sh new file mode 100755 index 00000000..cceb6a4a --- /dev/null +++ b/modules/draklive/files/clean-live.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +# clean old draklive build sets +DRAKLIVE_ROOT=/var/lib/draklive +RM="rm -rf" + +# keep only chroot/build sets from previous day +MAX_BUILD_AGE=1 +find 
$DRAKLIVE_ROOT/{chroot/*,build/*/*} -maxdepth 0 -not -name dist -mtime +$(expr $MAX_BUILD_AGE - 1) -exec $RM {} \; + +# keep dist (iso + lists) for all sets during 20 days +MAX_DIST_AGE=20 +find $DRAKLIVE_ROOT/build/*/dist -maxdepth 0 -mtime +$(expr $MAX_DIST_AGE - 1) -exec $RM {} \; + +find $DRAKLIVE_ROOT/build -maxdepth 1 -links 2 -exec rmdir {} \; diff --git a/modules/draklive/manifests/init.pp b/modules/draklive/manifests/init.pp new file mode 100644 index 00000000..ade2527f --- /dev/null +++ b/modules/draklive/manifests/init.pp @@ -0,0 +1,58 @@ +class draklive { + $login = 'draklive' + $home = '/home/draklive' + $config = "${home}/live-config" + $var_data = "${home}/var-data" + # TODO merge with bcd + $isomakers_group = 'mga-iso_makers' + + include sudo + + group { $login: } + + user { $login: + home => $home, + comment => 'User for creating live ISOs', + } + + package { 'drakiso': } + + sudo::sudoers_config { 'draklive': + content => template('draklive/sudoers.draklive') + } + + file { $var_data: + ensure => directory, + owner => $login, + group => $login, + mode => '0755', + } + + file { '/var/lib/draklive': + ensure => symlink, + target => $var_data, + } + + git::snapshot { $config: + source => "git://git.${::domain}/software/build-system/draklive-config", + user => $login, + } + + cron { 'build live images': + command => "${config}/tools/build_live.sh", + user => $login, + hour => '4', + minute => '30', + } + + file { '/usr/local/bin/clean-live.sh': + mode => '0755', + source => 'puppet:///modules/draklive/clean-live.sh', + } + + cron { 'clean live build data': + command => '/usr/local/bin/clean-live.sh', + hour => '4', + minute => '20', + } +} diff --git a/modules/draklive/templates/sudoers.draklive b/modules/draklive/templates/sudoers.draklive new file mode 100644 index 00000000..536e4e9f --- /dev/null +++ b/modules/draklive/templates/sudoers.draklive @@ -0,0 +1,3 @@ +<%= @login %> ALL=(root) NOPASSWD: /usr/sbin/draklive +<%= @login %> ALL=(root) NOPASSWD: /usr/bin/draklive2 +%<%= isomakers_group %> ALL=(<%= @login %>) SETENV: NOPASSWD: ALL diff --git a/modules/epoll/manifests/create_db.pp b/modules/epoll/manifests/create_db.pp new file mode 100644 index 00000000..8ef9c0aa --- /dev/null +++ b/modules/epoll/manifests/create_db.pp @@ -0,0 +1,7 @@ +class epoll::create_db () { + postgresql::remote_db_and_user { $epoll::var::db_name: + description => 'Epoll database', + password => $epoll::var::db_password, + } +} +# vim: sw=2 diff --git a/modules/epoll/manifests/init.pp b/modules/epoll/manifests/init.pp index e981a952..fb86f23a 100644 --- a/modules/epoll/manifests/init.pp +++ b/modules/epoll/manifests/init.pp @@ -1,23 +1,20 @@ class epoll { + include epoll::var - $vhost = "epoll.$domain" + package { 'Epoll': } - package { 'Epoll': - ensure => installed + apache::vhost::catalyst_app { $epoll::var::vhost: + script => '/usr/bin/epoll_fastcgi.pl', + use_ssl => true, + require => Package['Epoll'] } - - apache::vhost_catalyst_app { $vhost: - script => "/usr/bin/epoll_fastcgi.pl" - } - - $password = extlookup("epoll_password") - - file { "epoll.yml": - path => "/etc/epoll.yml", - ensure => "present", - owner => root, - group => apache, - mode => 640, - content => template("epoll/epoll.yml") + + apache::vhost::redirect_ssl { $epoll::var::vhost: } + + file { 'epoll.yml': + path => '/etc/epoll.yml', + group => 'apache', + mode => '0640', + content => template('epoll/epoll.yml') } } diff --git a/modules/epoll/manifests/var.pp b/modules/epoll/manifests/var.pp new file mode 100644 index 
00000000..1ddc342a --- /dev/null +++ b/modules/epoll/manifests/var.pp @@ -0,0 +1,35 @@ +# == Class: epoll::var +# +# epoll configuration +# +# === Parameters +# +# [*vhost*] +# epoll vhost +# +# [*db_hostname*] +# hostname of the database server +# +# [*db_name*] +# name of the database +# +# [*db_user*] +# user to connect to the database +# +# [*db_password*] +# password to connect to the database +# +# [*password*] +# password to create new polls +# + +class epoll::var ( + $vhost = "epoll.${::domain}", + $db_hostname = 'localhost', + $db_name = 'epoll', + $db_user = 'epoll', + $db_password, + $password +) { +} +# vim: sw=2 diff --git a/modules/epoll/templates/epoll.yml b/modules/epoll/templates/epoll.yml index 74e44efd..d442a41e 100644 --- a/modules/epoll/templates/epoll.yml +++ b/modules/epoll/templates/epoll.yml @@ -2,10 +2,10 @@ name: Vote # db: connection, see libpq documentation # dbname=BASENAME;host=SERVER;user=USER;password=PASS -db: dbname=epoll;host=localhost;user=epoll;password=<%= password %> +db: dbname=<%= scope.lookupvar('epoll::var::db_name') %>;host=<%= scope.lookupvar('epoll::var::db_hostname') %>;user=<%= scope.lookupvar('epoll::var::db_user') %>;password=<%= scope.lookupvar('epoll::var::db_password') %> # The smtp serveur to use, default is localhost # smtp: # This change the poll creation behavior, instead ask want confirmation by # mail # it ask for this password (in clear) -# newpollpasswd: +newpollpasswd: <%= scope.lookupvar('epoll::var::password') %> diff --git a/modules/facter/lib/facter/dc_suffix.rb b/modules/facter/lib/facter/dc_suffix.rb index a8526978..c480e3ac 100644 --- a/modules/facter/lib/facter/dc_suffix.rb +++ b/modules/facter/lib/facter/dc_suffix.rb @@ -2,9 +2,9 @@ Facter.add("dc_suffix") do setcode do begin Facter.domain - rescue + rescue Facter.loadfacts() end dc_suffix = 'dc=' + Facter.value('domain').gsub('.',',dc=') end -end +end diff --git a/modules/facter/lib/facter/lib_dir.rb b/modules/facter/lib/facter/lib_dir.rb index fe7d6a31..315d7594 100644 --- a/modules/facter/lib/facter/lib_dir.rb +++ b/modules/facter/lib/facter/lib_dir.rb @@ -2,9 +2,9 @@ Facter.add("lib_dir") do setcode do begin Facter.architecture - rescue + rescue Facter.loadfacts() end - '/usr/lib' + ( Facter.value('architecture') == "x86_64" ? '64' : '') + '/' + '/usr/lib' + ( Facter.value('architecture') == "x86_64" ? '64' : '') end -end +end diff --git a/modules/facter/lib/facter/wildcard_sslcert.rb b/modules/facter/lib/facter/wildcard_sslcert.rb new file mode 100644 index 00000000..093982d9 --- /dev/null +++ b/modules/facter/lib/facter/wildcard_sslcert.rb @@ -0,0 +1,16 @@ +Facter.add("wildcard_sslcert") do + setcode do + begin + Facter.domain + rescue + Facter.loadfacts() + end + sslfiles = '/etc/ssl/wildcard.' + Facter.value('domain') + if File.exist?(sslfiles + '.crt') and File.exist?(sslfiles + '.key') \ + and File.exist?(sslfiles + '.pem') + 'true' + else + 'false' + end + end +end diff --git a/modules/facter/spec/spec_helper.rb b/modules/facter/spec/spec_helper.rb new file mode 100644 index 00000000..ec3fe615 --- /dev/null +++ b/modules/facter/spec/spec_helper.rb @@ -0,0 +1,34 @@ +# taken from facter source code +# ASL 2.0 +dir = File.expand_path(File.dirname(__FILE__)) + +SPECDIR = dir +$LOAD_PATH.unshift("#{dir}/../lib") + +require 'mocha' +require 'rspec' +require 'facter' +require 'fileutils' + +RSpec.configure do |config| + config.mock_with :mocha + + config.before :each do + # Ensure that we don't accidentally cache facts and environment + # between test cases. 
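The three custom facts above (dc_suffix, lib_dir and wildcard_sslcert) return plain strings, not booleans or arrays, which matters when other manifests branch on them. A minimal sketch of how a manifest could consume wildcard_sslcert, assuming the apache::vhost::base define with the vhost/use_ssl parameters that appears later in this diff; the vhost name is a placeholder:

    # the fact returns the string 'true' only when the wildcard .crt, .key
    # and .pem files all exist on the node
    if $::wildcard_sslcert == 'true' {
      apache::vhost::base { "ssl_git.${::domain}":
        vhost   => "git.${::domain}",
        use_ssl => true,
      }
    }
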
+ Facter::Util::Loader.any_instance.stubs(:load_all) + Facter.clear + Facter.clear_messages + + # Store any environment variables away to be restored later + @old_env = {} + ENV.each_key {|k| @old_env[k] = ENV[k]} + end + + config.after :each do + # Restore environment variables after execution of each test + @old_env.each_pair {|k, v| ENV[k] = v} + to_remove = ENV.keys.reject {|key| @old_env.include? key } + to_remove.each {|key| ENV.delete key } + end +end diff --git a/modules/facter/spec/unit/dc_suffix.rb b/modules/facter/spec/unit/dc_suffix.rb new file mode 100644 index 00000000..4b7a4648 --- /dev/null +++ b/modules/facter/spec/unit/dc_suffix.rb @@ -0,0 +1,15 @@ +#!/usr/bin/env rspec + +require 'spec_helper' + +describe "Dc_suffix fact" do + it "should be based on tld domain" do + Facter.fact(:domain).stubs(:value).returns("test") + Facter.fact(:dc_suffix).value.should == "dc=test" + end + + it "should be based on domain" do + Facter.fact(:domain).stubs(:value).returns("test.example.org") + Facter.fact(:dc_suffix).value.should == "dc=test,dc=example,dc=org" + end +end diff --git a/modules/facter/spec/unit/lib_dir.rb b/modules/facter/spec/unit/lib_dir.rb new file mode 100644 index 00000000..50049f19 --- /dev/null +++ b/modules/facter/spec/unit/lib_dir.rb @@ -0,0 +1,23 @@ +#!/usr/bin/env rspec + +require 'spec_helper' + +describe "Lib_dir fact" do + it "should default to /usr/lib" do + Facter.fact(:architecture).stubs(:value).returns("bogus") + Facter.fact(:lib_dir).value.should == "/usr/lib" + end + + archs = Hash.new + # TODO add arm 64 and others + archs = { + "i586" => "/usr/lib", + "x86_64" => "/usr/lib64", + } + archs.each do |arch, dir| + it "should be #{dir} on #{arch}" do + Facter.fact(:architecture).stubs(:value).returns(arch) + Facter.fact(:lib_dir).value.should == dir + end + end +end diff --git a/modules/git/files/apply_git_puppet_config.sh b/modules/git/files/apply_git_puppet_config.sh new file mode 100644 index 00000000..1ed6fbf1 --- /dev/null +++ b/modules/git/files/apply_git_puppet_config.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +while read line +do + # --local is a option for the newer git + git config --add $line +done < config.puppet diff --git a/modules/git/files/create_git_repo.sh b/modules/git/files/create_git_repo.sh new file mode 100644 index 00000000..144d063b --- /dev/null +++ b/modules/git/files/create_git_repo.sh @@ -0,0 +1,10 @@ +#!/bin/bash +umask 0002 +# https://eagleas.livejournal.com/18907.html +name="$1" +mkdir -p $name +cd $name +git --bare init --shared=group +chmod g+ws branches info objects refs +( cd objects; chmod g+ws * ) +git config receive.denyNonFastForwards true diff --git a/modules/git/files/update_git_svn.sh b/modules/git/files/update_git_svn.sh new file mode 100644 index 00000000..b3802f81 --- /dev/null +++ b/modules/git/files/update_git_svn.sh @@ -0,0 +1,13 @@ +#!/bin/bash +GIT_REP="$1" +LOCKFILE="$GIT_REP/.git/update.cron.lock" + +cd "$GIT_REP" +[ -f $LOCKFILE ] && exit 0 +trap "rm -f '$LOCKFILE'" EXIT + +touch "$LOCKFILE" + +/usr/bin/git svn fetch +/usr/bin/git svn rebase +exit 0 diff --git a/modules/git/manifests/client.pp b/modules/git/manifests/client.pp new file mode 100644 index 00000000..2ba50721 --- /dev/null +++ b/modules/git/manifests/client.pp @@ -0,0 +1,3 @@ +class git::client { + include git::common +} diff --git a/modules/git/manifests/common.pp b/modules/git/manifests/common.pp new file mode 100644 index 00000000..ed8ebbdf --- /dev/null +++ b/modules/git/manifests/common.pp @@ -0,0 +1,3 @@ +class git::common { + package { 
'git-core': } +} diff --git a/modules/git/manifests/init.pp b/modules/git/manifests/init.pp new file mode 100644 index 00000000..dece14f0 --- /dev/null +++ b/modules/git/manifests/init.pp @@ -0,0 +1 @@ +class git { } diff --git a/modules/git/manifests/mirror.pp b/modules/git/manifests/mirror.pp new file mode 100644 index 00000000..f7364846 --- /dev/null +++ b/modules/git/manifests/mirror.pp @@ -0,0 +1,20 @@ +define git::mirror( $source, + $description, + $refresh = '*/5') { + + include git::common + exec { "/usr/bin/git clone --mirror ${source} ${name}": + alias => "git mirror ${name}", + creates => $name, + before => File["${name}/description"], + } + + file { "${name}/description": + content => $description, + } + + cron { "update ${name}": + command => "cd ${name} ; /usr/bin/git fetch -q", + minute => $refresh + } +} diff --git a/modules/git/manifests/server.pp b/modules/git/manifests/server.pp new file mode 100644 index 00000000..3f07ed9c --- /dev/null +++ b/modules/git/manifests/server.pp @@ -0,0 +1,37 @@ +class git::server { + include git::common + + $git_base_path = '/git/' + + xinetd::service { 'git': + content => template('git/xinetd') + } + + file { '/usr/local/bin/create_git_repo.sh': + mode => '0755', + source => 'puppet:///modules/git/create_git_repo.sh', + } + + file { '/usr/local/bin/apply_git_puppet_config.sh': + mode => '0755', + source => 'puppet:///modules/git/apply_git_puppet_config.sh', + } + + + # TODO + # define common syntax check, see svn + # https://stackoverflow.com/questions/3719883/git-hook-syntax-check + # proper policy : fast-forward-only + # ( https://progit.org/book/ch7-4.html ) + # no branch ? + # no binary + # no big file + # no empty commit message + # no commit from root + # see https://www.itk.org/Wiki/Git/Hooks + # automated push to another git repo ( see https://noone.org/blog/English/Computer/VCS/Thoughts%20on%20Gitorious%20and%20GitHub%20plus%20a%20useful%20git%20hook.futile + # + # how do we handle commit permission ? 
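The git::mirror define above pairs a one-shot 'git clone --mirror' exec with a cron job that fetches at the requested interval and a managed description file for gitweb. A typical declaration would look like the sketch below; the destination path and source URL are placeholders, not values from this diff:

    git::mirror { '/git/infrastructure/puppet.git':
      source      => "git://git.${::domain}/infrastructure/puppet",
      description => 'Read-only mirror of the puppet repository',
      refresh     => '*/10',  # fetch every 10 minutes instead of the default */5
    }

The define already includes git::common, so the git package does not need to be declared separately on the mirror host.
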
+ # mail sending + # +} diff --git a/modules/git/manifests/snapshot.pp b/modules/git/manifests/snapshot.pp new file mode 100644 index 00000000..06473efe --- /dev/null +++ b/modules/git/manifests/snapshot.pp @@ -0,0 +1,24 @@ +define git::snapshot( $source, + $refresh = '*/5', + $user = 'root', + $branch = 'master') { + include git::client + #TODO + # should handle branch -> clone -n + branch + checkout + # create a script + # Ideally, should be handled by vcsrepo + # https://github.com/bruce/puppet-vcsrepo + # once it is merged in puppet + exec { "/usr/bin/git clone -b ${branch} ${source} ${name}": + creates => $name, + user => $user + } + + if ($refresh != '0') { + cron { "update ${name}": + command => "cd ${name} && /usr/bin/git pull -q && /usr/bin/git submodule --quiet update --init --recursive", + user => $user, + minute => $refresh + } + } +} diff --git a/modules/git/manifests/svn.pp b/modules/git/manifests/svn.pp new file mode 100644 index 00000000..43df012b --- /dev/null +++ b/modules/git/manifests/svn.pp @@ -0,0 +1,4 @@ +class git::svn { + include git::client + package { 'git-svn': } +} diff --git a/modules/git/manifests/svn_repository.pp b/modules/git/manifests/svn_repository.pp new file mode 100644 index 00000000..ea215ce6 --- /dev/null +++ b/modules/git/manifests/svn_repository.pp @@ -0,0 +1,35 @@ +define git::svn_repository( $source, + $std_layout = true, + $refresh = '*/5') { + include git::svn + include git::server + # a cron job + # a exec + if $std_layout { + $options = '-s' + } else { + $options = '' + } + + exec { "/usr/bin/git svn init ${options} ${source} ${name}": + alias => "git svn ${name}", + creates => $name, + } + + file { '/usr/local/bin/update_git_svn.sh': + mode => '0755', + source => 'puppet:///modules/git/update_git_svn.sh', + } + + cron { "update ${name}": + # done in 2 times, so fetch can fill the repo after init + command => "/usr/local/bin/update_git_svn.sh ${name}" , + minute => $refresh + } + + file { "${name}/.git/hooks/pre-receive": + mode => '0755', + content => template('git/pre-receive'), + require => Exec["git svn ${name}"] + } +} diff --git a/modules/concat/files/null/.gitignore b/modules/git/templates/config.puppet index e69de29b..e69de29b 100644 --- a/modules/concat/files/null/.gitignore +++ b/modules/git/templates/config.puppet diff --git a/modules/git/templates/post-receive b/modules/git/templates/post-receive new file mode 100644 index 00000000..b4330e13 --- /dev/null +++ b/modules/git/templates/post-receive @@ -0,0 +1,6 @@ +#!/bin/sh + +# FIXME the contrib/hooks should be in /usr/share/git-core +# but this may cause issue with automated requirement +. 
/usr/share/doc/git-core/contrib/hooks/post-receive-email + diff --git a/modules/git/templates/pre-receive b/modules/git/templates/pre-receive new file mode 100644 index 00000000..7eec7505 --- /dev/null +++ b/modules/git/templates/pre-receive @@ -0,0 +1,5 @@ +#!/bin/bash +echo +echo "This repository is readonly" +echo +false diff --git a/modules/git/templates/xinetd b/modules/git/templates/xinetd new file mode 100644 index 00000000..654ae2be --- /dev/null +++ b/modules/git/templates/xinetd @@ -0,0 +1,14 @@ +service git +{ + disable = no + type = UNLISTED + port = 9418 + socket_type = stream + server = <%= @lib_dir %>/git-core/git-daemon + wait = no + user = nobody + server_args = --inetd --verbose --export-all --base-path=<%= @git_base_path %> + log_on_failure += HOST + flags = IPv6 +} + diff --git a/modules/gitmirror/files/on-the-pull b/modules/gitmirror/files/on-the-pull new file mode 100755 index 00000000..416b75a4 --- /dev/null +++ b/modules/gitmirror/files/on-the-pull @@ -0,0 +1,365 @@ +#!/usr/bin/python3 + +import cgi +import http.server +import os +import pwd +import re +import subprocess +import sys +from optparse import OptionParser +from queue import Queue +from threading import Thread + + +GitUpdaterQueue = Queue(0) + + +# NB The following class and bits for running git commands were "liberated" +# from git_multimail.py + +class CommandError(Exception): + def __init__(self, cmd, retcode): + self.cmd = cmd + self.retcode = retcode + Exception.__init__( + self, + 'Command "%s" failed with retcode %s' % (' '.join(cmd), retcode,) + ) + + +# It is assumed in many places that the encoding is uniformly UTF-8, +# so changing these constants is unsupported. But define them here +# anyway, to make it easier to find (at least most of) the places +# where the encoding is important. +ENCODING = 'UTF-8' + + +# The "git" program (this could be changed to include a full path): +GIT_EXECUTABLE = 'git' + + +# How "git" should be invoked (including global arguments), as a list +# of words. This variable is usually initialized automatically by +# read_git_output() via choose_git_command(), but if a value is set +# here then it will be used unconditionally. +GIT_CMD = None + + +def choose_git_command(): + """Decide how to invoke git, and record the choice in GIT_CMD.""" + + global GIT_CMD + + if GIT_CMD is None: + try: + # Check to see whether the "-c" option is accepted (it was + # only added in Git 1.7.2). We don't actually use the + # output of "git --version", though if we needed more + # specific version information this would be the place to + # do it. + cmd = [GIT_EXECUTABLE, '-c', 'foo.bar=baz', '--version'] + read_output(cmd) + GIT_CMD = [GIT_EXECUTABLE, '-c', f'i18n.logoutputencoding={ENCODING}'] + except CommandError: + GIT_CMD = [GIT_EXECUTABLE] + + +def read_git_output(args, inp=None, keepends=False, **kw): + """Read the output of a Git command.""" + + if GIT_CMD is None: + choose_git_command() + + return read_output(GIT_CMD + args, inp=inp, keepends=keepends, **kw) + + +# NOTE: output is in bytes, not a string +def read_output(cmd, inp=None, keepends=False, **kw): + if inp: + stdin = subprocess.PIPE + else: + stdin = None + p = subprocess.Popen( + cmd, stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kw + ) + (out, err) = p.communicate(inp) + retcode = p.wait() + if retcode: + raise CommandError(cmd, retcode) + if not keepends: + out = out.rstrip(b'\n\r') + return out + + +def run_git_command(args, **kw): + """Runs a git command, ignoring the output. 
+ """ + + read_git_output(args, **kw) + + +def run_command(args, **kw): + """Runs a git command, ignoring the output. + """ + + read_output(args, **kw) + + +class GitUpdater(Thread): + def __init__(self, server, basedir, repoprefix, branch='master', cmd=''): + Thread.__init__(self) + self.server = server + self.basedir = basedir + self.repoprefix = repoprefix + self.branch = branch + self.cmd = cmd + + def run(self): + while 42: + repo = GitUpdaterQueue.get() + if repo is None: + break + try: + print(f"Got update request for '{repo}'", file=sys.stderr) + clonefolder = os.path.join(self.basedir, repo) + if self.repoprefix: + if not repo.startswith(self.repoprefix): + print(f"Ignoring repo '{repo}' due to invalid prefix", file=sys.stderr) + GitUpdaterQueue.task_done() + continue + clonefolder = os.path.join(self.basedir, repo[len(self.repoprefix):]) + command = [] + treeish = '' + changed = True + if not os.path.exists(clonefolder): + cloneparent = os.path.dirname(clonefolder) + if not os.path.exists(cloneparent): + os.makedirs(cloneparent) + cloneurl = self.server + '/' + repo + command = ['clone'] + if '--mirror' == self.branch: + command.append('--mirror') + command.append(cloneurl) + command.append(clonefolder) + print(f"Cloning repo '{repo}' ('{cloneurl}' -> '{clonefolder}')", file=sys.stderr) + + run_git_command(command) + if not os.path.isdir(clonefolder): + raise Exception(f"Clone folder '{clonefolder}' is not a directory. Cloning failed or file in it's place?") + os.chdir(clonefolder) + if '--mirror' != self.branch and 'master' != self.branch: + command = ['checkout', '-t', 'origin/' + self.branch] + run_git_command(command) + elif os.path.isdir(clonefolder): + os.chdir(clonefolder) + print(f"Updating existing repo '{repo}' ({clonefolder})", file=sys.stderr) + command = ['remote', 'update'] + run_git_command(command) + if '--mirror' != self.branch: + sha1before = read_git_output(['rev-parse', 'refs/heads/' + self.branch]) + sha1after = read_git_output(['rev-parse', 'refs/remotes/origin/' + self.branch]) + if sha1before and sha1after: + if sha1before == sha1after: + changed = False + print(f"Repo '{repo}' update on branch '{self.branch}': No changed detected", file=sys.stderr) + else: + treeish = sha1before.decode(ENCODING) + '..' 
+ sha1after.decode(ENCODING) + print(f"Repo '{repo}' update on branch '{self.branch}': Treeish '{treeish}'", file=sys.stderr) + else: + print(f"Repo '{repo}' update on branch '{self.branch}': Before or after sha1 could not be extracted.", file=sys.stderr) + command = ['update-ref', 'refs/heads/' + self.branch, 'refs/remotes/origin/' + self.branch] + run_git_command(command) + command = ['checkout', '-f', self.branch] + run_git_command(command) + else: + raise Exception(f"Clone folder '{clonefolder}' appears to be a file :s") + + if changed and self.cmd: + # Update the info/web/last-modified file as used by cgit + os.chdir(clonefolder) + command = [self.cmd, repo] + if treeish: + command += [treeish] + run_command(command) + + print(f"Update for '{repo}' complete.", file=sys.stderr) + except Exception as e: + print(f"Error processing repo '{repo}'", file=sys.stderr) + print(str(e), file=sys.stderr) + + GitUpdaterQueue.task_done() + sys.stderr.flush() + + +class TimeoutServer(http.server.HTTPServer): + def get_request(self): + result = self.socket.accept() + result[0].settimeout(10) + return result + + +class PostHandler(http.server.BaseHTTPRequestHandler): + def do_POST(self): + ctype, pdict = cgi.parse_header(self.headers['content-type']) + repo = "" + try: + if ctype != 'x-git/repo': + self.send_response(415) + self.end_headers() + return + + # chunked mode is a legitimate reason there would be no content-length, + # but it's easier to just insist on it + length = int(self.headers['content-length']) if self.headers['content-length'] else 0 + if length < 1: + self.send_response(411) + self.end_headers() + return + if length > 1024: + self.send_response(413) + self.end_headers() + return + repo = self.rfile.read(length).decode(ENCODING) + + if re.match(r"^[-_/a-zA-Z0-9\+\.]+$", repo) is None: + self.send_response(400) + self.end_headers() + return + + GitUpdaterQueue.put(repo) + self.send_response(202) + self.end_headers() + + except Exception as e: + print("Error processing request", file=sys.stderr) + print(str(e), file=sys.stderr) + self.send_response(500) + self.end_headers() + + sys.stderr.flush() + + +def Demote(pidfile, uid, gid): + def result(): + piddir = os.path.dirname(pidfile) + if not os.path.exists(piddir): + os.makedirs(piddir) + fd = open(pidfile, 'w') + fd.write(str(os.getpid())) + fd.close() + + if uid and gid: + os.setgid(gid) + os.setuid(uid) + return result + + +def daemonise(options, serverprefix, basefolder): + pw = None + uid = False + gid = False + if options.user: + pw = pwd.getpwnam(options.user) + uid = pw.pw_uid + gid = pw.pw_gid + else: + pw = pwd.getpwnam(os.getlogin()) + + user = pw.pw_name + dirname = pw.pw_dir + env = { + 'HOME': dirname, + 'LOGNAME': user, + 'PWD': dirname, + 'USER': user, + } + if os.getenv('PATH') is not None: + env['PATH'] = os.getenv('PATH') + if os.getenv('PYTHONPATH') is not None: + env['PYTHONPATH'] = os.getenv('PYTHONPATH') + + args = [os.path.abspath(sys.argv[0])] + args.append('-a') + args.append(options.addr) + args.append('-p') + args.append(str(options.port)) + args.append('-r') + args.append(options.repoprefix) + args.append('-b') + args.append(options.branch) + args.append('-c') + args.append(options.cmd) + args.append(serverprefix) + args.append(basefolder) + + subprocess.Popen( + args, preexec_fn=Demote(options.pidfile, uid, gid), cwd=dirname, env=env + ) + exit(0) + + +def main(): + usage = "usage: %prog [options] <serverprefix> <basefolder>" + description = """Listen for repository names being posted via a simple HTTP 
interface and clone/update them. +POST data simply via curl: +e.g. curl --header 'Content-Type: x-git/repo' --data 'my/repo/name' http://localhost:8000 +""" + parser = OptionParser(usage=usage, description=description) + parser.add_option("-a", "--addr", + type="string", dest="addr", default="0.0.0.0", + help="The interface address to bind to") + parser.add_option("-p", "--port", + type="int", dest="port", default=8000, + help="The port to bind to") + parser.add_option("-r", "--repo-prefix", + type="string", dest="repoprefix", default="", + help="Only handle repositories with the following prefix. This SHOULD contain a trailing slash if it's a folder but SHOULD NOT include a leading slash") + parser.add_option("-b", "--branch", + type="string", dest="branch", default="--mirror", + help="The branch to track on clone. If you pass '--mirror' (the default) as the branch name we will clone as a bare mirror") + parser.add_option("-c", "--cmd", + type="string", dest="cmd", default="", + help="Third party command to execute after updates. It will execute in the " + "folder of the repo and if we're not in mirror mode, a treeish will be " + "passed as the only argument containing the refs that changed otherwise " + "the command will be run without any arguments") + parser.add_option("-d", "--pid-file", + type="string", dest="pidfile", default="", + help="Daemonise and write pidfile") + parser.add_option("-u", "--user", + type="string", dest="user", default="", + help="Drop privileges to the given user (must be run as root)") + + (options, args) = parser.parse_args() + if len(args) < 2: + parser.error("Both the <serverprefix> and <basefolder> arguments must be supplied.") + if len(args) > 2: + parser.print_usage() + exit(1) + + serverprefix = args[0] + basefolder = args[1] + + if options.pidfile: + daemonise(options, serverprefix, basefolder) + + if options.user: + parser.error("You can only specify a user if you're also deamonising (with a pid file).") + + print("Server started", file=sys.stderr) + sys.stderr.flush() + srvr = TimeoutServer((options.addr, options.port), PostHandler) + updater = GitUpdater(serverprefix, basefolder, options.repoprefix, options.branch, options.cmd) + updater.start() + + try: + srvr.serve_forever() + except KeyboardInterrupt: + srvr.socket.close() + GitUpdaterQueue.put(None) + updater.join() + + +if __name__ == "__main__": + main() diff --git a/modules/gitmirror/files/on-the-pull.init b/modules/gitmirror/files/on-the-pull.init new file mode 100755 index 00000000..cc256a06 --- /dev/null +++ b/modules/gitmirror/files/on-the-pull.init @@ -0,0 +1,67 @@ +#! /bin/bash +# +# on-the-pull Keep git mirrors up-to-date via external triggers +# +# chkconfig: 2345 80 30 +# description: Keep git mirrors up-to-date via external triggers +# +### BEGIN INIT INFO +# Provides: on-the-pull +# Required-Start: $network +# Required-Stop: $network +# Default-Start: 2 3 4 5 +# Short-Description: Keep git mirrors up-to-date via external triggers +# Description: Keep git mirrors up-to-date via external triggers +### END INIT INFO + +# Source function library. +. /etc/init.d/functions + +pidfile=/var/run/on-the-pull/on-the-pull.pid +prog=/usr/local/bin/on-the-pull +args="--pid-file=$pidfile --user=git --cmd=/usr/local/bin/gitmirror-sync-metadata git://git.mageia.org /git" + + +start() { + gprintf "Starting On-The-Pull Git Mirror Daemon: " + daemon --check on-the-pull --pidfile $pidfile "$prog $args >>/var/log/on-the-pull.log 2>&1" + RETVAL=$? 
+ echo + [ $RETVAL -eq 0 ] && touch /var/lock/subsys/on-the-pull + return $RETVAL +} + +stop() { + gprintf "Stopping On-The-Pull Git Mirror Daemon: " + killproc -p $pidfile on-the-pull + echo + rm -f /var/lock/subsys/on-the-pull +} + +restart() { + stop + start +} + +case "$1" in + start) + start + ;; + stop) + stop + ;; + status) + status on-the-pull $pidfile + ;; + restart|reload) + restart + ;; + condrestart) + [ -f /var/lock/subsys/on-the-pull ] && restart || : + ;; + *) + gprintf "Usage: %s {start|stop|status|restart|condrestart}\n" "$(basename $0)" + exit 1 +esac + +exit 0 diff --git a/modules/gitmirror/files/rsync-metadata.sh b/modules/gitmirror/files/rsync-metadata.sh new file mode 100755 index 00000000..03a0fe41 --- /dev/null +++ b/modules/gitmirror/files/rsync-metadata.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +REPO="$1" +GITROOT="/git" +RSYNCROOT="rsync://duvel.mageia.org/git" + +if [ ! -d "$GITROOT/$REPO" ]; then + echo "No repository found $REPO" >&2 + exit 1 +fi + +/usr/bin/rsync -a --include="description" --include="info" --include="info/web" --include="info/web/last-modified" --exclude="*" "$RSYNCROOT/$REPO/" "$GITROOT/$REPO/" +/usr/bin/rsync -a "$RSYNCROOT/$REPO/config" "$GITROOT/$REPO/config.upstream" + +OWNER=$(git config --file "$GITROOT/$REPO/config.upstream" gitweb.owner) +DESC=$(git config --file "$GITROOT/$REPO/config.upstream" gitweb.description) +rm -f "$GITROOT/$REPO/config.upstream" + +CUROWNER=$(git config --file "$GITROOT/$REPO/config" gitweb.owner) +if [ "$CUROWNER" != "$OWNER" ]; then + git config --file "$GITROOT/$REPO/config" gitweb.owner "$OWNER" +fi + +CURDESC=$(git config --file "$GITROOT/$REPO/config" gitweb.description) +if [ "$CURDESC" != "$DESC" ]; then + git config --file "$GITROOT/$REPO/config" gitweb.description "$DESC" +fi diff --git a/modules/gitmirror/manifests/init.pp b/modules/gitmirror/manifests/init.pp new file mode 100644 index 00000000..c1dcd894 --- /dev/null +++ b/modules/gitmirror/manifests/init.pp @@ -0,0 +1,48 @@ +class gitmirror { + + $git_dir = '/git' + $git_login = 'git' + $git_homedir = "/var/lib/${git_login}" + $git_rundir = '/var/run/on-the-pull' + + group { $git_login: + ensure => present, + } + + user { $git_login: + ensure => present, + home => $git_homedir, + } + + file { $git_dir: + ensure => directory, + owner => $git_login, + group => $git_login, + mode => '0755', + } + + file { $git_rundir: + ensure => directory, + mode => '0755', + } + + mga_common::local_script { 'on-the-pull': + source => 'puppet:///modules/gitmirror/on-the-pull', + } + + file { '/etc/init.d/on-the-pull': + source => 'puppet:///modules/gitmirror/on-the-pull.init', + mode => '0755', + } + + service { 'on-the-pull': + require => [ + Mga_common::Local_script["on-the-pull"], + File['/etc/init.d/on-the-pull'], + ], + } + + mga_common::local_script { 'gitmirror-sync-metadata': + source => 'puppet:///modules/gitmirror/rsync-metadata.sh', + } +} diff --git a/modules/gitweb/manifests/init.pp b/modules/gitweb/manifests/init.pp new file mode 100644 index 00000000..d7c07b22 --- /dev/null +++ b/modules/gitweb/manifests/init.pp @@ -0,0 +1,32 @@ +class gitweb { + package { 'gitweb': } + # TODO some rpm may be needed ( like perl-FCGI ) + # git >= 17.2 is needed for fastcgi support + + # TODO fix git rpm to show the css, the js, and others missing file + + file { '/etc/gitweb.conf': + content => template('gitweb/gitweb.conf'), + notify => Service['apache'], + require => Package['gitweb'], + } + + apache::webapp_other { 'gitweb': + webapp_file => 'gitweb/webapp.conf', + } + 
+ mga_common::local_script { 'gitweb.wrapper.sh': + content => template('gitweb/wrapper.sh'), + notify => Service['apache'], + } + + $vhost = "gitweb.${::domain}" + apache::vhost::base { $vhost: + content => template('gitweb/vhost.conf') + } + apache::vhost::base { "ssl_${vhost}": + vhost => $vhost, + use_ssl => true, + content => template('gitweb/vhost.conf'), + } +} diff --git a/modules/gitweb/templates/gitweb.conf b/modules/gitweb/templates/gitweb.conf new file mode 100644 index 00000000..688844a8 --- /dev/null +++ b/modules/gitweb/templates/gitweb.conf @@ -0,0 +1,123 @@ +# default config file (in perl syntax) + +# absolute fs-path which will be prepended to the project path +our $projectroot = "/git"; + +# target of the home link on top of all pages +our $home_link = "/"; + +# string of the home link on top of all pages +#our $home_link_str = "projects"; + +# name of your site or organization to appear in page titles +# replace this with something more descriptive for clearer bookmarks +our $site_name = "Mageia Git"; + +# filename of html text to include at top of each page +#our $site_header = ""; +# html text to include at home page +#our $home_text = "indextext.html"; +# filename of html text to include at bottom of each page +#our $site_footer = ""; + +# URI of stylesheets +#our @stylesheets = ("gitweb.css"); +# URI of a single stylesheet +#our $stylesheet = undef; +# URI of GIT logo (72x27 size) +#our $logo = "git-logo.png"; +# URI of GIT favicon, assumed to be image/png type +#our $favicon = "git-favicon.png"; + +# URI and label (title) of GIT logo link +#our $logo_url = "http://git.or.cz/"; +#our $logo_label = "git homepage"; + +# source of projects list +#our $projects_list = ""; + +# default order of projects list +# valid values are none, project, descr, owner, and age +#our $default_projects_order = "project"; + +# show repository only if this file exists +# (only effective if this variable evaluates to true) +#our $export_ok = ""; + +# only allow viewing of repositories also shown on the overview page +#our $strict_export = ""; + +# list of git base URLs used for URL to where fetch project from, +# i.e. full URL is "$git_base_url/$project" +#our @git_base_url_list = grep { $_ ne '' } (""); + +# Enable the 'blame' blob view, showing the last commit that modified +# each line in the file. This can be very CPU-intensive. + +# To enable system wide have in /etc/gitweb.conf +# $feature{'blame'}{'default'} = [1]; +# To have project specific config enable override in /etc/gitweb.conf +# $feature{'blame'}{'override'} = 1; +# and in project config gitweb.blame = 0|1; + +# Enable the 'snapshot' link, providing a compressed tarball of any +# tree. This can potentially generate high traffic if you have large +# project. + +# To disable system wide have in /etc/gitweb.conf +# $feature{'snapshot'}{'default'} = [undef]; +# To have project specific config enable override in /etc/gitweb.conf +# $feature{'snapshot'}{'override'} = 1; +# and in project config gitweb.snapshot = none|gzip|bzip2; + +# Enable text search, which will list the commits which match author, +# committer or commit text to a given string. Enabled by default. +# Project specific override is not supported. + +# Enable grep search, which will list the files in currently selected +# tree containing the given string. Enabled by default. This can be +# potentially CPU-intensive, of course. 
+ +# To enable system wide have in /etc/gitweb.conf +# $feature{'grep'}{'default'} = [1]; +# To have project specific config enable override in /etc/gitweb.conf +# $feature{'grep'}{'override'} = 1; +# and in project config gitweb.grep = 0|1; + +# Enable the pickaxe search, which will list the commits that modified +# a given string in a file. This can be practical and quite faster +# alternative to 'blame', but still potentially CPU-intensive. + +# To enable system wide have in /etc/gitweb.conf +# $feature{'pickaxe'}{'default'} = [1]; +# To have project specific config enable override in /etc/gitweb.conf +# $feature{'pickaxe'}{'override'} = 1; +# and in project config gitweb.pickaxe = 0|1; + +# Make gitweb use an alternative format of the URLs which can be +# more readable and natural-looking: project name is embedded +# directly in the path and the query string contains other +# auxiliary information. All gitweb installations recognize +# URL in either format; this configures in which formats gitweb +# generates links. + +# To enable system wide have in /etc/gitweb.conf +# $feature{'pathinfo'}{'default'} = [1]; +# Project specific override is not supported. + +# Note that you will need to change the default location of CSS, +# favicon, logo and possibly other files to an absolute URL. Also, +# if gitweb.cgi serves as your indexfile, you will need to force +# $my_uri to contain the script name in your /etc/gitweb.conf. + +# Make gitweb consider projects in project root subdirectories +# to be forks of existing projects. Given project $projname.git, +# projects matching $projname/*.git will not be shown in the main +# projects list, instead a '+' mark will be added to $projname +# there and a 'forks' view will be enabled for the project, listing +# all the forks. If project list is taken from a file, forks have +# to be listed after the main project. + +# To enable system wide have in /etc/gitweb.conf +# $feature{'forks'}{'default'} = [1]; +# Project specific override is not supported. 
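Taken together with the git module, a repository host only needs the two classes; the node name below is an assumption, and the apache, xinetd and mga_common modules these classes reference are expected to be on the modulepath:

    node 'forge' {
      include git::server  # exports /git via git-daemon under xinetd, installs the repo helper scripts
      include gitweb       # serves the same /git tree ($projectroot above) over HTTP and HTTPS via FastCGI
    }
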
diff --git a/modules/gitweb/templates/vhost.conf b/modules/gitweb/templates/vhost.conf new file mode 100644 index 00000000..d558d591 --- /dev/null +++ b/modules/gitweb/templates/vhost.conf @@ -0,0 +1,3 @@ +Alias /static/ /usr/share/gitweb/static/ +Alias / /usr/local/bin/gitweb.wrapper.sh +FastCgiServer /usr/local/bin/gitweb.wrapper.sh -processes 1 -idle-timeout 30 -socket /tmp/gitweb.socket diff --git a/modules/gitweb/templates/webapp.conf b/modules/gitweb/templates/webapp.conf new file mode 100644 index 00000000..a4d13624 --- /dev/null +++ b/modules/gitweb/templates/webapp.conf @@ -0,0 +1,8 @@ +# gitweb configuration +# disabled +#Alias /gitweb /usr/share/gitweb + +<Directory /usr/share/gitweb> + Order allow,deny + Allow from all +</Directory> diff --git a/modules/gitweb/templates/wrapper.sh b/modules/gitweb/templates/wrapper.sh new file mode 100644 index 00000000..4303007b --- /dev/null +++ b/modules/gitweb/templates/wrapper.sh @@ -0,0 +1,4 @@ +#!/bin/bash +export FCGI_SOCKET_PATH=/tmp/gitweb.socket + +/usr/share/gitweb/gitweb.cgi --fastcgi diff --git a/modules/gnupg/manifests/client.pp b/modules/gnupg/manifests/client.pp new file mode 100644 index 00000000..301e569a --- /dev/null +++ b/modules/gnupg/manifests/client.pp @@ -0,0 +1,17 @@ +class gnupg::client { +if versioncmp($::lsbdistrelease, '7') < 0 { + package {['gnupg', + 'rng-utils']: + } +} else { + package {['gnupg2', + 'rng-utils']: + } +} + + mga_common::local_script { 'create_gnupg_keys.sh': + content => template('gnupg/create_gnupg_keys.sh') + } +} + + diff --git a/modules/gnupg/manifests/init.pp b/modules/gnupg/manifests/init.pp new file mode 100644 index 00000000..d6ae319d --- /dev/null +++ b/modules/gnupg/manifests/init.pp @@ -0,0 +1 @@ +class gnupg { } diff --git a/modules/gnupg/manifests/keys.pp b/modules/gnupg/manifests/keys.pp new file mode 100644 index 00000000..b99ed393 --- /dev/null +++ b/modules/gnupg/manifests/keys.pp @@ -0,0 +1,38 @@ + # debian recommend SHA2, with 4096 + # https://wiki.debian.org/Keysigning + # as they are heavy users of gpg, I will tend + # to follow them + # however, for testing purpose, 4096 is too strong, + # this empty the entropy of my vm +define gnupg::keys($email, + $key_name, + $key_type = 'RSA', + $key_length = '4096', + $expire_date = '400d', + $login = 'signbot', + $batchdir = '/var/lib/signbot/batches', + $keydir = '/var/lib/signbot/keys') { + + include gnupg::client + file { "${name}.batch": + path => "${batchdir}/${name}.batch", + content => template('gnupg/batch') + } + + file { $keydir: + ensure => directory, + owner => $login, + mode => '0700', + } + + file { $batchdir: + ensure => directory, + owner => $login, + } + + exec { "/usr/local/bin/create_gnupg_keys.sh ${batchdir}/${name}.batch ${keydir} ${batchdir}/${name}.done": + user => $login, + creates => "${batchdir}/${name}.done", + require => [File[$keydir], File["${batchdir}/${name}.batch"], Package['rng-utils']], + } +} diff --git a/modules/gnupg/templates/batch b/modules/gnupg/templates/batch new file mode 100644 index 00000000..d55bdd52 --- /dev/null +++ b/modules/gnupg/templates/batch @@ -0,0 +1,8 @@ +%echo Generating a standard key +Key-Type: <%= @key_type %> +Key-Length: <%= @key_length %> +Name-Real: <%= @key_name %> +Name-Email: <%= @email %> +Expire-Date: <%= @expire_date %> +%commit +%echo done diff --git a/modules/gnupg/templates/create_gnupg_keys.sh b/modules/gnupg/templates/create_gnupg_keys.sh new file mode 100644 index 00000000..a2caba2d --- /dev/null +++ b/modules/gnupg/templates/create_gnupg_keys.sh @@ -0,0 
+1,26 @@ +#!/bin/bash + +BATCHFILE="$1" +HOMEDIR="$2" +LOCK="$3" + +test $# -eq 3 || exit 1 + +if [ -e "$LOCK" ] +then + echo "Lock file already exist." 1>&2 + echo "Remove $LOCK if you want to regenerate key." 1>&2 + exit 2 +fi + +touch "$LOCK" + +/sbin/rngd -f -r /dev/urandom & +RAND=$! +cd $HOMEDIR +gpg --homedir $HOMEDIR --batch --gen-key $BATCHFILE +EXIT=$? + +kill $RAND + +exit $EXIT diff --git a/modules/icecream/manifests/client.pp b/modules/icecream/manifests/client.pp new file mode 100644 index 00000000..5364d87d --- /dev/null +++ b/modules/icecream/manifests/client.pp @@ -0,0 +1,6 @@ +define icecream::client($host = '') { + include icecream::client_common + file { '/etc/sysconfig/icecream': + content => template('icecream/sysconfig'), + } +} diff --git a/modules/icecream/manifests/client_common.pp b/modules/icecream/manifests/client_common.pp new file mode 100644 index 00000000..b4ee4ac5 --- /dev/null +++ b/modules/icecream/manifests/client_common.pp @@ -0,0 +1,7 @@ +class icecream::client_common { + package { 'icecream': } + + service { 'icecream': + subscribe => Package['icecream'], + } +} diff --git a/modules/icecream/manifests/init.pp b/modules/icecream/manifests/init.pp new file mode 100644 index 00000000..01828f03 --- /dev/null +++ b/modules/icecream/manifests/init.pp @@ -0,0 +1 @@ +class icecream { } diff --git a/modules/icecream/manifests/scheduler.pp b/modules/icecream/manifests/scheduler.pp new file mode 100644 index 00000000..e3d876b8 --- /dev/null +++ b/modules/icecream/manifests/scheduler.pp @@ -0,0 +1,7 @@ +class icecream::scheduler { + package { 'icecream-scheduler': } + + service { 'icecream-scheduler': + subscribe => Package['icecream-scheduler'], + } +} diff --git a/modules/icecream/templates/sysconfig b/modules/icecream/templates/sysconfig new file mode 100644 index 00000000..8a5bc92c --- /dev/null +++ b/modules/icecream/templates/sysconfig @@ -0,0 +1,89 @@ +# +## Type: integer(0:19) +## Path: Applications/icecream +## Description: Icecream settings +## ServiceRestart: icecream +## Default: 5 +# +# Nice level of running compilers +# +ICECREAM_NICE_LEVEL="5" + +# +## Type: string +## Path: Applications/icecream +## Default: /var/log/iceccd +# +# icecream daemon log file +# +ICECREAM_LOG_FILE="/var/log/icecream.log" + +# +## Type: string +## Path: Applications/icecream +## Default: no +# +# Start also the scheduler? +# +ICECREAM_RUN_SCHEDULER="no" + +# +## Type: string +## Path: Applications/icecream +## Default: /var/log/icecc_scheduler +# +# icecream scheduler log file +# +ICECREAM_SCHEDULER_LOG_FILE="/var/log/scheduler.log" + +# +## Type: string +## Path: Applications/icecream +## Default: "" +# +# Identification for the network the scheduler and daemon run on. +# You can have several distinct icecream networks in the same LAN +# for whatever reason. +# +ICECREAM_NETNAME="" + +# +## Type: string +## Path: Applications/icecream +## Default: "" +# +# If the daemon can't find the scheduler by broadcast (e.g. because +# of a firewall) you can specify it. +# +ICECREAM_SCHEDULER_HOST="<%= @host %>" + +# +## Type: string +## Path: Applications/icecream +## Default: "" +## Type: integer +# +# You can overwrite here the number of jobs to run in parallel. Per +# default this depends on the number of (virtual) CPUs installed. +# +ICECREAM_MAX_JOBS="" + +# +## Type: string +## Path: Applications/icecream +## Default: "/var/cache/icecream" +# +# This is the directory where the icecream daemon stores the environments +# it compiles in. 
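Putting the icecream classes together, one host runs the scheduler and every build node points its daemon at it through this sysconfig template; the scheduler hostname below is a placeholder:

    # on the host running the scheduler
    include icecream::scheduler

    # on each build node; $host is what ends up in ICECREAM_SCHEDULER_HOST above
    icecream::client { 'iceccd':
      host => "scheduler.${::domain}",
    }
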
In a big network this can grow quite a bit, so use some +# path if your /tmp is small - but the user icecream has to write to it. +# +ICECREAM_BASEDIR="/var/cache/icecream" + +# +## Type: string +## Path: Applications/icecream +# Default: "" +# +# Just set the environment var to enable DEBUG +ICECREAM_DEBUG="1" +ICECREAM_SCHEDULER_DEBUG="1" diff --git a/modules/ii/manifests/init.pp b/modules/ii/manifests/init.pp new file mode 100644 index 00000000..2947c75d --- /dev/null +++ b/modules/ii/manifests/init.pp @@ -0,0 +1,38 @@ +class ii { + class base { + package {['ii', + 'perl-Proc-Daemon']: } + + file { '/var/lib/ii/': + ensure => directory, + owner => 'nobody', + } + } + + define bot( $server = 'irc.freenode.net', + $channel) { + + $nick = $name + + include ii::base + # a custom wrapper is needed since ii does not fork in the + # background, and bash is not able to properly do it + mga_common::local_script { "ii_${nick}": + content => template('ii/ii_wrapper.pl'), + require => Class['ii::base'], + } + + service { 'ii': + provider => base, + start => "/usr/local/bin/ii_${nick}", + require => Mga_common::Local_script["ii_${nick}"], + } + + exec { "join channel ${nick}": + command => "echo '/j ${channel}' > /var/lib/ii/${nick}/${server}/in", + user => 'nobody', + creates => "/var/lib/ii/${nick}/${server}/${channel}/in", + require => Service['ii'], + } + } +} diff --git a/modules/ii/templates/ii_wrapper.pl b/modules/ii/templates/ii_wrapper.pl new file mode 100644 index 00000000..68128314 --- /dev/null +++ b/modules/ii/templates/ii_wrapper.pl @@ -0,0 +1,15 @@ +#!/usr/bin/perl +use warnings; +use strict; +use POSIX; +use Proc::Daemon; +my $nick = "<%= @nick %>"; +my $server = "<%= @server %>"; + + +Proc::Daemon::Init(); +my (undef, undef, $uid) = getpwnam("nobody"); +POSIX::setuid($uid); + +fork() || exec "ii -n $nick -i /var/lib/ii/$nick -s $server"; +wait(); diff --git a/modules/irkerd/manifests/init.pp b/modules/irkerd/manifests/init.pp new file mode 100644 index 00000000..adffc452 --- /dev/null +++ b/modules/irkerd/manifests/init.pp @@ -0,0 +1,9 @@ +class irkerd { + package { 'irker': + ensure => installed, + } + + service { 'irkerd': + ensure => running, + } +} diff --git a/modules/libvirtd/files/network_add.py b/modules/libvirtd/files/network_add.py new file mode 100644 index 00000000..4ed63109 --- /dev/null +++ b/modules/libvirtd/files/network_add.py @@ -0,0 +1,61 @@ +#!/usr/bin/python3 +import libvirt +import os +import IPy + +# bridge_name + +# forward -> nat/ route +# forward-dev + +# network +# => deduire la gateway , et le range +# en dhcp automatiquement + +# tftp_root + +# enable_pxelinux + + +bridge_name = os.environ.get('BRIDGE_NAME', 'virbr0') +forward = os.environ.get('FORWARD', 'nat') +forward_dev = os.environ.get('FORWARD_DEV', 'eth0') + +network = os.environ.get('NETWORK', '192.168.122.0/24') + +tftp_root = os.environ.get('TFTP_ROOT', '') +disable_pxelinux = os.environ.get('DISABLE_PXE', False) + +name = os.environ.get('NAME', 'default') + + +ip = IPy.IP(network) +gateway = ip[1] +dhcp_start = ip[2] +dhcp_end = ip[-2] + +netmask = ip.netmask() +tftp_xml = '' +pxe_xml = '' + +if tftp_root: + tftp_xml = "<tftp root='" + tftp_root + "' />" + if not disable_pxelinux: + pxe_xml = "<bootp file='pxelinux.0' />" + +network_xml = """ +<network> + <name>%(name)s</name> + <bridge name="%(bridge_name)s" /> + <forward mode="%(forward)s" dev="%(forward_dev)s"/> + <ip address="%(gateway)s" netmask="%(netmask)s"> + %(tftp_xml)s + <dhcp> + <range start="%(dhcp_start)s" end="%(dhcp_end)s" /> + 
%(pxe_xml)s + </dhcp> + </ip> +</network>""" % globals() + +c=libvirt.open("qemu:///system") +c.networkDefineXML(network_xml) diff --git a/modules/libvirtd/files/storage_add.py b/modules/libvirtd/files/storage_add.py new file mode 100644 index 00000000..10369e36 --- /dev/null +++ b/modules/libvirtd/files/storage_add.py @@ -0,0 +1,27 @@ +#!/usr/bin/python3 +import libvirt +import sys + +name = sys.argv[1] +path = sys.argv[2] + +storage_xml = """ +<pool type='dir'> + <name>%s</name> + <capacity>0</capacity> + <allocation>0</allocation> + <available>0</available> + <source> + </source> + <target> + <path>%s</path> + <permissions> + <mode>0700</mode> + <owner>-1</owner> + <group>-1</group> + </permissions> + </target> +</pool>""" % ( name, path ) + +c=libvirt.open("qemu:///system") +c.storagePoolDefineXML(storage_xml,0) diff --git a/modules/libvirtd/manifests/init.pp b/modules/libvirtd/manifests/init.pp new file mode 100644 index 00000000..f0cbb887 --- /dev/null +++ b/modules/libvirtd/manifests/init.pp @@ -0,0 +1,109 @@ +class libvirtd { + class base { + # make sure to use a recent enough version + # dnsmasq-base -> for nat network + # netcat-openbsd -> for ssh remote access + # iptables -> for dhcp, message error was quite puzzling + # python-* => needed for helper script + package {['libvirt-utils', + 'dnsmasq', + 'netcat-openbsd', + 'iptables', + 'python3-libvirt', + 'python3-IPy']: + } + service { 'libvirtd': + require => Package['libvirt-utils'], + } + + #TODO remove once libvirt package is fixed to manage the directory + file { ['/etc/libvirt/storage', + '/etc/libvirt/storage/autostart']: + ensure => directory, + require => Package['libvirt-utils'], + } + + file { '/usr/local/bin/storage_add.py': + mode => '0755', + source => 'puppet:///modules/libvirtd/storage_add.py', + } + + file { '/usr/local/bin/network_add.py': + mode => '0755', + source => 'puppet:///modules/libvirtd/network_add.py', + } + + } + + class kvm inherits base { + # pull cyrus-sasl, should be checked + package { 'qemu': } + } + + # see https://wiki.libvirt.org/page/SSHPolicyKitSetup + define group_access() { + # to pull polkit and create the directory + include libvirtd::base + file { "/etc/polkit-1/localauthority/50-local.d/50-${name}-libvirt-remote-access.pkla": + content => template('libvirtd/50-template-libvirt-remote-access.pkla'), + require => Package['libvirt-utils'], + } + # give access to /dev/kvm to people allowed to use libvirt + file { '/dev/kvm': + group => $name, + owner => 'root', + mode => '0660', + } + } + + define storage($path, $autostart = true) { + include libvirtd::base + + exec { "/usr/local/bin/storage_add.py ${name} ${path}": + creates => "/etc/libvirt/storage/${name}.xml", + require => [File['/usr/local/bin/storage_add.py'], + Package['python3-libvirt'] ] + } + + #TODO use API of libvirt + file { "/etc/libvirt/storage/autostart/${name}.xml": + ensure => $autostart ? 
{ + true => "/etc/libvirt/storage/${name}.xml", + false => absent + }, + require => Package['libvirt-utils'], + } + } + + define network( $bridge_name = 'virbr0', + $forward = 'nat', + $forward_dev = 'eth0', + $network = '192.168.122.0/24', + $tftp_root = '', + $disable_pxe = '', + $autostart = true, + $vm_type = 'qemu') { + + exec { '/usr/local/bin/network_add.py': + environment => ["BRIDGE_NAME=${bridge_name}", + "FORWARD=${forward}", + "FORWARD_DEV=${forward_dev}", + "NETWORK=${network}", + "TFTP_ROOT=${tftp_root}", + "DISABLE_PXE=\"${disable_pxe}\""], + + creates => "/etc/libvirt/${vm_type}/networks/${name}.xml", + require => [File['/usr/local/bin/network_add.py'], + Package['python3-IPy'], Package['python3-libvirt'] ] + } + + #TODO use API of libvirt + file { "/etc/libvirt/${vm_type}/networks/autostart/${name}.xml": + ensure => $autostart ? { + true => "/etc/libvirt/${vm_type}/networks/${name}.xml", + false => absent + }, + require => Package['libvirt-utils'], + } + } +} diff --git a/modules/libvirtd/templates/50-template-libvirt-remote-access.pkla b/modules/libvirtd/templates/50-template-libvirt-remote-access.pkla new file mode 100644 index 00000000..8806e3cb --- /dev/null +++ b/modules/libvirtd/templates/50-template-libvirt-remote-access.pkla @@ -0,0 +1,6 @@ +[Remote libvirt SSH access] +Identity=unix-user:root;unix-group:<%= @name %> +Action=org.libvirt.unix.manage +ResultAny=yes +ResultInactive=yes +ResultActive=yes diff --git a/modules/mediawiki/files/init_wiki.php b/modules/mediawiki/files/init_wiki.php new file mode 100644 index 00000000..da1d46f5 --- /dev/null +++ b/modules/mediawiki/files/init_wiki.php @@ -0,0 +1,31 @@ +<? +$wiki_root = $argv[1]; +$mw_root = '/usr/share/mediawiki'; + +if (!is_dir("$wiki_root/config")) { + exit(1); +} + +// DefaultSettings.php complain if not defined +define('MEDIAWIKI',1); + +require_once("$mw_root/includes/Defines.php"); +require_once("$mw_root/includes/AutoLoader.php"); +require_once("$mw_root/includes/GlobalFunctions.php"); +include("$wiki_root/LocalSettings.php"); + +$dbclass = 'Database'.ucfirst($wgDBtype); +$wgDatabase = new $dbclass($wgDBserver, + $wgDBuser, + $wgDBpassword, $wgDBname, 1); + +$wgDatabase->initial_setup($wgDBpassword, $wgDBname); +$wgDatabase->setup_database(); + +$dir = "$wiki_root/config"; +foreach (scandir($dir) as $item) { + if (!is_dir($item) || is_link($item)) + unlink($item); +} +rmdir("$dir"); +?> diff --git a/modules/mediawiki/files/robots.txt b/modules/mediawiki/files/robots.txt new file mode 100644 index 00000000..a58c6199 --- /dev/null +++ b/modules/mediawiki/files/robots.txt @@ -0,0 +1,4 @@ +User-agent: * +Disallow: /mw-*/index.php? 
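Stepping back to the libvirtd module, its defines wrap the two helper scripts shown above (storage_add.py and network_add.py). A rough usage sketch; the group, pool name, path and network range are illustrative, only the parameter names and defaults come from the module:

    include libvirtd::kvm                         # base tools plus qemu

    libvirtd::group_access { 'mga-iso_makers': }  # polkit remote access and /dev/kvm group

    libvirtd::storage { 'vmpool':
      path => '/srv/libvirt/vmpool',
    }

    libvirtd::network { 'buildnet':
      forward_dev => 'eth1',
      network     => '192.168.100.0/24',  # gateway and DHCP range are derived from this by network_add.py
    }
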
+Disallow: /*/Special: +Crawl-delay: 30 diff --git a/modules/mediawiki/manifests/base.pp b/modules/mediawiki/manifests/base.pp new file mode 100644 index 00000000..76c8625b --- /dev/null +++ b/modules/mediawiki/manifests/base.pp @@ -0,0 +1,46 @@ +class mediawiki::base { + include apache::mod::php + $vhost = $mediawiki::config::vhost + $root = $mediawiki::config::root + + package { ['mediawiki','mediawiki-ldapauthentication']: } + + file { $mediawiki::config::root: + ensure => directory, + } + + $wiki_root = $mediawiki::config::root + $robotsfile = "$wiki_root/robots.txt" + file { $robotsfile: + ensure => present, + mode => '0644', + owner => root, + group => root, + source => 'puppet:///modules/mediawiki/robots.txt', + } + +# file { '/usr/local/bin/init_wiki.php': +# mode => '0755', +# source => 'puppet:///modules/mediawiki/init_wiki.php', +# } + + $user = 'mediawiki' + + postgresql::remote_user { $user: + password => $mediawiki::config::pgsql_password, + } + + # TODO create the ldap user + + if $vhost { + apache::vhost::redirect_ssl { $vhost: } + + apache::vhost::base { "ssl_${vhost}": + location => $root, + use_ssl => true, + vhost => $vhost, + content => template('mediawiki/wiki_vhost.conf'), + } + } + # add index.php +} diff --git a/modules/mediawiki/manifests/config.pp b/modules/mediawiki/manifests/config.pp new file mode 100644 index 00000000..0c54cdf6 --- /dev/null +++ b/modules/mediawiki/manifests/config.pp @@ -0,0 +1,9 @@ +# the class is just here to handle global configuration +# a smart variation of the methods exposed on +# https://puppetlabs.com/blog/the-problem-with-separating-data-from-puppet-code/ +class mediawiki::config( + $pgsql_password, + $secretkey, + $ldap_password, + $vhost = "wiki.${::domain}", + $root = '/srv/wiki/') {} diff --git a/modules/mediawiki/manifests/init.pp b/modules/mediawiki/manifests/init.pp new file mode 100644 index 00000000..28e79fab --- /dev/null +++ b/modules/mediawiki/manifests/init.pp @@ -0,0 +1 @@ +class mediawiki { } diff --git a/modules/mediawiki/manifests/instance.pp b/modules/mediawiki/manifests/instance.pp new file mode 100644 index 00000000..c6906449 --- /dev/null +++ b/modules/mediawiki/manifests/instance.pp @@ -0,0 +1,100 @@ +define mediawiki::instance( $title, + $wiki_settings = '', + $skinsdir = '/usr/share/mediawiki/skins') { + + include mediawiki::base + + $path = $name + $lang = $name + $wiki_root = "${mediawiki::base::root}/${path}" + $db_name = "mediawiki_${name}" + $db_user = $mediawiki::base::user + $db_password = $mediawiki::config::pgsql_password + $secret_key = $mediawiki::config::secretkey + $ldap_password = $mediawiki::config::ldap_password + $includedir = "/usr/share/mediawiki/includes" + $maintenancedir = "/usr/share/mediawiki/maintenance" + $vendordir = "/usr/share/mediawiki/vendor" + $resourcesdir = "/usr/share/mediawiki/resources" + $extensionsdir = "/usr/share/mediawiki/extensions" + + file { $wiki_root: + ensure => directory + } + + file { "${wiki_root}/skins": + ensure => link, + target => $skinsdir, + require => File[$wiki_root], + } + file { "${wiki_root}/includes": + ensure => link, + target => $includedir, + require => File[$wiki_root], + } + + file { "${wiki_root}/maintenance": + ensure => link, + target => $maintenancedir, + require => File[$wiki_root], + } + + file { "${wiki_root}/vendor": + ensure => link, + target => $vendordir, + require => File[$wiki_root], + } + + file { "${wiki_root}/resources": + ensure => link, + target => $resourcesdir, + require => File[$wiki_root], + } + + file { 
"${wiki_root}/extensions": + ensure => link, + target => $extensionsdir, + require => File[$wiki_root], + } + + file { "${wiki_root}/cache": + ensure => directory, + owner => apache, + mode => '0755', + } + + file { "${wiki_root}/tmp": + ensure => directory, + owner => apache, + mode => '0755', + } + + exec { "wikicreate ${name}": + command => "mediawiki-create ${wiki_root}", + cwd => $mediawiki::base::root, + require => [File[$wiki_root],Package['mediawiki']], + creates => "${wiki_root}/index.php", + } + +# postgresql::remote_database { $db_name: +# user => $db_user, +# callback_notify => Exec["deploy_db ${name}"], +# } +# +# exec { "deploy_db ${name}": +# command => "php /usr/local/bin/init_wiki.php ${wiki_root}", +# refreshonly => true, +# onlyif => "/usr/bin/test -d ${wiki_root}/config", +# } + + file { "${wiki_root}/LocalSettings.php": + owner => 'apache', + mode => '0600', + content => template('mediawiki/LocalSettings.php'), + # if LocalSettings is created first, the wikicreate script + # do not create a confg directory, and so it doesn't + # trigger deploy_db exec + require => Exec["wikicreate ${name}"], + } +} + diff --git a/modules/mediawiki/templates/LocalSettings.php b/modules/mediawiki/templates/LocalSettings.php new file mode 100644 index 00000000..c340dfd9 --- /dev/null +++ b/modules/mediawiki/templates/LocalSettings.php @@ -0,0 +1,208 @@ +<?php + +# This file was created by puppet, so any change will be overwritten + +# See includes/DefaultSettings.php for all configurable settings +# and their default values, but don't forget to make changes in _this_ +# file, not there. +# +# Further documentation for configuration settings may be found at: +# https://www.mediawiki.org/wiki/Manual:Configuration_settings + +# Protect against web entry +if ( !defined( 'MEDIAWIKI' ) ) { + exit; +} + +## Installation path (should default to this value, but define for clarity) +$IP = '/usr/share/mediawiki'; + +## Include path necessary to load LDAP module +$path = array( $IP, "$IP/includes", "$IP/languages" ); +set_include_path( implode( PATH_SEPARATOR, $path ) . PATH_SEPARATOR . get_include_path() ); + +## Uncomment this to disable output compression +# $wgDisableOutputCompression = true; + +$wgSitename = "<%= @title %>"; +# $wgMetaNamespace = ""; # Defaults to $wgSitename + +## The URL base path to the directory containing the wiki; +## defaults for all runtime URL paths are based off of this. +## For more information on customizing the URLs +## (like /w/index.php/Page_title to /wiki/Page_title) please see: +## https://www.mediawiki.org/wiki/Manual:Short_URL +$wgScriptPath = "/<%= @path %>"; + +## The protocol and server name to use in fully-qualified URLs +$wgServer = "https://wiki.mageia.org"; + +## The URL path to static resources (images, scripts, etc.) +$wgResourceBasePath = $wgScriptPath; + +## The relative URL path to the skins directory +$wgStylePath = "$wgScriptPath/skins"; + +## The relative URL path to the logo. Make sure you change this from the default, +## or else you'll overwrite your logo when you upgrade! 
+$wgLogo = "$wgStylePath/common/images/wiki_mga.png"; + +## UPO means: this is also a user preference option + +$wgEnableEmail = true; +$wgEnableUserEmail = true; # UPO + +$wgEmergencyContact = "root@<%= @domain %>"; +$wgPasswordSender = "wiki_noreply@ml.<%= @domain %>"; + +$wgEnotifUserTalk = true; # UPO +$wgEnotifWatchlist = true; # UPO +$wgEmailAuthentication = true; + +## Database settings +$wgDBtype = "postgres"; +$wgDBserver = "pg.<%= @domain %>"; +$wgDBname = "<%= @db_name %>"; +$wgDBuser = "<%= @db_user %>"; +$wgDBpassword = "<%= @db_password %>"; + +# Postgres specific settings +$wgDBport = "5432"; +$wgDBmwschema = "mediawiki"; +$wgDBts2schema = "public"; + +## Shared memory settings +$wgMainCacheType = CACHE_NONE; +$wgMemCachedServers = []; + +## To enable image uploads, make sure the 'images' directory +## is writable, then set this to true: +$wgEnableUploads = true; +# use gd, as convert do not work for big image +# see https://bugs.mageia.org/show_bug.cgi?id=3202 +$wgUseImageMagick = true; +#$wgImageMagickConvertCommand = "/usr/bin/convert"; + +# InstantCommons allows wiki to use images from https://commons.wikimedia.org +$wgUseInstantCommons = false; + +## If you use ImageMagick (or any other shell command) on a +## Linux server, this will need to be set to the name of an +## available UTF-8 locale +$wgShellLocale = "en_US.UTF-8"; + +## Set $wgCacheDirectory to a writable directory on the web server +## to make your wiki go slightly faster. The directory should not +## be publicly accessible from the web. +# This seems actually mandatory to get the Vector skin to work properly +# https://serverfault.com/a/744059 +# FIXME: Dehardcode that path (maybe via ${wiki_root} if exposed?) +$wgCacheDirectory = "/srv/wiki/<%= @path %>/cache"; + +$wgUploadDirectory = "/srv/wiki/<%= @path %>/images"; + +# This seems mandatory to get the Vector skin to work properly +# https://phabricator.wikimedia.org/T119934 +# FIXME: Dehardcode that path (maybe via ${wiki_root} if exposed?) +$wgTmpDirectory = "/srv/wiki/<%= @path %>/tmp"; + +# Array of interwiki prefixes for current wiki. +$wgLocalInterwikis = array( strtolower( $wgSitename ) ); + +# Site language code, should be one of the list in ./languages/data/Names.php +$wgLanguageCode = "<%= @lang %>"; + +$wgSecretKey = "<%= @secret_key %>"; + +# Changing this will log out all existing sessions. +$wgAuthenticationTokenVersion = "1"; + +# Site upgrade key. Must be set to a string (default provided) to turn on the +# web installer while LocalSettings.php is in place +# FIXME: This should be set to a secure value: +# https://www.mediawiki.org/wiki/Manual:$wgUpgradeKey +# $wgUpgradeKey = ""; + +## For attaching licensing metadata to pages, and displaying an +## appropriate copyright notice / icon. GNU Free Documentation +## License and Creative Commons licenses are supported so far. +$wgEnableCreativeCommonsRdf = true; +# TODO add a proper page +$wgRightsPage = ""; # Set to the title of a wiki page that describes your license/copyright +$wgRightsUrl = "https://creativecommons.org/licenses/by-sa/3.0/"; +$wgRightsText = "Creative Commons - Attribution-ShareAlike 3.0 Unported"; +# TODO get the icon to host it on our server +$wgRightsIcon = "https://licensebuttons.net/l/by-sa/3.0/88x31.png"; + +# Path to the GNU diff3 utility. Used for conflict resolution. +$wgDiff3 = "/usr/bin/diff3"; + +## Default skin: you can change the default skin. Use the internal symbolic +## names, ie 'vector', 'monobook': +$wgDefaultSkin = 'vector'; + +# Enabled skins. 
+# The following skins were automatically enabled: +wfLoadSkin( 'MonoBook' ); +wfLoadSkin( 'Vector' ); + + +# End of automatically generated settings. +# Add more configuration options below. + + +# Setting this to true will invalidate all cached pages whenever +# LocalSettings.php is changed. +$wgInvalidateCacheOnLocalSettingsChange = true; + +# FIXME: Obsoleted, to be replaced by $wgPasswordPolicy +# https://www.mediawiki.org/wiki/Manual:$wgPasswordPolicy +$wgMinimalPasswordLength = 1; + +# Give more details on errors +$wgShowExceptionDetails = true; + + +## LDAP setup + +require_once 'extensions/LdapAuthentication/LdapAuthentication.php'; +$wgAuth = new LdapAuthenticationPlugin(); + +## uncomment to debug +# $wgLDAPDebug = 10; +# $wgDebugLogGroups["ldap"] = "/tmp/wiki_ldap.log"; +# +$wgDebugLogFile = "/tmp/wiki.log"; +# + +$wgLDAPUseLocal = false; + +$wgLDAPDomainNames = array( 'ldap' ); + +# TODO make it workable with more than one server +$wgLDAPServerNames = array( 'ldap' => 'ldap.<%= @domain %>' ); + +$wgLDAPSearchStrings = array( 'ldap' => 'uid=USER-NAME,ou=People,<%= @dc_suffix %>' ); + +$wgLDAPEncryptionType = array( 'ldap' => 'tls' ); + +$wgLDAPBaseDNs = array( 'ldap' => '<%= @dc_suffix %>' ); +$wgLDAPUserBaseDNs = array( 'ldap' => 'ou=People,<%= @dc_suffix %>' ); +$wgLDAPGroupBaseDNs = array ( 'ldap' => 'ou=Group,<%= @dc_suffix %>' ); + +$wgLDAPProxyAgent = array( 'ldap' => 'cn=mediawiki-alamut,ou=System Accounts,<%= @dc_suffix %>' ); + +$wgLDAPProxyAgentPassword = array( 'ldap' => '<%= @ldap_password %>' ); + +$wgLDAPUseLDAPGroups = array( 'ldap' => true ); +$wgLDAPGroupNameAttribute = array( 'ldap' => 'cn' ); +$wgLDAPGroupUseFullDN = array( 'ldap' => true ); +$wgLDAPLowerCaseUsername = array( 'ldap' => true ); +$wgLDAPGroupObjectclass = array( 'ldap' => 'posixGroup' ); +$wgLDAPGroupAttribute = array( 'ldap' => 'member' ); + +$wgLDAPLowerCaseUsername = array( 'ldap' => true ); + +$wgLDAPPreferences = array( 'ldap' => array( 'email'=>'mail','realname'=>'cn','nickname'=>'uid','language'=>'preferredlanguage') ); + +<%= @wiki_settings %> diff --git a/modules/mediawiki/templates/wiki_vhost.conf b/modules/mediawiki/templates/wiki_vhost.conf new file mode 100644 index 00000000..1ae3492d --- /dev/null +++ b/modules/mediawiki/templates/wiki_vhost.conf @@ -0,0 +1,17 @@ +# heavily used by the wiki farm stuff +<Directory <%= @root %>> +Options +FollowSymLinks +</Directory> + +<Directory <%= @root %>/images> + SetHandler default-handler +</Directory> + +AliasMatch /.*/skins/(.*)$ /usr/share/mediawiki/skins/$1 + +RewriteEngine On + +RewriteCond %{REQUEST_URI} ^/.*/index.php$ +RewriteCond %{QUERY_STRING} ^title=Special:UserLogin +RewriteCond %{HTTPS} ^off$ +RewriteRule ^(.*)$ https://%{SERVER_NAME}/$1 [R] diff --git a/modules/memcached/files/memcached.sysconfig b/modules/memcached/files/memcached.sysconfig new file mode 100644 index 00000000..a29f2270 --- /dev/null +++ b/modules/memcached/files/memcached.sysconfig @@ -0,0 +1,23 @@ +# Specify the binary to use +# MEMCACHED_DAEMON="memcached-replication" +MEMCACHED_DAEMON="memcached" + +# TCP port to listen on +TCP_PORT="11211" +# UDP port to listen on, can be disabled by setting it to 0 +UDP_PORT="11211" +# User to run under +USER="memcached" +# Max simultaneous connections +MAXCONN="1024" +# MB memory max to use for object storage +CACHESIZE="64" +# IP address to listen on. 
Set to "INADDR_ANY" or "" to listen on all interfaces +IPADDR="127.0.0.1" +# Number of threads to use to process incoming requests +THREADS="4" +# Unix socket path to listen on (disables network support) +#UNIX_SOCKET="/var/run/memcached/memcached.sock" +# Additional options +OPTIONS="" + diff --git a/modules/memcached/manifests/init.pp b/modules/memcached/manifests/init.pp new file mode 100644 index 00000000..50152871 --- /dev/null +++ b/modules/memcached/manifests/init.pp @@ -0,0 +1,13 @@ +class memcached { + package { 'memcached': } + + service { 'memcached': + require => Package['memcached'], + } + + file { '/etc/sysconfig/memcached': + require => Package['memcached'], + source => 'puppet:///modules/memcached/memcached.sysconfig', + notify => Service['memcached'], + } +} diff --git a/modules/mga-advisories/manifests/init.pp b/modules/mga-advisories/manifests/init.pp new file mode 100644 index 00000000..1937bb62 --- /dev/null +++ b/modules/mga-advisories/manifests/init.pp @@ -0,0 +1,98 @@ +class mga-advisories( + $advisories_svn = "svn://svn.${::domain}/svn/advisories", + $vhost +){ + $mgaadv_login = 'mga-advisories' + $mgaadv_homedir = "/var/lib/${mgaadv_login}" + $vhostdir = "${mgaadv_homedir}/vhost" + $advisories_dir = "${mgaadv_homedir}/advisories" + $status_dir = "${mgaadv_homedir}/status" + $update_script = '/usr/local/bin/update_mga-advisories' + $move_script = '/root/tmp/mgatools-new/mga-move-pkg' + $move_wrapper_script = '/usr/local/bin/mga-adv-move-pkg' + + group { $mgaadv_login: + ensure => present, + } + + user { $mgaadv_login: + ensure => present, + home => $mgaadv_homedir, + managehome => true, + gid => $mgaadv_login, + } + + package { 'mga-advisories': + ensure => installed, + } + + file {'/etc/mga-advisories.conf': + ensure => present, + owner => root, + group => root, + mode => '0644', + content => template('mga-advisories/mga-advisories.conf'), + require => Package['mga-advisories'], + } + + file { [ $vhostdir, $status_dir ]: + ensure => directory, + owner => $mgaadv_login, + group => $mgaadv_login, + mode => '0755', + } + + $vhost_aliases = { + "/static" => '/usr/share/mga-advisories/static', + } + apache::vhost::base { $vhost: + location => $vhostdir, + aliases => $vhost_aliases, + require => File[$vhostdir], + } + + apache::vhost::base { "ssl_${vhost}": + use_ssl => true, + vhost => $vhost, + aliases => $vhost_aliases, + location => $vhostdir, + require => File[$vhostdir], + } + + subversion::snapshot { $advisories_dir: + source => $advisories_svn, + user => $mgaadv_login, + refresh => '0', + require => User[$mgaadv_login], + } + + file { $update_script: + ensure => present, + owner => root, + group => root, + mode => '0755', + content => template('mga-advisories/update_script'), + } + + file { $move_wrapper_script: + ensure => present, + owner => root, + group => root, + mode => '0755', + content => template('mga-advisories/adv-move-pkg'), + } + + sudo::sudoers_config { 'mga-adv-move-pkg': + content => template('mga-advisories/sudoers.adv-move-pkg') + } + + # Disable for now... we may re-instate once it's been a little more tested. 
+ #cron { $update_script: + # command => $update_script, + # user => $mgaadv_login, + # hour => '*', + # minute => '10', + # require => Subversion::Snapshot[$advisories_dir], + #} +} +# vim: sw=2 diff --git a/modules/mga-advisories/templates/adv-move-pkg b/modules/mga-advisories/templates/adv-move-pkg new file mode 100644 index 00000000..71e1880e --- /dev/null +++ b/modules/mga-advisories/templates/adv-move-pkg @@ -0,0 +1,8 @@ +#!/bin/sh + +if [ "$USER" != "<%= @mgaadv_login %>" ]; then + echo "This script must be run as the <%= @mgaadv_login %> user." >&2 + exit 1 +fi + +exec sudo <%= @move_script %> "$@" diff --git a/modules/mga-advisories/templates/mga-advisories.conf b/modules/mga-advisories/templates/mga-advisories.conf new file mode 100644 index 00000000..4dab1543 --- /dev/null +++ b/modules/mga-advisories/templates/mga-advisories.conf @@ -0,0 +1,14 @@ +mode: site +send_adv_mail: yes +move_pkg_cmd: <%= @move_wrapper_script %> +send_report_mail: yes +out_dir: <%= @vhostdir %> +advisories_dir: <%= @advisories_dir %> +status_dir: <%= @status_dir %> +adv_mail_to: updates-announce@ml.mageia.org +adv_mail_from: Mageia Updates <buildsystem-daemon@mageia.org> +report_mail_to: qa-reports@ml.mageia.org +report_mail_from: Mageia Advisories <buildsystem-daemon@mageia.org> +bugzilla_url: https://bugs.mageia.org/ +bugzilla_login: bot +bugzilla_password: file:///var/lib/git/.gitzilla-password diff --git a/modules/mga-advisories/templates/sudoers.adv-move-pkg b/modules/mga-advisories/templates/sudoers.adv-move-pkg new file mode 100644 index 00000000..5d9618a9 --- /dev/null +++ b/modules/mga-advisories/templates/sudoers.adv-move-pkg @@ -0,0 +1 @@ +<%= @mgaadv_login %> ALL=(root) NOPASSWD:<%= @move_script %> * diff --git a/modules/mga-advisories/templates/update_script b/modules/mga-advisories/templates/update_script new file mode 100644 index 00000000..71d8d1d4 --- /dev/null +++ b/modules/mga-advisories/templates/update_script @@ -0,0 +1,16 @@ +#!/bin/sh +set -e + +if [ "$UID" = "0" ]; then + echo "Re-running as '<%= @mgaadv_login %>' user." >&2 + exec /bin/su -c <%= @update_script %> - <%= @mgaadv_login %> +fi + +if [ "$USER" != "<%= @mgaadv_login %>" ]; then + echo "This script must be run as the <%= @mgaadv_login %> user." 
>&2 + exit 1 +fi + +cd <%= @advisories_dir %> +svn up +exec /usr/bin/mgaadv process diff --git a/modules/mga-mirrors/files/check_mirrors_status b/modules/mga-mirrors/files/check_mirrors_status new file mode 100755 index 00000000..9c00ac8d --- /dev/null +++ b/modules/mga-mirrors/files/check_mirrors_status @@ -0,0 +1,271 @@ +#!/usr/bin/ruby + +require 'date' +require 'net/http' +require 'optparse' +require 'thread' +require 'uri' + +def get_dates(base, archs_per_distro, optional=true) + r = {} + begin + r['base'] = get_timestamp(base) + rescue Net::OpenTimeout, Timeout::Error, ArgumentError, NoMethodError, Errno::EHOSTUNREACH, Errno::ECONNREFUSED, Errno::ECONNRESET, IOError, OpenSSL::SSL::SSLError => e + end + + archs_per_distro.each{|d, archs| + r[d] = {} + archs.each{|a| + begin + r[d][a] = get_date(base, d, a) + rescue Net::OpenTimeout, Timeout::Error, ArgumentError, NoMethodError, Errno::EHOSTUNREACH, Errno::ECONNREFUSED, Errno::ECONNRESET, IOError, OpenSSL::SSL::SSLError => e + if !optional then + STDERR.puts "Failed to fetch #{version_url(base, d, a)}" + raise + end + end + } + } + r +end + +def get_mirrors + # TODO Get it from the DB + mirrors = [] + url = nil + tier1 = false + fetch_url("https://mirrors.mageia.org/").each_line{|l| + if l =~ /rsync.mageia.org/ then + tier1 = true + next + end + if l=~ /<\/tr>/ && !url.nil? then + if tier1 then + mirrors.prepend url + tier1 = false + else + mirrors.append url + end + url = nil + next + end + next unless l =~ /https?:.*>http/ + # No need to check twice mirrors available in http + https + if !url.nil? && url =~ /https:/ && l =~ /https:\/\// + # Skip http:// if https:// already seen for current mirror + # If the are in the other order http one will just be replaced + next + end + url = l.sub(/<a href="(http[^"]*)".*\n/, '\1') + url += "/" unless url =~ /\/$/ + } + mirrors +end + +def fetch_url(url, redirect_limit = 3) + return if redirect_limit < 0 + if url =~ /^\// then + open(url){|f| + return f.read + } + else + uri = URI.parse(url) + http = Net::HTTP.new(uri.host, uri.port) + http.open_timeout = 30 + http.read_timeout = 30 + if uri.scheme == 'https' then + http.use_ssl = true + end + # Ruby 1.8.7 doesn't set a default User-Agent which causes at + # least one mirror to return 403 + response = http.get(uri.path, {'User-Agent' => 'check_mirrors'}) + case response + when Net::HTTPSuccess then + return response.body + when Net::HTTPRedirection then + location = response['location'] + # Make location absolute if it was not + if location =~ /:\/\// then + fetch_url(location, redirect_limit - 1) + else + uri.path = location + fetch_url(uri.to_s, redirect_limit - 1) + end + end + end +end + +def timestamp_url(url) + "#{url}mageia_timestamp" +end + +def get_timestamp(url) + ti = fetch_url(timestamp_url(url)).to_i + if ti == 0 then + return nil + end + return DateTime.strptime(ti.to_s, '%s') +end + +def parse_version(version) + date = version.sub(/.* (........ 
..:..)$/, '\1').rstrip + DateTime.strptime(date, '%Y%m%d %H:%M') +end + +def version_url(url, distrib, arch) + "#{url}distrib/#{distrib}/#{arch}/VERSION" +end + +def get_date(url, distrib, arch) + return parse_version(fetch_url(version_url(url, distrib, arch))) +end + +def format_age(ref_time, time) + return " <td class='broken'>X</td>" unless ref_time and time + + diff = ref_time - time + cls = 'broken' + if diff == 0 then + cls = 'ok' + elsif diff < 0.5 then + cls = 'almost' + elsif diff < 2 then + cls = 'bad' + end + if cls == 'ok' then + return " <td class='#{cls}'> </td>" + else + return " <td class='#{cls}'>#{time.strftime("%F %R")}</td>" + end +end + +def print_output(archs_per_distro, mirrors, ref_times, times) + puts "<html><head><title>Mageia Mirror Status #{Time.now.utc.strftime("%F")}</title> +<link rel=\"icon\" type=\"image/png\" href=\"//www.mageia.org/g/favicon.png\"> +<style> +td.broken {background-color:#FF0033;} +td.bad {background-color:#FF9933;} +td.almost {background-color:#CCFF66;} +td.ok {background-color:#00FF66;} + +td {text-align:center;} +td.name {text-align:left;} + +td.sep {width:12px;} +table.legend td {padding:4px;} + +th {background-color:#EEEEEE;} +</style> +</head> +<body>" + puts "Last checked on #{Time.now.utc.strftime("%F %R %Z")}<br/>" + puts "<table class='legend'><tr><td class='ok'>Up to date</td><td class='almost'>Less than 12h old</td><td class='bad'>Less than 2 days old</td><td class='broken'>Old or broken</td></tr></table>" + puts "<table><thead>" + puts "<tr><td/>" + puts "<td/><th>Base directory</th>" + archs_per_distro.each{|d, archs| + nb_arches = archs.size + puts " <td/><th colspan='#{nb_arches}'>#{d}</th>" + } + puts "</tr>" + puts "<tr><td/><td/><td/>" + archs_per_distro.each{|d, archs| + puts " <td class='sep' />" + archs.each{|a| + puts " <th>#{a}</th>" + } + } + puts "</tr></thead>" + puts "<tbody>" + puts "<tr><td class='name'>Reference</td>" + puts " <td class='sep' />" + puts " <td>#{!ref_times['base'].nil? ? ref_times['base'].strftime("%F %R") : "?"}</td>" + archs_per_distro.each{|d, archs| + puts " <td class='sep' />" + archs.each{|a| + puts " <td>#{ref_times[d][a].strftime("%F %R")}</td>" + } + } + puts "</tr>" + + mirrors.each{|u| + puts "<tr><td class='name'><a href='#{u}'>#{u}</a></td>" + puts " <td class='sep' />" + puts format_age(ref_times['base'], times[u]['base']) + archs_per_distro.each{|d, archs| + puts " <td class='sep' />" + archs.each{|a| + puts format_age(ref_times[d][a], times[u][d][a]) + } + } + puts "</tr>" + } + puts "</tbody></table>" + puts "</body></html>" +end + + + +# Defaults +ref = 'http://repository.mageia.org/' +archs_per_distro = { + 'cauldron' => ['i686', 'x86_64', 'armv7hl', 'aarch64'], + '9' => ['i586', 'x86_64', 'armv7hl', 'aarch64'] +} +parallel = 8 + +OptionParser.new {|opts| + opts.banner = "Usage: #{$0} [options]" + opts.on("--repository URL", + "Reference repository. Default: #{ref}") { + |url| ref = url + } + opts.on("--parallel n", Integer, + "Max number of parallel connections. Default: #{parallel}") { + |n| $parallel = n + } + opts.on("--output file", + "Write output into given file. Default to STDOUT") { + |f| $stdout.reopen(f, "w") + } +}.parse! 
+ +# Get dates from the reference repository, and fail if some requested distros +# or archs are missing +ref_times = get_dates(ref, archs_per_distro, false) + +# Get the list of mirror URLs to check +mirrors = get_mirrors + +workqueue = Queue.new +times = {} + +# Create all the thread and have them loop on the work queue +threads = (1..parallel).map{|n| + Thread.new { + loop do + u = workqueue.pop + break if u == :exit + times[u] = get_dates(u, archs_per_distro) + end + } +} + +# Push all mirrors into the queue +mirrors.each{|u| + workqueue << u +} + +# Get all the threads to exit after all the work is done +parallel.times{|i| + workqueue << :exit +} + +# Wait for the threads to exit +threads.each{|t| + t.join +} + +# Generate output +print_output(archs_per_distro, mirrors, ref_times, times) + diff --git a/modules/mga-mirrors/manifests/init.pp b/modules/mga-mirrors/manifests/init.pp index f602a47e..4b8b5552 100644 --- a/modules/mga-mirrors/manifests/init.pp +++ b/modules/mga-mirrors/manifests/init.pp @@ -1,23 +1,54 @@ class mga-mirrors { - - $vhost = "mirrors.$domain" - package { 'mga-mirrors': - ensure => installed + $vhost = "mirrors.${::domain}" + $mirrors_dir = '/var/www/mirrors' + + package { 'mga-mirrors': } + + apache::vhost::catalyst_app { $vhost: + script => '/usr/bin/mga_mirrors_fastcgi.pl', + require => Package['mga-mirrors'], + aliases => { + '/status' => '/var/www/mirrors/status.html', + } + } + + apache::vhost::catalyst_app { "ssl_${vhost}": + script => '/usr/bin/mga_mirrors_fastcgi.pl', + require => Package['mga-mirrors'], + vhost => $vhost, + use_ssl => true, + aliases => { + '/status' => '/var/www/mirrors/status.html', + }, + } + + $pgsql_password = extlookup('mga_mirror_pgsql','x') + + postgresql::remote_db_and_user { 'mirrors': + password => $pgsql_password, + description => 'Mirrors database', + } + + file { '/etc/mga-mirrors.ini': + group => 'apache', + mode => '0640', + content => template('mga-mirrors/mga-mirrors.ini'), + require => Package['mga-mirrors'] + } + + file { '/etc/cron.d/check_mga_mirrors': + content => template('mga-mirrors/cron-mga_mirrors'), + require => Package['mga-mirrors'] } - apache::vhost_catalyst_app { $vhost: - script => "/usr/bin/mga_mirrors_fastcgi.pl" + file { $mirrors_dir: + ensure => directory, + owner => 'nobody', } - $password = extlookup("mga_mirror_password") - - file { "mga-mirrors.ini": - path => "/etc/mga-mirrors.ini", - ensure => "present", - owner => root, - group => apache, - mode => 640, - content => template("mga-mirrors/mga-mirrors.ini") + file { '/usr/local/bin/check_mirrors_status': + mode => '0755', + source => 'puppet:///modules/mga-mirrors/check_mirrors_status', } } diff --git a/modules/mga-mirrors/templates/cron-mga_mirrors b/modules/mga-mirrors/templates/cron-mga_mirrors new file mode 100644 index 00000000..7236be04 --- /dev/null +++ b/modules/mga-mirrors/templates/cron-mga_mirrors @@ -0,0 +1,2 @@ +MAILTO=root +*/20 * * * * nobody /usr/local/bin/check_mirrors_status --output /var/www/mirrors/status.html.tmp && mv -f /var/www/mirrors/status.html.tmp /var/www/mirrors/status.html diff --git a/modules/mga-mirrors/templates/mga-mirrors.ini b/modules/mga-mirrors/templates/mga-mirrors.ini index 973c65fd..b438edd1 100644 --- a/modules/mga-mirrors/templates/mga-mirrors.ini +++ b/modules/mga-mirrors/templates/mga-mirrors.ini @@ -1,4 +1,4 @@ [db] -pgconn=host=pgsql.<%= domain %>;dbname=mirrors +pgconn=host=pg.<%= @domain %>;dbname=mirrors user=mirrors -password=<%= password %> +password=<%= @pgsql_password %> diff --git 
a/modules/mga-treasurer/manifests/init.pp b/modules/mga-treasurer/manifests/init.pp new file mode 100644 index 00000000..d092e982 --- /dev/null +++ b/modules/mga-treasurer/manifests/init.pp @@ -0,0 +1,91 @@ +class mga-treasurer( + $grisbi_git = "git://git.${::domain}/org/accounts", + $grisbi_filename = 'mageia-accounts.gsb', + $vhost, + $vhostdir +){ + $mgatres_login = 'mga-treasurer' + $mgatres_homedir = "/var/lib/${mgatres_login}" + $grisbi_dir = "${mgatres_homedir}/grisbi" + $grisbi_path = "${grisbi_dir}/${grisbi_filename}" + + $update_script = '/usr/local/bin/update_mga-treasurer' + + group { $mgatres_login: + ensure => present, + } + + user { $mgatres_login: + ensure => present, + comment => 'mga-treasurer user', + home => $mgatres_homedir, + managehome => true, + gid => $mgatres_login, + } + + package { 'mga-treasurer': + ensure => installed, + } + + file {'/etc/mga-treasurer.conf': + ensure => present, + owner => root, + group => root, + mode => '0644', + content => template('mga-treasurer/mga-treasurer.conf'), + require => Package['mga-treasurer'], + } + + file { $vhostdir: + ensure => directory, + owner => $mgatres_login, + group => $mgatres_login, + mode => '0755', + } + + apache::vhost::base { $vhost: + location => $vhostdir, + aliases => { + "/${grisbi_filename}" => $grisbi_path, + "/static" => '/usr/share/mga-treasurer/static', + }, + content => template('mga-treasurer/vhost_mga-treasurer.conf'), + require => File[$vhostdir], + } + + apache::vhost::base { "ssl_${vhost}": + use_ssl => true, + vhost => $vhost, + location => $vhostdir, + aliases => { + "/${grisbi_filename}" => $grisbi_path, + "/static" => '/usr/share/mga-treasurer/static', + }, + content => template('mga-treasurer/vhost_mga-treasurer.conf'), + require => File[$vhostdir], + } + + file { $update_script: + ensure => present, + owner => root, + group => root, + mode => '0755', + content => template('mga-treasurer/update_script'), + } + + git::snapshot { $grisbi_dir: + source => $grisbi_git, + user => $mgatres_login, + refresh => '0', + require => User[$mgatres_login], + } + + cron { $update_script: + command => $update_script, + user => $mgatres_login, + hour => '*/2', + minute => '10', + require => Git::Snapshot[$grisbi_dir], + } +} +# vim: sw=2 diff --git a/modules/mga-treasurer/templates/mga-treasurer.conf b/modules/mga-treasurer/templates/mga-treasurer.conf new file mode 100644 index 00000000..75ac180f --- /dev/null +++ b/modules/mga-treasurer/templates/mga-treasurer.conf @@ -0,0 +1,2 @@ +grisbi_file: <%= @grisbi_path %> +out_dir: <%= @vhostdir %> diff --git a/modules/mga-treasurer/templates/update_script b/modules/mga-treasurer/templates/update_script new file mode 100644 index 00000000..30fab72d --- /dev/null +++ b/modules/mga-treasurer/templates/update_script @@ -0,0 +1,6 @@ +#!/bin/sh +set -e + +cd <%= @grisbi_dir %> +git pull +exec /usr/bin/mktreasurer diff --git a/modules/mga-treasurer/templates/vhost_mga-treasurer.conf b/modules/mga-treasurer/templates/vhost_mga-treasurer.conf new file mode 100644 index 00000000..763cd87d --- /dev/null +++ b/modules/mga-treasurer/templates/vhost_mga-treasurer.conf @@ -0,0 +1,3 @@ +<FilesMatch "\.json$"> + Header set Access-Control-Allow-Origin "*" +</FilesMatch> diff --git a/modules/mga_common/lib/puppet/parser/functions/group_members.rb b/modules/mga_common/lib/puppet/parser/functions/group_members.rb new file mode 100644 index 00000000..ea275be2 --- /dev/null +++ b/modules/mga_common/lib/puppet/parser/functions/group_members.rb @@ -0,0 +1,14 @@ +# group_members($group) 
+# -> return a array with the login of the group members + +module Puppet::Parser::Functions + newfunction(:group_members, :type => :rvalue) do |args| + group = args[0] + `getent group`.each_line do |l| + if l =~ /^#{group}:/ then + return l.chomp.split(':')[3].split(',') + end + end + raise ArgumentError, "can't find group for #{group}" + end +end diff --git a/modules/mga_common/lib/puppet/parser/functions/hash_keys.rb b/modules/mga_common/lib/puppet/parser/functions/hash_keys.rb new file mode 100644 index 00000000..3a926bee --- /dev/null +++ b/modules/mga_common/lib/puppet/parser/functions/hash_keys.rb @@ -0,0 +1,10 @@ +module Puppet::Parser::Functions + newfunction(:hash_keys, :type => :rvalue) do |args| + unless args[0].is_a?(Hash) + Puppet.warning "hash_keys takes one argument, the input hash" + nil + else + args[0].keys + end + end +end diff --git a/modules/mga_common/lib/puppet/parser/functions/hash_merge.rb b/modules/mga_common/lib/puppet/parser/functions/hash_merge.rb new file mode 100644 index 00000000..375bffa4 --- /dev/null +++ b/modules/mga_common/lib/puppet/parser/functions/hash_merge.rb @@ -0,0 +1,11 @@ +module Puppet::Parser::Functions + newfunction(:hash_merge, :type => :rvalue) do |args| + unless args[0].is_a?(Hash) and args[1].is_a?(Hash) + Puppet.warning "hash_merge takes two arguments" + nil + else + print "hash_merge\n" + args[0].merge(args[1]) + end + end +end diff --git a/modules/mga_common/lib/puppet/parser/functions/str_join.rb b/modules/mga_common/lib/puppet/parser/functions/str_join.rb new file mode 100644 index 00000000..c881c37d --- /dev/null +++ b/modules/mga_common/lib/puppet/parser/functions/str_join.rb @@ -0,0 +1,11 @@ +# str_join($array, $sep) +# -> return a string created by converting each element of the array to +# a string, separated by $sep + +module Puppet::Parser::Functions + newfunction(:str_join, :type => :rvalue) do |args| + array = args[0] + sep = args[1] + return array.join(sep) + end +end diff --git a/modules/mga_common/manifests/local_script.pp b/modules/mga_common/manifests/local_script.pp new file mode 100644 index 00000000..3272786b --- /dev/null +++ b/modules/mga_common/manifests/local_script.pp @@ -0,0 +1,22 @@ +define mga_common::local_script( + $content = undef, + $source = undef, + $owner = 'root', + $group = 'root', + $mode = '0755') { + $filename = "/usr/local/bin/${name}" + file { $filename: + owner => $owner, + group => $group, + mode => $mode, + } + if ($source == undef) { + File[$filename] { + content => $content, + } + } else { + File[$filename] { + source => $source, + } + } +} diff --git a/modules/mga_common/manifests/var/perl.pp b/modules/mga_common/manifests/var/perl.pp new file mode 100644 index 00000000..47ff54be --- /dev/null +++ b/modules/mga_common/manifests/var/perl.pp @@ -0,0 +1,3 @@ +class mga_common::var::perl( + $site_perl_dir = '/usr/lib/perl5/site_perl' +) {} diff --git a/modules/mgapeople/manifests/init.pp b/modules/mgapeople/manifests/init.pp new file mode 100644 index 00000000..7c40ab9c --- /dev/null +++ b/modules/mgapeople/manifests/init.pp @@ -0,0 +1,77 @@ +class mgapeople( + $site_name = "people.${::domain}", + $groupbase = 'ou=Group,dc=mageia,dc=org', + $maintdburl = undef, + $ldap_server, + $binddn, + $bindpw, + $vhost, + $vhostdir +){ + $mgapeople_login = 'mgapeople' + $bindpw_file = '/etc/mgapeople.ldapsecret' + + group { $mgapeople_login: + ensure => present, + } + + user { $mgapeople_login: + ensure => present, + comment => 'mgapeople user', + home => "/var/lib/${mgapeople_login}", + managehome => 
true, + gid => $mgapeople_login, + } + + file { $bindpw_file: + ensure => present, + owner => $mgapeople_login, + group => $mgapeople_login, + mode => '0600', + content => $bindpw, + } + + package { 'mgapeople': + ensure => installed, + } + + file {'/etc/mgapeople.conf': + ensure => present, + owner => root, + group => root, + mode => '0644', + content => template('mgapeople/mgapeople.conf'), + require => Package['mgapeople'], + } + + file { $vhostdir: + ensure => directory, + owner => $mgapeople_login, + group => $mgapeople_login, + mode => '0755', + } + + $vhost_aliases = { + '/static' => '/usr/share/mgapeople/static', + } + apache::vhost::base { $vhost: + location => $vhostdir, + require => File[$vhostdir], + aliases => $vhost_aliases, + } + apache::vhost::base { "ssl_${vhost}": + vhost => $vhost, + use_ssl => true, + location => $vhostdir, + require => File[$vhostdir], + aliases => $vhost_aliases, + } + + cron { '/usr/bin/mkpeople': + command => '/usr/bin/mkpeople', + user => $mgapeople_login, + hour => '*/2', + minute => '10', + } +} +# vim: sw=2 diff --git a/modules/mgapeople/templates/mgapeople.conf b/modules/mgapeople/templates/mgapeople.conf new file mode 100644 index 00000000..5bc7b21b --- /dev/null +++ b/modules/mgapeople/templates/mgapeople.conf @@ -0,0 +1,17 @@ +ldapserver: <%= @ldap_server %> +binddn: <%= @binddn %> +bindpwfile: <%= @bindpw_file %> +groupbase: <%= @groupbase %> +output_dir: <%= @vhostdir %> +output_format: + - html + - txt +tmpl_dir: /usr/share/mgapeople/tmpl +<%- if @maintdburl -%> +maintdburl: <%= @maintdburl %> +<%- end -%> +sitename: <%= @site_name %> +staticdir_url: //people.mageia.org/static +links_protocol: https:// +package_url: https://svnweb.mageia.org/packages/cauldron/ +package_url_suffix: /current/ diff --git a/modules/mgasoft/manifests/init.pp b/modules/mgasoft/manifests/init.pp new file mode 100644 index 00000000..70431701 --- /dev/null +++ b/modules/mgasoft/manifests/init.pp @@ -0,0 +1,36 @@ +class mgasoft( + $anonsvn_soft = "svn://svn.${::domain}/svn/soft", + $pubinfodir = '/var/lib/mgasoft/infos', + $pubmirrordir = '/distrib/mirror/software', + $svn_soft_publish = 'file:///svn/soft_publish', + $mgasoft_login = 'mgasoft' +) { + group { $mgasoft_login: } + + user { $mgasoft_login: + managehome => true, + home => "/var/lib/${mgasoft_login}", + gid => $mgasoft_login, + require => Group[$mgasoft_login], + } + + package { 'mgasoft-publish': } + + file { '/etc/mgasoft.conf': + content => template('mgasoft/mgasoft.conf'), + } + + subversion::snapshot { $pubinfodir: + source => $svn_soft_publish, + user => $mgasoft_login, + refresh => '0', + require => User[$mgasoft_login], + } + + cron { "mgasoft-publish": + command => '/usr/bin/mgasoft-publish', + user => $mgasoft_login, + minute => '*/5', + require => User[$mgasoft_login], + } +} diff --git a/modules/mgasoft/templates/mgasoft.conf b/modules/mgasoft/templates/mgasoft.conf new file mode 100644 index 00000000..81cce013 --- /dev/null +++ b/modules/mgasoft/templates/mgasoft.conf @@ -0,0 +1,5 @@ +svn_soft=svn+ssh://svn.mageia.org/svn/soft +anonsvn_soft=<%= @anonsvn_soft %> +svn_soft_publish=<%= @svn_soft_publish %> +pubinfodir=<%= @pubinfodir %> +pubmirrordir=<%= @pubmirrordir %> diff --git a/modules/mirror/manifests/base.pp b/modules/mirror/manifests/base.pp new file mode 100644 index 00000000..db48f808 --- /dev/null +++ b/modules/mirror/manifests/base.pp @@ -0,0 +1,15 @@ +class mirror::base { + $locksdir = '/home/mirror/locks' + + file { $locksdir: + ensure => directory, + owner => 'mirror', + group 
=> 'mirror', + } + + group { 'mirror': } + + user { 'mirror': + gid => 'mirror', + } +} diff --git a/modules/mirror/manifests/init.pp b/modules/mirror/manifests/init.pp index 512b0463..bb89e1d0 100644 --- a/modules/mirror/manifests/init.pp +++ b/modules/mirror/manifests/init.pp @@ -1,40 +1 @@ -class mirror { - - file { "update_timestamp": - path => "/home/mirror/bin/update_timestamp", - ensure => present, - owner => mirror, - group => mirror, - mode => 755, - content => template("mirror/update_timestamp") - } - - file { "/home/mirror/bin/": - ensure => directory, - owner => mirror, - group => mirror, - mode => 755 - } - - group {"mirror": - ensure => present, - } - - user {"mirror": - ensure => present, - comment => "System user use to run mirror scripts", - managehome => true, - gid => mirror, - shell => "/bin/bash", - } - - - cron { mirror: - user => mirror, - hour => 10, - minute => 14, - command => "~mirror/bin/update_timestamp", - require => File["update_timestamp"], - } - -} +class mirror { } diff --git a/modules/mirror/manifests/mageia.pp b/modules/mirror/manifests/mageia.pp new file mode 100644 index 00000000..c14a09bb --- /dev/null +++ b/modules/mirror/manifests/mageia.pp @@ -0,0 +1,7 @@ +class mirror::mageia { + include mirror::base + mirrordir { 'mageia': + remoteurl => "rsync://rsync.${::domain}/mageia", + localdir => '/distrib/mageia', + } +} diff --git a/modules/mirror/manifests/main.pp b/modules/mirror/manifests/main.pp new file mode 100644 index 00000000..f368038d --- /dev/null +++ b/modules/mirror/manifests/main.pp @@ -0,0 +1,14 @@ +# For main Mageia mirror +class mirror::main { + include mirror::base + mga_common::local_script { 'update_timestamp': + content => template('mirror/update_timestamp') + } + + cron { 'mirror': + user => 'mirror', + minute => '*/10', + command => '/usr/local/bin/update_timestamp', + require => [Mga_common::Local_script['update_timestamp'], User['mirror']], + } +} diff --git a/modules/mirror/manifests/mdv2010spring.pp b/modules/mirror/manifests/mdv2010spring.pp new file mode 100644 index 00000000..51a67284 --- /dev/null +++ b/modules/mirror/manifests/mdv2010spring.pp @@ -0,0 +1,7 @@ +class mirror::mdv2010spring { + include mirror::base + mirrordir { 'mdv2010.1': + remoteurl => 'rsync://distrib-coffee.ipsl.jussieu.fr/pub/linux/MandrivaLinux/official/2010.1', + localdir => '/distrib/mandriva/', + } +} diff --git a/modules/mirror/manifests/mirrordir.pp b/modules/mirror/manifests/mirrordir.pp new file mode 100644 index 00000000..2100bc6c --- /dev/null +++ b/modules/mirror/manifests/mirrordir.pp @@ -0,0 +1,23 @@ +define mirror::mirrordir ($remoteurl, + $localdir, + $rsync_options='-avH --delete') { + include mirror::base + $lockfile = "${mirror::base::locksdir}/${name}" + + file { $localdir: + ensure => directory, + owner => 'mirror', + group => 'mirror', + } + + mga_common::local_script { "mirror_${name}": + content => template('mirror/mirrordir'), + } + + cron { "mirror_${name}": + user => mirror, + minute => '*/10', + command => "/usr/local/bin/mirror_${name}", + require => Mga_common::Local_script["mirror_${name}"], + } +} diff --git a/modules/mirror/templates/mirrordir b/modules/mirror/templates/mirrordir new file mode 100644 index 00000000..9cf09650 --- /dev/null +++ b/modules/mirror/templates/mirrordir @@ -0,0 +1,15 @@ +#!/bin/sh + +remoteurl="<%= @remoteurl%>" +localdir="<%= @localdir %>" +rsync_options="<%= @rsync_options %>" +lockfile="<%= @lockfile %>" + +if [ -f "$lockfile" ]; then + # show error message when run from command line + [ -t 1 
] && cat $lockfile + exit +fi +echo "sync in progress since $(date)" > "$lockfile" +/usr/bin/rsync $rsync_options "$remoteurl" "$localdir" +rm -f "$lockfile" diff --git a/modules/mirror/templates/update_timestamp b/modules/mirror/templates/update_timestamp index a037d10d..1f7711c6 100644 --- a/modules/mirror/templates/update_timestamp +++ b/modules/mirror/templates/update_timestamp @@ -2,4 +2,4 @@ # $id$ -date +%s%n%c > /distrib/mirror/mageia_timestamp +LC_ALL=C.UTF-8 date -u '+%s%n%c %Z' > /distrib/mirror/mageia_timestamp diff --git a/modules/mirror_cleaner/files/orphans_cleaner.pl b/modules/mirror_cleaner/files/orphans_cleaner.pl new file mode 100755 index 00000000..73e08912 --- /dev/null +++ b/modules/mirror_cleaner/files/orphans_cleaner.pl @@ -0,0 +1,76 @@ +#!/usr/bin/perl + +# this script will look at the list of rpm, and move orphan to a directory, if they are too old +# another script should take care of cleaning this directory ( or puppet ) + +use strict; +use RPM4; +use File::stat; +use File::Basename; +use File::Copy; +use File::Path qw(make_path); + +my @arches = ('i586','x86_64', 'aarch64'); +my @sections = ('core','nonfree','tainted'); +my @medias = ('backports', 'backports_testing', 'release', 'updates', 'updates_testing'); +my $move_delay = 60*60*24*14; + +my ($path, $dest_path) = @ARGV; + +my $qf = "%{NAME}-%{VERSION}-%{RELEASE}.%{ARCH}.rpm %{SOURCERPM}"; + +my %hash ; +my ($filename, $srpm, $dest_rpm); + + +my ($source_hdlist, $binary_hdlist, $rpm_path, $srpm_path); + +foreach my $a ( @arches ) { + foreach my $s ( @sections ) { + foreach my $m ( @medias ) { + + $rpm_path = "$path/$a/media/$s/$m"; + $srpm_path = "$path/SRPMS/$s/$m"; + $binary_hdlist = "$rpm_path/media_info/hdlist.cz"; + $source_hdlist = "$srpm_path/media_info/hdlist.cz"; + + next if not -f $source_hdlist; + next if not -f $binary_hdlist; + + next if stat($source_hdlist)->size() <= 64; + next if stat($binary_hdlist)->size() <= 64; + + open(my $hdfh, "zcat '$binary_hdlist' 2>/dev/null |") or die "Can't open $_"; + while (my $hdr = stream2header($hdfh)) { + ($filename, $srpm) = split(/ /,$hdr->queryformat($qf)); + push(@{$hash{$srpm}}, $filename); + } + close($hdfh); + + + open($hdfh, "zcat '$source_hdlist' 2>/dev/null |") or die "Can't open $_"; + while (my $hdr = stream2header($hdfh)) { + $srpm = $hdr->queryformat("%{NAME}-%{VERSION}-%{RELEASE}.src.rpm"); + delete $hash{$srpm}; + } + close($hdfh); + + foreach my $s ( keys %hash ) + { + # Be safe, maybe hdlists were not in sync + next if -f "$srpm_path/$s"; + foreach my $rpm ( @{$hash{$s}} ) { + $rpm = "$rpm_path/$rpm"; + # sometimes, packages are removed without hdlist to be updated + next if not -f "$rpm"; + if (time() > $move_delay + stat("$rpm")->ctime()) { + ( $dest_rpm = $rpm ) =~ s/$path/$dest_path/; + my $dir = dirname $dest_rpm; + make_path $dir if not -d $dir; + move($rpm, $dest_rpm) + } + } + } + } + } +} diff --git a/modules/mirror_cleaner/manifests/base.pp b/modules/mirror_cleaner/manifests/base.pp new file mode 100644 index 00000000..8ef82856 --- /dev/null +++ b/modules/mirror_cleaner/manifests/base.pp @@ -0,0 +1,6 @@ +class mirror_cleaner::base { + file { '/usr/local/bin/orphans_cleaner.pl': + mode => '0755', + source => 'puppet:///modules/mirror_cleaner/orphans_cleaner.pl', + } +} diff --git a/modules/mirror_cleaner/manifests/init.pp b/modules/mirror_cleaner/manifests/init.pp new file mode 100644 index 00000000..615b4ffe --- /dev/null +++ b/modules/mirror_cleaner/manifests/init.pp @@ -0,0 +1 @@ +class mirror_cleaner { } diff --git 
a/modules/mirror_cleaner/manifests/orphans.pp b/modules/mirror_cleaner/manifests/orphans.pp new file mode 100644 index 00000000..90be9a8c --- /dev/null +++ b/modules/mirror_cleaner/manifests/orphans.pp @@ -0,0 +1,27 @@ +define mirror_cleaner::orphans($base) { + include mirror_cleaner::base + + $orphan_dir = '/distrib/archive/orphans' + + file { $orphan_dir: + ensure => directory + } + +# Disable cleaning as the ruby version is smarter and this one tends to break things +# It should probably be deleted +# +# cron { "clean orphans ${name}": +# command => "/usr/local/bin/orphans_cleaner.pl ${base}/${name} ${orphan_dir}", +# hour => 5, +# minute => 30, +# weekday => 1, +# user => root, +# } + + tidy { $orphan_dir: + type => 'ctime', + age => '4w', + recurse => true, + matches => ['*.rpm'], + } +} diff --git a/modules/mirrorbrain/manifests/init.pp b/modules/mirrorbrain/manifests/init.pp new file mode 100644 index 00000000..f7f74ead --- /dev/null +++ b/modules/mirrorbrain/manifests/init.pp @@ -0,0 +1,154 @@ +class mirrorbrain { + + $mb_user = 'mirrorbrain' + $mb_home = "/var/lib/${mb_user}" + $mb_repo = "${mb_home}/mirror" + $mb_vhost = "dl.${::domain}" + + $mb_pgsql_pw = extlookup('mirrorbrain_pgsql','x') + + group { $mb_user: + ensure => present + } + + user { $mb_user: + ensure => present, + home => $mb_home + } + + file { $mb_home: + ensure => directory, + owner => $mb_user, + group => $mb_user, + mode => '0751' + } + + file { $mb_repo: + ensure => directory, + owner => $mb_user, + group => $mb_user, + mode => '0755' + } + + package {['mirrorbrain', + 'mirrorbrain-scanner', + 'mirrorbrain-tools', + 'apache-mod_mirrorbrain', + 'apache-mod_dbd']: } + + + postgresql::remote_db_and_user { 'mirrorbrain': + description => 'Mirrorbrain database', + password => $mb_pgsql_pw, + } + + file { '/etc/httpd/conf/geoip.conf': + owner => 'root', + group => 'root', + mode => '0644', + content => template('mirrorbrain/geoip.conf') + } + + file { '/etc/httpd/conf/modules.d/11-mirrorbrain.conf': + owner => 'root', + group => 'root', + mode => '0644', + content => template('mirrorbrain/mod_mirrorbrain.conf') + } + + file { '/etc/mirrorbrain.conf': + owner => 'root', + group => "$mb_user", + mode => '0640', + content => template('mirrorbrain/mirrorbrain.conf') + } + + apache::vhost::base { "${mb_vhost}": + vhost => "${mb_vhost}", + location => "${mb_repo}" + } + + apache::vhost::base { "ssl_${mb_vhost}": + vhost => "${mb_vhost}", + use_ssl => true, + location => "${mb_repo}" + } + + apache::webapp_other { 'mirrorbrain': + webapp_file => 'mirrorbrain/webapp.conf', + } + + # Update GeoIP db + cron { 'MirrorBrain: weekly GeoIP update': + command => 'sleep $(($RANDOM/1024)); /usr/bin/geoip-lite-update', + user => 'root', + minute => 30, + hour => 3, + weekday => 0 + } + + # distrib tree + # mga 1-4 are frozen, so only one manual run has been done + # distrib/5 still active + cron { 'MirrorBrain: Sync Mga 5 every 4 hours ': + command => "/usr/bin/null-rsync rsync.mageia.org::mageia/distrib/5 ${mb_repo}/distrib/", + user => "$mb_user", + minute => '15', + hour => '*/4', + } + + # distrib/cauldron + cron { 'MirrorBrain: Sync Cauldron every 1 hours ': + command => "/usr/bin/null-rsync rsync.mageia.org::mageia/distrib/cauldron ${mb_repo}/distrib/", + user => "$mb_user", + minute => '0', + hour => '*/1', + } + + # iso tree + cron { 'MirrorBrain: Sync iso tree every 1 day ': + command => "/usr/bin/null-rsync rsync.mageia.org::mageia/iso ${mb_repo}/", + user => "$mb_user", + hour => '2', + minute => '30', + } + + # people 
tree + cron { 'MirrorBrain: Sync people tree every 1 day ': + command => "/usr/bin/null-rsync rsync.mageia.org::mageia/people ${mb_repo}/", + user => "$mb_user", + hour => '3', + minute => '45', + } + + # software tree + cron { 'MirrorBrain: Sync software tree every 1 day ': + command => "/usr/bin/null-rsync rsync.mageia.org::mageia/software ${mb_repo}/", + user => "$mb_user", + hour => '4', + minute => '45', + } + + # Mirror online check + cron { 'MirrorBrain: mirror online status check every 5 minute': + command => '/usr/bin/mirrorprobe', + user => "$mb_user", + minute => 5 + } + + # Mirror scanning + cron { 'MirrorBrain: mirror scanning every 30 minute': + command => '/usr/bin/mb scan --quiet --jobs 4 --all', + user => "$mb_user", + minute => 30 + } + + # Mirror database cleanup + cron { 'MirrorBrain: mirror database cleanup every 1 week': + command => '/usr/bin/mb db vacuum', + user => "$mb_user", + minute => 45, + hour => 5, + weekday => 1 + } +} diff --git a/modules/mirrorbrain/templates/geoip.conf b/modules/mirrorbrain/templates/geoip.conf new file mode 100644 index 00000000..1f71a67d --- /dev/null +++ b/modules/mirrorbrain/templates/geoip.conf @@ -0,0 +1,5 @@ +<IfModule mod_geoip.c> + GeoIPEnable On + GeoIPDBFile /var/lib/GeoIP/GeoLiteCity.dat.updated + GeoIPOutput Env +</IfModule> diff --git a/modules/mirrorbrain/templates/mirrorbrain.conf b/modules/mirrorbrain/templates/mirrorbrain.conf new file mode 100644 index 00000000..94bef340 --- /dev/null +++ b/modules/mirrorbrain/templates/mirrorbrain.conf @@ -0,0 +1,14 @@ +[general] +instances = main + +[main] +dbuser = mirrorbrain +dbpass = <%= @mb_pgsql_pw %> +dbdriver = postgresql +dbhost = pgsql.<%= @domain %> +# optional: dbport = ... +dbname = mirrorbrain + +[mirrorprobe] +# logfile = /var/log/mirrorbrain/mirrorprobe.log +# loglevel = INFO diff --git a/modules/mirrorbrain/templates/mod_mirrorbrain.conf b/modules/mirrorbrain/templates/mod_mirrorbrain.conf new file mode 100644 index 00000000..9b67d7fe --- /dev/null +++ b/modules/mirrorbrain/templates/mod_mirrorbrain.conf @@ -0,0 +1,3 @@ +LoadModule form_module modules/mod_form.so +LoadModule mirrorbrain_module modules/mod_mirrorbrain.so + diff --git a/modules/mirrorbrain/templates/webapp.conf b/modules/mirrorbrain/templates/webapp.conf new file mode 100644 index 00000000..9606be64 --- /dev/null +++ b/modules/mirrorbrain/templates/webapp.conf @@ -0,0 +1,16 @@ +<Directory /var/lib/mirrorbrain/mirror> + MirrorBrainEngine On + MirrorBrainDebug Off + FormGET On + MirrorBrainHandleHEADRequestLocally Off + MirrorBrainFallback na us https://mirrors.kernel.org/mageia/ + MirrorBrainFallback eu fr http://ftp.free.fr/mirrors/mageia.org/ + MirrorBrainFallback eu se https://ftp.acc.umu.se/mirror/mageia/ + MirrorBrainMinSize 0 + #MirrorBrainExcludeUserAgent rpm/4.4.2* + #MirrorBrainExcludeUserAgent *APT-HTTP* + #MirrorBrainExcludeMimeType application/pgp-keys + DirectoryIndex disable + Options +FollowSymLinks +Indexes + Require all granted +</Directory> diff --git a/modules/mysql/manifests/init.pp b/modules/mysql/manifests/init.pp new file mode 100644 index 00000000..1d180778 --- /dev/null +++ b/modules/mysql/manifests/init.pp @@ -0,0 +1,26 @@ +class mysql { + class server { + package {['mariadb', + 'mariadb-obsolete']: } + + service { 'mysqld': + alias => mysql, + subscribe => Package['mariadb'], + } + +# file { "/etc/my.cnf": +# +# } + } + + define database() { + exec { "mysqladmin create ${name}": + user => root, + # not sure if /dev/null is needed + unless => "mysqlshow ${name}" + } + } +# 
define user($password) { +# +# } +} diff --git a/modules/ntp/manifests/init.pp b/modules/ntp/manifests/init.pp index 3f9ecc14..f75310e7 100644 --- a/modules/ntp/manifests/init.pp +++ b/modules/ntp/manifests/init.pp @@ -1,22 +1,17 @@ class ntp { +if versioncmp($::lsbdistrelease, '9') < 0 { + $ntppkg = 'ntp' +} else { + $ntppkg = 'ntpsec' +} + package { $ntppkg: } - package { ntp: - ensure => installed + service { 'ntpd': + subscribe => [Package[$ntppkg], File['/etc/ntp.conf']], } - service { ntpd: - ensure => running, - path => "/etc/init.d/ntpd", - subscribe => [ Package["ntp"], File["ntp.conf"] ] - } - - file { "ntp.conf": - path => "/etc/ntp.conf", - ensure => present, - owner => root, - group => root, - mode => 644, - require => Package["ntp"], - content => template("ntp/ntp.conf") + file { '/etc/ntp.conf': + require => Package[$ntppkg], + content => template('ntp/ntp.conf'), } } diff --git a/modules/ntp/templates/ntp.conf b/modules/ntp/templates/ntp.conf index 3f9582d7..72f233c0 100644 --- a/modules/ntp/templates/ntp.conf +++ b/modules/ntp/templates/ntp.conf @@ -25,6 +25,12 @@ driftfile /var/lib/ntp/drift multicastclient # listen on default 224.0.1.1 broadcastdelay 0.008 +# https://www.kb.cert.org/vuls/id/348126 +restrict default nomodify notrap nopeer noquery +restrict -6 default nomodify notrap nopeer noquery +# https://isc.sans.edu/forums/diary/NTP+reflection+attack/17300 +disable monitor + # # Keys file. If you want to diddle your server at run time, make a # keys file (mode 600 for sure) and define the key number to be diff --git a/modules/opendkim/Gemfile b/modules/opendkim/Gemfile new file mode 100644 index 00000000..68ba397d --- /dev/null +++ b/modules/opendkim/Gemfile @@ -0,0 +1,19 @@ +source 'https://rubygems.org' + +puppetversion = ENV.key?('PUPPET_VERSION') ? "= #{ENV['PUPPET_VERSION']}" : ['>= 3.3'] +gem 'puppet', puppetversion +gem 'puppetlabs_spec_helper', '>= 0.1.0' +gem 'facter', '>= 1.7.0' + +gem 'puppet-lint', '>= 0.3.2' +gem 'rspec-puppet' +gem "metadata-json-lint" +gem 'beaker-rspec' +gem "travis" +gem "travis-lint" +gem "puppet-blacksmith" +gem "guard-rake" + +gem 'test-kitchen', '>= 1.4.0' +gem 'kitchen-docker', '>= 2.1.0' +gem 'kitchen-puppet', '>= 0.0.27' diff --git a/modules/opendkim/LICENSE b/modules/opendkim/LICENSE new file mode 100644 index 00000000..8f71f43f --- /dev/null +++ b/modules/opendkim/LICENSE @@ -0,0 +1,202 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+
diff --git a/modules/opendkim/Modulefile b/modules/opendkim/Modulefile
new file mode 100644
index 00000000..7790c510
--- /dev/null
+++ b/modules/opendkim/Modulefile
@@ -0,0 +1,8 @@
+ name "bi4o4ek-opendkim"
+ version "0.0.7"
+ author "Vladimir Bykanov"
+ summary "Configures OpenDKIM"
+ license "Apache-2.0"
+ source "https://github.com/bi4o4ek/puppet-opendkim"
+ project_page "https://github.com/bi4o4ek/puppet-opendkim"
+
diff --git a/modules/opendkim/Puppetfile b/modules/opendkim/Puppetfile
new file mode 100644
index 00000000..177adf16
--- /dev/null
+++ b/modules/opendkim/Puppetfile
@@ -0,0 +1,7 @@
+#!/usr/bin/env ruby
+#^syntax detection
+
+forge "https://forgeapi.puppetlabs.com"
+
+# use dependencies defined in metadata.json
+metadata
diff --git a/modules/opendkim/README.md b/modules/opendkim/README.md
new file mode 100644
index 00000000..13c40bde
--- /dev/null
+++ b/modules/opendkim/README.md
@@ -0,0 +1,98 @@
+[](https://travis-ci.org/bi4o4ek/puppet-opendkim)
+
+# opendkim
+
+#### Table of Contents
+
+1. [Overview](#overview)
+2. [Module Description](#module-description)
+3. [Setup - The basics of getting started with opendkim](#setup)
+    * [Beginning with opendkim](#beginning-with-opendkim)
+    * [Add domains for signing](#add-domains-for-signing)
+    * [Add allowed hosts](#add-allowed-hosts)
+4. [Usage - Configuration options and additional functionality](#usage)
+5. [Reference - An under-the-hood peek at what the module is doing and how](#reference)
+6. [Limitations - OS compatibility, etc.](#limitations)
+7. [Development - Guide for contributing to the module](#development)
+
+## Overview
+
+The opendkim module allows you to set up mail signing and manage DKIM services with minimal effort.
+
+## Module Description
+
+OpenDKIM is a widely-used DKIM service, and this module provides a simplified way of creating configurations to manage your infrastructure.
+This includes the ability to configure and manage a range of different domains, as well as a streamlined way to install and configure the OpenDKIM service.
+
+## Setup
+
+### What opendkim affects
+
+* configuration files and directories (created and written to)
+* package/service/configuration files for OpenDKIM
+* signing domains list
+* trusted hosts list
+
+### Beginning with opendkim
+
+To install OpenDKIM with the default parameters:
+
+    include opendkim
+
+### Add domains for signing
+
+    opendkim::domain{['example.com', 'example.org']:}
+
+
+### Add allowed hosts
+
+    opendkim::trusted{['10.0.0.0/8', '203.0.113.0/24']:}
+
+## Usage
+
+For example:
+Our mail relay host runs OpenDKIM and has the internal IP 10.3.3.80 and the external IP 203.0.113.100.
+This host signs all mail for the domains example.com and example.org.
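+Note that `opendkim::domain` also accepts an explicit `selector` parameter; it defaults to the host's
+`$hostname` fact, which is why the key file below ends up being named after the relay host. A minimal
+sketch, assuming only the parameter names from `manifests/domain.pp` (the domain and selector shown
+here are hypothetical):
+
+    # hypothetical: pin a custom selector for one extra domain
+    opendkim::domain { 'example.net':
+      selector => 'mail',   # key would be published at mail._domainkey.example.net
+    }
+
+The full manifest for the relay host described above: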
+
+    # Postfix-relay
+    class{ 'postfix::server':
+      inet_interfaces => '10.3.3.80, localhost',
+      mynetworks => '10.0.0.0/8, 203.0.113.0/24',
+      smtpd_recipient_restrictions => 'permit_mynetworks, reject_unauth_destination',
+      smtpd_client_restrictions => 'permit_mynetworks, reject',
+      mydestination => '$myhostname',
+      myhostname => 'relay-site.example.com',
+      smtpd_banner => 'Hello',
+      extra_main_parameters => {
+        smtp_bind_address => '203.0.113.100',
+        smtpd_milters => 'inet:127.0.0.1:8891',
+        non_smtpd_milters => '$smtpd_milters',
+        milter_default_action => 'accept',
+        milter_protocol => '2',
+      },
+    }
+
+    # OpenDKIM
+    include opendkim
+    opendkim::domain{['example.com', 'example.org']:}
+    opendkim::trusted{['10.0.0.0/8', '203.0.113.0/24']:}
+
+After the Puppet run you need to copy the contents of /etc/opendkim/keys/example.com/relay-site.txt and paste them into the corresponding DNS zone as a TXT record.
+Then repeat this step for example.org.
+
+The Puppet module used for Postfix in this example is [thias/postfix](https://forge.puppetlabs.com/thias/postfix) v0.3.3.
+## Reference
+
+Puppetlabs are working on automating this section.
+
+## Limitations
+
+This module is tested on:
+* CentOS 6
+* Ubuntu 12.04
+* Ubuntu 14.04
+
+## Development
+
+Fork me on GitHub and make a pull request.
+
diff --git a/modules/opendkim/Rakefile b/modules/opendkim/Rakefile
new file mode 100644
index 00000000..312b2952
--- /dev/null
+++ b/modules/opendkim/Rakefile
@@ -0,0 +1,12 @@
+require 'rubygems'
+require 'puppetlabs_spec_helper/rake_tasks'
+require 'puppet-lint/tasks/puppet-lint'
+
+PuppetLint.configuration.fail_on_warnings = true
+PuppetLint.configuration.send('relative')
+PuppetLint.configuration.send('disable_80chars')
+PuppetLint.configuration.send('disable_class_inherits_from_params_class')
+PuppetLint.configuration.send('disable_documentation')
+PuppetLint.configuration.send('disable_single_quote_string_with_variables')
+PuppetLint.configuration.send('disable_only_variable_string')
+PuppetLint.configuration.ignore_paths = ["spec/**/*.pp", "pkg/**/*.pp"]
diff --git a/modules/opendkim/manifests/domain.pp b/modules/opendkim/manifests/domain.pp
new file mode 100644
index 00000000..c708ad08
--- /dev/null
+++ b/modules/opendkim/manifests/domain.pp
@@ -0,0 +1,46 @@
+define opendkim::domain (
+  $domain = $name,
+  $selector = $hostname,
+  $pathkeys = '/etc/opendkim/keys',
+  $keytable = 'KeyTable',
+  $signing_table = 'SigningTable',
+) {
+  # $pathconf and $pathkeys must be without a trailing '/'.
+ # For example, '/etc/opendkim/keys' + + Exec { path => [ '/bin/', '/sbin/' , '/usr/bin/', '/usr/sbin/' ] } + + # Create directory for domain + file { "${pathkeys}/${domain}": + ensure => directory, + owner => $opendkim::owner, + group => $opendkim::group, + mode => '0755', + notify => Service[$opendkim::service_name], + require => Package[$opendkim::package_name], + } + + # Generate dkim-keys + exec { "opendkim-genkey -D ${pathkeys}/${domain}/ -d ${domain} -s ${selector}": + unless => "/usr/bin/test -f ${pathkeys}/${domain}/${selector}.private && /usr/bin/test -f ${pathkeys}/${domain}/${selector}.txt", + user => $opendkim::owner, + notify => Service[$opendkim::service_name], + require => [ Package[$opendkim::package_name], File["${pathkeys}/${domain}"], ], + } + + # Add line into KeyTable + file_line { "${opendkim::pathconf}/${keytable}_${domain}": + path => "${opendkim::pathconf}/${keytable}", + line => "${selector}._domainkey.${domain} ${domain}:${selector}:${pathkeys}/${domain}/${selector}.private", + notify => Service[$opendkim::service_name], + require => Package[$opendkim::package_name], + } + + # Add line into SigningTable + file_line { "${opendkim::pathconf}/${signing_table}_${domain}": + path => "${opendkim::pathconf}/${signing_table}", + line => "*@${domain} ${selector}._domainkey.${domain}", + notify => Service[$opendkim::service_name], + require => Package[$opendkim::package_name], + } +} diff --git a/modules/opendkim/manifests/init.pp b/modules/opendkim/manifests/init.pp new file mode 100644 index 00000000..6e45345a --- /dev/null +++ b/modules/opendkim/manifests/init.pp @@ -0,0 +1,105 @@ +# == Class: opendkim +# +# === Examples +# +# class { 'opendkim':} +# +# === Authors +# +# Vladimir Bykanov <vladimir@bykanov.ru> +# +# === Copyright +# +# Copyright 2015 Vladimir Bykanov +# +class opendkim ( + $autorestart = 'Yes', + $autorestart_rate = '10/1h', + $log_why = 'Yes', + $syslog = 'Yes', + $syslog_success = 'Yes', + $mode = 's', + $canonicalization = 'relaxed/simple', + $external_ignore_list = 'refile:/etc/opendkim/TrustedHosts', + $internal_hosts = 'refile:/etc/opendkim/TrustedHosts', + $keytable = 'refile:/etc/opendkim/KeyTable', + $signing_table = 'refile:/etc/opendkim/SigningTable', + $signature_algorithm = 'rsa-sha256', + $socket = 'inet:8891@localhost', + $pidfile = '/var/run/opendkim/opendkim.pid', + $umask = '022', + $userid = 'opendkim:opendkim', + $temporary_directory = '/var/tmp', + $package_name = 'opendkim', + $service_name = 'opendkim', + $pathconf = '/etc/opendkim', + $owner = 'opendkim', + $group = 'opendkim', +) { + + package { $package_name: + ensure => present, + } + + case $::operatingsystem { + /^(Debian|Ubuntu)$/: { + package { 'opendkim-tools': + ensure => present, + } + # Debian/Ubuntu doesn't ship this directory in its package + file { $pathconf: + ensure => directory, + owner => 'root', + group => 'opendkim', + mode => '0755', + require => Package[$package_name], + } + file { "${pathconf}/keys": + ensure => directory, + owner => 'opendkim', + group => 'opendkim', + mode => '0750', + require => Package[$package_name], + } + file { "${pathconf}/KeyTable": + ensure => present, + owner => 'opendkim', + group => 'opendkim', + mode => '0640', + require => Package[$package_name], + } + file { "${pathconf}/SigningTable": + ensure => present, + owner => 'opendkim', + group => 'opendkim', + mode => '0640', + require => Package[$package_name], + } + file { "${pathconf}/TrustedHosts": + ensure => present, + owner => 'opendkim', + group => 'opendkim', + mode => 
'0644', + require => Package[$package_name], + } + } + default: {} + } + + file {'/etc/opendkim.conf': + ensure => file, + owner => 'root', + group => 'root', + mode => '0644', + content => template('opendkim/opendkim.conf'), + notify => Service[$service_name], + require => Package[$package_name], + } + + service { $service_name: + ensure => running, + enable => true, + require => Package[$package_name], + } +} + diff --git a/modules/opendkim/manifests/trusted.pp b/modules/opendkim/manifests/trusted.pp new file mode 100644 index 00000000..dcf0f8b8 --- /dev/null +++ b/modules/opendkim/manifests/trusted.pp @@ -0,0 +1,13 @@ +define opendkim::trusted ( + $host = $name, + $trusted_hosts = 'TrustedHosts', + +) { + # Add line into KeyTable + file_line { "${opendkim::pathconf}/${trusted_hosts}_${host}": + path => "${opendkim::pathconf}/${trusted_hosts}", + line => $host, + notify => Service[$opendkim::service_name], + require => Package[$opendkim::package_name], + } +} diff --git a/modules/opendkim/metadata.json b/modules/opendkim/metadata.json new file mode 100644 index 00000000..81b2f70d --- /dev/null +++ b/modules/opendkim/metadata.json @@ -0,0 +1,60 @@ +{ + "name": "bi4o4ek-opendkim", + "version": "0.0.7", + "author": "Vladimir Bykanov", + "summary": "Configures OpenDKIM", + "license": "Apache-2.0", + "source": "https://github.com/bi4o4ek/puppet-opendkim", + "project_page": "https://github.com/bi4o4ek/puppet-opendkim", + "issues_url": "https://github.com/bi4o4ek/puppet-opendkim/issues", + "operatingsystem_support": [ + { + "operatingsystem": "RedHat", + "operatingsystemrelease": [ + "5", + "6", + "7" + ] + }, + { + "operatingsystem": "CentOS", + "operatingsystemrelease": [ + "5", + "6", + "7" + ] + }, + { + "operatingsystem": "Mageia", + "operatingsystemrelease": [ + "7", + "8", + "9" + ] + } + ], + "dependencies": [ + { + } + ], + "description": "UNKNOWN", + "types": [ + + ], + "checksums": { + "Gemfile": "19456e851851a3bd7aa6729108429dde", + "LICENSE": "fa818a259cbed7ce8bc2a22d35a464fc", + "Modulefile": "9a3b46c73c1ae7309fe2d35c5e6fa549", + "Puppetfile": "607001b25e4f9d020b2ce4444174a654", + "README.md": "0764cc9bb9de221c97bce2664ba99657", + "Rakefile": "a162d9397ed53fa8fa49c57609feedcb", + "manifests/domain.pp": "61f78cbd4376e58a7b26f1298f38804b", + "manifests/init.pp": "4987dcd9ebc88e7ea0de3b74c9af6d9c", + "manifests/trusted.pp": "bcc132622e2c2e39bcbc3116c7788c8b", + "spec/classes/init_spec.rb": "0451831b29191c21b2cdc045c94a2243", + "spec/classes/opendkim_spec.rb": "9f06a3f005344875a0fb5753ab43cb34", + "spec/spec_helper.rb": "0db89c9a486df193c0e40095422e19dc", + "templates/opendkim.conf": "047e76e4c2a0a15754101f2da32ab2fe", + "tests/init.pp": "8c9ab8c85cd89dae1ad97cbe949a7e6e" + } +} diff --git a/modules/opendkim/spec/classes/init_spec.rb b/modules/opendkim/spec/classes/init_spec.rb new file mode 100644 index 00000000..5ce0a75d --- /dev/null +++ b/modules/opendkim/spec/classes/init_spec.rb @@ -0,0 +1,7 @@ +require 'spec_helper' +describe 'opendkim' do + + context 'with defaults for all parameters' do + it { should contain_class('opendkim') } + end +end diff --git a/modules/opendkim/spec/classes/opendkim_spec.rb b/modules/opendkim/spec/classes/opendkim_spec.rb new file mode 100644 index 00000000..1901c1c0 --- /dev/null +++ b/modules/opendkim/spec/classes/opendkim_spec.rb @@ -0,0 +1,13 @@ +require 'spec_helper' + +describe 'opendkim', :type => :class do + + describe "Opendkim class with no parameters, basic test" do + let(:params) { { } } + + it { + should contain_package('opendkim') + 
should contain_service('opendkim') + } + end +end diff --git a/modules/opendkim/spec/spec_helper.rb b/modules/opendkim/spec/spec_helper.rb new file mode 100644 index 00000000..2c6f5664 --- /dev/null +++ b/modules/opendkim/spec/spec_helper.rb @@ -0,0 +1 @@ +require 'puppetlabs_spec_helper/module_spec_helper' diff --git a/modules/opendkim/templates/opendkim.conf b/modules/opendkim/templates/opendkim.conf new file mode 100644 index 00000000..5dc61aa6 --- /dev/null +++ b/modules/opendkim/templates/opendkim.conf @@ -0,0 +1,52 @@ +<%- if @autorestart -%> +AutoRestart <%= @autorestart %> +<%- end -%> +<%- if @autorestart_rate -%> +AutoRestartRate <%= @autorestart_rate %> +<%- end -%> +<%- if @log_why -%> +LogWhy <%= @log_why %> +<%- end -%> +<%- if @syslog -%> +Syslog <%= @syslog %> +<%- end -%> +<%- if @syslog_success -%> +SyslogSuccess <%= @syslog_success %> +<%- end -%> +<%- if @mode -%> +Mode <%= @mode %> +<%- end -%> +<%- if @canonicalization -%> +Canonicalization <%= @canonicalization %> +<%- end -%> +<%- if @external_ignore_list -%> +ExternalIgnoreList <%= @external_ignore_list %> +<%- end -%> +<%- if @internal_hosts -%> +InternalHosts <%= @internal_hosts %> +<%- end -%> +<%- if @keytable -%> +KeyTable <%= @keytable %> +<%- end -%> +<%- if @signing_table -%> +SigningTable <%= @signing_table %> +<%- end -%> +<%- if @signature_algorithm -%> +SignatureAlgorithm <%= @signature_algorithm %> +<%- end -%> +<%- if @socket -%> +Socket <%= @socket %> +<%- end -%> +<%- if @pidfile -%> +PidFile <%= @pidfile %> +<%- end -%> +<%- if @umask -%> +UMask <%= @umask %> +<%- end -%> +<%- if @userid -%> +UserID <%= @userid %> +<%- end -%> +<%- if @temporary_directory -%> +TemporaryDirectory <%= @temporary_directory %> +<%- end -%> + diff --git a/modules/opendkim/tests/init.pp b/modules/opendkim/tests/init.pp new file mode 100644 index 00000000..ff3d3b06 --- /dev/null +++ b/modules/opendkim/tests/init.pp @@ -0,0 +1,15 @@ +# The baseline for module testing used by Puppet Labs is that each manifest +# should have a corresponding test manifest that declares that class or defined +# type. +# +# Tests are then run by using puppet apply --noop (to check for compilation +# errors and view a log of events) or by fully applying the test in a virtual +# environment (to compare the resulting system state to the desired state). +# +# Learn more about module testing here: +# http://docs.puppetlabs.com/guides/tests_smoke.html +# +Class['epel'] -> Class['opendkim'] + +include epel +include opendkim diff --git a/modules/openldap/lib/puppet/parser/functions/get_ldap_servers.rb b/modules/openldap/lib/puppet/parser/functions/get_ldap_servers.rb new file mode 100644 index 00000000..0d620926 --- /dev/null +++ b/modules/openldap/lib/puppet/parser/functions/get_ldap_servers.rb @@ -0,0 +1,13 @@ +# return a list of all ldap servers declared +module Puppet::Parser::Functions + newfunction(:get_ldap_servers, :type => :rvalue) do |args| + Puppet::Parser::Functions.autoloader.loadall + res = ["master"] + + function_list_exported_ressources(['Openldap::Exported_slave']).each { |i| + res << "slave-#{i}" + } + res.map! { |x| "ldap-#{x}." 
+ lookupvar("domain") } + return res + end +end diff --git a/modules/openldap/manifests/config.pp b/modules/openldap/manifests/config.pp new file mode 100644 index 00000000..336f8a23 --- /dev/null +++ b/modules/openldap/manifests/config.pp @@ -0,0 +1,7 @@ +define openldap::config($content) { + file { $name: + require => Package['openldap-servers'], + content => $content, + notify => Exec["slaptest"], + } +} diff --git a/modules/openldap/manifests/exported_slave.pp b/modules/openldap/manifests/exported_slave.pp new file mode 100644 index 00000000..5b9f6b87 --- /dev/null +++ b/modules/openldap/manifests/exported_slave.pp @@ -0,0 +1,3 @@ +# this define is here only to be exported by slave +# and later used by get_ldap_servers +define openldap::exported_slave { } diff --git a/modules/openldap/manifests/init.pp b/modules/openldap/manifests/init.pp index 991aee40..34a214a2 100644 --- a/modules/openldap/manifests/init.pp +++ b/modules/openldap/manifests/init.pp @@ -1,71 +1,34 @@ class openldap { - class base { - package { 'openldap-servers': - ensure => installed - } + include openldap::var - service { ldap: - ensure => running, - subscribe => [ Package['openldap-servers']], - path => "/etc/init.d/ldap" - } + package { 'openldap-servers': } - file {"/etc/ssl/openldap/": - ensure => directory, - owner => root, - group => root, - mode => 755, - } - - openssl::self_signed_cert{ 'ldap': - directory => "/etc/ssl/openldap/" - } + service { $openldap::var::service: + subscribe => Package['openldap-servers'], + require => Openssl::Self_signed_cert["ldap.${::domain}"], } - # /etc/ - # 11:57:48| blingme> misc: nothing special, just copy slapd.conf, mandriva-dit-access.conf across, slapcat one side, slapadd other side - - file { '/etc/openldap/slapd.conf': - ensure => present, - owner => root, - group => root, - mode => 644, - require => Package["openldap-servers"], - content => "", - notify => [Service['ldap']] + exec { "slaptest": + refreshonly => true, + notify => Service[$openldap::var::service], } - file { '/etc/openldap/mandriva-dit-access.conf': - ensure => present, - owner => root, - group => root, - mode => 644, - require => Package["openldap-servers"], - content => "", - notify => [Service['ldap']] + file { '/etc/ssl/openldap/': + ensure => directory, } - file { '/etc/sysconfig/ldap': - ensure => present, - owner => root, - group => root, - mode => 644, - require => Package["openldap-servers"], - content => "", - notify => [Service['ldap']] - } - - class master inherits base { - file { '/etc/openldap/mandriva-dit-access.conf': - content => template("openldap/mandriva-dit-access.conf"), - } - - file { '/etc/openldap/slapd.conf': - content => template("openldap/slapd.conf"), - } + openssl::self_signed_cert{ "ldap.${::domain}": + directory => '/etc/ssl/openldap/', + } - file { '/etc/sysconfig/ldap': - content => template("openldap/ldap.sysconfig"), - } + openldap::config { + '/etc/openldap/slapd.conf': + content => ''; + '/etc/openldap/mandriva-dit-access.conf': + content => ''; + '/etc/sysconfig/ldap': + content => ''; + '/etc/sysconfig/slapd': + content => ''; } } diff --git a/modules/openldap/manifests/master.pp b/modules/openldap/manifests/master.pp new file mode 100644 index 00000000..53122628 --- /dev/null +++ b/modules/openldap/manifests/master.pp @@ -0,0 +1,50 @@ +class openldap::master inherits openldap { + include openldap::var + + Openldap::Config['/etc/openldap/mandriva-dit-access.conf'] { + content => template('openldap/mandriva-dit-access.conf'), + } + + $ldap_test_password = 
extlookup('ldap_test_password','x') + $ldap_test_directory = '/var/lib/ldap/test' + file { $ldap_test_directory: + ensure => directory, + group => 'ldap', + owner => 'ldap', + require => Package['openldap-servers'], + before => Service[$openldap::var::service], + } + + Openldap::Config['/etc/openldap/slapd.conf'] { + content => template('openldap/slapd.conf', 'openldap/slapd.test.conf'), + } + + Openldap::Config['/etc/sysconfig/ldap'] { + content => template('openldap/ldap.sysconfig'), + } + + Openldap::Config['/etc/sysconfig/slapd'] { + content => template('openldap/slapd.sysconfig'), + } + + host { "ldap.${::domain}": + ip => '127.0.0.1', + } + + if $::environment == 'test' { + # if we are in a test vm, we need to fill the directory + # with data + package { 'openldap-clients': } + + mga_common::local_script { 'init_ldap.sh': + content => template('openldap/init_ldap.sh'), + require => Package['openldap-clients'], + } + + exec { 'init_ldap.sh': + # taken arbitrary among all possible files + creates => '/var/lib/ldap/objectClass.bdb', + require => Mga_common::Local_script['init_ldap.sh'], + } + } +} diff --git a/modules/openldap/manifests/slave.pp b/modules/openldap/manifests/slave.pp new file mode 100644 index 00000000..ba0cfb9d --- /dev/null +++ b/modules/openldap/manifests/slave.pp @@ -0,0 +1,23 @@ +class openldap::slave($rid) inherits openldap { + + @@openldap::exported_slave { $rid: } + + $sync_password = extlookup("ldap_syncuser-${::hostname}",'x') + + # same access rights as master + Openldap::Config['/etc/openldap/mandriva-dit-access.conf'] { + content => template('openldap/mandriva-dit-access.conf'), + } + + Openldap::Config['/etc/openldap/slapd.conf'] { + content => template('openldap/slapd.conf','openldap/slapd.syncrepl.conf'), + } + + Openldap::Config['/etc/sysconfig/ldap'] { + content => template('openldap/ldap.sysconfig'), + } + + Openldap::Config['/etc/sysconfig/slapd'] { + content => template('openldap/slapd-slave.sysconfig'), + } +} diff --git a/modules/openldap/manifests/slave_instance.pp b/modules/openldap/manifests/slave_instance.pp new file mode 100644 index 00000000..fbf998c6 --- /dev/null +++ b/modules/openldap/manifests/slave_instance.pp @@ -0,0 +1,8 @@ +# TODO create the user for sync in ldap +# this define is mainly syntactic sugar +define openldap::slave_instance($rid) { + include openldap + class { 'openldap::slave': + rid => $rid, + } +} diff --git a/modules/openldap/manifests/var.pp b/modules/openldap/manifests/var.pp new file mode 100644 index 00000000..d6947eb8 --- /dev/null +++ b/modules/openldap/manifests/var.pp @@ -0,0 +1,3 @@ +class openldap::var { + $service = 'slapd' +} diff --git a/modules/openldap/templates/init_ldap.sh b/modules/openldap/templates/init_ldap.sh new file mode 100644 index 00000000..dfcaf236 --- /dev/null +++ b/modules/openldap/templates/init_ldap.sh @@ -0,0 +1,40 @@ +#!/bin/bash + +ldapadd -Y EXTERNAL -H ldapi:/// <<EOF +dn: <%= dc_suffix %> +dc: <%= dc_suffix.split(',')[0].split('=')[1] %> +objectClass: domain +objectClass: domainRelatedObject +associatedDomain: <%= domain %> + +<% for g in ['People','Group','Hosts'] %> +dn: ou=<%= g%>,<%= dc_suffix %> +ou: <%= g %> +objectClass: organizationalUnit +<% end %> + +<% +gid = 5000 +for g in ['packagers','web','sysadmin','packagers-committers','forum-developers'] %> +dn: cn=mga-<%= g %>,ou=Group,<%= dc_suffix %> +objectClass: groupOfNames +objectClass: posixGroup +cn: mga-<%= g %> +gidNumber: <%= gid %> +member: cn=manager,<%= dc_suffix %> +<%- +gid+=1 +end -%> + + +<% # FIXME 
automatically get the list of servers +for g in ['duvel','alamut'] %> +dn: cn=<%= g%>.<%= domain %>,ou=Hosts,<%= dc_suffix %> +objectClass: device +objectClass: simpleSecurityObject +cn: <%= g%>.<%= domain %> +userPassword: x +<% end %> + + +EOF diff --git a/modules/openldap/templates/mandriva-dit-access.conf b/modules/openldap/templates/mandriva-dit-access.conf index a4d9661a..361d956b 100644 --- a/modules/openldap/templates/mandriva-dit-access.conf +++ b/modules/openldap/templates/mandriva-dit-access.conf @@ -1,184 +1,195 @@ # mandriva-dit-access.conf -limits group="cn=LDAP Replicators,ou=System Groups,dc=mageia,dc=org" +limits group="cn=LDAP Replicators,ou=System Groups,<%= dc_suffix %>" limit size=unlimited limit time=unlimited -limits group="cn=LDAP Admins,ou=System Groups,dc=mageia,dc=org" +limits group="cn=LDAP Admins,ou=System Groups,<%= dc_suffix %>" limit size=unlimited limit time=unlimited -limits group="cn=Account Admins,ou=System Groups,dc=mageia,dc=org" +limits group="cn=Account Admins,ou=System Groups,<%= dc_suffix %>" limit size=unlimited limit time=unlimited # so we don't have to add these to every other acl down there -access to dn.subtree="dc=mageia,dc=org" - by group.exact="cn=LDAP Admins,ou=System Groups,dc=mageia,dc=org" write - by group.exact="cn=LDAP Replicators,ou=System Groups,dc=mageia,dc=org" read +access to dn.subtree="<%= dc_suffix %>" + by group.exact="cn=LDAP Admins,ou=System Groups,<%= dc_suffix %>" write + by group.exact="cn=LDAP Replicators,ou=System Groups,<%= dc_suffix %>" read by * break # userPassword access # Allow account registration to write userPassword of unprivileged users accounts -access to dn.subtree="ou=People,dc=mageia,dc=org" +access to dn.subtree="ou=People,<%= dc_suffix %>" filter="(&(objectclass=inetOrgPerson)(!(objectclass=posixAccount)))" - attrs=userPassword,pwdReset - by group/groupOfNames/member.exact="cn=registrars,ou=system groups,dc=mageia,dc=org" +a + attrs=userPassword + by group/groupOfNames/member.exact="cn=registrars,ou=system groups,<%= dc_suffix %>" +w by * +0 break # shadowLastChange is here because it needs to be writable by the user because # of pam_ldap, which will update this attr whenever the password is changed. # And this is done with the user's credentials -access to dn.subtree="dc=mageia,dc=org" +access to dn.subtree="<%= dc_suffix %>" attrs=shadowLastChange by self write - by group.exact="cn=Account Admins,ou=System Groups,dc=mageia,dc=org" write + by group.exact="cn=Account Admins,ou=System Groups,<%= dc_suffix %>" write by users read -access to dn.subtree="dc=mageia,dc=org" +access to dn.subtree="<%= dc_suffix %>" attrs=userPassword - by group.exact="cn=Account Admins,ou=System Groups,dc=mageia,dc=org" write + by group.exact="cn=Account Admins,ou=System Groups,<%= dc_suffix %>" write by self write by anonymous auth by * none # kerberos key access # "by auth" just in case... 
-access to dn.subtree="dc=mageia,dc=org" +access to dn.subtree="<%= dc_suffix %>" attrs=krb5Key by self write - by group.exact="cn=Account Admins,ou=System Groups,dc=mageia,dc=org" write + by group.exact="cn=Account Admins,ou=System Groups,<%= dc_suffix %>" write by anonymous auth by * none # password policies -access to dn.subtree="ou=Password Policies,dc=mageia,dc=org" - by group.exact="cn=Account Admins,ou=System Groups,dc=mageia,dc=org" write +access to dn.subtree="ou=Password Policies,<%= dc_suffix %>" + by group.exact="cn=Account Admins,ou=System Groups,<%= dc_suffix %>" write by users read # samba password attributes # by self not strictly necessary, because samba uses its own admin user to # change the password on the user's behalf # openldap also doesn't auth on these attributes, but maybe some day it will -access to dn.subtree="dc=mageia,dc=org" +access to dn.subtree="<%= dc_suffix %>" attrs=sambaLMPassword,sambaNTPassword - by group.exact="cn=Account Admins,ou=System Groups,dc=mageia,dc=org" write + by group.exact="cn=Account Admins,ou=System Groups,<%= dc_suffix %>" write by anonymous auth by self write by * none # password history attribute -# pwdHistory is read-only, but ACL is simplier with it here -access to dn.subtree="dc=mageia,dc=org" +# pwdHistory is read-only, but ACL is simpler with it here +access to dn.subtree="<%= dc_suffix %>" attrs=sambaPasswordHistory,pwdHistory by self read - by group.exact="cn=Account Admins,ou=System Groups,dc=mageia,dc=org" write + by group.exact="cn=Account Admins,ou=System Groups,<%= dc_suffix %>" write by * none # pwdReset, so the admin can force an user to change a password -access to dn.subtree="dc=mageia,dc=org" +access to dn.subtree="<%= dc_suffix %>" attrs=pwdReset,pwdAccountLockedTime - by group.exact="cn=Account Admins,ou=System Groups,dc=mageia,dc=org" write + by group.exact="cn=Account Admins,ou=System Groups,<%= dc_suffix %>" write by self read # group owner can add/remove/edit members to groups -access to dn.regex="^cn=[^,]+,ou=(System Groups|Group),dc=mageia,dc=org$" - attrs=member +access to dn.regex="^cn=[^,]+,ou=(System Groups|Group),<%= dc_suffix %>$" + attrs=member,owner by dnattr=owner write - by group.exact="cn=Account Admins,ou=System Groups,dc=mageia,dc=org" write - by users +sx + by group.exact="cn=Account Admins,ou=System Groups,<%= dc_suffix %>" write + by users +scrx -access to dn.regex="^cn=[^,]+,ou=(System Groups|Group),dc=mageia,dc=org$" +access to dn.regex="^cn=[^,]+,ou=(System Groups|Group),<%= dc_suffix %>$" attrs=cn,description,objectClass,gidNumber - by group.exact="cn=Account Admins,ou=System Groups,dc=mageia,dc=org" write + by group.exact="cn=Account Admins,ou=System Groups,<%= dc_suffix %>" write by users read # registration - allow registrar group to create basic unprivileged accounts -access to dn.subtree="ou=People,dc=mageia,dc=org" +access to dn.subtree="ou=People,<%= dc_suffix %>" attrs="objectClass" val="inetOrgperson" - by group/groupOfNames/member.exact="cn=registrars,ou=system groups,dc=mageia,dc=org" =asrx + by group/groupOfNames/member.exact="cn=registrars,ou=system groups,<%= dc_suffix %>" =asrx by * +0 break -access to dn.subtree="ou=People,dc=mageia,dc=org" +access to dn.subtree="ou=People,<%= dc_suffix %>" filter="(!(objectclass=posixAccount))" attrs=cn,sn,gn,mail,entry,children,preferredLanguage - by group/groupOfNames/member.exact="cn=registrars,ou=system groups,dc=mageia,dc=org" =asrx + by group/groupOfNames/member.exact="cn=registrars,ou=system groups,<%= dc_suffix %>" =asrx + by * +0 
break + +# TODO maybe we should use a group instead of a user here +access to dn.subtree="ou=People,<%= dc_suffix %>" + filter="(objectclass=posixAccount)" + attrs=homeDirectory,cn,uid,loginShell,gidNumber,uidNumber + by dn.one="ou=Hosts,<%= dc_suffix %>" read by * +0 break # let the user change some of his/her attributes -access to dn.subtree="ou=People,dc=mageia,dc=org" - attrs=carLicense,homePhone,homePostalAddress,mobile,pager,telephoneNumber,mail,preferredLanguage +access to dn.subtree="ou=People,<%= dc_suffix %>" + attrs=cn,sn,givenName,carLicense,drink,homePhone,homePostalAddress,mobile,pager,telephoneNumber,mail,preferredLanguage,sshPublicKey by self write by users read +access to dn.subtree="ou=People,<%= dc_suffix %>" + attrs=memberOf + by users read + + # create new accounts -access to dn.regex="^([^,]+,)?ou=(People|Group|Hosts),dc=mageia,dc=org$" +access to dn.regex="^([^,]+,)?ou=(People|Group|Hosts),<%= dc_suffix %>$" attrs=children,entry - by group.exact="cn=Account Admins,ou=System Groups,dc=mageia,dc=org" write + by group.exact="cn=Account Admins,ou=System Groups,<%= dc_suffix %>" write by * break # access to existing entries -access to dn.regex="^[^,]+,ou=(People|Hosts|Group),dc=mageia,dc=org$" - by group.exact="cn=Account Admins,ou=System Groups,dc=mageia,dc=org" write +access to dn.regex="^[^,]+,ou=(People|Hosts|Group),<%= dc_suffix %>$" + by group.exact="cn=Account Admins,ou=System Groups,<%= dc_suffix %>" write by * break # sambaDomainName entry -access to dn.regex="^(sambaDomainName=[^,]+,)?dc=mageia,dc=org$" +access to dn.regex="^(sambaDomainName=[^,]+,)?<%= dc_suffix %>$" attrs=children,entry,@sambaDomain,@sambaUnixIdPool - by group.exact="cn=Account Admins,ou=System Groups,dc=mageia,dc=org" write + by group.exact="cn=Account Admins,ou=System Groups,<%= dc_suffix %>" write by users read # samba ID mapping -access to dn.regex="^(sambaSID=[^,]+,)?ou=Idmap,dc=mageia,dc=org$" +access to dn.regex="^(sambaSID=[^,]+,)?ou=Idmap,<%= dc_suffix %>$" attrs=children,entry,@sambaIdmapEntry - by group.exact="cn=Account Admins,ou=System Groups,dc=mageia,dc=org" write - by group.exact="cn=IDMAP Admins,ou=System Groups,dc=mageia,dc=org" write + by group.exact="cn=Account Admins,ou=System Groups,<%= dc_suffix %>" write + by group.exact="cn=IDMAP Admins,ou=System Groups,<%= dc_suffix %>" write by users read # global address book # XXX - which class(es) to use? -access to dn.regex="^(.*,)?ou=Address Book,dc=mageia,dc=org" +access to dn.regex="^(.*,)?ou=Address Book,<%= dc_suffix %>" attrs=children,entry,@inetOrgPerson,@evolutionPerson,@evolutionPersonList - by group.exact="cn=Address Book Admins,ou=System Groups,dc=mageia,dc=org" write + by group.exact="cn=Address Book Admins,ou=System Groups,<%= dc_suffix %>" write by users read # dhcp entries # XXX - open up read access to anybody? 
-access to dn.sub="ou=dhcp,dc=mageia,dc=org" +access to dn.sub="ou=dhcp,<%= dc_suffix %>" attrs=children,entry,@dhcpService,@dhcpServer,@dhcpSharedNetwork,@dhcpSubnet,@dhcpPool,@dhcpGroup,@dhcpHost,@dhcpClass,@dhcpSubClass,@dhcpOptions,@dhcpLeases,@dhcpLog - by group.exact="cn=DHCP Admins,ou=System Groups,dc=mageia,dc=org" write - by group.exact="cn=DHCP Readers,ou=System Groups,dc=mageia,dc=org" read + by group.exact="cn=DHCP Admins,ou=System Groups,<%= dc_suffix %>" write + by group.exact="cn=DHCP Readers,ou=System Groups,<%= dc_suffix %>" read by * read # sudoers -access to dn.regex="^([^,]+,)?ou=sudoers,dc=mageia,dc=org$" +access to dn.regex="^([^,]+,)?ou=sudoers,<%= dc_suffix %>$" attrs=children,entry,@sudoRole - by group.exact="cn=Sudo Admins,ou=System Groups,dc=mageia,dc=org" write + by group.exact="cn=Sudo Admins,ou=System Groups,<%= dc_suffix %>" write by users read # dns -access to dn="ou=dns,dc=mageia,dc=org" +access to dn="ou=dns,<%= dc_suffix %>" attrs=entry,@extensibleObject - by group.exact="cn=DNS Admins,ou=System Groups,dc=mageia,dc=org" write + by group.exact="cn=DNS Admins,ou=System Groups,<%= dc_suffix %>" write by users read -access to dn.sub="ou=dns,dc=mageia,dc=org" +access to dn.sub="ou=dns,<%= dc_suffix %>" attrs=children,entry,@dNSZone - by group.exact="cn=DNS Admins,ou=System Groups,dc=mageia,dc=org" write - by group.exact="cn=DNS Readers,ou=System Groups,dc=mageia,dc=org" read + by group.exact="cn=DNS Admins,ou=System Groups,<%= dc_suffix %>" write + by group.exact="cn=DNS Readers,ou=System Groups,<%= dc_suffix %>" read by * none # MTA # XXX - what else can we add here? Virtual Domains? With which schema? -access to dn.one="ou=People,dc=mageia,dc=org" +access to dn.one="ou=People,<%= dc_suffix %>" attrs=@inetLocalMailRecipient,mail - by group.exact="cn=MTA Admins,ou=System Groups,dc=mageia,dc=org" write + by group.exact="cn=MTA Admins,ou=System Groups,<%= dc_suffix %>" write by users read # KDE Configuration -access to dn.sub="ou=KDEConfig,dc=mageia,dc=org" - by group.exact="cn=KDEConfig Admins,ou=System Groups,dc=mageia,dc=org" write +access to dn.sub="ou=KDEConfig,<%= dc_suffix %>" + by group.exact="cn=KDEConfig Admins,ou=System Groups,<%= dc_suffix %>" write by * read # last one -access to dn.subtree="dc=mageia,dc=org" attrs=entry,uid,cn +access to dn.subtree="<%= dc_suffix %>" attrs=entry,uid,cn by users read - diff --git a/modules/openldap/templates/slapd-slave.sysconfig b/modules/openldap/templates/slapd-slave.sysconfig new file mode 100644 index 00000000..9bff24ff --- /dev/null +++ b/modules/openldap/templates/slapd-slave.sysconfig @@ -0,0 +1,38 @@ +# debug level for slapd +SLAPDSYSLOGLEVEL="0" +SLAPDSYSLOGLOCALUSER="local4" + +# SLAPD URL list +SLAPDURLLIST="ldap:/// ldaps:/// ldapi:///" + +# Config file to use for slapd +#SLAPDCONF=/etc/openldap/slapd.conf + +# Which user to run as +#LDAPUSER=ldap +#LDAPGROUP=ldap + +# Should file permissions on database files be fixed at startup. Default is yes +# FIXPERMS=no + +# Whether database recovery should be run before starting slapd in start +# (not strictly be necessary in 2.3). Default is no +# AUTORECOVER=yes + +# At what intervals to run ldap-hot-db-backup from cron, which will +# do hot database backups for all bdb/hdb databases, and archive +# unnecessary transaction logs, one of hourly,daily,weekly,monthly,yearly +# Default is daily +# Slave does not need a backup +RUN_DB_BACKUP=never + +# How many days to keep archived transaction logs for. 
This should be just +# greater than the backup interval on these files. Default is 7 +# KEEP_ARCHIVES_DAYS=7 + +# How many files slapd should be able to have open. By default, the process +# will inherit the default per-process limit (usually 1024), which may +# not be enough, so ulimit -n is run with the value in MAXFILES (which +# defaults to 1024 as well). 4096 is the maximum OpenLDAP will use without +# recompiling. +# MAXFILES=4096 diff --git a/modules/openldap/templates/slapd.conf b/modules/openldap/templates/slapd.conf index 7edab29b..d82fe088 100644 --- a/modules/openldap/templates/slapd.conf +++ b/modules/openldap/templates/slapd.conf @@ -11,7 +11,10 @@ include /usr/share/openldap/schema/rfc2307bis.schema include /usr/share/openldap/schema/openldap.schema #include /usr/share/openldap/schema/autofs.schema include /usr/share/openldap/schema/samba.schema -include /usr/share/openldap/schema/kolab.schema +# removed as it cause issue on 2010.0 : +# /usr/share/openldap/schema/kolab.schema: +# line 175 objectclass: Duplicate objectClass: "1.3.6.1.4.1.5322.13.1.1" +#include /usr/share/openldap/schema/kolab.schema include /usr/share/openldap/schema/evolutionperson.schema include /usr/share/openldap/schema/calendar.schema include /usr/share/openldap/schema/sudo.schema @@ -27,14 +30,23 @@ pidfile /var/run/ldap/slapd.pid argsfile /var/run/ldap/slapd.args modulepath <%= lib_dir %>/openldap +<% if @hostname == 'duvel' then %> +moduleload back_bdb.la +<% else %> +moduleload back_mdb.la +<% end %> moduleload back_monitor.la moduleload syncprov.la moduleload ppolicy.la #moduleload refint.la +moduleload memberof.la +moduleload unique.la +moduleload dynlist.la +moduleload constraint.la -TLSCertificateFile /etc/ssl/openldap/ldap.pem -TLSCertificateKeyFile /etc/ssl/openldap/ldap.pem -TLSCACertificateFile /etc/ssl/openldap/ldap.pem +TLSCertificateFile /etc/ssl/openldap/ldap.<%= domain %>.pem +TLSCertificateKeyFile /etc/ssl/openldap/ldap.<%= domain %>.pem +TLSCACertificateFile /etc/ssl/openldap/ldap.<%= domain %>.pem # Give ldapi connection some security localSSF 56 @@ -46,20 +58,34 @@ security ssf=56 loglevel 256 +database monitor +access to dn.subtree="cn=Monitor" + by group.exact="cn=LDAP Monitors,ou=System Groups,<%= dc_suffix %>" read + by group.exact="cn=LDAP Admins,ou=System Groups,<%= dc_suffix %>" read + by * none + +<% if @hostname == 'duvel' then %> database bdb +<% else %> +database mdb +# mdb defaults to 10MB max DB, so we need to hardcode some better value :( +maxsize 500000000 +<% end %> suffix "<%= dc_suffix %>" directory /var/lib/ldap rootdn "cn=manager,<%= dc_suffix %>" checkpoint 256 5 +<% if @hostname == 'duvel' then %> # 32Mbytes, can hold about 10k posixAccount entries dbconfig set_cachesize 0 33554432 1 dbconfig set_lg_bsize 2097152 cachesize 1000 idlcachesize 3000 +<% end %> index objectClass eq -index uidNumber,gidNumber,memberuid,member eq +index uidNumber,gidNumber,memberuid,member,owner eq index uid eq,subinitial index cn,mail,surname,givenname eq,subinitial index sambaSID eq,sub @@ -72,6 +98,8 @@ index sudouser eq,sub index entryCSN,entryUUID eq index dhcpHWAddress,dhcpClassData eq +overlay memberof + overlay syncprov syncprov-checkpoint 100 10 syncprov-sessionlog 100 @@ -81,6 +109,15 @@ ppolicy_default "cn=default,ou=Password Policies,<%= dc_suffix %>" ppolicy_hash_cleartext yes ppolicy_use_lockout yes +overlay unique +unique_uri ldap:///?mail?sub? 
+ +overlay dynlist +dynlist-attrset groupOfURLs memberURL member + + +overlay constraint +constraint_attribute sshPublicKey regex "^ssh-(rsa|dss|ed25519) [[:graph:]]+ [[:graph:]]+$" # uncomment if you want to automatically update group # memberships when an user is removed from the tree @@ -89,16 +126,13 @@ ppolicy_use_lockout yes #refint_attributes member #refint_nothing "uid=LDAP Admin,ou=System Accounts,dc=example,dc=com" +<% if environment == "test" %> authz-regexp "gidNumber=0\\\+uidNumber=0,cn=peercred,cn=external,cn=auth" - "uid=Account Admin,ou=System Accounts,<%= dc_suffix %>" + "cn=manager,<%= dc_suffix %>" authz-regexp ^uid=([^,]+),cn=[^,]+,cn=auth$ uid=$1,ou=People,<%= dc_suffix %> +<% end %> include /etc/openldap/mandriva-dit-access.conf -database monitor -access to dn.subtree="cn=Monitor" - by group.exact="cn=LDAP Monitors,ou=System Groups,<%= dc_suffix %>" read - by group.exact="cn=LDAP Admins,ou=System Groups,<%= dc_suffix %>" read - by * none diff --git a/modules/openldap/templates/slapd.syncrepl.conf b/modules/openldap/templates/slapd.syncrepl.conf new file mode 100644 index 00000000..2bfe7d50 --- /dev/null +++ b/modules/openldap/templates/slapd.syncrepl.conf @@ -0,0 +1,11 @@ +syncrepl rid=<%= rid %> + provider=ldaps://ldap-master.<%= domain %>:636 + type=refreshAndPersist + searchbase="<%= dc_suffix %>" + schemachecking=off + bindmethod=simple + binddn="cn=syncuser-<%= hostname%>,ou=System Accounts,<%= dc_suffix %>" + credentials=<%= sync_password %> + tls_reqcert=never + +updateref ldaps://ldap-master.<%= domain %>:636 diff --git a/modules/openldap/templates/slapd.sysconfig b/modules/openldap/templates/slapd.sysconfig new file mode 100644 index 00000000..e6ae2e05 --- /dev/null +++ b/modules/openldap/templates/slapd.sysconfig @@ -0,0 +1,37 @@ +# debug level for slapd +SLAPDSYSLOGLEVEL="0" +SLAPDSYSLOGLOCALUSER="local4" + +# SLAPD URL list +SLAPDURLLIST="ldap:/// ldaps:/// ldapi:///" + +# Config file to use for slapd +#SLAPDCONF=/etc/openldap/slapd.conf + +# Which user to run as +#LDAPUSER=ldap +#LDAPGROUP=ldap + +# Should file permissions on database files be fixed at startup. Default is yes +# FIXPERMS=no + +# Whether database recovery should be run before starting slapd in start +# (not strictly be necessary in 2.3). Default is no +# AUTORECOVER=yes + +# At what intervals to run ldap-hot-db-backup from cron, which will +# do hot database backups for all bdb/hdb databases, and archive +# unnecessary transaction logs, one of hourly,daily,weekly,monthly,yearly +# Default is daily +# RUN_DB_BACKUP=daily + +# How many days to keep archived transaction logs for. This should be just +# greater than the backup interval on these files. Default is 7 +# KEEP_ARCHIVES_DAYS=7 + +# How many files slapd should be able to have open. By default, the process +# will inherit the default per-process limit (usually 1024), which may +# not be enough, so ulimit -n is run with the value in MAXFILES (which +# defaults to 1024 as well). 4096 is the maximum OpenLDAP will use without +# recompiling. 
+# MAXFILES=4096 diff --git a/modules/openldap/templates/slapd.test.conf b/modules/openldap/templates/slapd.test.conf new file mode 100644 index 00000000..8befa55a --- /dev/null +++ b/modules/openldap/templates/slapd.test.conf @@ -0,0 +1,9 @@ +database bdb +suffix "dc=test_ldap" +directory /var/lib/ldap/test +rootdn "cn=manager,dc=test_ldap" +rootpw "<%= ldap_test_password %>" +authz-regexp "gidNumber=0\\\+uidNumber=0,cn=peercred,cn=external,cn=auth" + "cn=manager,dc=test_ldap" +# force ssl +security ssf=56 diff --git a/modules/openssh/manifests/init.pp b/modules/openssh/manifests/init.pp index e55660fd..bae0fa5c 100644 --- a/modules/openssh/manifests/init.pp +++ b/modules/openssh/manifests/init.pp @@ -1,25 +1 @@ -class openssh { - - # some trick to manage sftp server, who is arch dependent on mdv - $path_to_sftp = "$lib_dir/ssh/" - - package { "openssh-server": - ensure => installed - } - - service { sshd: - ensure => running, - path => "/etc/init.d/sshd", - subscribe => [ Package["openssh-server"], File["sshd_config"] ] - } - - file { "sshd_config": - path => "/etc/ssh/sshd_config", - ensure => present, - owner => root, - group => root, - mode => 644, - require => Package["openssh-server"], - content => template("openssh/sshd_config") - } -} +class openssh { } diff --git a/modules/openssh/manifests/server.pp b/modules/openssh/manifests/server.pp new file mode 100644 index 00000000..c45268d2 --- /dev/null +++ b/modules/openssh/manifests/server.pp @@ -0,0 +1,17 @@ +class openssh::server { + # some trick to manage sftp server, who is arch dependent on mdv + # TODO: the path changed on Mageia 6 to /usr/libexec/openssh/sftp-server + $path_to_sftp = "${::lib_dir}/ssh/" + + package { 'openssh-server': } + + service { 'sshd': + subscribe => Package['openssh-server'], + } + + file { '/etc/ssh/sshd_config': + require => Package['openssh-server'], + content => template('openssh/sshd_config'), + notify => Service['sshd'] + } +} diff --git a/modules/openssh/manifests/ssh_keys_from_ldap.pp b/modules/openssh/manifests/ssh_keys_from_ldap.pp new file mode 100644 index 00000000..9ea6c139 --- /dev/null +++ b/modules/openssh/manifests/ssh_keys_from_ldap.pp @@ -0,0 +1,20 @@ +class openssh::ssh_keys_from_ldap inherits server { + package { 'python3-ldap': } + + $ldap_pwfile = '/etc/ldap.secret' + $nslcd_conf_file = '/etc/nslcd.conf' + $ldap_servers = get_ldap_servers() + mga_common::local_script { 'ldap-sshkey2file.py': + content => template('openssh/ldap-sshkey2file.py'), + require => Package['python3-ldap'] + } + + cron { 'sshkey2file': + command => '/bin/bash -c "/usr/local/bin/ldap-sshkey2file.py && ( [[ -f /usr/bin/mgagit && -d /var/lib/git/.gitolite ]] && /bin/su -c \'/usr/bin/mgagit glrun\' - git ) ||:"', + hour => '*', + minute => '*/10', + user => 'root', + environment => 'MAILTO=root', + require => Mga_common::Local_script['ldap-sshkey2file.py'], + } +} diff --git a/modules/openssh/templates/ldap-sshkey2file.py b/modules/openssh/templates/ldap-sshkey2file.py new file mode 100755 index 00000000..934e2865 --- /dev/null +++ b/modules/openssh/templates/ldap-sshkey2file.py @@ -0,0 +1,194 @@ +#!/usr/bin/python3 + +import argparse +import os +import random +import shutil +import sys +import tempfile +import textwrap +from typing import Iterable + +try: + import ldap +except ImportError: + print("Please install python-ldap before running this program") + sys.exit(1) + +basedn = "<%= @dc_suffix %>" +peopledn = f"ou=people,{basedn}" +<%- + ldap_servers.map! 
{ |l| "'ldaps://#{l}'" } +-%> +uris = [<%= ldap_servers.join(", ") %>] +random.shuffle(uris) +uri = " ".join(uris) +timeout = 5 +binddn = f"cn=<%= @fqdn %>,ou=Hosts,{basedn}" +ldap_secret_file = "<%= @ldap_pwfile %>" +nslcd_conf_file = "<%= @nslcd_conf_file %>" +# filter out disabled accounts also +# too bad uidNumber doesn't support >= filters +objfilter = "(&(objectClass=inetOrgPerson)(objectClass=ldapPublicKey)(objectClass=posixAccount)(sshPublicKey=*))" +keypathprefix = "/home" + +parser = argparse.ArgumentParser( + formatter_class=argparse.RawDescriptionHelpFormatter, + description=textwrap.dedent(f'''\ + Will fetch all enabled user accounts under {peopledn} + with ssh keys in them and write each one to + {keypathprefix}/<login>/.ssh/authorized_keys + + It will return failure when no keys are updated and success + when one or more keys have changed. + + This script is intended to be run from cron as root; + ''')) +parser.add_argument('-n', '--dry-run', action='store_true') +parser.add_argument('-v', '--verbose', action='store_true') +args = parser.parse_args() + + +def get_bindpw() -> str: + try: + return get_nslcd_bindpw(nslcd_conf_file) + except: + pass + + try: + return get_ldap_secret(ldap_secret_file) + except: + pass + + print("Error while reading password file, aborting") + sys.exit(1) + + +def get_nslcd_bindpw(pwfile: str) -> str: + try: + with open(pwfile, 'r') as f: + pwfield = "bindpw" + for line in f: + ls = line.strip().split() + if len(ls) == 2 and ls[0] == pwfield: + return ls[1] + except IOError as e: + print("Error while reading nslcd file " + pwfile) + print(e) + raise + + print("No " + pwfield + " field found in nslcd file " + pwfile) + raise Exception() + + +def get_ldap_secret(pwfile: str) -> str: + try: + with open(pwfile, 'r') as f: + pw = f.readline().strip() + except IOError as e: + print("Error while reading password file " + pwfile) + print(e) + raise + return pw + + +def write_keys(keys: Iterable[bytes], user: bytes, uid: int, gid: int) -> bool: + userdir = f"{keypathprefix}/{user.decode('utf-8')}" + keyfile = f"{userdir}/.ssh/authorized_keys" + + fromldap = "" + for key in keys: + fromldap += key.decode("utf-8").strip() + "\n" + + fromfile = "" + try: + with open(keyfile, 'r') as f: + fromfile = f.read() + except FileNotFoundError: + pass + + if fromldap == fromfile: + return False + + if args.dry_run: + print(f"Would write {keyfile}") + return True + + if args.verbose: + print(f"Writing {keyfile}") + + if not os.path.isdir(userdir): + shutil.copytree('/etc/skel', userdir) + os.chown(userdir, uid, gid) + for root, dirs, files in os.walk(userdir): + for d in dirs: + os.chown(os.path.join(root, d), uid, gid) + for f in files: + os.chown(os.path.join(root, f), uid, gid) + + try: + os.makedirs(f"{userdir}/.ssh", 0o700) + except FileExistsError: + pass + os.chmod(f"{userdir}/.ssh", 0o700) + os.chown(f"{userdir}/.ssh", uid, gid) + + with tempfile.NamedTemporaryFile( + prefix='ldap-sshkey2file-', mode='w', delete=False) as tmpfile: + tmpfile.write(fromldap) + os.chmod(tmpfile.name, 0o600) + os.chown(tmpfile.name, uid, gid) + shutil.move(tmpfile.name, keyfile) + # Hmm, apparently shutil.move does not preserve user/group so let's reapply + # them. 
I still like doing it before as this should be more "atomic" + # if it actually worked, so it's "good practice", even if shutil.move sucks + os.chown(keyfile, uid, gid) + os.chmod(keyfile, 0o600) + return True + + +bindpw = get_bindpw() + +changed = False +try: + ld = ldap.initialize(uri) + ld.set_option(ldap.OPT_NETWORK_TIMEOUT, timeout) + if uri.startswith("ldap:/"): + ld.start_tls_s() + ld.bind_s(binddn, bindpw) + res = ld.search_s(peopledn, ldap.SCOPE_ONELEVEL, objfilter, + ['uid', 'sshPublicKey', 'uidNumber', 'gidNumber']) + try: + os.makedirs(keypathprefix, 0o701) + except FileExistsError: + pass + + if args.verbose: + print("Found users:", + ", ".join(sorted([x[1]['uid'][0].decode('utf-8') for x in res]))) + + for result in res: + dn, entry = result + # skip possible system users + if 'uidNumber' not in entry or int(entry['uidNumber'][0]) < 500: + continue + if write_keys(entry['sshPublicKey'], entry['uid'][0], + int(entry['uidNumber'][0]), int(entry['gidNumber'][0])): + changed = True + + ld.unbind_s() +except Exception: + print("Error") + raise + +if changed: + if args.verbose: + print("SSH keys changed") + sys.exit(0) + +if args.verbose: + print("No changes in SSH keys") +sys.exit(1) + + +# vim:ts=4:sw=4:et:ai:si diff --git a/modules/openssh/templates/sshd_config b/modules/openssh/templates/sshd_config index cb40a961..56ddd725 100644 --- a/modules/openssh/templates/sshd_config +++ b/modules/openssh/templates/sshd_config @@ -18,11 +18,10 @@ # The default requires explicit activation of protocol 1 #Protocol 2 -# HostKey for protocol version 1 -HostKey /etc/ssh/ssh_host_key # HostKeys for protocol version 2 HostKey /etc/ssh/ssh_host_rsa_key -HostKey /etc/ssh/ssh_host_dsa_key +HostKey /etc/ssh/ssh_host_ecdsa_key +HostKey /etc/ssh/ssh_host_ed25519_key # Lifetime and size of ephemeral version 1 server key #KeyRegenerationInterval 1h @@ -45,6 +44,7 @@ PermitRootLogin without-password #PubkeyAuthentication yes #AuthorizedKeysFile .ssh/authorized_keys + # For this to work you will also need host keys in /etc/ssh/ssh_known_hosts #RhostsRSAAuthentication no # similar for protocol version 2 @@ -56,11 +56,11 @@ PermitRootLogin without-password #IgnoreRhosts yes # To disable tunneled clear text passwords, change to no here! -#PasswordAuthentication yes +PasswordAuthentication no #PermitEmptyPasswords no # Change to no to disable s/key passwords -#ChallengeResponseAuthentication yes +ChallengeResponseAuthentication no # Kerberos options #KerberosAuthentication no @@ -81,7 +81,7 @@ PermitRootLogin without-password # If you just want the PAM account and session checks to run without # PAM authentication, then enable this but set PasswordAuthentication # and ChallengeResponseAuthentication to 'no'. 
-#UsePAM no +UsePAM no # Accept locale-related environment variables AcceptEnv LANG LC_CTYPE LC_NUMERIC LC_TIME LC_COLLATE LC_MONETARY LC_MESSAGES @@ -89,7 +89,7 @@ AcceptEnv LC_PAPER LC_NAME LC_ADDRESS LC_TELEPHONE LC_MEASUREMENT AcceptEnv LC_IDENTIFICATION LC_ALL #AllowAgentForwarding yes -#AllowTcpForwarding yes +AllowTcpForwarding no #GatewayPorts no X11Forwarding yes #X11DisplayOffset 10 @@ -98,7 +98,6 @@ X11Forwarding yes #PrintLastLog yes #TCPKeepAlive yes #UseLogin no -UsePrivilegeSeparation yes #PermitUserEnvironment no #Compression delayed #ClientAliveInterval 0 @@ -113,10 +112,15 @@ UsePrivilegeSeparation yes #Banner none # override default of no subsystems -Subsystem sftp <%= path_to_sftp %>/sftp-server +Subsystem sftp /usr/libexec/openssh/sftp-server # Example of overriding settings on a per-user basis #Match User anoncvs # X11Forwarding no # AllowTcpForwarding no # ForceCommand cvs server +<% if @hostname == 'duvel' then %> +# git command is already forced to "gitolite <username>" in /var/lib/git/.ssh/authorized_keys +Match User *,!schedbot,!root,!git Group *,!mga-sysadmin,!mga-unrestricted_shell_access + ForceCommand /usr/local/bin/sv_membersh.pl -c "$SSH_ORIGINAL_COMMAND" +<% end %> diff --git a/modules/openssl/manifests/init.pp b/modules/openssl/manifests/init.pp index fb1f9239..b8c4d91e 100644 --- a/modules/openssl/manifests/init.pp +++ b/modules/openssl/manifests/init.pp @@ -1,12 +1,40 @@ class openssl { - define self_signed_cert($directory = '/etc/certs') { - package { 'openssl': - ensure => installed + class base { + package { 'openssl': } + } + + define self_signed_cert($directory = '/etc/certs') { + include openssl::base + + $pem_file = "${name}.pem" + exec { "openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout ${pem_file} -out ${pem_file} -subj '/CN=${name}'": + cwd => $directory, + creates => "${directory}/${name}.pem", + require => Package['openssl'] } - $pem_file = "$name.pem" - exec { "openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout $pem_file -out $pem_file -subj '/CN=$name.$domain'": - cwd => "$directory", - creates => "$directory/$name.pem" + } + + define self_signed_splitted_cert( $filename = '', + $directory = '/etc/certs', + $owner = 'root', + $group = 'root', + $mode = '0600') { + include openssl::base + + $crt_file = "${filename}.crt" + $key_file = "${filename}.key" + exec { "openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout ${key_file} -out ${crt_file} -subj '/CN=${name}'": + cwd => $directory, + creates => "${directory}/${key_file}", + require => Package['openssl'], + before => [File["${directory}/${key_file}"], + File["${directory}/${crt_file}"]] } - } + + file { ["${directory}/${key_file}","${directory}/${crt_file}"]: + owner => $owner, + group => $group, + mode => $mode, + } + } } diff --git a/modules/pam/manifests/base.pp b/modules/pam/manifests/base.pp new file mode 100644 index 00000000..e29c8555 --- /dev/null +++ b/modules/pam/manifests/base.pp @@ -0,0 +1,32 @@ +class pam::base { + include pam::multiple_ldap_access + package { ['nscd', 'nss-pam-ldapd']: } + + # This needs configuration or it generates an error every hour. + # If it's ever enabled, make sure restrict permissions on + # /var/db/passwd.db and /var/db/group.db at the same time. 
+ package { 'nss_updatedb': + ensure => 'absent', + } + + service { 'nscd': + require => Package['nscd'], + } + + file { + '/etc/pam.d/system-auth': + content => template('pam/system-auth'); + '/etc/nsswitch.conf': + content => template('pam/nsswitch.conf'); + '/etc/ldap.conf': + content => template('pam/ldap.conf'); + '/etc/openldap/ldap.conf': + content => template('pam/openldap.ldap.conf'); + } + + $ldap_password = extlookup("${::fqdn}_ldap_password",'x') + file { '/etc/ldap.secret': + mode => '0600', + content => $ldap_password + } +} diff --git a/modules/pam/manifests/init.pp b/modules/pam/manifests/init.pp index 210526c9..180ad852 100644 --- a/modules/pam/manifests/init.pp +++ b/modules/pam/manifests/init.pp @@ -1,42 +1 @@ -class pam { - - class base { - package { ["pam_ldap","nss_ldap"]: - ensure => installed, - } - - file { "system-auth": - path => "/etc/pam.d/system-auth", - owner => root, - group => root, - mode => 644, - content => template("pam/system-auth") - } - - file { "nsswitch.conf": - path => "/etc/nsswitch.conf", - owner => root, - group => root, - mode => 644, - content => template("pam/nsswitch.conf") - } - file { "ldap.conf": - path => "/etc/ldap.conf", - owner => root, - group => root, - mode => 644, - content => template("pam/ldap.conf") - } - } - - # for server where only admin can connect - class admin_access inherits base { - $access_class = "admin" - # not sure if this line is needed anymore, wil check later - } - - # for server where people can connect with ssh ( git, svn ) - class commiters_access inherits base { - $access_class = "commiters" - } -} +class pam { } diff --git a/modules/pam/manifests/multiple_ldap_access.pp b/modules/pam/manifests/multiple_ldap_access.pp new file mode 100644 index 00000000..1c5a391f --- /dev/null +++ b/modules/pam/manifests/multiple_ldap_access.pp @@ -0,0 +1,15 @@ +class pam::multiple_ldap_access($access_classes, $restricted_shell = false) { + include stdlib + + $default_access_classes = [ 'mga-sysadmin', 'mga-unrestricted_shell_access' ] + if empty($access_classes) { + $allowed_access_classes = $default_access_classes + } else { + $allowed_access_classes = concat($default_access_classes, $access_classes) + } + + if $restricted_shell { + include restrictshell + } + include pam::base +} diff --git a/modules/pam/templates/ldap.conf b/modules/pam/templates/ldap.conf index 0b3a19fc..235a6aac 100644 --- a/modules/pam/templates/ldap.conf +++ b/modules/pam/templates/ldap.conf @@ -1,7 +1,10 @@ +rootbinddn cn=<%= fqdn %>,ou=Hosts,<%= dc_suffix %> -uri ldap://ldap.<%= domain %> +uri ldaps://ldap.<%= domain %> base <%= dc_suffix %> -pam_lookup_policy no +timelimit 4 +bind_timelimit 4 +pam_lookup_policy yes pam_password exop nss_base_passwd ou=People,<%= dc_suffix %>?one nss_base_shadow ou=People,<%= dc_suffix %>?one @@ -12,8 +15,10 @@ nss_map_attribute uniqueMember member sudoers_base ou=sudoers,<%= dc_suffix %> #sudoers_debug 2 -<% if access_class = 'commiters' %> +<%- +restricted_shell = scope.lookupvar('pam::multiple_ldap_access::restricted_shell') +if restricted_shell +-%> # for restricted access nss_override_attribute_value loginShell /usr/local/bin/sv_membersh.pl <% end %> - diff --git a/modules/pam/templates/nsswitch.conf b/modules/pam/templates/nsswitch.conf index f797885d..bfd042c1 100644 --- a/modules/pam/templates/nsswitch.conf +++ b/modules/pam/templates/nsswitch.conf @@ -1,7 +1,7 @@ passwd: files ldap [UNAVAIL=return] shadow: files ldap [UNAVAIL=return] group: files ldap [UNAVAIL=return] -hosts: files mdns4_minimal 
[NOTFOUND=return] dns +hosts: files dns bootparams: files ethers: files netmasks: files @@ -13,4 +13,3 @@ netgroup: files ldap publickey: files automount: files aliases: files - diff --git a/modules/pam/templates/openldap.ldap.conf b/modules/pam/templates/openldap.ldap.conf new file mode 100644 index 00000000..cd6ee640 --- /dev/null +++ b/modules/pam/templates/openldap.ldap.conf @@ -0,0 +1,25 @@ +#BASE dc=example, dc=com +#HOST ldap.example.com ldap-master.example.com +#URI ldap://ldap.example.com ldap://ldap-master.example.com:666 + +#SIZELIMIT 12 +#TIMELIMIT 15 +#DEREF never + +# SSL/TSL configuration. With CA-signed certs, TLS_REQCERT should be +# "demand", with the CA certificate accessible +#TLS_REQCERT ([demand],never,allow,try) +# We ship with allow by default as some LDAP clients (e.g. evolution) have +# no interactive SSL configuration + +TLS_REQCERT allow + +# CA Certificate locations +# Use the default self-signed cert generated by openldap-server postinstall +# by default +#TLS_CACERT /etc/pki/tls/certs/ldap.pem +#TLS_CACERT /etc/ssl/openldap/ldap.<%= domain %>.pem + +# If requiring support for certificates signed by all CAs (noting risks +# pam_ldap if doing DNS-based suffix lookup etc. +#TLS_CACERTDIR /etc/pki/tls/rootcerts diff --git a/modules/pam/templates/system-auth b/modules/pam/templates/system-auth index b02aec3a..37d1da7d 100644 --- a/modules/pam/templates/system-auth +++ b/modules/pam/templates/system-auth @@ -1,21 +1,22 @@ -auth required pam_env.so +auth required pam_env.so # this part is here if the module don't exist # basically, the idea is to copy the exact detail of sufficient, # and add abort=ignore auth [abort=ignore success=done new_authtok_reqd=done default=ignore] pam_tcb.so shadow fork nullok prefix=$2a$ count=8 -auth sufficient pam_unix.so likeauth nullok +auth sufficient pam_unix.so likeauth nullok try_first_pass auth sufficient pam_ldap.so use_first_pass -<% if access_class = 'admin' %> -auth required pam_wheel.so group=mga-sysadmin -<% end %> -<% if access_class = 'commiters' %> -auth required pam_wheel.so group=mga-commiters -<% end %> auth required pam_deny.so account sufficient pam_localuser.so -account sufficient pam_ldap.so +# not sure if the following bring something useful +account required pam_ldap.so +<%- allowed_access_classes = scope.lookupvar('pam::multiple_ldap_access::allowed_access_classes') -%> +<%- if allowed_access_classes -%> +<%- allowed_access_classes.each { |ldap_group| -%> +account sufficient pam_succeed_if.so quiet user ingroup <%= ldap_group %> +<%- } -%> +<%- end -%> account required pam_deny.so @@ -32,4 +33,3 @@ session optional pam_mkhomedir.so session required pam_limits.so session required pam_unix.so session optional pam_ldap.so - diff --git a/modules/phpbb/files/phpbb_apply_config.pl b/modules/phpbb/files/phpbb_apply_config.pl new file mode 100644 index 00000000..a58df24e --- /dev/null +++ b/modules/phpbb/files/phpbb_apply_config.pl @@ -0,0 +1,28 @@ +#!/usr/bin/perl +use strict; +use warnings; +use Env qw(VALUE); +use DBI; + +my $key = $ARGV[0]; + +# DBI will use default value coming from env +# see puppet manifests +my $dbh = DBI->connect("dbi:Pg:","","", { + AutoCommit => 0, + RaiseError => 1, +}); + +my $table = "phpbb_config"; + +# FIXME add rollback if there is a problem +# https://docstore.mik.ua/orelly/linux/dbi/ch06_03.htm +my $update = $dbh->prepare("UPDATE $table SET config_value = ?, is_dynamic = ? 
WHERE config_name = ?"); +my $insert = $dbh->prepare("INSERT INTO $table ( config_value, is_dynamic, config_name ) VALUES ( ? , ? , ? )"); + +my $res = $update->execute($VALUE, 1, $key) or die "cannot do update $?"; +if ($res == 0 ) { + $insert->execute($VALUE, 1, $key) or die "cannot do insert $?"; +} +$dbh->commit(); +$dbh->disconnect(); diff --git a/modules/phpbb/files/robots.txt b/modules/phpbb/files/robots.txt new file mode 100644 index 00000000..1c335a73 --- /dev/null +++ b/modules/phpbb/files/robots.txt @@ -0,0 +1,7 @@ +User-agent: * +Disallow: /*/faq.php? +Disallow: /*/memberlist.php? +Disallow: /*/posting.php? +Disallow: /*/search.php? +Disallow: /*/ucp.php? +Crawl-delay: 30 diff --git a/modules/phpbb/manifests/base.pp b/modules/phpbb/manifests/base.pp new file mode 100644 index 00000000..9f676cb4 --- /dev/null +++ b/modules/phpbb/manifests/base.pp @@ -0,0 +1,57 @@ +class phpbb::base { + $db = 'phpbb' + $user = 'phpbb' + $forums_dir = '/var/www/forums/' + + include apache::mod::php + + package {['php-gd', + 'php-xml', + 'php-zlib', + 'php-ftp', + 'php-magickwand', + 'php-pgsql', + 'php-ldap']: } + + package { 'perl-DBD-Pg': } + + file { '/usr/local/bin/phpbb_apply_config.pl': + mode => '0755', + source => 'puppet:///modules/phpbb/phpbb_apply_config.pl', + } + + $pgsql_password = extlookup('phpbb_pgsql','x') + postgresql::remote_user { $user: + password => $pgsql_password, + } + + file { $forums_dir: + ensure => directory, + } + + $robotsfile = "$forums_dir/robots.txt" + file { $robotsfile: + ensure => present, + mode => '0644', + owner => root, + group => root, + source => 'puppet:///modules/phpbb/robots.txt', + } + + # TODO check that everything is locked down + apache::vhost::base { "forums.${::domain}": + content => template('phpbb/forums_vhost.conf'), + } + + apache::vhost::base { "ssl_forums.${::domain}": + use_ssl => true, + vhost => "forums.${::domain}", + content => template('phpbb/forums_vhost.conf'), + } + + file { '/etc/httpd/conf/vhosts.d/forums.d/': + ensure => directory, + } +} + + diff --git a/modules/phpbb/manifests/config.pp b/modules/phpbb/manifests/config.pp new file mode 100644 index 00000000..553b0f74 --- /dev/null +++ b/modules/phpbb/manifests/config.pp @@ -0,0 +1,12 @@ +define phpbb::config($key, $value, $database) { + exec { "phpbb_apply ${name}": + command => "/usr/local/bin/phpbb_apply_config.pl ${key}", + user => 'root', + environment => ["PGDATABASE=${database}", + "PGUSER=${phpbb::base::user}", + "PGPASSWORD=${phpbb::base::pgsql_password}", + "PGHOST=pgsql.${::domain}", + "VALUE=${value}"], + require => File['/usr/local/bin/phpbb_apply_config.pl'], + } +} diff --git a/modules/phpbb/manifests/databases.pp b/modules/phpbb/manifests/databases.pp new file mode 100644 index 00000000..dc255f75 --- /dev/null +++ b/modules/phpbb/manifests/databases.pp @@ -0,0 +1,3 @@ +define phpbb::databases() { + Phpbb::Locale_db <<| |>> +} diff --git a/modules/phpbb/manifests/init.pp b/modules/phpbb/manifests/init.pp new file mode 100644 index 00000000..ccfa0ca2 --- /dev/null +++ b/modules/phpbb/manifests/init.pp @@ -0,0 +1 @@ +class phpbb { } diff --git a/modules/phpbb/manifests/instance.pp b/modules/phpbb/manifests/instance.pp new file mode 100644 index 00000000..e300d9e0 --- /dev/null +++ b/modules/phpbb/manifests/instance.pp @@ -0,0 +1,80 @@ +define phpbb::instance() { + include phpbb::base + + $lang = $name + $database = "${phpbb::base::db}_${lang}" + + $user = $phpbb::base::user + $pgsql_password = $phpbb::base::pgsql_password + $forums_dir = $phpbb::base::forums_dir + 
+ include git::client + exec { "git_clone ${lang}": + command => "git clone git://git.${::domain}/web/forums/ ${lang}", + cwd => $forums_dir, + creates => "${forums_dir}/${lang}", + require => File[$forums_dir], + notify => Exec["rm_install ${lang}"], + } + + # remove this or the forum will not work ( 'board disabled' ) + # maybe it would be better to move this elsewhere, I + # am not sure (and in any case, it is still in git) + exec { "rm_install ${lang}": + command => "rm -Rf ${forums_dir}/${lang}/phpBB/install", + onlyif => "test -d ${forums_dir}/${lang}/phpBB/install", + } + + # list found by reading ./install/install_install.php + # end of check_server_requirements ( 2 loops ) + + $writable_dirs = ['cache', + 'images/avatars/upload', + 'files', + 'store' ] + + $dir_names = regsubst($writable_dirs,'^',"${forums_dir}/${lang}/phpBB/") + + file { $dir_names: + ensure => directory, + owner => 'apache', + require => Exec["git_clone ${lang}"], + } + + file { "${forums_dir}/${lang}/phpBB/config.php": + content => template('phpbb/config.php'), + } + + @@phpbb::locale_db { $database: + user => $user, + } + + Phpbb::Config { + database => $database, + } + + $ldap_password = extlookup( 'phpbb_ldap','x') + + phpbb::config { + "ldap_user/${lang}": + key => 'ldap_user', value => "cn=phpbb-${::hostname},ou=System Accounts,${::dc_suffix}"; + "ldap_server/${lang}": + key => 'ldap_server', value => "ldaps://ldap.${::domain} ldaps://ldap-slave-1.${::domain}"; + "ldap_password/${lang}": + key => 'ldap_password', value => $ldap_password; + "ldap_base_dn/${lang}": + key => 'ldap_base_dn', value => "ou=People,${::dc_suffix}"; + "auth_method/${lang}": + key => 'auth_method', value => 'ldap'; + "ldap_mail/${lang}": + key => 'ldap_mail', value => 'mail'; + "ldap_uid/${lang}": + key => 'ldap_uid', value => 'uid'; + "cookie_domain/${lang}": + key => 'cookie_domain', value => "forums.${::domain}"; + "server_name/${lang}": + key => 'server_name', value => "forums.${::domain}"; + "default_lang/${lang}": + key => 'default_lang', value => $lang; + } +} diff --git a/modules/phpbb/manifests/locale_db.pp b/modules/phpbb/manifests/locale_db.pp new file mode 100644 index 00000000..70116962 --- /dev/null +++ b/modules/phpbb/manifests/locale_db.pp @@ -0,0 +1,12 @@ +# FIXME: In puppet >3.0 word 'tag' is reserved, so it has to be renamed +define phpbb::locale_db($tag = 'default', + $user = $phpbb::base::user) { + postgresql::database { $name: + description => "${name} db for the phpbb forum", + user => $user, + tag => $tag, +# this breaks due to the way it is remotely declared +# this should only be an issue in case of bootstrapping again +# require => Postgresql::User[$user] + } +} diff --git a/modules/phpbb/manifests/redirection_instance.pp b/modules/phpbb/manifests/redirection_instance.pp new file mode 100644 index 00000000..332eac53 --- /dev/null +++ b/modules/phpbb/manifests/redirection_instance.pp @@ -0,0 +1,7 @@ +define phpbb::redirection_instance($url) { + $lang = $name + file { "/etc/httpd/conf/vhosts.d/forums.d/redirect_${name}.conf": + content => template('phpbb/forums_redirect.conf'), + notify => Exec['apachectl configtest'], + } +} diff --git a/modules/phpbb/templates/config.php b/modules/phpbb/templates/config.php new file mode 100644 index 00000000..5d878235 --- /dev/null +++ b/modules/phpbb/templates/config.php @@ -0,0 +1,17 @@ +<?php +// phpBB 3.0.x auto-generated configuration file +// // Do not change anything in this file!
+$dbms = 'postgres'; +$dbhost = 'pg.<%= domain %>'; +$dbport = ''; +$dbname = '<%= database %>'; +$dbuser = '<%= user %>'; +$dbpasswd = '<%= pgsql_password %>'; +$table_prefix = 'phpbb_'; +$acm_type = 'apc'; +$load_extensions = ''; + +@define('PHPBB_INSTALLED', true); +// @define('DEBUG', true); +// @define('DEBUG_EXTRA', true); +?> diff --git a/modules/phpbb/templates/forums_redirect.conf b/modules/phpbb/templates/forums_redirect.conf new file mode 100644 index 00000000..24747b4c --- /dev/null +++ b/modules/phpbb/templates/forums_redirect.conf @@ -0,0 +1,2 @@ +Redirect /<%= lang %> <%= url %> +Redirect /<%= lang %>/ <%= url %> diff --git a/modules/phpbb/templates/forums_vhost.conf b/modules/phpbb/templates/forums_vhost.conf new file mode 100644 index 00000000..440dad1f --- /dev/null +++ b/modules/phpbb/templates/forums_vhost.conf @@ -0,0 +1,62 @@ + # TODO redirect based on language settings + # and the presence of the forum + + # for locale redirection + Include conf/vhosts.d/forums.d/*.conf + + # Prevent embedding the forum site in a third-party iframe + Header set X-Frame-Options DENY + + + # using Redirect creates a loop, so we use mod_rewrite here + RewriteEngine On + RewriteRule ^/$ /en/ [R] + RewriteRule ^/(..)$ /$1/ [R] + + Alias /robots.txt <%= forums_dir %>/robots.txt + + AliasMatch ^/(..)/(.*) <%= forums_dir %>/$1/phpBB/$2 + + <Directory ~ "<%= forums_dir %>/.*/phpBB/"> + <IfModule mod_authz_core.c> + # Apache 2.4 + Require all granted + </IfModule> + <IfModule !mod_authz_core.c> + # Apache 2.2 + Order Allow,Deny + Allow from all + </IfModule> + </Directory> + +<%- +forbidden = ['install', + 'cache', + 'includes', + 'phpbb_seo/includes', + 'store', + 'images/avatars/upload', + 'files', + 'umil/error_files', + 'gym_sitemaps/acp', + 'gym_sitemaps/sources', + 'gym_sitemaps/cache', + 'gym_sitemaps/includes', + 'gym_sitemaps/display', + 'gym_sitemaps/modules', +] +for f in forbidden -%> + <Directory <%= forums_dir %>/.*/phpBB/<%= f %>/ > + <IfModule mod_authz_core.c> + # Apache 2.4 + Require all denied + </IfModule> + <IfModule !mod_authz_core.c> + # Apache 2.2 + Order Deny,Allow + Deny from all + </IfModule> + </Directory> + +<%- end -%> diff --git a/modules/planet/manifests/init.pp b/modules/planet/manifests/init.pp new file mode 100644 index 00000000..8aacd5cc --- /dev/null +++ b/modules/planet/manifests/init.pp @@ -0,0 +1,57 @@ +class planet { + + user { 'planet': + groups => 'apache', + comment => 'Planet Mageia', + home => '/var/lib/planet', + } + + $vhost = "planet.${::domain}" + $location = "/var/www/vhosts/${vhost}" + + include apache::mod::php + + apache::vhost::base { $vhost: + location => $location, + content => template('planet/planet_vhosts.conf') + } + + apache::vhost::base { "ssl_${vhost}": + use_ssl => true, + vhost => $vhost, + location => $location, + content => template('planet/planet_vhosts.conf') + } + + mga_common::local_script { 'deploy_new-planet.sh': + content => template('planet/deploy_new-planet.sh') + } + + file { $location: + ensure => directory, + } + + file { "${location}/index.php": + content => template('planet/index.php') + } + + package { ['php-iconv']: } + + class files_backup inherits base { + file { '/var/lib/planet/backup': + ensure => directory, + } + + mga_common::local_script { 'backup_planet-files.sh': + content => template('planet/backup_planet-files.sh') + } + + cron { "Backup files (planet)": + user => root, + hour => '23', + minute => '42', + command => '/usr/local/bin/backup_planet-files.sh', + require => Mga_common::Local_script['backup_planet-files.sh'], + } + } +}
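For context, the planet class and its nested files_backup class above are meant to be attached to the web server's node definition. A minimal usage sketch follows; only the class names are taken from the manifest above, the node name is hypothetical:

node 'planet-web.example.org' {
    # vhost, index.php and the deploy_new-planet.sh helper
    include planet
    # backup directory plus the 23:42 cron job that rotates the rsync copies
    include planet::files_backup
}

The cron declared in files_backup runs /usr/local/bin/backup_planet-files.sh once a day; that script (next hunk) keeps seven numbered rsync copies per locale, i.e. roughly a week of history.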
diff --git a/modules/planet/templates/backup_planet-files.sh b/modules/planet/templates/backup_planet-files.sh new file mode 100755 index 00000000..8cab8d1e --- /dev/null +++ b/modules/planet/templates/backup_planet-files.sh @@ -0,0 +1,23 @@ +#!/bin/sh + +# Initialization +PATH_TO_FILE=${PATH_TO_FILE:-/var/lib/planet/backup} +[ ! -f $PATH_TO_FILE/count ] && echo 0 > $PATH_TO_FILE/count +COUNT=$(cat "$PATH_TO_FILE/count") +# Back up each locale +for locale in de en es fr it pl +do + if [ ! -d $PATH_TO_FILE/$locale ] + then + /bin/mkdir $PATH_TO_FILE/$locale + fi + rsync -aHP --delete <%= location %>/$locale $PATH_TO_FILE/$locale/$locale-$COUNT +done +# Update the count file so that a week of backups is kept in the directory +if [ $COUNT -ne 6 ] +then + COUNT=$(expr $COUNT + 1) +else + COUNT="0" +fi +echo $COUNT > $PATH_TO_FILE/count diff --git a/modules/planet/templates/deploy_new-planet.sh b/modules/planet/templates/deploy_new-planet.sh new file mode 100755 index 00000000..b3889d31 --- /dev/null +++ b/modules/planet/templates/deploy_new-planet.sh @@ -0,0 +1,41 @@ +#!/bin/sh + +# Initialization +PATH_TO_FILE=${PATH_TO_FILE:-/var/lib/planet} +PATH_TO_PLANET=${PATH_TO_PLANET:-<%= location %>} + +# Ask for the new locale name +echo -n "Locale name: " +read locale + +# Display the answer and ask for confirmation +echo -e -n "Do you confirm the entry: \"$locale\"? (y/n) " +read answer +if [ "$answer" == "y" ] +then + FILE="$PATH_TO_PLANET/$locale/" + if test -d $FILE + then + echo "Aborted, $FILE already exists." + exit 2 + else + # Deploy a new planet for the given locale + /bin/mkdir $FILE + /bin/chown planet:apache $FILE + # TODO: this URL returns 403 (2024-01) + /usr/bin/wget -O $PATH_TO_FILE"/moonmoon.tar.gz" https://damsweb.net/files/moonmoon_mageia.tar.gz + if [ $? -ne 0 ] + then + echo "Aborted, can't download the GZIP file" + exit 2 + fi + /bin/tar zxvf $PATH_TO_FILE/moonmoon.tar.gz -C $FILE + /bin/mkdir $FILE"cache" + /bin/chown -R planet:apache $FILE + /bin/chmod g+w $FILE"custom" $FILE"custom/people.opml" $FILE"admin/inc/pwd.inc.php" $FILE"cache" + echo -e "Info: a new Planet has been deployed.\nThe locale is: \"$locale\" - https://planet.<%= domain %>/$locale \n-- \nMail sent by the script '$0' on `hostname`" | /bin/mail -s "New planet Mageia deployed" mageia-webteam@<%= domain %> mageia-marketing@<%= domain %> + fi +else + echo "Aborted, please try again."
+ exit 2 +fi diff --git a/modules/planet/templates/index.php b/modules/planet/templates/index.php new file mode 100644 index 00000000..6c08e763 --- /dev/null +++ b/modules/planet/templates/index.php @@ -0,0 +1,23 @@ +<html> +<body> +<h1>Planet Mageia</h1> +<h3>Please choose one of the following locales:</h3> +<ul> +<?php +function displayloc($path = ''){ + return array_slice(scandir($path), 2); +} + +foreach(displayloc('.') as $loc) + if(is_dir($loc) && $loc != "test" && $loc != "test-2") + { + echo '<li><a href="'.$loc.'">'.$loc.'</a></li>'; + } +?> +</ul> +<h3>How to be listed in Planet Mageia:</h3> +<ul> +<li>just candidate by sending us a RSS feed talking about Mageia in only one locale.</li> +</ul> +</body> +</html> diff --git a/modules/planet/templates/planet_vhosts.conf b/modules/planet/templates/planet_vhosts.conf new file mode 100644 index 00000000..b3a07ab9 --- /dev/null +++ b/modules/planet/templates/planet_vhosts.conf @@ -0,0 +1,11 @@ +<Directory <%= location %> > + Order deny,allow + Allow from All + AllowOverride All + Options FollowSymlinks + Options +Indexes +</Directory> +# Add a permanent redirection for '/*' as '/en/' for english planet +<IfModule mod_alias.c> + RedirectMatch permanent ^/?$ /en/ +</IfModule> diff --git a/modules/postfix/manifests/init.pp b/modules/postfix/manifests/init.pp index 855778da..8a4394df 100644 --- a/modules/postfix/manifests/init.pp +++ b/modules/postfix/manifests/init.pp @@ -1,63 +1,24 @@ class postfix { + package { postfix: } - class base { - package { postfix: - ensure => installed - } - package { 'nail': - ensure => installed - } - service { postfix: - ensure => running, - subscribe => [ Package['postfix']], - path => "/etc/init.d/postfix" - } - } - - file { '/etc/postfix/main.cf': - ensure => present, - owner => root, - group => root, - mode => 644, - require => Package["postfix"], - content => "", - notify => [Service['postfix']] - } - - - class simple_relay inherits base { - file { '/etc/postfix/main.cf': - content => template("postfix/simple_relay_main.cf"), - } + service { 'postfix': + subscribe => Package['postfix'], } - class smtp_server inherits base { - include postgrey - file { '/etc/postfix/main.cf': - content => template("postfix/main.cf"), - } - - file { '/etc/postfix/transport_regexp': - ensure => present, - owner => root, - group => root, - mode => 644, - content => template("postfix/transport_regexp"), - } - + file { '/etc/postfix/main.cf': + require => Package['postfix'], + content => '', + notify => Service['postfix'], } - class primary_smtp inherits smtp_server { - file { '/etc/postfix/master.cf': - ensure => present, - owner => root, - group => root, - mode => 644, - content => template("postfix/primary_master.cf"), - } + file { '/etc/ssl/postfix/': + ensure => directory, } - class secondary_smtp inherits smtp_server { + openssl::self_signed_splitted_cert { "${::hostname}.${::domain}": + filename => 'postfix', + directory => '/etc/ssl/postfix/', + owner => 'postfix', + group => 'postfix' } - } diff --git a/modules/postfix/manifests/server.pp b/modules/postfix/manifests/server.pp new file mode 100644 index 00000000..85ab261c --- /dev/null +++ b/modules/postfix/manifests/server.pp @@ -0,0 +1,13 @@ +class postfix::server inherits postfix { + include postgrey + include amavis + include spamassassin + + File['/etc/postfix/main.cf'] { + content => template('postfix/main.cf'), + } + + file { '/etc/postfix/transport_regexp': + content => template('postfix/transport_regexp'), + } +} diff --git 
a/modules/postfix/manifests/server/primary.pp b/modules/postfix/manifests/server/primary.pp new file mode 100644 index 00000000..c14a8606 --- /dev/null +++ b/modules/postfix/manifests/server/primary.pp @@ -0,0 +1,43 @@ +class postfix::server::primary inherits postfix::server { + + # Adding DKIM server + include opendkim + opendkim::domain{['mageia.org', 'sucuk.mageia.org', 'duvel.mageia.org', 'forums.mageia.org', 'madb.mageia.org','rabbit.mageia.org', 'fiona.mageia.org','identity.mageia.org', 'group.mageia.org', 'neru.mageia.org']:} + opendkim::trusted{['127.0.0.0/8', '212.85.158.0/24']:} + + package { ['postfix-ldap', 'sqlite3-tools', 'dovecot-plugins-sqlite','rspamd']: } + + # council is here until we fully decide who has aliases in com team, + + # see https://bugs.mageia.org/show_bug.cgi?id=1345 + # alumni is a special group for tracking previous members of + # the project, so they keep their aliases for a time + $aliases_group = ['mga-founders', + 'mga-packagers', + 'mga-sysadmin', + 'mga-council', + 'mga-alumni', + 'mga-i18n-committers'] + $ldap_password = extlookup('postfix_ldap','x') + $ldap_servers = get_ldap_servers() + + file { + '/etc/postfix/master.cf': + content => template('postfix/primary_master.cf'); + '/etc/postfix/ldap_aliases.conf': + content => template('postfix/ldap_aliases.conf'); + # TODO merge the file with the previous one, for common part (ldap, etc) + '/etc/postfix/group_aliases.conf': + content => template('postfix/group_aliases.conf'); + # TODO make it conditional to the presence of sympa + '/etc/postfix/sympa_aliases': + content => template('postfix/sympa_aliases'); + '/etc/postfix/virtual_aliases': + content => template('postfix/virtual_aliases'); + } + + exec { 'postmap /etc/postfix/virtual_aliases': + refreshonly => true, + subscribe => File['/etc/postfix/virtual_aliases'], + } +} diff --git a/modules/postfix/manifests/server/secondary.pp b/modules/postfix/manifests/server/secondary.pp new file mode 100644 index 00000000..e4dd8721 --- /dev/null +++ b/modules/postfix/manifests/server/secondary.pp @@ -0,0 +1 @@ +class postfix::server::secondary inherits postfix::server { } diff --git a/modules/postfix/manifests/simple_relay.pp b/modules/postfix/manifests/simple_relay.pp new file mode 100644 index 00000000..8911f781 --- /dev/null +++ b/modules/postfix/manifests/simple_relay.pp @@ -0,0 +1,9 @@ +class postfix::simple_relay inherits postfix { + File['/etc/postfix/main.cf'] { + content => template('postfix/simple_relay_main.cf'), + } + file { + '/etc/postfix/sympa_aliases': + content => template('postfix/sympa_aliases'); + } +} diff --git a/modules/postfix/templates/group_aliases.conf b/modules/postfix/templates/group_aliases.conf new file mode 100644 index 00000000..eac16dab --- /dev/null +++ b/modules/postfix/templates/group_aliases.conf @@ -0,0 +1,15 @@ +<%- + ldap = ldap_servers.map { |l| "ldaps://#{l}:636" } +-%> +server_host = <%= ldap.join(' ') %> +search_base = <%= dc_suffix %> +query_filter = (&(cn=mga-%u)(objectClass=groupOfNames)) +result_attribute = mail +special_result_attribute = member +bind = yes +bind_dn = cn=postfix-<%= hostname %>,ou=System Accounts,<%= dc_suffix %> +bind_pw = <%= ldap_password %> +# postfix complain on url +# warning: dict_ldap_open: URL scheme ldaps requires protocol version 3 +version = 3 +domain = group.<%= domain %> diff --git a/modules/postfix/templates/ldap_aliases.conf b/modules/postfix/templates/ldap_aliases.conf new file mode 100644 index 00000000..40d7da13 --- /dev/null +++ 
b/modules/postfix/templates/ldap_aliases.conf @@ -0,0 +1,20 @@ +<%- +# TODO I am sure that a more elegant way could be find +query_string = '' +aliases_group.each do |g| + query_string += '(memberOf=cn=' + g + ',ou=Group,' + dc_suffix + ')' +end + +ldap = ldap_servers.map { |l| "ldaps://#{l}:636" } +-%> +server_host = <%= ldap.join(' ') %> +search_base = <%= dc_suffix %> +query_filter = (&(uid=%u)(|<%= query_string %>)) +result_attribute = mail +bind = yes +bind_dn = cn=postfix-<%= hostname %>,ou=System Accounts,<%= dc_suffix %> +bind_pw = <%= ldap_password %> +# postfix complain on url +# warning: dict_ldap_open: URL scheme ldaps requires protocol version 3 +version = 3 +domain = <%= domain %> diff --git a/modules/postfix/templates/main.cf b/modules/postfix/templates/main.cf index 7b60f3a3..6b42a4de 100644 --- a/modules/postfix/templates/main.cf +++ b/modules/postfix/templates/main.cf @@ -11,79 +11,143 @@ sendmail_path = /usr/sbin/sendmail.postfix setgid_group = postdrop command_directory = /usr/sbin manpage_directory = /usr/share/man -daemon_directory = <%= lib_dir %>/postfix/ +daemon_directory = /usr/libexec/postfix +meta_directory = /etc/postfix +shlib_directory = /usr/lib64 +compatibility_level = 2 data_directory = /var/lib/postfix newaliases_path = /usr/bin/newaliases mailq_path = /usr/bin/mailq queue_directory = /var/spool/postfix mail_owner = postfix +<% if all_tags.include?('postfix::simple_relay') || all_tags.include?('postfix::server::secondary') %> +relayhost = sucuk.<%= domain %> +<%- end -%> # User configurable parameters <% if all_tags.include?('postfix::simple_relay') %> -inet_interfaces = localhost +inet_interfaces = localhost, 127.0.0.1 <% else %> inet_interfaces = all <% end %> inet_protocols = all -mynetworks_style = host +<% if @hostname == 'neru' then %> +# We do not have a reverse on ipv6 :( +smtp_address_preference = ipv4 +<%- end -%> + +# FIXME Do not hardcode this +mynetworks = 212.85.158.144/28 [2a02:2178:2:7::]/64 127.0.0.0/16 163.172.148.228 [2001:bc8:4400:2800::4115] myhostname = <%= fqdn %> mydomain = <%= domain %> -mydestination = <%= fqdn %> -myorigin = $mydomain -<%- if all_tags.include?('postfix::secondary_smtp') -%> -relay_domains = <%= domain %>, ml.<%= domain %> +<%- if all_tags.include?('postfix::server::secondary') -%> +relay_domains = <%= domain %>, + ml.<%= domain %>, + group.<%= domain %> <%- end -%> mydestination = <%= fqdn %> -<%- if all_tags.include?('postfix::primary_smtp') -%> - <%= domain %>, -<%- if classes.include?('sympa') -%> +<%- if all_tags.include?('postfix::server::primary') -%> ml.<%= domain %> -<%- end -%> +<%- end -%> + +<%- if all_tags.include?('postfix::server::primary') -%> + +virtual_mailbox_domains = <%= domain %>, + group.<%= domain %> + +# postfix complain if not set +# Mar 22 23:51:20 alamut postfix/virtual[22952]: fatal: bad string length 0 < 1: virtual_mailbox_base = +virtual_mailbox_base = /var/lib/mail + +# local_recipient_maps is disabled, as we need to route all +# non local email to ryu as long as mageia ml are hosted +# there. 
Hence the use of fallback_transport , but this is +# taken in account only of local_recipient_maps is empty +local_recipient_maps = +# route ml to ryu ( ml being mageia-*@mageia ) +fallback_transport_maps = regexp:/etc/postfix/transport_regexp + +# needed by sympa to handle bounce, according to the doc +recipient_delimiter = + -alias_maps = hash:/etc/aliases - # uncomment if we want to enable ldap based alias - # and create the file - #ldap:/etc/postfix/ldap_aliases.conf + +alias_maps = hash:/etc/postfix/aliases + +virtual_alias_maps = ldap:/etc/postfix/ldap_aliases.conf + ldap:/etc/postfix/group_aliases.conf + hash:/etc/postfix/virtual_aliases +<%- if classes.include?('sympa::server') -%> + regexp:/etc/postfix/sympa_aliases +<%- end -%> +<% else %> +<%- if classes.include?('sympa::server') -%> +virtual_alias_maps = regexp:/etc/postfix/sympa_aliases +<%- end -%> <%- end -%> +<%- if all_tags.include?('postfix::server::primary') -%> +# Adding DKIM Miler for primaryserver (sucuk) +smtpd_milters = inet:127.0.0.1:8891 +non_smtpd_milters = $smtpd_milters +milter_default_action = accept +milter_protocol = 2 + +# Adding Sender Rewriting Scheme +sender_canonical_maps = socketmap:inet:localhost:10003:forward +sender_canonical_classes = envelope_sender +recipient_canonical_maps = socketmap:inet:localhost:10003:reverse +recipient_canonical_classes= envelope_recipient,header_recipient +<%- end -%> -<%- if all_tags.include?('postfix::smtp_server') -%> +<%- if all_tags.include?('postfix::server') -%> transport_maps = regexp:/etc/postfix/transport_regexp +content_filter = smtp-filter:[127.0.0.1]:10025 <%- end -%> -<%- if classes.include?('sympa') -%> +<%- if classes.include?('sympa::server') -%> sympa_destination_recipient_limit = 1 sympabounce_destination_recipient_limit = 1 <%- end -%> #delay_warning_time = 4h -smtpd_banner = $myhostname ESMTP $mail_name ($mail_version) (Mandriva Linux) +smtpd_banner = $myhostname ESMTP $mail_name ($mail_version) (<%= lsbdistid %>) unknown_local_recipient_reject_code = 450 smtp-filter_destination_concurrency_limit = 2 lmtp-filter_destination_concurrency_limit = 2 +# enable opportunistic TLS when receiving smtpd_use_tls = yes -smtpd_tls_cert_file = /etc/pki/tls/certs/postfix.pem -smtpd_tls_key_file = /etc/pki/tls/private/postfix.pem +smtpd_tls_received_header = yes +smtpd_tls_cert_file = /etc/ssl/postfix/postfix.crt +smtpd_tls_key_file = /etc/ssl/postfix/postfix.key smtpd_tls_CAfile = /etc/pki/tls/certs/ca-bundle.crt +# enable opportunistic TLS when sending +smtp_tls_security_level = may +smtp_tls_CAfile = /etc/pki/tls/certs/ca-bundle.crt -<%- if all_tags.include?('postfix::smtp_server') -%> +<%- if all_tags.include?('postfix::server') -%> smtpd_etrn_restrictions = reject smtpd_helo_required = yes -smtpd_data_restrictions = reject_unauth_pipelining +smtpd_data_restrictions = permit_mynetworks + reject_unauth_pipelining reject_multi_recipient_bounce -smtpd_recipient_restrictions = reject_non_fqdn_recipient - reject_non_fqdn_sender +smtpd_recipient_restrictions = permit_mynetworks # not done yet, not sure if we need to offer this kind of service # permit_sasl_authenticated - permit_mynetworks - reject_unauth_destination reject_non_fqdn_helo_hostname + reject_non_fqdn_recipient + reject_non_fqdn_sender + check_sender_access hash:/etc/postfix/access + reject_rhsbl_helo sbl.spamhaus.org + reject_rhsbl_reverse_client sbl.spamhaus.org + reject_rhsbl_sender sbl.spamhaus.org + reject_rbl_client sbl.spamhaus.org + reject_unauth_destination reject_unknown_sender_domain 
reject_unknown_client <%- if classes.include?('postgrey') -%> @@ -91,3 +155,5 @@ smtpd_recipient_restrictions = reject_non_fqdn_recipient <%- end -%> <%- end -%> +# Needed for buggy clients +always_add_missing_headers = yes diff --git a/modules/postfix/templates/primary_master.cf b/modules/postfix/templates/primary_master.cf index 299bbd6c..e05d33dc 100644 --- a/modules/postfix/templates/primary_master.cf +++ b/modules/postfix/templates/primary_master.cf @@ -116,7 +116,7 @@ cyrus-inet unix - - y - - lmtp #mailman unix - n n - - pipe # flags=FR user=list argv=/usr/lib/mailman/bin/postfix-to-mailman.py # ${nexthop} ${user} -<% if classes.include?('sympa') %> +<% if classes.include?('sympa::server') %> sympa unix - n n - - pipe flags=R user=sympa argv=/usr/sbin/queue ${recipient} sympabounce unix - n n - - pipe @@ -174,4 +174,3 @@ smtp-filter unix - - y - - smtp -o max_use=20 # ##### END OF CONTENT FILTER CUSTOMIZATIONS ##### - diff --git a/modules/postfix/templates/simple_relay_main.cf b/modules/postfix/templates/simple_relay_main.cf index 5f8d44ca..e0c116a7 100644 --- a/modules/postfix/templates/simple_relay_main.cf +++ b/modules/postfix/templates/simple_relay_main.cf @@ -11,7 +11,7 @@ sendmail_path = /usr/sbin/sendmail.postfix setgid_group = postdrop command_directory = /usr/sbin manpage_directory = /usr/share/man -daemon_directory = <%= lib_dir %>/postfix/ +daemon_directory = /usr/libexec/postfix/ data_directory = /var/lib/postfix newaliases_path = /usr/bin/newaliases mailq_path = /usr/bin/mailq @@ -20,11 +20,12 @@ mail_owner = postfix # User configurable parameters -inet_interfaces = localhost +myhostname = <%= fqdn %> +mydomain = <%= domain %> inet_protocols = all mynetworks_style = host #delay_warning_time = 4h -smtpd_banner = $myhostname ESMTP $mail_name ($mail_version) (Mandriva Linux) +smtpd_banner = $myhostname ESMTP $mail_name ($mail_version) (Mageia Linux) unknown_local_recipient_reject_code = 450 smtp-filter_destination_concurrency_limit = 2 lmtp-filter_destination_concurrency_limit = 2 @@ -32,3 +33,19 @@ smtpd_use_tls = yes smtpd_tls_cert_file = /etc/pki/tls/certs/postfix.pem smtpd_tls_key_file = /etc/pki/tls/private/postfix.pem smtpd_tls_CAfile = /etc/pki/tls/certs/ca-bundle.crt + +<%- if classes.include?('sympa::server') -%> +local_recipient_maps = +fallback_transport_maps = regexp:/etc/postfix/transport_regexp +transport_maps = regexp:/etc/postfix/transport_regexp +mydestination = ml.<%= domain %> +sympa_destination_recipient_limit = 1 +sympabounce_destination_recipient_limit = 1 +virtual_alias_maps = regexp:/etc/postfix/sympa_aliases +# needed by sympa to handle bounce, according to the doc +recipient_delimiter = + +# This is ugly for a simple relay but we need ml.mageia.org to accept email :( +inet_interfaces = all +<%- else -%> +inet_interfaces = localhost +<%- end -%> diff --git a/modules/postfix/templates/sympa_aliases b/modules/postfix/templates/sympa_aliases new file mode 100644 index 00000000..436e7a28 --- /dev/null +++ b/modules/postfix/templates/sympa_aliases @@ -0,0 +1,8 @@ +# everything is handled with transports in postfix, +# but according to https://www.sympa.org/faq/postfix, we also need this one +<% escaped_domain = ( 'ml.' 
+ domain ).sub('.','\.') %> +/^(.*)-owner\@<%= escaped_domain %>$/ $1+owner@ml.<%= domain %> +# redirect the mail from the ml domain to sysadmin +/^listmaster\@<%= escaped_domain %>$/ listmaster@<%= domain %> +# errors are sent there, so that should also be redirected +/^sympa-request\@<%= escaped_domain %>$/ listmaster@<%= domain %> diff --git a/modules/postfix/templates/transport_regexp b/modules/postfix/templates/transport_regexp index 5d005c7b..3eb5494f 100644 --- a/modules/postfix/templates/transport_regexp +++ b/modules/postfix/templates/transport_regexp @@ -1,8 +1,10 @@ <% ml_domain = 'ml\.' + domain.gsub('.','\.') %> -<%- if classes.include?('sympa') -%> +<%- if classes.include?('sympa::server') -%> /^.*+owner\@<%= ml_domain %>$/ sympabounce: +/^bounce+.*\@<%= ml_domain %>$/ sympabounce: /^.*\@<%= ml_domain %>$/ sympa: +<%- else -%> +/^.*\@<%= ml_domain %>$/ smtp:sucuk.mageia.org <%- end -%> - diff --git a/modules/postfix/templates/virtual_aliases b/modules/postfix/templates/virtual_aliases new file mode 100644 index 00000000..861e79c6 --- /dev/null +++ b/modules/postfix/templates/virtual_aliases @@ -0,0 +1,33 @@ +# do not forget to add $domain or it will not work +# do not hardcode the domain, or it will be harvested by bot + +treasurer@<%= domain %> treasurer@group.<%= domain %> +president@<%= domain %> ennael@<%= domain %> +secretary@<%= domain %> obgr_seneca@<%= domain %> + +contact@<%= domain %> council@group.<%= domain %> +press@<%= domain %> council@group.<%= domain %> + +# later switch to a team alias +root@<%= domain %> sysadmin@group.<%= domain %> + +security@<%= domain %> security@group.<%= domain %> + +# Temporary(?) alias until there is a real board-commits@ list +board-commits@ml.<%= domain %> board-public@ml.<%= domain %> + +# TODO see https://www.ietf.org/rfc/rfc2142.txt +<% +['postmaster','hostmaster','abuse','noc','listmaster','MAILER-DAEMON'].each { |a| +%> +<%= a %>@<%= domain %> root@<%= domain %> +<% +} + +['webmaster','www'].each { |a| +%> +<%= a %>@<%= domain %> web@group.<%= domain %> +<% } %> + +# TODO : +# info, marketing, sales -> marketing ( once we do have a team ) diff --git a/modules/postgresql/manifests/config.pp b/modules/postgresql/manifests/config.pp new file mode 100644 index 00000000..a9f2ad7f --- /dev/null +++ b/modules/postgresql/manifests/config.pp @@ -0,0 +1,10 @@ +define postgresql::config($content) { + file { $name: + owner => 'postgres', + group => 'postgres', + mode => '0600', + content => $content, + require => Package['postgresql-server'], + notify => Exec['service postgresql reload'], + } +} diff --git a/modules/postgresql/manifests/database.pp b/modules/postgresql/manifests/database.pp new file mode 100644 index 00000000..34cee2a6 --- /dev/null +++ b/modules/postgresql/manifests/database.pp @@ -0,0 +1,20 @@ +# TODO convert it to a regular type ( so we can later change user and so on ) +define postgresql::database($description = '', + $user = 'postgres', + $callback_notify = '') { + + exec { "createdb -O ${user} -U postgres ${name} '${description}' ": + user => 'root', + unless => "psql -A -t -U postgres -l | grep '^${name}|'", + require => Service['postgresql'], + } + + # this is fetched by the manifest asking the database creation, + # once the db have been created + # FIXME proper ordering ? 
+ # FIXME In puppet >3.0 word 'tag' is reserved, so it has to be renamed + @@postgresql::database_callback { $name: + tag => $name, + callback_notify => $callback_notify, + } +} diff --git a/modules/postgresql/manifests/database_callback.pp b/modules/postgresql/manifests/database_callback.pp new file mode 100644 index 00000000..0ab1771f --- /dev/null +++ b/modules/postgresql/manifests/database_callback.pp @@ -0,0 +1,9 @@ +define postgresql::database_callback($callback_notify = '') { + # dummy declaration, so we can trigger the notify + if $callback_notify { + exec { "callback ${name}": + command => '/bin/true', + notify => $callback_notify, + } + } +} diff --git a/modules/postgresql/manifests/db_and_user.pp b/modules/postgresql/manifests/db_and_user.pp new file mode 100644 index 00000000..2d59e1ca --- /dev/null +++ b/modules/postgresql/manifests/db_and_user.pp @@ -0,0 +1,15 @@ +define postgresql::db_and_user( $password, + $description = '', + $callback_notify = '') { + + postgresql::database { $name: + callback_notify => $callback_notify, + description => $description, + user => $name, + require => Postgresql::User[$name], + } + + postgresql::user { $name: + password => $password + } +} diff --git a/modules/postgresql/manifests/hba_entry.pp b/modules/postgresql/manifests/hba_entry.pp new file mode 100644 index 00000000..30fccda0 --- /dev/null +++ b/modules/postgresql/manifests/hba_entry.pp @@ -0,0 +1,40 @@ +# == Define: postgresql::hba_entry +# +# Set a new entry to pg_hba.conf file +# +# === Parameters +# +# See pgsql doc for more details about pg_hba.conf parameters : +# https://www.postgresql.org/docs/9.1/static/auth-pg-hba-conf.html +# +# [*namevar*] +# namevar is not used. +# +# [*type*] +# can be local, host, hostssl, hostnossl +# +# [*database*] +# database name +# +# [*user*] +# user name +# +# [*address*] +# host name or IP address range +# +# [*method*] +# authentication method to use +# +define postgresql::hba_entry( + $type, + $database, + $user, + $address, + $method +) { + include postgresql::var + Postgresql::Pg_hba <| title == $postgresql::var::hba_file |> { + conf_lines +> "${type} ${database} ${user} ${address} ${method}", + } +} +# vim: sw=2 diff --git a/modules/postgresql/manifests/init.pp b/modules/postgresql/manifests/init.pp index fb3ea06b..faec8b8c 100644 --- a/modules/postgresql/manifests/init.pp +++ b/modules/postgresql/manifests/init.pp @@ -1,60 +1 @@ -class postgresql { - - $pgsql_data = "/var/lib/pgsql/data/" - - package { 'postgresql9.0-server': - alias => "postgresql-server", - ensure => installed - } - - service { postgresql: - ensure => running, - subscribe => Package["postgresql-server"], - hasstatus => true, - } - - exec { "service postgresql reload": - refreshonly => true, - subscribe => [ File["postgresql.conf"], - File["pg_ident.conf"], - File["pg_hba.conf"] ] - } - - file { '/etc/pam.d/postgresql': - ensure => present, - owner => root, - group => root, - mode => 644, - content => template("postgresql/pam"), - } - - file { "postgresql.conf": - path => "$pgsql_data/postgresql.conf", - ensure => present, - owner => postgres, - group => postgres, - mode => 600, - content => template("postgresql/postgresql.conf"), - require => Package["postgresql-server"], - } - - file { 'pg_hba.conf': - path => "$pgsql_data/pg_hba.conf", - ensure => present, - owner => postgres, - group => postgres, - mode => 600, - content => template("postgresql/pg_hba.conf"), - require => Package["postgresql-server"], - } - - file { 'pg_ident.conf': - path => 
"$pgsql_data/pg_ident.conf", - ensure => present, - owner => postgres, - group => postgres, - mode => 600, - content => template("postgresql/pg_ident.conf"), - require => Package["postgresql-server"], - } -} +class postgresql { } diff --git a/modules/postgresql/manifests/pg_hba.pp b/modules/postgresql/manifests/pg_hba.pp new file mode 100644 index 00000000..777eee47 --- /dev/null +++ b/modules/postgresql/manifests/pg_hba.pp @@ -0,0 +1,13 @@ +define postgresql::pg_hba( + $conf_lines = [] +) { + $db = list_exported_ressources('Postgresql::Db_and_user') + + $forum_lang = list_exported_ressources('Phpbb::Locale_db') + +# (tmb) disable rewriting config as we are moving to mariadb +# postgresql::config { $name: +# content => template('postgresql/pg_hba.conf'), +# } +} +# vim: sw=2 diff --git a/modules/postgresql/manifests/remote_database.pp b/modules/postgresql/manifests/remote_database.pp new file mode 100644 index 00000000..15b54651 --- /dev/null +++ b/modules/postgresql/manifests/remote_database.pp @@ -0,0 +1,15 @@ +# FIXME: In puppet >3.0 word 'tag' is reserved, so it has to be renamed +define postgresql::remote_database($description = '', + $user = 'postgresql', + $callback_notify = '', + $tag = 'default') { + @@postgresql::database { $name: + description => $description, + user => $user, + callback_notify => $callback_notify, + tag => $tag, + require => Postgresql::User[$user], + } + + Postgresql::Database_callback <<| tag == $name |>> +} diff --git a/modules/postgresql/manifests/remote_db_and_user.pp b/modules/postgresql/manifests/remote_db_and_user.pp new file mode 100644 index 00000000..07e3ea23 --- /dev/null +++ b/modules/postgresql/manifests/remote_db_and_user.pp @@ -0,0 +1,18 @@ +# FIXME: In puppet >3.0 word 'tag' is reserved, so it have to be renamed +define postgresql::remote_db_and_user($password, + $description = '', + $tag = 'default', + $callback_notify = '') { + + @@postgresql::db_and_user { $name: + callback_notify => $callback_notify, + tag => $tag, + description => $description, + password => $password, + } + + # fetch the exported resources that should have been exported + # once the db was created, and trigger a notify to the object + # passed as callback_notify + Postgresql::Database_callback <<| tag == $name |>> +} diff --git a/modules/postgresql/manifests/remote_user.pp b/modules/postgresql/manifests/remote_user.pp new file mode 100644 index 00000000..fb53df4c --- /dev/null +++ b/modules/postgresql/manifests/remote_user.pp @@ -0,0 +1,10 @@ +# FIXME: In puppet >3.0 word 'tag' is reserved, so it have to be renamed +define postgresql::remote_user( $password, + $tag = 'default') { + @@postgresql::user { $name: + tag => $tag, + password => $password, + } +} + + diff --git a/modules/postgresql/manifests/server.pp b/modules/postgresql/manifests/server.pp new file mode 100644 index 00000000..8b92bb2b --- /dev/null +++ b/modules/postgresql/manifests/server.pp @@ -0,0 +1,53 @@ +class postgresql::server { + include postgresql::var + + # missing requires is corrected in cooker, + # should be removed + # once the fix is in a stable release + package { "postgresql${postgresql::var::pg_version}-plpgsql": + alias => 'postgresql-plpgsql', + } + + package { "postgresql${postgresql::var::pg_version}-server": + alias => 'postgresql-server', + require => Package['postgresql-plpgsql'], + } + + service { 'postgresql': + subscribe => Package['postgresql-server'], + } + + exec { 'service postgresql reload': + refreshonly => true, + } + + openssl::self_signed_splitted_cert { 
"pgsql.${::domain}": + filename => 'server', + directory => $postgresql::var::pgsql_data, + owner => 'postgres', + group => 'postgres', + require => Package['postgresql-server'] + } + + + file { '/etc/pam.d/postgresql': + content => template('postgresql/pam'), + } + + @postgresql::pg_hba { $postgresql::var::hba_file: } + + postgresql::hba_entry { 'allow_local_ipv4': + type => 'host', + database => 'all', + user => 'all', + address => '127.0.0.1/32', + method => 'md5', + } + + postgresql::config { + "${postgresql::var::pgsql_data}/pg_ident.conf": + content => template('postgresql/pg_ident.conf'); + "${postgresql::var::pgsql_data}/postgresql.conf": + content => template('postgresql/postgresql.conf'); + } +} diff --git a/modules/postgresql/manifests/tagged.pp b/modules/postgresql/manifests/tagged.pp new file mode 100644 index 00000000..6a49e3ff --- /dev/null +++ b/modules/postgresql/manifests/tagged.pp @@ -0,0 +1,8 @@ +# FIXME: In puppet >3.0 word 'tag' is reserved, so it have to be renamed +define postgresql::tagged() { + # TODO add a system of tag so we can declare database on more than one + # server + Postgresql::User <<| tag == $name |>> + Postgresql::Database <<| tag == $name |>> + Postgresql::Db_and_user <<| tag == $name |>> +} diff --git a/modules/postgresql/manifests/user.pp b/modules/postgresql/manifests/user.pp new file mode 100644 index 00000000..5b73b243 --- /dev/null +++ b/modules/postgresql/manifests/user.pp @@ -0,0 +1,13 @@ +# TODO convert to a regular type, so we can later change password +# without erasing the current user +define postgresql::user($password) { + $sql = "CREATE ROLE ${name} ENCRYPTED PASSWORD '\${pass}' NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT LOGIN;" + + exec { "psql -U postgres -c \"${sql}\" ": + user => 'root', + # do not leak the password on commandline + environment => "pass=${password}", + unless => "psql -A -t -U postgres -c '\\du ${name}' | grep '${name}'", + require => Service['postgresql'], + } +} diff --git a/modules/postgresql/manifests/var.pp b/modules/postgresql/manifests/var.pp new file mode 100644 index 00000000..b31c7ffe --- /dev/null +++ b/modules/postgresql/manifests/var.pp @@ -0,0 +1,7 @@ +class postgresql::var { + + $pgsql_data = '/var/lib/pgsql/data/' + $pg_version = '9.6' + $hba_file = "${pgsql_data}/pg_hba.conf" +} +# vim: sw=2 diff --git a/modules/postgresql/templates/pg_hba.conf b/modules/postgresql/templates/pg_hba.conf index 4dd9906c..e4232a4e 100644 --- a/modules/postgresql/templates/pg_hba.conf +++ b/modules/postgresql/templates/pg_hba.conf @@ -75,31 +75,44 @@ # TYPE DATABASE USER CIDR-ADDRESS METHOD -# This file is in mageia svn: -# $Id$ + +<%- + for line in @conf_lines +-%> +<%= line %> +<%- + end +-%> # Nanar: # This bypass global config for specific user/base -host epoll epoll 127.0.0.1/32 md5 -host epoll epoll ::1/128 md5 -hostssl epoll epoll 212.85.158.146/32 md5 -hostssl epoll epoll 2a02:2178:2:7::2/128 md5 - -host mirrors mirrors 127.0.0.1/32 md5 -host mirrors mirrors ::1/128 md5 -hostssl mirrors mirrors 212.85.158.146/32 md5 -hostssl mirrors mirrors 2a02:2178:2:7::2/128 md5 - -host transifex transifex 127.0.0.1/32 md5 -host transifex transifex ::1/128 md5 -hostssl transifex transifex 212.85.158.146/32 md5 -hostssl transifex transifex 2a02:2178:2:7::2/128 md5 +<% -host bugs bugs 127.0.0.1/32 md5 -host bugs bugs ::1/128 md5 -hostssl bugs bugs 212.85.158.146/32 md5 -hostssl bugs bugs 2a02:2178:2:7::2/128 md5 +# FIXME ip v6 is hardcoded, facter do not seems to support +# fetch it +for i in db +%> +host <%= i %> <%= i %> 
127.0.0.1/32 md5 +host <%= i %> <%= i %> ::1/128 md5 +hostssl <%= i %> <%= i %> <%= ipaddress %>/32 md5 +hostssl <%= i %> <%= i %> 2a02:2178:2:7::2/128 md5 +<% +end +%> +<% +lang = ['en','de'] +for l in lang +%> +host phpbb_<%= l %> phpbb 127.0.0.1/32 md5 +host phpbb_<%= l %> phpbb ::1/128 md5 +hostssl phpbb_<%= l %> phpbb <%= ipaddress %>/32 md5 +hostssl phpbb_<%= l %> phpbb 2a02:2178:2:7::2/128 md5 +# temporary, for the forum on friteuse vm +hostssl phpbb_<%= l %> phpbb 192.168.122.0/24 md5 +<% +end +%> # When creating the database ( with bin/checkstup.pl ) bugzilla need to # access to template1 ( https://bugzilla.mozilla.org/show_bug.cgi?id=542507 ) host template1 bugs 127.0.0.1/32 md5 @@ -107,17 +120,18 @@ host template1 bugs ::1/128 md5 hostssl template1 bugs 212.85.158.146/32 md5 hostssl template1 bugs 2a02:2178:2:7::2/128 md5 -host sympa sympa 127.0.0.1/32 md5 -host sympa sympa ::1/128 md5 -hostssl sympa sympa 212.85.158.146/32 md5 -hostssl sympa sympa 2a02:2178:2:7::2/128 md5 +# Allow youri-check on rabbit to access the results db +hostssl youri_check youri 88.190.12.224/32 md5 +# Allow local access too +hostssl youri_check youri 212.85.158.151/32 md5 +hostssl youri_check youri 2a02:2178:2:7::7/128 md5 # "local" is for Unix domain socket connections only local all all ident map=local # IPv4 local connections: -host all all 127.0.0.1/32 pam +host all all 127.0.0.1/32 md5 # IPv6 local connections: -host all all ::1/128 pam +host all all ::1/128 md5 -hostssl all all 0.0.0.0/0 pam -hostssl all all ::0/0 pam +hostssl all all 0.0.0.0/0 md5 +hostssl all all ::0/0 md5 diff --git a/modules/postgresql/templates/postgresql.conf b/modules/postgresql/templates/postgresql.conf index 813c0910..c1e7c994 100644 --- a/modules/postgresql/templates/postgresql.conf +++ b/modules/postgresql/templates/postgresql.conf @@ -113,7 +113,7 @@ ssl = on # - Memory - -shared_buffers = 24MB # min 128kB +shared_buffers = 2048MB # min 128kB # (change requires restart) #temp_buffers = 8MB # min 800kB #max_prepared_transactions = 0 # zero disables the feature @@ -122,8 +122,8 @@ shared_buffers = 24MB # min 128kB # per transaction slot, plus lock space (see max_locks_per_transaction). # It is not advisable to set max_prepared_transactions nonzero unless you # actively intend to use prepared transactions. -#work_mem = 1MB # min 64kB -#maintenance_work_mem = 16MB # min 1MB +work_mem = 64MB # min 64kB +maintenance_work_mem = 512MB # min 1MB #max_stack_depth = 2MB # min 100kB # - Kernel Resource Usage - @@ -144,7 +144,7 @@ shared_buffers = 24MB # min 128kB #bgwriter_delay = 200ms # 10-10000ms between rounds #bgwriter_lru_maxpages = 100 # 0-1000 max buffers written/round -#bgwriter_lru_multiplier = 2.0 # 0-10.0 multipler on buffers scanned/round +#bgwriter_lru_multiplier = 2.0 # 0-10.0 multiplier on buffers scanned/round # - Asynchronous Behavior - @@ -235,7 +235,7 @@ shared_buffers = 24MB # min 128kB #cpu_tuple_cost = 0.01 # same scale as above #cpu_index_tuple_cost = 0.005 # same scale as above #cpu_operator_cost = 0.0025 # same scale as above -#effective_cache_size = 128MB +effective_cache_size = 4096MB # - Genetic Query Optimizer - @@ -467,7 +467,7 @@ shared_buffers = 24MB # min 128kB datestyle = 'iso, mdy' #intervalstyle = 'postgres' -#timezone = unknown # actually, defaults to TZ environment +timezone = 'Europe/Paris' # actually, defaults to TZ environment # setting #timezone_abbreviations = 'Default' # Select the set of available time zone # abbreviations. 
Currently, there are diff --git a/modules/postgrey/manifests/init.pp b/modules/postgrey/manifests/init.pp index 8d55a77c..8a2c9c18 100644 --- a/modules/postgrey/manifests/init.pp +++ b/modules/postgrey/manifests/init.pp @@ -1,31 +1,19 @@ class postgrey { - package { postgrey: - ensure => installed - } - - service { postgrey: - ensure => running, - path => "/etc/init.d/postgrey", - subscribe => [ Package[postgrey]] + package { 'postgrey': } + + service { 'postgrey': + subscribe => Package['postgrey'], } - file { "/etc/sysconfig/postgrey": - ensure => present, - owner => root, - group => root, - mode => 644, - content => template("postgrey/postgrey.sysconfig"), - notify => [ Service[postgrey] ], - require => Package[postgrey], + File { + notify => Service['postgrey'], + require => Package['postgrey'], } - file { "/etc/postfix/postgrey_whitelist_clients.local": - ensure => present, - owner => root, - group => root, - mode => 644, - content => template("postgrey/whitelist_clients.local"), - require => Package[postgrey], - notify => [ Service[postgrey]], + file { + '/etc/sysconfig/postgrey': + content => template('postgrey/postgrey.sysconfig'); + '/etc/postfix/postgrey_whitelist_clients.local': + content => template('postgrey/whitelist_clients.local'); } } diff --git a/modules/postgrey/templates/postgrey.sysconfig b/modules/postgrey/templates/postgrey.sysconfig index ec4e6947..f08b8f6f 100644 --- a/modules/postgrey/templates/postgrey.sysconfig +++ b/modules/postgrey/templates/postgrey.sysconfig @@ -1,12 +1,10 @@ # change default configuration option here -# SOCKET=$(postconf -h queue_directory)/extern/postgrey/socket -# OPTIONS="--unix=$SOCKET" -# DBPATH=/var/lib/postgrey -# OPTIONS="$OPTIONS --dbdir=$DBPATH" +# default: unix socket +SOCKET="--unix=/var/spool/postfix/extern/postgrey/socket" -# to use an inet connection instead of a socket -#OPTIONS="--inet=127.0.0.1:10031" +# to use an inet socket instead +#SOCKET="--inet=127.0.0.1:10031" # enable whitelisting OPTIONS="$OPTIONS --auto-whitelist-clients" diff --git a/modules/postgrey/templates/whitelist_clients.local b/modules/postgrey/templates/whitelist_clients.local index 9457cc82..8c87b88c 100644 --- a/modules/postgrey/templates/whitelist_clients.local +++ b/modules/postgrey/templates/whitelist_clients.local @@ -1,5 +1,2 @@ -# zarb -ryu.zarb.org -cthulhu.zarb.org - - +# mageia +<%= domain %> diff --git a/modules/puppet/manifests/client.pp b/modules/puppet/manifests/client.pp new file mode 100644 index 00000000..1168373b --- /dev/null +++ b/modules/puppet/manifests/client.pp @@ -0,0 +1,15 @@ +class puppet::client inherits puppet { + + cron { 'puppet': + ensure => present, + command => 'puppet agent --onetime --no-daemonize -l syslog >/dev/null 2>&1', + user => 'root', + minute => fqdn_rand( 60 ), + } + + # we are using cron, so no need for the service + service { 'puppet': + enable => false, + hasstatus => true, + } +} diff --git a/modules/puppet/manifests/hiera.pp b/modules/puppet/manifests/hiera.pp new file mode 100644 index 00000000..02900cd7 --- /dev/null +++ b/modules/puppet/manifests/hiera.pp @@ -0,0 +1,14 @@ +class puppet::hiera { + package { ['ruby-hiera']: } + + # ease the use fo the command line tool + # who use a different location for the config file + file { '/etc/hiera.yaml': + ensure => link, + target => '/etc/puppet/hiera.yaml', + } + + file { '/etc/puppet/hiera.yaml': + content => template('puppet/hiera.yaml'), + } +} diff --git a/modules/puppet/manifests/init.pp b/modules/puppet/manifests/init.pp index b23e9d6a..be72d17d 
100644 --- a/modules/puppet/manifests/init.pp +++ b/modules/puppet/manifests/init.pp @@ -1,52 +1,11 @@ - class puppet { - class client { - package { puppet: - ensure => installed - } - - service { puppet: - ensure => running, - subscribe => [ Package[puppet], File["/etc/puppet/puppet.conf"]] - } - - file { "/etc/puppet/puppet.conf": - ensure => present, - owner => root, - group => root, - mode => 644, - content => template("puppet/puppet.conf"), - require => Package[puppet] - } - } - - class master inherits client { - package { puppet-server: - ensure => installed - } - - service { puppetmaster: - ensure => running, - path => "/etc/init.d/puppetmaster", - subscribe => [ Package[puppet-server], File["/etc/puppet/puppet.conf"]] - } + include puppet::stored_config - file { "extdata": - path => "/etc/puppet/extdata", - ensure => directory, - owner => puppet, - group => puppet, - mode => 700, - recurse => true - } + package { 'puppet': } - file { '/etc/puppet/tagmail.conf': - ensure => present, - owner => puppet, - group => puppet, - mode => 700, - content => template("puppet/tagmail.conf"), - } - + # only here to be subclassed + file { '/etc/puppet/puppet.conf': + require => Package[puppet], + content => template('puppet/puppet.conf','puppet/puppet.agent.conf'), } } diff --git a/modules/puppet/manifests/master.pp b/modules/puppet/manifests/master.pp new file mode 100644 index 00000000..55529466 --- /dev/null +++ b/modules/puppet/manifests/master.pp @@ -0,0 +1,54 @@ +class puppet::master inherits puppet { + include puppet::client + include puppet::queue + include puppet::stored_config + include puppet::hiera +# do not enable until bug 4591 is solved +# include puppet::thin + + # rails and sqlite3 are used for stored config + package { ["ruby-${puppet::stored_config::database}"]: } + + File['/etc/puppet/puppet.conf'] { + content => template('puppet/puppet.conf', + 'puppet/puppet.agent.conf', + 'puppet/puppet.master.conf'), + } + + + package { 'puppet-server': } + + service { 'puppetmaster': +# uncomment once thin is enabled +# ensure => stopped, + subscribe => [Package['puppet-server'], + File['/etc/puppet/puppet.conf']], + } + + file { '/etc/puppet/extdata': + ensure => directory, + owner => puppet, + group => puppet, + mode => '0700', + recurse => true, + } + + file { '/etc/puppet/tagmail.conf': + content => template('puppet/tagmail.conf'), + } + + tidy { '/var/lib/puppet/reports': + age => '4w', + matches => '*.yaml', + recurse => true, + type => 'mtime', + } + + file { '/etc/puppet/autosign.conf': + ensure => $::environment ? 
{ + 'test' => 'present', + default => 'absent', + }, + content => '*', + } +} diff --git a/modules/puppet/manifests/queue.pp b/modules/puppet/manifests/queue.pp new file mode 100644 index 00000000..770fc6df --- /dev/null +++ b/modules/puppet/manifests/queue.pp @@ -0,0 +1,13 @@ +class puppet::queue { + include stompserver + + package { 'ruby-stomp': } + + service { 'puppetqd': + provider => base, + start => 'puppet queue', + require => [Package['puppet-server'], + Package['ruby-stomp'], + File['/etc/puppet/puppet.conf']], + } +} diff --git a/modules/puppet/manifests/stored_config.pp b/modules/puppet/manifests/stored_config.pp new file mode 100644 index 00000000..51820d83 --- /dev/null +++ b/modules/puppet/manifests/stored_config.pp @@ -0,0 +1,26 @@ +class puppet::stored_config { +# TODO uncomment when the following problem have been fixed +# - how to bootstrap the installation of the infrastructure ( since we use +# stored_config for postgresql::remote_db_and_user, we need to have a +# sqlite3 database first and then declare the database, and then switch +# to it ) +# - how do we decide when we get sqlite3 ( for small test servers ) and +# when do we decide to get the real pgsql server ( for production setup ) +# +# if ($::environment == 'production') { +# # FIXME not really elegant, but we do not have much choice +# # this make servers not bootstrappable for now +# $pgsql_password = extlookup('puppet_pgsql','x') +# +# postgresql::remote_db_and_user { 'bugs': +# description => 'Puppet database', +# password => $pgsql_password, +# } +# +# $database = 'pg' +# } else { + $database = 'sqlite3' +# } +# + $db_config = template('puppet/db_config.erb') +} diff --git a/modules/puppet/manifests/thin.pp b/modules/puppet/manifests/thin.pp new file mode 100644 index 00000000..1ca03a7e --- /dev/null +++ b/modules/puppet/manifests/thin.pp @@ -0,0 +1,35 @@ +class puppet::thin { + package { 'ruby-thin': } + + include apache::base + include apache::mod::ssl + include apache::mod::proxy + + apache::vhost::other_app { 'puppet_proxy': + vhost_file => 'puppet/apache_proxy_vhost.conf', + } + + apache::config { "${apache::base::conf_d}/puppet.conf": + content => 'Listen 8140', + } + + $service_name = 'thin_puppet_master' + file { '/etc/puppet/thin.yml': + content => template('puppet/thin.yml'), + notify => Service[$service_name], + } + + file { '/usr/local/share/puppet.config.ru': + content => template('puppet/config.ru'), + } + + service { $service_name: + provider => base, + require => [ Package['ruby-thin'], + File['/etc/puppet/thin.yml'], + File['/usr/local/share/puppet.config.ru']], + start => 'thin -C /etc/puppet/thin.yml start', + stop => 'thin -C /etc/puppet/thin.yml stop', + restart => 'thin -C /etc/puppet/thin.yml restart', + } +} diff --git a/modules/puppet/templates/apache_proxy_vhost.conf b/modules/puppet/templates/apache_proxy_vhost.conf new file mode 100644 index 00000000..89157fc2 --- /dev/null +++ b/modules/puppet/templates/apache_proxy_vhost.conf @@ -0,0 +1,42 @@ +ProxyRequests Off + +<Proxy balancer://puppet> +# TODO dynamically adjust that with a variable + BalancerMember http://127.0.0.1:18140 + BalancerMember http://127.0.0.1:18141 + BalancerMember http://127.0.0.1:18142 +</Proxy> + +<VirtualHost *:8140> + SSLEngine on + ServerName puppet.<%= domain %> + + ErrorLog /var/log/httpd/puppet_proxy.<%= domain %>.error.log + CustomLog /var/log/httpd/puppet_proxy.<%= domain %>.access.log + + SSLCipherSuite SSLv2:-LOW:-EXPORT:RC4+RSA + + SSLCertificateFile /var/lib/puppet/ssl/certs/puppet.<%= domain 
%>.pem + SSLCertificateKeyFile /var/lib/puppet/ssl/private_keys/puppet.<%= domain %>.pem + SSLCertificateChainFile /var/lib/puppet/ssl/ca/ca_crt.pem + SSLCACertificateFile /var/lib/puppet/ssl/ca/ca_crt.pem + + SSLVerifyClient require + SSLVerifyDepth 1 + + SSLOptions +StdEnvVars + + RequestHeader set X-Client-DN %{SSL_CLIENT_S_DN}e + RequestHeader set X-Client-Verify %{SSL_CLIENT_VERIFY}e + + <Location /> + SetHandler balancer-manager + Order allow,deny + Allow from all + </Location> + + ProxyPass / balancer://puppet/ + ProxyPassReverse / balancer://puppet/ + ProxyPreserveHost on + +</VirtualHost> diff --git a/modules/puppet/templates/config.ru b/modules/puppet/templates/config.ru new file mode 100644 index 00000000..aba07857 --- /dev/null +++ b/modules/puppet/templates/config.ru @@ -0,0 +1,16 @@ +# a config.ru, for use with every rack-compatible webserver. +# SSL needs to be handled outside this, though. + +# if puppet is not in your RUBYLIB: +# $:.unshift('/opt/puppet/lib') + +$0 = '<%= service_name %>' + +# if you want debugging: +# ARGV << "--debug" + +ARGV << "--rack" +require 'puppet/application/master' +# we're usually running inside a Rack::Builder.new {} block, +# therefore we need to call run *here*. +run Puppet::Application[:master].run diff --git a/modules/puppet/templates/db_config.erb b/modules/puppet/templates/db_config.erb new file mode 100644 index 00000000..337a5043 --- /dev/null +++ b/modules/puppet/templates/db_config.erb @@ -0,0 +1,10 @@ +<%- if database == 'sqlite3' -%> + dbadapter = sqlite3 + dblocation = /var/lib/puppet/storeconfigs.db +<%- else -%> + dbadapter = postgresql + dbuser = puppet + dbpassword = <%= pgsql_password %> + dbserver = pgsql.<%= domain %> + dbname = puppet +<%- end -%> diff --git a/modules/puppet/templates/hiera.yaml b/modules/puppet/templates/hiera.yaml new file mode 100644 index 00000000..fcef4278 --- /dev/null +++ b/modules/puppet/templates/hiera.yaml @@ -0,0 +1,9 @@ +--- +:backends: + - yaml +:yaml: + :datadir: /etc/puppet/hieradata +:logger: console +:hierarchy: + - "%{::environment}" + - common diff --git a/modules/puppet/templates/puppet.agent.conf b/modules/puppet/templates/puppet.agent.conf new file mode 100644 index 00000000..44dfedb7 --- /dev/null +++ b/modules/puppet/templates/puppet.agent.conf @@ -0,0 +1,27 @@ +[agent] + server = puppet.<%= domain %> + + pluginsync = true + + # unfortunately, ecosse and jonund sync at the same time, thus causing problem + # the proper fix is to use something else than sqlite for stored config, but this would + # take more time to deploy, so the quick fix is this one (misc, 04/07/2011) + splay = true + + report = true + + graph = true +<% if environment %> + environment = <%= environment %> +<% end %> + # The file in which puppetd stores a list of the classes + # associated with the retrieved configuration. Can be loaded in + # the separate ``puppet`` executable using the ``--loadclasses`` + # option. + # The default value is '$confdir/classes.txt'. + classfile = $vardir/classes.txt + + # Where puppetd caches the local configuration. An + # extension indicating the cache format is added automatically. + # The default value is '$confdir/localconfig'. 
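A side note on the hiera.yaml template earlier in this hunk: with that hierarchy, a lookup is answered from /etc/puppet/hieradata/<environment>.yaml first and falls back to common.yaml. A minimal sketch of the manifest side, assuming the hiera() parser function from ruby-hiera is available; the key name and fallback value are invented:

    # answered from hieradata/<environment>.yaml if the key is defined there,
    # otherwise from hieradata/common.yaml; the second argument is the
    # default used when neither file defines it
    $mirror_host = hiera('mirror_host', 'mirror.example.org')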
+ localconfig = $vardir/localconfig diff --git a/modules/puppet/templates/puppet.conf b/modules/puppet/templates/puppet.conf index fcb81a35..28e8c363 100644 --- a/modules/puppet/templates/puppet.conf +++ b/modules/puppet/templates/puppet.conf @@ -1,4 +1,8 @@ +<% db_config = scope.lookupvar('puppet::stored_config::db_config') %> [main] + # listen on both ipv4 and ipv6 + bindaddress = * + # The Puppet log directory. # The default value is '$vardir/log'. logdir = /var/log/puppet @@ -11,24 +15,7 @@ # The default value is '$confdir/ssl'. ssldir = $vardir/ssl -[master] - certname = puppetmaster.<%= domain %> - reports = tagmail - -[agent] - server = puppetmaster.<%= domain %> - - pluginsync = true - - report = true - # The file in which puppetd stores a list of the classes - # associated with the retrieved configuratiion. Can be loaded in - # the separate ``puppet`` executable using the ``--loadclasses`` - # option. - # The default value is '$confdir/classes.txt'. - classfile = $vardir/classes.txt - - # Where puppetd caches the local configuration. An - # extension indicating the cache format is added automatically. - # The default value is '$confdir/localconfig'. - localconfig = $vardir/localconfig + modulepath = $confdir/modules:$confdir/deployment:$confdir/external:/usr/share/puppet/modules + queue_type = stomp + queue_source = stomp://localhost:61613 +<%= db_config %> diff --git a/modules/puppet/templates/puppet.master.conf b/modules/puppet/templates/puppet.master.conf new file mode 100644 index 00000000..0180fc2a --- /dev/null +++ b/modules/puppet/templates/puppet.master.conf @@ -0,0 +1,14 @@ +<% db_config = scope.lookupvar('puppet::stored_config::db_config') %> +[master] + certname = puppet.<%= domain %> + + # tagmail should be kept last, until this bug is fixed + # https://projects.puppetlabs.com/issues/5018 + reports = store,socket,tagmail + reportfrom = root@<%= domain %> + + # Never remove this: + # Store config is used to populate others configs + storeconfigs = true + async_storeconfigs = true +<%= db_config %> diff --git a/modules/puppet/templates/tagmail.conf b/modules/puppet/templates/tagmail.conf index cf988123..96b034aa 100644 --- a/modules/puppet/templates/tagmail.conf +++ b/modules/puppet/templates/tagmail.conf @@ -1 +1 @@ -err: mageia-sysadm@<%= domain %> +err: sysadmin-reports@ml.<%= domain %> diff --git a/modules/puppet/templates/thin.yml b/modules/puppet/templates/thin.yml new file mode 100644 index 00000000..8cf4231d --- /dev/null +++ b/modules/puppet/templates/thin.yml @@ -0,0 +1,18 @@ +--- +daemonize: true +require: [] + +timeout: 30 +user: puppet +group: puppet +wait: 30 +log: /var/log/thin.log +max_conns: 1024 +chdir: /etc/puppet +address: 127.0.0.1 +servers: 3 +environment: production +max_persistent_conns: 512 +pid: /var/run/puppet/puppetmaster.pid +rackup: /usr/local/share/puppet.config.ru +port: 18140 diff --git a/modules/report-socket/lib/puppet/reports/socket.rb b/modules/report-socket/lib/puppet/reports/socket.rb new file mode 100644 index 00000000..b1af057d --- /dev/null +++ b/modules/report-socket/lib/puppet/reports/socket.rb @@ -0,0 +1,33 @@ +require 'puppet' +require 'yaml' + +unless Puppet.version >= '2.6.5' + fail "This report processor requires Puppet version 2.6.5 or later" +end + +Puppet::Reports.register_report(:socket) do + configfile = File.join([File.dirname(Puppet.settings[:config]), "socket.yaml"]) + # do not raise a error since this will show in puppet log + # raise(Puppet::ParseError, "Socket report config file #{configfile} not readable") 
unless + if File.exist?(configfile) + + # TODO add support for using another user ? + config = YAML.load_file(configfile) + SOCKET_PATH = config[:socket_path] + else + SOCKET_PATH = nil + end + + desc <<-DESC + Send notification of failed reports to a socket. + DESC + + def process + if self.status == 'failed' + message = "Puppet run for #{self.host} #{self.status} at #{Time.now.asctime}." + if File.exist?(SOCKET_PATH) + Puppet::Util.execute("echo #{message} > #{SOCKET_PATH}" , "nobody", "nogroup") + end + end + end +end diff --git a/modules/restrictshell/manifests/allow.pp b/modules/restrictshell/manifests/allow.pp new file mode 100644 index 00000000..cb1fd9a2 --- /dev/null +++ b/modules/restrictshell/manifests/allow.pp @@ -0,0 +1,7 @@ +define restrictshell::allow { + include shell + file { "/etc/membersh-conf.d/allow_${name}.pl": + mode => '0755', + content => "\$use_${name} = 1;\n", + } +} diff --git a/modules/restrictshell/manifests/allow_git.pp b/modules/restrictshell/manifests/allow_git.pp new file mode 100644 index 00000000..ed12a577 --- /dev/null +++ b/modules/restrictshell/manifests/allow_git.pp @@ -0,0 +1,3 @@ +class restrictshell::allow_git { + restrictshell::allow { 'git': } +} diff --git a/modules/restrictshell/manifests/allow_maintdb.pp b/modules/restrictshell/manifests/allow_maintdb.pp new file mode 100644 index 00000000..e5123cf1 --- /dev/null +++ b/modules/restrictshell/manifests/allow_maintdb.pp @@ -0,0 +1,3 @@ +class restrictshell::allow_maintdb { + restrictshell::allow{ 'maintdb': } +} diff --git a/modules/restrictshell/manifests/allow_pkgsubmit.pp b/modules/restrictshell/manifests/allow_pkgsubmit.pp new file mode 100644 index 00000000..14c6357b --- /dev/null +++ b/modules/restrictshell/manifests/allow_pkgsubmit.pp @@ -0,0 +1,3 @@ +class restrictshell::allow_pkgsubmit { + restrictshell::allow { 'pkgsubmit': } +} diff --git a/modules/restrictshell/manifests/allow_rsync.pp b/modules/restrictshell/manifests/allow_rsync.pp new file mode 100644 index 00000000..6049122a --- /dev/null +++ b/modules/restrictshell/manifests/allow_rsync.pp @@ -0,0 +1,3 @@ +class restrictshell::allow_rsync { + restrictshell::allow { 'rsync': } +} diff --git a/modules/restrictshell/manifests/allow_scp.pp b/modules/restrictshell/manifests/allow_scp.pp new file mode 100644 index 00000000..3e6cb1fb --- /dev/null +++ b/modules/restrictshell/manifests/allow_scp.pp @@ -0,0 +1,3 @@ +class restrictshell::allow_scp { + restrictshell::allow{ 'scp': } +} diff --git a/modules/restrictshell/manifests/allow_sftp.pp b/modules/restrictshell/manifests/allow_sftp.pp new file mode 100644 index 00000000..55c1f396 --- /dev/null +++ b/modules/restrictshell/manifests/allow_sftp.pp @@ -0,0 +1,3 @@ +class restrictshell::allow_sftp { + restrictshell::allow { 'sftp': } +} diff --git a/modules/restrictshell/manifests/allow_svn.pp b/modules/restrictshell/manifests/allow_svn.pp new file mode 100644 index 00000000..99b2c9fa --- /dev/null +++ b/modules/restrictshell/manifests/allow_svn.pp @@ -0,0 +1,3 @@ +class restrictshell::allow_svn { + restrictshell::allow{ 'svn': } +} diff --git a/modules/restrictshell/manifests/allow_upload_bin.pp b/modules/restrictshell/manifests/allow_upload_bin.pp new file mode 100644 index 00000000..b55c41b3 --- /dev/null +++ b/modules/restrictshell/manifests/allow_upload_bin.pp @@ -0,0 +1,3 @@ +class restrictshell::allow_upload_bin { + allow{ 'upload_bin': } +} diff --git a/modules/restrictshell/manifests/init.pp b/modules/restrictshell/manifests/init.pp index c4569e94..c27f26dc 100644 --- 
a/modules/restrictshell/manifests/init.pp +++ b/modules/restrictshell/manifests/init.pp @@ -1,55 +1 @@ -class restrictshell { - $allow_svn = "0" - $allow_git = "0" - $allow_rsync = "0" - $allow_pkgsubmit = "0" - - $ldap_pwfile = "/etc/ldap.secret" - - class allow_svn_git_pkgsubmit { - $allow_svn = "1" - $allow_git = "1" - $allow_pkgsubmit = "1" - } - - file { '/usr/local/bin/sv_membersh.pl': - ensure => present, - owner => root, - group => root, - mode => 755, - content => template("restrictshell/sv_membersh.pl"), - } - - file { '/etc/membersh-conf.pl': - ensure => present, - owner => root, - group => root, - mode => 755, - content => template("restrictshell/membersh-conf.pl"), - } - - package { 'python-ldap': - ensure => installed, - } - - $pubkeys_directory = "/var/lib/pubkeys" - file { $pubkeys_directory: - ensure => directory, - owner => root, - group => root, - mode => 755, - } - - file { '/usr/local/bin/ldap-sshkey2file.py': - ensure => present, - owner => root, - group => root, - mode => 755, - content => template("restrictshell/ldap-sshkey2file.py"), - requires => Package['python-ldap'] - } - - - - -} +class restrictshell { } diff --git a/modules/restrictshell/manifests/shell.pp b/modules/restrictshell/manifests/shell.pp new file mode 100644 index 00000000..3ef2a036 --- /dev/null +++ b/modules/restrictshell/manifests/shell.pp @@ -0,0 +1,14 @@ +class restrictshell::shell { + file { '/etc/membersh-conf.d': + ensure => directory, + } + + mga_common::local_script { 'sv_membersh.pl': + content => template('restrictshell/sv_membersh.pl'), + } + + file { '/etc/membersh-conf.pl': + mode => '0755', + content => template('restrictshell/membersh-conf.pl'), + } +} diff --git a/modules/restrictshell/templates/ldap-sshkey2file.py b/modules/restrictshell/templates/ldap-sshkey2file.py deleted file mode 100755 index ec5afc8e..00000000 --- a/modules/restrictshell/templates/ldap-sshkey2file.py +++ /dev/null @@ -1,92 +0,0 @@ -#!/usr/bin/python - -import sys -import os -import random - -try: - import ldap -except ImportError, e: - print "Please install python-ldap before running this program" - sys.exit(1) - -basedn="<%= dc_suffix %>" -peopledn="ou=people,%s" % basedn -uris=['ldap://ldap.<%= domain %>'] -random.shuffle(uris) -uri = " ".join(uris) -timeout=5 -binddn="cn=<%= fqdn %>,ou=Hosts," % basedn -pwfile="<%= ldap_pwfile %>" -# filter out disabled accounts also -# too bad uidNumber doesn't support >= filters -filter="(&(objectClass=inetOrgPerson)(objectClass=ldapPublicKey)(objectClass=posixAccount)(sshPublicKey=*)(!(shadowExpire=*)))" -keypathprefix="<%= pubkeys_directory %>" - -def usage(): - print "%s" % sys.argv[0] - print - print "Will fetch all enabled user accounts under %s" % peopledn - print "with ssh keys in them and write each one to" - print "%s/<login>/authorized_keys" % keypathprefix - print - print "This script is intented to be run from cron as root" - print - -def get_pw(pwfile): - try: - f = open(pwfile, 'r') - except IOError, e: - print "Error while reading password file, aborting" - print e - sys.exit(1) - pw = f.readline().strip() - f.close() - return pw - -def write_keys(keys, user, uid, gid): - try: - os.makedirs("%s/%s" % (keypathprefix,user), 0700) - except: - pass - keyfile = "%s/%s/authorized_keys" % (keypathprefix,user) - f = open(keyfile, 'w') - for key in keys: - f.write(key.strip() + "\n") - f.close() - os.chmod(keyfile, 0600) - os.chown(keyfile, uid, gid) - os.chmod("%s/%s" % (keypathprefix,user), 0700) - os.chown("%s/%s" % (keypathprefix,user), uid, gid) - -if 
len(sys.argv) != 1: - usage() - sys.exit(1) - -bindpw = get_pw(pwfile) - -try: - ld = ldap.initialize(uri) - ld.set_option(ldap.OPT_NETWORK_TIMEOUT, timeout) - ld.start_tls_s() - ld.bind_s(binddn, bindpw) - res = ld.search_s(peopledn, ldap.SCOPE_ONELEVEL, filter, ['uid','sshPublicKey','uidNumber','gidNumber']) - try: - os.makedirs(keypathprefix, 0701) - except: - pass - for result in res: - dn, entry = result - # skip possible system users - if int(entry['uidNumber'][0]) < 500: - continue - write_keys(entry['sshPublicKey'], entry['uid'][0], int(entry['uidNumber'][0]), int(entry['gidNumber'][0])) - ld.unbind_s() -except Exception, e: - print "Error" - raise - -sys.exit(0) - - -# vim:ts=4:sw=4:et:ai:si diff --git a/modules/restrictshell/templates/membersh-conf.pl b/modules/restrictshell/templates/membersh-conf.pl index 0d9887e1..9e0c8bf5 100755 --- a/modules/restrictshell/templates/membersh-conf.pl +++ b/modules/restrictshell/templates/membersh-conf.pl @@ -1,16 +1,20 @@ -$use_svn = "<%= allow_svn %>"; + + $bin_svn = "/usr/bin/svnserve"; $regexp_svn = "^svnserve -t\$"; #@prepend_args_svn = ( '-r', '/svn' ); @prepend_args_svn = (); -$use_git = "<%= allow_git %>"; -$bin_git = "/usr/bin/git-shell"; +$bin_git = "/usr/share/gitolite/gitolite-shell"; -$use_rsync = "<%= allow_rsync %>"; $bin_rsync = "/usr/bin/rsync"; $regexp_rsync = "^rsync --server"; $regexp_dir_rsync = "^/.*"; -$use_pkgsubmit = "<%= allow_pkgsubmit %>"; +$bin_sftp = "<%= @lib_dir %>/ssh/sftp-server"; +$regexp_sftp = "^(/usr/lib{64,}/ssh/sftp-server|/usr/lib/sftp-server|/usr/libexec/sftp-server|/usr/lib/openssh/sftp-server)"; +foreach my $f (glob("/etc/membersh-conf.d/allow_*pl")) { + do($f) +} +1; diff --git a/modules/restrictshell/templates/sv_membersh.pl b/modules/restrictshell/templates/sv_membersh.pl index 521587d0..0b07f23a 100644 --- a/modules/restrictshell/templates/sv_membersh.pl +++ b/modules/restrictshell/templates/sv_membersh.pl @@ -62,8 +62,16 @@ our $use_git = "0"; our $bin_git = "/usr/bin/git-shell"; our $use_pkgsubmit = "0"; -our $regexp_pkgsubmit = "^/usr/share/repsys/create-srpm "; -our $bin_pkgsubmit = "/usr/share/repsys/create-srpm"; +our $regexp_pkgsubmit = "^/usr/share/repsys/create-srpm |^/usr/local/bin/submit_package "; +our $bin_pkgsubmit = "/usr/local/bin/submit_package"; + +our $use_maintdb = "0"; +our $regexp_maintdb = "^/usr/local/bin/wrapper.maintdb "; +our $bin_maintdb = "/usr/local/bin/wrapper.maintdb"; + +our $use_upload_bin = "0"; +our $regexp_upload_bin = "^/usr/local/bin/wrapper.upload-bin "; +our $bin_upload_bin = "/usr/local/bin/wrapper.upload-bin"; # Open configuration file if (-e "/etc/membersh-conf.pl") { @@ -92,6 +100,10 @@ if (-e "/etc/membersh-conf.pl") { # $regexp_dir_rsync = "^(/upload)|(/var/ftp)"; # # $use_pkgsubmit = "1"; +# +# $use_maintdb = "1"; +# +# $use_upload_bin = "1"; if ($#ARGV == 1 and $ARGV[0] eq "-c") { @@ -135,22 +147,37 @@ if ($#ARGV == 1 and $ARGV[0] eq "-c") { push( @args, @args_user ); exec($bin_svn, @args) or die("Failed to exec $bin_svn: $!"); - } elsif ($use_git and $ARGV[1] =~ m:git-.+:) { + } elsif ($use_git and $ARGV[1] =~ m:^$bin_git\b:) { - # Delegate filtering to git-shell - exec($bin_git, @ARGV) or die("Failed to exec $bin_git: $!"); + # Delegate filtering to gitolite-shell + my ($gitolite_bin, @rest) = split(' ', $ARGV[1]); + exec($bin_git, @rest) or die("Failed to exec $bin_git: $!"); } elsif ($use_pkgsubmit and $ARGV[1] =~ m:$regexp_pkgsubmit:) { my ($createsrpm, @rest) = split(' ', $ARGV[1]); exec($bin_pkgsubmit, @rest) or die("Failed to exec 
$bin_pkgsubmit: $!"); + } elsif ($use_maintdb and + $ARGV[1] =~ m:$regexp_maintdb:) { + my ($maintdb, @rest) = split(' ', $ARGV[1]); + exec($bin_maintdb, @rest) or die("Failed to exec $bin_maintdb: $!"); + } elsif ($use_upload_bin and + $ARGV[1] =~ m:$regexp_upload_bin:) { + my ($upload_bin, @rest) = split(' ', $ARGV[1]); + exec($bin_upload_bin, @rest) or die("Failed to exec $bin_upload_bin: $!"); } } unless (-e "/etc/membersh-errormsg") { - print STDERR "You tried to execute: @ARGV[1..$#ARGV]\n"; + if (@ARGV) { + print STDERR "You tried to execute: @ARGV[1..$#ARGV]\n"; + } else { + print STDERR "You tried to run a interactive shell.\n" + } print STDERR "Sorry, you are not allowed to execute that command.\n"; + print STDERR "You are member of the following groups :\n"; + print STDERR qx(groups); } else { open(ERRORMSG, "< /etc/membersh-errormsg"); while (<ERRORMSG>) { diff --git a/modules/rsnapshot/manifests/init.pp b/modules/rsnapshot/manifests/init.pp new file mode 100644 index 00000000..5d145172 --- /dev/null +++ b/modules/rsnapshot/manifests/init.pp @@ -0,0 +1,74 @@ +class rsnapshot { + class base($confdir = '/data/backups/conf') { + package { ['rsnapshot']: } + + file { $confdir: + ensure => directory, + owner => root, + group => root, + mode => '0700', + } + + @rsnapshot::cron_file { 'hourly': } + @rsnapshot::cron_file { 'daily': } + @rsnapshot::cron_file { 'weekly': } + @rsnapshot::cron_file { 'monthly': } + } + + define cron_file($rsnapshot_conf = []) { + $filepath = "/tmp/cron.${name}_rsnapshot-backups" + $rsnapshot_arg = $name + file { $filepath: + ensure => present, + content => template('rsnapshot/cron_file'), + owner => root, + group => root, + mode => '0755', + } + } + + # - 'backup' is an array of "source destination" to backup + # - 'backup_script' is an array of "script destination" + # - ${x}_interval is the number of hourly, daily, weekly, monthly + # backups that should be kept. 
If you don't want hourly, daily, + # weekly or monthly backups, set ${x}_interval to '0' + define backup( + $snapshot_root = '/data/backups', + $one_fs = '1', + $backup = [], + $backup_script = [], + $hourly_interval = '0', + $daily_interval = '6', + $weekly_interval = '4', + $monthly_interval = '3' + ) { + $conffile = "${rsnapshot::base::confdir}/${name}.conf" + file { $conffile: + owner => root, + group => root, + mode => '0700', + content => template('rsnapshot/rsnapshot.conf'), + } + + if ($hourly_interval != '0') { + Rsnapshot::Cron_file <| title == 'hourly' |> { + rsnapshot_conf +> $conffile, + } + } + if ($daily_interval != '0') { + Rsnapshot::Cron_file <| title == 'daily' |> { + rsnapshot_conf +> $conffile, + } + } + if ($weekly_interval != '0') { + Rsnapshot::Cron_file <| title == 'weekly' |> { + rsnapshot_conf +> $conffile, + } + } + if ($monthly_interval != '0') { + Rsnapshot::Cron_file <| title == 'monthly' |> { + rsnapshot_conf +> $conffile, + } + } + } +} diff --git a/modules/rsnapshot/templates/cron_file b/modules/rsnapshot/templates/cron_file new file mode 100644 index 00000000..43ca9e1b --- /dev/null +++ b/modules/rsnapshot/templates/cron_file @@ -0,0 +1,5 @@ +#!/bin/sh + +<%- for conf in @rsnapshot_conf -%> +/usr/bin/rsnapshot -c <%= conf %> <%= rsnapshot_arg %> +<%- end -%> diff --git a/modules/rsnapshot/templates/rsnapshot.conf b/modules/rsnapshot/templates/rsnapshot.conf new file mode 100644 index 00000000..4eeee4d0 --- /dev/null +++ b/modules/rsnapshot/templates/rsnapshot.conf @@ -0,0 +1,209 @@ +################################################# +# rsnapshot.conf - rsnapshot configuration file # +################################################# +# # +# PLEASE BE AWARE OF THE FOLLOWING RULES: # +# # +# This file requires tabs between elements # +# # +# Directories require a trailing slash: # +# right: /home/ # +# wrong: /home # +# # +################################################# + +####################### +# CONFIG FILE VERSION # +####################### + +config_version 1.2 + +########################### +# SNAPSHOT ROOT DIRECTORY # +########################### + +# All snapshots will be stored under this root directory. +# +snapshot_root <%= @snapshot_root %> + +# If no_create_root is enabled, rsnapshot will not automatically create the +# snapshot_root directory. This is particularly useful if you are backing +# up to removable media, such as a FireWire or USB drive. +# +#no_create_root 1 + +################################# +# EXTERNAL PROGRAM DEPENDENCIES # +################################# + +# LINUX USERS: Be sure to uncomment "cmd_cp". This gives you extra features. +# EVERYONE ELSE: Leave "cmd_cp" commented out for compatibility. +# +# See the README file or the man page for more details. +# +cmd_cp /bin/cp + +# uncomment this to use the rm program instead of the built-in perl routine. +# +cmd_rm /bin/rm + +# rsync must be enabled for anything to work. This is the only command that +# must be enabled. +# +cmd_rsync /usr/bin/rsync + +# Uncomment this to enable remote ssh backups over rsync. +# +cmd_ssh /usr/bin/ssh + +# Comment this out to disable syslog support. +# +cmd_logger /bin/logger + +# Uncomment this to specify the path to "du" for disk usage checks. +# If you have an older version of "du", you may also want to check the +# "du_args" parameter below. +# +cmd_du /usr/bin/du + +# Uncomment this to specify the path to rsnapshot-diff. 
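For illustration only (not part of the diff): given the parameters documented above, a backup for one host could be declared roughly as below. The host name and paths are invented; each "source destination" pair keeps its trailing slashes, as the rsnapshot.conf template that follows requires.

    include rsnapshot::base   # conf dir plus the virtual hourly/daily/weekly/monthly cron files

    rsnapshot::backup { 'sucuk':
      # each entry is one "source destination" pair passed straight to rsnapshot
      backup           => ['root@sucuk.example.org:/etc/ sucuk/etc/',
                           'root@sucuk.example.org:/home/ sucuk/home/'],
      daily_interval   => '7',
      monthly_interval => '0',  # no monthly rotation for this host
    }

Setting an interval to '0' both drops that interval from the generated rsnapshot.conf and keeps the configuration file out of the matching cron_file collector.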
+# +cmd_rsnapshot_diff /usr/bin/rsnapshot-diff + +# Specify the path to a script (and any optional arguments) to run right +# before rsnapshot syncs files +# +#cmd_preexec /path/to/preexec/script + +# Specify the path to a script (and any optional arguments) to run right +# after rsnapshot syncs files +# +#cmd_postexec /path/to/postexec/script + +######################################### +# BACKUP INTERVALS # +# Must be unique and in ascending order # +# i.e. hourly, daily, weekly, etc. # +######################################### + +<%- if @hourly_interval != '0' -%> +interval hourly <%= @hourly_interval %> +<%- end -%> +<%- if @daily_interval != '0' -%> +interval daily <%= @daily_interval %> +<%- end -%> +<%- if @weekly_interval != '0' -%> +interval weekly <%= @weekly_interval %> +<%- end -%> +<%- if @monthly_interval != '0' -%> +interval monthly <%= @monthly_interval %> +<%- end -%> + +############################################ +# GLOBAL OPTIONS # +# All are optional, with sensible defaults # +############################################ + +# Verbose level, 1 through 5. +# 1 Quiet Print fatal errors only +# 2 Default Print errors and warnings only +# 3 Verbose Show equivalent shell commands being executed +# 4 Extra Verbose Show extra verbose information +# 5 Debug mode Everything +# +verbose 2 + +# Same as "verbose" above, but controls the amount of data sent to the +# logfile, if one is being used. The default is 3. +# +loglevel 3 + +# If you enable this, data will be written to the file you specify. The +# amount of data written is controlled by the "loglevel" parameter. +# +logfile /var/log/rsnapshot + +# If enabled, rsnapshot will write a lockfile to prevent two instances +# from running simultaneously (and messing up the snapshot_root). +# If you enable this, make sure the lockfile directory is not world +# writable. Otherwise anyone can prevent the program from running. +# +lockfile /var/run/rsnapshot.pid + +# Default rsync args. All rsync commands have at least these options set. +# +#rsync_short_args -a +#rsync_long_args --delete --numeric-ids --relative --delete-excluded + +# ssh has no args passed by default, but you can specify some here. +# +#ssh_args -p 22 + +# Default arguments for the "du" program (for disk space reporting). +# The GNU version of "du" is preferred. See the man page for more details. +# If your version of "du" doesn't support the -h flag, try -k flag instead. +# +#du_args -csh + +# If this is enabled, rsync won't span filesystem partitions within a +# backup point. This essentially passes the -x option to rsync. +# The default is 0 (off). +# +one_fs <%= @one_fs %> + +# The include and exclude parameters, if enabled, simply get passed directly +# to rsync. If you have multiple include/exclude patterns, put each one on a +# separate line. Please look up the --include and --exclude options in the +# rsync man page for more details on how to specify file name patterns. +# +#include ??? +#include ??? +#exclude ??? +#exclude ??? + +# The include_file and exclude_file parameters, if enabled, simply get +# passed directly to rsync. Please look up the --include-from and +# --exclude-from options in the rsync man page for more details. +# +#include_file /path/to/include/file +#exclude_file /path/to/exclude/file + +# If your version of rsync supports --link-dest, consider enable this. +# This is the best way to support special files (FIFOs, etc) cross-platform. +# The default is 0 (off). 
+# +link_dest 1 + +# When sync_first is enabled, it changes the default behaviour of rsnapshot. +# Normally, when rsnapshot is called with its lowest interval +# (i.e.: "rsnapshot hourly"), it will sync files AND rotate the lowest +# intervals. With sync_first enabled, "rsnapshot sync" handles the file sync, +# and all interval calls simply rotate files. See the man page for more +# details. The default is 0 (off). +# +#sync_first 0 + +# If enabled, rsnapshot will move the oldest directory for each interval +# to [interval_name].delete, then it will remove the lockfile and delete +# that directory just before it exits. The default is 0 (off). +# +#use_lazy_deletes 0 + +# Number of rsync re-tries. If you experience any network problems or +# network card issues that tend to cause ssh to crap-out with +# "Corrupted MAC on input" errors, for example, set this to a non-zero +# value to have the rsync operation re-tried +# +#rsync_numtries 0 + +############################### +### BACKUP POINTS / SCRIPTS ### +############################### + +<%- for b in @backup -%> +<%= b.split().unshift("backup").join("\t") %> +<%- end -%> + +<%- for bs in @backup_script -%> +<%= bs.split().unshift("backup_script").join("\t") %> +<%- end -%> diff --git a/modules/rsyncd/manifests/init.pp b/modules/rsyncd/manifests/init.pp index 148cc426..5cc9e2fd 100644 --- a/modules/rsyncd/manifests/init.pp +++ b/modules/rsyncd/manifests/init.pp @@ -1,32 +1,12 @@ -class rsyncd { +class rsyncd($rsyncd_conf = 'rsyncd/rsyncd.conf') { - package { xinetd: - ensure => installed + xinetd::service { 'rsync': + content => template('rsyncd/xinetd') } - service { xinetd: - ensure => running, - path => "/etc/init.d/xinetd", - subscribe => [ Package["xinetd"], File["rsync"] ] - } - - file { "rsync": - path => "/etc/xinetd.d/rsync", - ensure => present, - owner => root, - group => root, - mode => 644, - require => Package["xinetd"], - content => template("rsyncd/xinetd") - } - - file { "rsyncd.conf": - path => "/etc/rsyncd.conf", - ensure => present, - owner => root, - group => root, - mode => 644, - require => Package["rsync"], - content => template("rsyncd/rsyncd.conf") + file { 'rsyncd.conf': + path => '/etc/rsyncd.conf', + require => Package['rsync'], + content => template($rsyncd_conf) } } diff --git a/modules/rsyncd/templates/rsyncd.conf b/modules/rsyncd/templates/rsyncd.conf index e5cfa6d2..11dbc6a4 100644 --- a/modules/rsyncd/templates/rsyncd.conf +++ b/modules/rsyncd/templates/rsyncd.conf @@ -1,15 +1,7 @@ # $Id$ uid = nobody -gid = nogroup +gid = nogroup -[mageia] - path = /distrib/mirror/ - comment = Mageia Mirror Tree - hosts allow = \ - distrib-coffee.ipsl.jussieu.fr \ - distribipsl.aero.jussieu.fr \ - ibiblio.org \ - 152.46.7.122 \ - 152.19.134.16 \ +# default empty rsyncd.conf diff --git a/modules/rsyncd/templates/xinetd b/modules/rsyncd/templates/xinetd index 46a3fd33..b477e413 100644 --- a/modules/rsyncd/templates/xinetd +++ b/modules/rsyncd/templates/xinetd @@ -1,4 +1,3 @@ -# $Id: xinetd 319 2009-02-28 17:05:16Z guillomovitch $ service rsync { disable = no @@ -8,5 +7,9 @@ service rsync server = /usr/bin/rsync server_args = --daemon log_on_failure += USERID + flags = IPv6 + # some mirrors do not seems to use locks when downloading from + # us and try to download the same stuff 15 times in a row + per_source = 4 } diff --git a/modules/serial_console/manifests/init.pp b/modules/serial_console/manifests/init.pp new file mode 100644 index 00000000..b6716954 --- /dev/null +++ b/modules/serial_console/manifests/init.pp @@ -0,0 
+1 @@ +class serial_console {} diff --git a/modules/serial_console/manifests/serial_console.pp b/modules/serial_console/manifests/serial_console.pp new file mode 100644 index 00000000..dd68c84c --- /dev/null +++ b/modules/serial_console/manifests/serial_console.pp @@ -0,0 +1,8 @@ +# name: ttyS0 +define serial_console::serial_console() { + service { "serial-getty@${name}": + provider => systemd, + ensure => running, + enable => true, + } +} diff --git a/modules/shorewall/manifests/init.pp b/modules/shorewall/manifests/init.pp index 7c8e1f55..daea6b2c 100644 --- a/modules/shorewall/manifests/init.pp +++ b/modules/shorewall/manifests/init.pp @@ -2,101 +2,101 @@ class shorewall { include concat::setup define shorewallfile () { - $filename = "/tmp/shorewall/${name}" - $header = "puppet:///modules/shorewall/headers/${name}" - $footer = "puppet:///modules/shorewall/footers/${name}" - concat{$filename: - owner => root, - group => root, - mode => 600, - } + $filename = "/tmp/shorewall/${name}" + $header = "puppet:///modules/shorewall/headers/${name}" + $footer = "puppet:///modules/shorewall/footers/${name}" + concat{$filename: + owner => root, + group => root, + mode => '0600', + } - concat::fragment{"${name}_header": - target => $filename, - order => 1, - source => $header, - } + concat::fragment{"${name}_header": + target => $filename, + order => 1, + source => $header, + } - concat::fragment{"${name}_footer": - target => $filename, - order => 99, - source => $footer, - } + concat::fragment{"${name}_footer": + target => $filename, + order => 99, + source => $footer, + } } ### Rules shorewallfile{ rules: } define rule_line($order = 50) { - $filename = "/tmp/shorewall/rules" - $line = "${name}\n" - concat::fragment{"newline_${name}": - target => $filename, - order => $order, - content => $line, - } + $filename = "/tmp/shorewall/rules" + $line = "${name}\n" + concat::fragment{"newline_${name}": + target => $filename, + order => $order, + content => $line, + } } class allow_ssh_in { - rule_line { "ACCEPT all all tcp 22": - order => 5, - } + rule_line { "ACCEPT all all tcp 22": + order => 5, + } } class allow_dns_in { - rule_line { "ACCEPT net fw tcp 53": } - rule_line { "ACCEPT net fw udp 53": } + rule_line { "ACCEPT net fw tcp 53": } + rule_line { "ACCEPT net fw udp 53": } } class allow_smtp_in { - rule_line { "ACCEPT net fw tcp 25": } + rule_line { "ACCEPT net fw tcp 25": } } class allow_www_in { - rule_line { "ACCEPT net fw tcp 80": } + rule_line { "ACCEPT net fw tcp 80": } } ### Zones shorewallfile{ zones: } define zone_line($order = 50) { - $filename = "/tmp/shorewall/zones" - $line = "${name}\n" - concat::fragment{"newline_${name}": - target => $filename, - order => $order, - content => $line, - } + $filename = "/tmp/shorewall/zones" + $line = "${name}\n" + concat::fragment{"newline_${name}": + target => $filename, + order => $order, + content => $line, + } } class default_zones { - zone_line { "net ipv4": - order => 2, - } - zone_line { "fw firewall": - order => 3, - } + zone_line { "net ipv4": + order => 2, + } + zone_line { "fw firewall": + order => 3, + } } ### Policy shorewallfile{ policy: } define policy_line($order = 50) { - $filename = "/tmp/shorewall/policy" - $line = "${name}\n" - concat::fragment{"newline_${name}": - target => $filename, - order => $order, - content => $line, - } + $filename = "/tmp/shorewall/policy" + $line = "${name}\n" + concat::fragment{"newline_${name}": + target => $filename, + order => $order, + content => $line, + } } class default_policy { - policy_line{ "fw 
net ACCEPT": - order => 2, - } - policy_line{ "net all DROP info": - order => 3, - } - policy_line{ "all all REJECT info": - order => 4, - } + policy_line{ "fw net ACCEPT": + order => 2, + } + policy_line{ "net all DROP info": + order => 3, + } + policy_line{ "all all REJECT info": + order => 4, + } } class default_firewall { - include default_zones - include default_policy - include allow_ssh_in + include default_zones + include default_policy + include allow_ssh_in } } diff --git a/modules/spamassassin/manifests/init.pp b/modules/spamassassin/manifests/init.pp new file mode 100644 index 00000000..f0955513 --- /dev/null +++ b/modules/spamassassin/manifests/init.pp @@ -0,0 +1,18 @@ +class spamassassin { + # it should also requires make, bug fixed in cooker + package { 'spamassassin-sa-compile': + notify => Exec['sa-compile'], + } + + package { 'spamassassin': } + + file { '/etc/mail/spamassassin/local.cf': + require => Package['spamassassin'], + content => template('spamassassin/local.cf') + } + + exec { 'sa-compile': + refreshonly => true, + require => [Package['spamassassin-sa-compile'],Package['spamassassin']] + } +} diff --git a/modules/spamassassin/templates/local.cf b/modules/spamassassin/templates/local.cf new file mode 100644 index 00000000..0862cb87 --- /dev/null +++ b/modules/spamassassin/templates/local.cf @@ -0,0 +1,95 @@ +# This is the right place to customize your installation of SpamAssassin. +# +# See 'perldoc Mail::SpamAssassin::Conf' for details of what can be +# tweaked. +# +# Only a small subset of options are listed below +# +########################################################################### + +# Add *****SPAM***** to the Subject header of spam e-mails +# +# rewrite_header Subject *****SPAM***** + + +# Save spam messages as a message/rfc822 MIME attachment instead of +# modifying the original message (0: off, 2: use text/plain instead) +# +# report_safe 1 + + +# Set which networks or hosts are considered 'trusted' by your mail +# server (i.e. not spammers) +# +# trusted_networks 212.17.35. + + +# Set file-locking method (flock is not safe over NFS, but is faster) +# +# lock_method flock + + +# Set the threshold at which a message is considered spam (default: 5.0) +# +# required_score 5.0 + + +# Use Bayesian classifier (default: 1) +# +# use_bayes 1 + + +# Bayesian classifier auto-learning (default: 1) +# +# bayes_auto_learn 1 + + +# Set headers which may provide inappropriate cues to the Bayesian +# classifier +# +# bayes_ignore_header X-Bogosity +# bayes_ignore_header X-Spam-Flag +# bayes_ignore_header X-Spam-Status + + +# Some shortcircuiting, if the plugin is enabled +# +ifplugin Mail::SpamAssassin::Plugin::Shortcircuit +# +# default: strongly-whitelisted mails are *really* whitelisted now, if the +# shortcircuiting plugin is active, causing early exit to save CPU load. 
+# Uncomment to turn this on +# +# shortcircuit USER_IN_WHITELIST on +# shortcircuit USER_IN_DEF_WHITELIST on +# shortcircuit USER_IN_ALL_SPAM_TO on +# shortcircuit SUBJECT_IN_WHITELIST on + +# the opposite; blacklisted mails can also save CPU +# +# shortcircuit USER_IN_BLACKLIST on +# shortcircuit USER_IN_BLACKLIST_TO on +# shortcircuit SUBJECT_IN_BLACKLIST on + +# if you have taken the time to correctly specify your "trusted_networks", +# this is another good way to save CPU +# +# shortcircuit ALL_TRUSTED on + +# and a well-trained bayes DB can save running rules, too +# +# shortcircuit BAYES_99 spam +# shortcircuit BAYES_00 ham + +endif # Mail::SpamAssassin::Plugin::Shortcircuit + +required_hits 5 +rewrite_header Subject [SPAM] +report_safe 0 +ifplugin Mail::SpamAssassin::Plugin::AWL +auto_whitelist_path /var/spool/spamassassin/auto-whitelist +auto_whitelist_file_mode 0666 +endif # Mail::SpamAssassin::Plugin::AWL + +loadplugin Mail::SpamAssassin::Plugin::Rule2XSBody + diff --git a/modules/spec-tree-reports/manifests/init.pp b/modules/spec-tree-reports/manifests/init.pp new file mode 100644 index 00000000..dc78ea72 --- /dev/null +++ b/modules/spec-tree-reports/manifests/init.pp @@ -0,0 +1,50 @@ +# spec-rpm-mismatch is a report that compares the versions of RPMs available +# in the repository versus the versions created by the latest spec files and +# shows those that don't match. + +class spec-tree-reports( + $report = '/var/www/bs/spec-rpm-mismatch.html', + $srpms = 'file:///distrib/bootstrap/distrib/{version}/SRPMS/{media}/{section}/', + $release = "mga${buildsystem::var::distros::distros['cauldron']['version']}", +) { + $user = 'spec-tree-reports' + $home = "/var/lib/${user}" + $hour = 6 + $minute = 39 + + user { $user: + comment => 'spec-tree report generator', + home => $home, + } + + file { $home: + ensure => directory, + owner => $user, + mode => '0755', + } + + package { 'spec-tree': + ensure => installed, + } + + file { "${report}": + ensure => present, + owner => $user, + mode => '0644', + replace => false, + content => '*', + } + + mga_common::local_script { 'generate-spec-rpm-mismatch-report': + content => template('spec-tree-reports/generate-spec-rpm-mismatch-report'), + } + + cron { "rpm_mismatch_report": + command => "/usr/local/bin/generate-spec-rpm-mismatch-report |& systemd-cat -t generate-spec-rpm-mismatch-report", + hour => $hour, + minute => $minute, + user => $user, + environment => "MAILTO=root", + require => User[$user], + } +} diff --git a/modules/spec-tree-reports/templates/generate-spec-rpm-mismatch-report b/modules/spec-tree-reports/templates/generate-spec-rpm-mismatch-report new file mode 100644 index 00000000..4bc2db65 --- /dev/null +++ b/modules/spec-tree-reports/templates/generate-spec-rpm-mismatch-report @@ -0,0 +1,10 @@ +#!/bin/bash +# GENERATED BY PUPPET--DO NOT EDIT +set -e +trap 'test "$?" -ne 0 && echo Error in script' EXIT + +cd "$HOME" +test -e errors.log && mv -f errors.log errors.log.1 +/usr/share/doc/spec-tree/examples/generate-mismatch-report --srpm_source <%= scope.function_shellquote([scope.lookupvar('srpms')]) -%> --release <%= scope.function_shellquote([scope.lookupvar('release')]) %> +cp report.html <%= scope.function_shellquote([scope.lookupvar('report')]) %> +rm -f report.html diff --git a/modules/ssh/manifests/init.pp b/modules/ssh/manifests/init.pp deleted file mode 100644 index 08570add..00000000 --- a/modules/ssh/manifests/init.pp +++ /dev/null @@ -1,336 +0,0 @@ -# =========
-# ssh::auth
-# =========
-#
-# The latest official release and documentation for ssh::auth can always
-# be found at http://reductivelabs.com/trac/puppet/wiki/Recipes/ModuleSSHAuth .
-#
-# Version: 0.3.2
-# Release date: 2009-12-29
-
-class ssh::auth {
-
-$keymaster_storage = "/var/lib/keys"
-
-Exec { path => "/usr/bin:/usr/sbin:/bin:/sbin" }
-Notify { withpath => false }
-
-
-##########################################################################
-
-
-# ssh::auth::key
-
-# Declare keys. The approach here is just to define a bunch of
-# virtual resources, representing key files on the keymaster, client,
-# and server. The virtual keys are then realized by
-# ssh::auth::{keymaster,client,server}, respectively. The reason for
-# doing things that way is that it makes ssh::auth::key into a "one
-# stop shop" where users can declare their keys with all of their
-# parameters, whether those parameters apply to the keymaster, server,
-# or client. The real work of creating, installing, and removing keys
-# is done in the private definitions called by the virtual resources:
-# ssh_auth_key_{master,server,client}.
-
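Since this hunk removes the recipe wholesale, for reference, the one-stop-shop pattern described in the comment above was consumed roughly as follows; the key title and user are invented, and each resource belongs on the matching node (both ends also expect a User['rsync'] resource to be managed elsewhere):

    ssh::auth::key { 'rsync@backup': }      # declare the virtual key resources (e.g. in site.pp)

    include ssh::auth::keymaster            # keymaster: generate and store the key pair

    ssh::auth::client { 'rsync@backup': }   # client: install the pair for user 'rsync'

    ssh::auth::server { 'rsync@backup': }   # server: append the public key to ~rsync/.ssh/authorized_keys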
-define key ($ensure = "present", $filename = "", $force = false, $group = "puppet", $home = "", $keytype = "rsa", $length = 2048, $maxdays = "", $mindate = "", $options = "", $user = "") {
-
- ssh_auth_key_namecheck { "${title}-title": parm => "title", value => $title }
-
- # apply defaults
- $_filename = $filename ? { "" => "id_${keytype}", default => $filename }
- $_length = $keytype ? { "rsa" => $length, "dsa" => 1024 }
- $_user = $user ? {
- "" => regsubst($title, '^([^@]*)@?.*$', '\1'),
- default => $user,
- }
- $_home = $home ? { "" => "/home/$_user", default => $home }
-
- ssh_auth_key_namecheck { "${title}-filename": parm => "filename", value => $_filename }
-
- @ssh_auth_key_master { $title:
- ensure => $ensure,
- force => $force,
- keytype => $keytype,
- length => $_length,
- maxdays => $maxdays,
- mindate => $mindate,
- }
- @ssh_auth_key_client { $title:
- ensure => $ensure,
- filename => $_filename,
- group => $group,
- home => $_home,
- user => $_user,
- }
- @ssh_auth_key_server { $title:
- ensure => $ensure,
- group => $group,
- home => $_home,
- options => $options,
- user => $_user,
- }
-}
-
-
-##########################################################################
-
-
-# ssh::auth::keymaster
-#
-# Keymaster host:
-# Create key storage; create, regenerate, and remove key pairs
-
-class keymaster {
-
- # Set up key storage
-
- file { $ssh::auth::keymaster_storage:
- ensure => directory,
- owner => puppet,
- group => puppet,
- mode => 644,
- }
-
- # Realize all virtual master keys
- Ssh_auth_key_master <| |>
-
-} # class keymaster
-
-
-##########################################################################
-
-
-# ssh::auth::client
-#
-# Install generated key pairs onto clients
-
-define client ($ensure = "", $filename = "", $group = "", $home = "", $user = "") {
-
- # Realize the virtual client keys.
- # Override the defaults set in ssh::auth::key, as needed.
- if $ensure { Ssh_auth_key_client <| title == $title |> { ensure => $ensure } }
- if $filename { Ssh_auth_key_client <| title == $title |> { filename => $filename } }
- if $group { Ssh_auth_key_client <| title == $title |> { group => $group } }
-
- if $user { Ssh_auth_key_client <| title == $title |> { user => $user, home => "/home/$user" } }
- if $home { Ssh_auth_key_client <| title == $title |> { home => $home } }
-
- realize Ssh_auth_key_client[$title]
-
-} # define client
-
-
-##########################################################################
-
-
-# ssh::auth::server
-#
-# Install public keys onto clients
-
-define server ($ensure = "", $group = "", $home = "", $options = "", $user = "") {
-
- # Realize the virtual server keys.
- # Override the defaults set in ssh::auth::key, as needed.
- if $ensure { Ssh_auth_key_server <| title == $title |> { ensure => $ensure } }
- if $group { Ssh_auth_key_server <| title == $title |> { group => $group } }
- if $options { Ssh_auth_key_server <| title == $title |> { options => $options } }
-
- if $user { Ssh_auth_key_server <| title == $title |> { user => $user, home => "/home/$user" } }
- if $home { Ssh_auth_key_server <| title == $title |> { home => $home } }
-
- realize Ssh_auth_key_server[$title]
-
-} # define server
-
-} # class ssh::auth
-
-
-##########################################################################
-
-
-# ssh_auth_key_master
-#
-# Create/regenerate/remove a key pair on the keymaster.
-# This definition is private, i.e. it is not intended to be called directly by users.
-# ssh::auth::key calls it to create virtual keys, which are realized in ssh::auth::keymaster.
-
-define ssh_auth_key_master ($ensure, $force, $keytype, $length, $maxdays, $mindate) {
-
- Exec { path => "/usr/bin:/usr/sbin:/bin:/sbin" }
- File {
- owner => puppet,
- group => puppet,
- mode => 600,
- }
-
- $keydir = "${ssh::auth::keymaster_storage}/${title}"
- $keyfile = "${keydir}/key"
-
- file {
- "$keydir":
- ensure => directory,
- mode => 644;
- "$keyfile":
- ensure => $ensure;
- "${keyfile}.pub":
- ensure => $ensure,
- mode => 644;
- }
-
- if $ensure == "present" {
-
- # Remove the existing key pair, if
- # * $force is true, or
- # * $maxdays or $mindate criteria aren't met, or
- # * $keytype or $length have changed
-
- $keycontent = file("${keyfile}.pub", "/dev/null")
- if $keycontent {
-
- if $force {
- $reason = "force=true"
- }
- if !$reason and $mindate and generate("/usr/bin/find", $keyfile, "!", "-newermt", "${mindate}") {
- $reason = "created before ${mindate}"
- }
- if !$reason and $maxdays and generate("/usr/bin/find", $keyfile, "-mtime", "+${maxdays}") {
- $reason = "older than ${maxdays} days"
- }
- if !$reason and $keycontent =~ /^ssh-... [^ ]+ (...) (\d+)$/ {
- if $keytype != $1 { $reason = "keytype changed: $1 -> $keytype" }
- else { if $length != $2 { $reason = "length changed: $2 -> $length" } }
- }
- if $reason {
- exec { "Revoke previous key ${title}: ${reason}":
- command => "rm $keyfile ${keyfile}.pub",
- before => Exec["Create key $title: $keytype, $length bits"],
- }
- }
- }
-
- # Create the key pair.
- # We "repurpose" the comment field in public keys on the keymaster to
- # store data about the key, i.e. $keytype and $length. This avoids
- # having to rerun ssh-keygen -l on every key at every run to determine
- # the key length.
- exec { "Create key $title: $keytype, $length bits":
- command => "ssh-keygen -t ${keytype} -b ${length} -f ${keyfile} -C \"${keytype} ${length}\" -N \"\"",
- user => "puppet",
- group => "puppet",
- creates => $keyfile,
- require => File[$keydir],
- before => File[$keyfile, "${keyfile}.pub"],
- }
-
- } # if $ensure == "present"
-
-} # define ssh_auth_key_master
-
-
-##########################################################################
-
-
-# ssh_auth_key_client
-#
-# Install a key pair into a user's account.
-# This definition is private, i.e. it is not intended to be called directly by users.
-
-define ssh_auth_key_client ($ensure, $filename, $group, $home, $user) {
-
- File {
- owner => $user,
- group => $group,
- mode => 600,
- require => [ User[$user], File[$home]],
- }
-
- $key_src_file = "${ssh::auth::keymaster_storage}/${title}/key" # on the keymaster
- $key_tgt_file = "${home}/.ssh/${filename}" # on the client
-
- $key_src_content_pub = file("${key_src_file}.pub", "/dev/null")
- if $ensure == "absent" or $key_src_content_pub =~ /^(ssh-...) ([^ ]+)/ {
- $keytype = $1
- $modulus = $2
- file {
- $key_tgt_file:
- ensure => $ensure,
- content => file($key_src_file, "/dev/null");
- "${key_tgt_file}.pub":
- ensure => $ensure,
- content => "$keytype $modulus $title\n",
- mode => 644;
- }
- } else {
- notify { "Private key file $key_src_file for key $title not found on keymaster; skipping ensure => present": }
- }
-
-} # define ssh_auth_key_client
-
-
-##########################################################################
-
-
-# ssh_auth_key_server
-#
-# Install a public key into a server user's authorized_keys(5) file.
-# This definition is private, i.e. it is not intended to be called directly by users.
-
-define ssh_auth_key_server ($ensure, $group, $home, $options, $user) {
-
- # on the keymaster:
- $key_src_dir = "${ssh::auth::keymaster_storage}/${title}"
- $key_src_file = "${key_src_dir}/key.pub"
- # on the server:
- $key_tgt_file = "${home}/.ssh/authorized_keys"
-
- File {
- owner => $user,
- group => $group,
- require => User[$user],
- mode => 600,
- }
- Ssh_authorized_key {
- user => $user,
- target => $key_tgt_file,
- }
-
- if $ensure == "absent" {
- ssh_authorized_key { $title: ensure => "absent" }
- }
- else {
- $key_src_content = file($key_src_file, "/dev/null")
- if ! $key_src_content {
- notify { "Public key file $key_src_file for key $title not found on keymaster; skipping ensure => present": }
- } else { if $ensure == "present" and $key_src_content !~ /^(ssh-...) ([^ ]*)/ {
- err("Can't parse public key file $key_src_file")
- notify { "Can't parse public key file $key_src_file for key $title on the keymaster: skipping ensure => $ensure": }
- } else {
- $keytype = $1
- $modulus = $2
- ssh_authorized_key { $title:
- ensure => "present",
- type => $keytype,
- key => $modulus,
- options => $options ? { "" => undef, default => $options },
- }
- }} # if ... else ... else
- } # if ... else
-
-} # define ssh_auth_key_server
-
-
-##########################################################################
-
-
-# ssh_auth_key_namecheck
-#
-# Check a name (e.g. key title or filename) for the allowed form
-
-define ssh_auth_key_namecheck ($parm, $value) {
- if $value !~ /^[A-Za-z0-9]/ {
- fail("ssh::auth::key: $parm '$value' not allowed: must begin with a letter or digit")
- }
- if $value !~ /^[A-Za-z0-9_.:@-]+$/ {
- fail("ssh::auth::key: $parm '$value' not allowed: may only contain the characters A-Za-z0-9_.:@-")
- }
-} # define namecheck
diff --git a/modules/ssmtp/manifests/init.pp b/modules/ssmtp/manifests/init.pp new file mode 100644 index 00000000..fa4b94d2 --- /dev/null +++ b/modules/ssmtp/manifests/init.pp @@ -0,0 +1,7 @@ +class ssmtp { + package { 'ssmtp': } + + file { '/etc/ssmtp/ssmtp.conf': + content => template('ssmtp/ssmtp.conf') + } +} diff --git a/modules/ssmtp/templates/ssmtp.conf b/modules/ssmtp/templates/ssmtp.conf new file mode 100644 index 00000000..d7a9125f --- /dev/null +++ b/modules/ssmtp/templates/ssmtp.conf @@ -0,0 +1,9 @@ +root=mageia-sysadm@<%= @domain %> + +mailhub=mx.<%= @domain %> + +rewriteDomain= + +# The full hostname +hostname=<%= @fqdn %> + diff --git a/modules/stompserver/manifests/init.pp b/modules/stompserver/manifests/init.pp new file mode 100644 index 00000000..9c7e1770 --- /dev/null +++ b/modules/stompserver/manifests/init.pp @@ -0,0 +1,7 @@ +class stompserver { + package { 'stompserver': } + + service { 'stompserver': + require => Package['stompserver'], + } +} diff --git a/modules/stored_config/lib/puppet/parser/functions/get_fact.rb b/modules/stored_config/lib/puppet/parser/functions/get_fact.rb new file mode 100644 index 00000000..8acdb2d5 --- /dev/null +++ b/modules/stored_config/lib/puppet/parser/functions/get_fact.rb @@ -0,0 +1,19 @@ +require 'puppet/rails' + +# get_fact($node,$fact) +# -> return the fact, from stored config + +module Puppet::Parser::Functions + newfunction(:get_fact, :type => :rvalue) do |args| + node = args[0] + fact = args[1] + # TODO use + # Puppet::Node::Facts.indirection.find(Puppet[:certname]) + Puppet::Rails.connect() + return Puppet::Rails::FactValue.find( :first, + :joins => [ :host, :fact_name ], + :conditions => { :fact_names => {:name => fact }, + :hosts => {:name => node }} + ).value + end +end diff --git a/modules/stored_config/lib/puppet/parser/functions/get_param_values.rb b/modules/stored_config/lib/puppet/parser/functions/get_param_values.rb new file mode 100644 index 00000000..ee0c3440 --- /dev/null +++ b/modules/stored_config/lib/puppet/parser/functions/get_param_values.rb @@ -0,0 +1,25 @@ +require 'puppet/rails' + +# function : +# get_param_values($name, $type, $param_name) +# -> return the value corresponding to $param_name for the $name object of type $type + +module Puppet::Parser::Functions + newfunction(:get_param_values, :type => :rvalue) do |args| + resource_name = args[0] + exported_type = args[1] + param_name = args[2] + Puppet::Rails.connect() + # TODO use find_each + # TODO fail more gracefully when nothing match + # using a default value, maybe ? 
+ return Puppet::Rails::ParamValue.find(:first, + :joins => [ :resource, :param_name ], + :conditions => { :param_names => {:name => param_name }, + :resources => { :exported => true, + :restype => exported_type, + :title => resource_name, + } } + ).value + end +end diff --git a/modules/stored_config/lib/puppet/parser/functions/list_exported_ressources.rb b/modules/stored_config/lib/puppet/parser/functions/list_exported_ressources.rb new file mode 100644 index 00000000..4c7459a8 --- /dev/null +++ b/modules/stored_config/lib/puppet/parser/functions/list_exported_ressources.rb @@ -0,0 +1,17 @@ +require 'puppet/rails' + +# function : +# list_exported_ressources($resource) +# -> return a array of title + +module Puppet::Parser::Functions + newfunction(:list_exported_ressources, :type => :rvalue) do |args| + exported_type = args[0] + #TODO manage tags + Puppet::Rails.connect() + # TODO use find_each + return Puppet::Rails::Resource.find(:all, + :conditions => { :exported => true, + :restype => exported_type }).map { |r| r.title } + end +end diff --git a/modules/subversion/manifests/client.pp b/modules/subversion/manifests/client.pp new file mode 100644 index 00000000..083a58da --- /dev/null +++ b/modules/subversion/manifests/client.pp @@ -0,0 +1,13 @@ +class subversion::client { + # svn spam log with + # Oct 26 13:30:01 valstar svn: No worthy mechs found + # without it, + # https://mail-index.netbsd.org/pkgsrc-users/2008/11/23/msg008706.html + # + $sasl2_package = $::architecture ? { + x86_64 => 'lib64sasl2-plug-anonymous', + default => 'libsasl2-plug-anonymous' + } + + package { ['subversion', $sasl2_package]: } +} diff --git a/modules/subversion/manifests/hook.pp b/modules/subversion/manifests/hook.pp new file mode 100644 index 00000000..a29ae22d --- /dev/null +++ b/modules/subversion/manifests/hook.pp @@ -0,0 +1,9 @@ +define subversion::hook($content, $type) { + $array = split($name,'\|') + $repo = $array[0] + $script = $array[1] + file { "${repo}/hooks/${type}.d/${script}": + content => $content, + mode => '0755', + } +} diff --git a/modules/subversion/manifests/hook/post_commit.pp b/modules/subversion/manifests/hook/post_commit.pp new file mode 100644 index 00000000..90d939cd --- /dev/null +++ b/modules/subversion/manifests/hook/post_commit.pp @@ -0,0 +1,6 @@ +define subversion::hook::post_commit($content) { + hook { $name: + content => $content, + type => 'post-commit', + } +} diff --git a/modules/subversion/manifests/hook/pre_commit.pp b/modules/subversion/manifests/hook/pre_commit.pp new file mode 100644 index 00000000..fa44b168 --- /dev/null +++ b/modules/subversion/manifests/hook/pre_commit.pp @@ -0,0 +1,6 @@ +define subversion::hook::pre_commit($content) { + hook { $name: + content => $content, + type => 'pre-commit', + } +} diff --git a/modules/subversion/manifests/init.pp b/modules/subversion/manifests/init.pp index 638fa1ec..9f009b5e 100644 --- a/modules/subversion/manifests/init.pp +++ b/modules/subversion/manifests/init.pp @@ -2,49 +2,62 @@ # https://github.com/reductivelabs/puppet-vcsrepo # but not integrated in puppet directly for the moment class subversion { + class server { + include subversion::tools + package { 'subversion-server': } - class server { - package { ["subversion-server", "subversion-tools"]: - ensure => installed, + $svn_base_path = '/svn/' + + xinetd::service { 'svnserve': + content => template('subversion/xinetd') } - package { ["perl-SVN-Notify-Config", "perl-SVN-Notify-Mirror"]: - ensure => installed, + file { $svn_base_path: + ensure => directory, } - - 
$local_dir = "/usr/local/share/subversion/" - $local_dirs = ["$local_dir/pre-commit.d", "$local_dir/post-commit.d"] + + package { ['perl-SVN-Notify-Config', 'perl-SVN-Notify-Mirror']: } + + $local_dir = '/usr/local/share/subversion/' + $local_dirs = ["${local_dir}/pre-commit.d", "${local_dir}/post-commit.d"] file { [$local_dir,$local_dirs]: - owner => root, - group => root, - mode => 755, - ensure => directory, + ensure => directory, } - # workaround the lack of umask command in puppet < 2.7 - file { "/usr/local/bin/create_svn_repo.sh": - ensure => present, - owner => root, - group => root, - mode => 755, - content => template('subversion/create_svn_repo.sh') - } + # workaround the lack of umask command in puppet < 2.7 + mga_common::local_script { 'create_svn_repo.sh': + content => template('subversion/create_svn_repo.sh') + } - file { "$local_dir/pre-commit.d/no_root_commit": - ensure => present, - owner => root, - group => root, - mode => 755, - content => template('subversion/no_root_commit') + file { "${local_dir}/pre-commit.d/no_binary": + mode => '0755', + content => template('subversion/no_binary') } - file { "$local_dir/pre-commit.d/no_empty_message": - ensure => present, - owner => root, - group => root, - mode => 755, - content => template('subversion/no_empty_message') + file { "${local_dir}/pre-commit.d/no_root_commit": + mode => '0755', + content => template('subversion/no_root_commit') + } + + file { "${local_dir}/pre-commit.d/no_empty_message": + mode => '0755', + content => template('subversion/no_empty_message') + } + + file { "${local_dir}/pre-commit.d/single_word_commit": + mode => '0755', + content => template('subversion/single_word_commit') + } + + file { "${local_dir}/pre-revprop-change": + mode => '0755', + content => template('subversion/pre-revprop-change') + } + + file { "${local_dir}/pre-commit.d/converted_to_git": + mode => '0755', + content => template('subversion/converted_to_git') } # TODO : add check for @@ -57,166 +70,46 @@ class subversion { # - openldap , like named define syntax_check($regexp_ext,$check_cmd) { - file { "$local_dir/pre-commit.d/$name": - ensure => present, - owner => root, - group => root, - mode => 755, - content => template('subversion/syntax_check.sh') + file { "${subversion::server::local_dir}/pre-commit.d/${name}": + mode => '0755', + content => template('subversion/syntax_check.sh') } } - syntax_check{"check_perl": - regexp_ext => "\.p[lm]$", - check_cmd => "perl -c" - } - - syntax_check{"check_puppet": - regexp_ext => "\.pp$", - check_cmd => "puppet --color=false --confdir=/tmp --vardir=/tmp --parseonly" - } - - syntax_check{"check_ruby": - regexp_ext => "\.rb$", - check_cmd => "ruby -c" + syntax_check{'check_perl': + regexp_ext => '\.p[lm]$', + check_cmd => 'perl -c' } - syntax_check{"check_puppet_templates": - regexp_ext => "modules/.*/templates/.*$", - check_cmd => "erb -x -T - | ruby -c" + syntax_check{'check_puppet': + regexp_ext => '\.pp$', + check_cmd => 'puppet parser validate -' } - } - - # FIXME ugly - define pre_commit_link($directory) { - file { "pre_commit_link-${name}": - path => "$directory/$name", - ensure => "/usr/local/share/subversion/pre-commit.d/$name", - owner => root, - group => root, - mode => 755, - } - } - - # TODO - # deploy a cronjob to make a backup file ( ie, dump in some directory ) - - # documentation : - # group : group that have commit access on the svn - # public : boolean if the svn is readable by anybody or not - # commit_mail : array of people who will receive mail after each commit - # 
syntax_check : array of pre-commit script with syntax check to add - # extract_dir : hash of directory to update upon commit ( with svn update ), - # initial checkout is not handled, nor the permission - # TODO, handle the tags ( see svn::notify::mirror ) - - define repository ($group = "svn", - $public = true, - $commit_mail = [], - $syntax_check = [], - $extract_dir = []) { - # check permissions - # http://svnbook.red-bean.com/nightly/fr/svn.serverconfig.multimethod.html - # $name ==> directory of the repo - include subversion::server - # TODO set umask -> requires puppet 2.7.0 - # unfortunatly, umask is required - # http://projects.puppetlabs.com/issues/4424 - exec { "/usr/local/bin/create_svn_repo.sh $name": - user => root, - group => $group, - creates => "$name/hooks", - require => Package['subversion-tools'], - } - - file { "$name": - group => $group, - owner => root, - mode => $public ? { - true => 644, - false => 640 - }, - ensure => directory - } - - file { ["$name/hooks/pre-commit","$name/hooks/post-commit"]: - ensure => present, - owner => root, - group => root, - mode => 755, - content => template("subversion/hook_commit.sh"), - require => Exec["/usr/local/bin/create_svn_repo.sh $name"], - } - - file { ["$name/hooks/post-commit.d", "$name/hooks/pre-commit.d"]: - ensure => directory, - owner => root, - group => root, - mode => 755, - require => File["$name/hooks/pre-commit"], - } - - if $commit_mail { - file { "$name/hooks/post-commit.d/send_mail": - ensure => present, - owner => root, - group => root, - mode => 755, - content => template("subversion/hook_sendmail.pl"), - require => [Package['perl-SVN-Notify-Config']], - } + syntax_check{'check_ruby': + regexp_ext => '\.rb$', + check_cmd => 'ruby -c' } - if $extract_dir { - file { "$name/hooks/post-commit.d/extract_dir": - ensure => present, - owner => root, - group => root, - mode => 755, - content => template("subversion/hook_extract.pl"), - require => [Package['perl-SVN-Notify-Mirror']], - } + syntax_check{'check_puppet_templates': + regexp_ext => 'modules/.*/templates/.*$', + check_cmd => 'erb -P -x -T - | ruby -c' } - pre_commit_link { ['no_empty_message','no_root_commit', $syntax_check]: - directory => "$name/hooks/pre-commit.d/" - } - } - - - class client { - package { subversion: - ensure => installed, - } - # svn spam log with - # Oct 26 13:30:01 valstar svn: No worthy mechs found - # without it, source http://mail-index.netbsd.org/pkgsrc-users/2008/11/23/msg008706.html - # - $sasl2_package = $architecture ? 
{ - x86_64 => "lib64sasl2-plug-anonymous", - default => "libsasl2-plug-anonymous" - } - - package {"$sasl2_package": - ensure => "installed" + syntax_check{'check_po': + regexp_ext => '\.po$', + check_cmd => 'msgfmt -c -' } - } - - define snapshot($source, $refresh = '*/5', $user = 'root') { - include subversion::client - - exec { "/usr/bin/svn co $source $name": - creates => $name, - user => $user, + syntax_check{'check_php': + regexp_ext => '\.php$', + check_cmd => 'php -d display_errors=1 -d error_reporting="E_ALL|E_STRICT" -l' } - cron { "update $name": - command => "cd $name && /usr/bin/svn update -q", - user => $user, - minute => $refresh - } + # needed for check_php + package { 'php-cli': } } + # TODO + # deploy a cronjob to make a backup file ( ie, dump in some directory ) } diff --git a/modules/subversion/manifests/mirror.pp b/modules/subversion/manifests/mirror.pp new file mode 100644 index 00000000..2285ecb2 --- /dev/null +++ b/modules/subversion/manifests/mirror.pp @@ -0,0 +1,6 @@ +class subversion::mirror { + include subversion::tools + mga_common::local_script { 'create_svn_mirror.sh': + content => template('subversion/create_svn_mirror.sh') + } +} diff --git a/modules/subversion/manifests/mirror_repository.pp b/modules/subversion/manifests/mirror_repository.pp new file mode 100644 index 00000000..1e0fabd3 --- /dev/null +++ b/modules/subversion/manifests/mirror_repository.pp @@ -0,0 +1,15 @@ +define subversion::mirror_repository( $source, + $refresh = '*/5') { + include subversion::mirror + + exec { "/usr/local/bin/create_svn_mirror.sh ${name} ${source}": + creates => $name, + require => Package['subversion-tools'] + } + + cron { "update ${name}": + command => "/usr/bin/svnsync synchronize -q file://${name}", + minute => $refresh, + require => Exec["/usr/local/bin/create_svn_mirror.sh ${name} ${source}"], + } +} diff --git a/modules/subversion/manifests/pre_commit_link.pp b/modules/subversion/manifests/pre_commit_link.pp new file mode 100644 index 00000000..fa3c2b2c --- /dev/null +++ b/modules/subversion/manifests/pre_commit_link.pp @@ -0,0 +1,8 @@ +define subversion::pre_commit_link() { + $scriptname = regsubst($name,'^.*/', '') + file { $name: + ensure => 'link', + target => "/usr/local/share/subversion/pre-commit.d/${scriptname}", + mode => '0755', + } +} diff --git a/modules/subversion/manifests/repository.pp b/modules/subversion/manifests/repository.pp new file mode 100644 index 00000000..b223e6ae --- /dev/null +++ b/modules/subversion/manifests/repository.pp @@ -0,0 +1,132 @@ +# documentation : +# group : group that have commit access on the svn +# public : boolean if the svn is readable by anybody or not +# commit_mail : array of people who will receive mail after each commit +# irker_conf : hash containing irker config values. See man irkerhook +# for possible values in irker.conf. 
+# irkerhook_path : path to irkerhook.py script +# no_binary : do not accept files with common binary extensions +# on this repository +# restricted_to_user : restrict commits to select user +# syntax_check : array of pre-commit script with syntax check to add +# extract_dir : hash of directory to update upon commit ( with svn update ), +# initial checkout is not handled, nor the permission +# TODO, handle the tags ( see svn::notify::mirror ) + +define subversion::repository($group = 'svn', + $public = true, + $commit_mail = '', + $irker_conf = undef, + $irkerhook_path = '/usr/lib/irker/irkerhook.py', + $i18n_mail = '', + $no_binary = false, + $restricted_to_user = false, + $syntax_check = '', + $extract_dir = '') { + # check permissions + # https://svnbook.red-bean.com/nightly/fr/svn.serverconfig.multimethod.html + # $name ==> directory of the repo + include subversion::server + # TODO set umask -> requires puppet 2.7.0 + # unfortunately, umask is required + # https://projects.puppetlabs.com/issues/4424 + exec { "/usr/local/bin/create_svn_repo.sh ${name}": + user => 'root', + group => $group, + creates => "${name}/hooks", + require => Package['subversion-tools'], + } + + file { $name: + ensure => directory, + group => $group, + owner => 'root', + mode => $public ? { + true => '0644', + false => '0640', + }, + } + + file { ["${name}/hooks/pre-commit","${name}/hooks/post-commit"]: + mode => '0755', + content => template('subversion/hook_commit.sh'), + require => Exec["/usr/local/bin/create_svn_repo.sh ${name}"], + } + + file { ["${name}/hooks/post-commit.d", "${name}/hooks/pre-commit.d"]: + ensure => directory, + require => File["${name}/hooks/pre-commit"], + } + + file { "${name}/hooks/pre-revprop-change": + ensure => "${subversion::server::local_dir}/pre-revprop-change", + mode => '0755', + require => File["${name}/hooks/pre-commit"], + } + + if $restricted_to_user { + subversion::hook::pre_commit { "${name}|restricted_to_user": + content => template('subversion/restricted_to_user'), + } + } else { + file { "${name}/hooks/pre-commit.d/restricted_to_user": + ensure => absent, + } + } + + if $commit_mail { + subversion::hook::post_commit { "${name}|send_mail": + content => template('subversion/hook_sendmail.pl'), + require => Package['perl-SVN-Notify-Config'], + } + } else { + file { "${name}/hooks/post-commit.d/send_mail": + ensure => absent, + } + } + + + if $irker_conf { + subversion::hook::post_commit { "${name}|irker": + content => template('subversion/hook_irker'), + } + file { "${name}/irker.conf": + content => template('subversion/irker.conf'), + } + } else { + file { "${name}/hooks/post-commit.d/irker": + ensure => absent, + } + } + + + if $no_binary { + pre_commit_link { "${name}/hooks/pre-commit.d/no_binary": } + } else { + file { "${name}/hooks/pre-commit.d/no_binary": + ensure => absent, + } + } + + if $extract_dir { + subversion::hook::post_commit {"${name}|extract_dir": + content => template('subversion/hook_extract.pl'), + require => [Package['perl-SVN-Notify-Mirror']], + } + } else { + file { "${name}/hooks/post-commit.d/extract_dir": + ensure => absent, + } + } + + pre_commit_link { "${name}/hooks/pre-commit.d/no_empty_message": } + + pre_commit_link { "${name}/hooks/pre-commit.d/no_root_commit": } + + pre_commit_link { "${name}/hooks/pre-commit.d/converted_to_git": } + + if $syntax_check { + $syntax_check_array = regsubst($syntax_check,'^',"${name}/hooks/pre-commit.d/") + pre_commit_link { $syntax_check_array: } + } +} diff --git a/modules/subversion/manifests/snapshot.pp 
b/modules/subversion/manifests/snapshot.pp new file mode 100644 index 00000000..00e66dde --- /dev/null +++ b/modules/subversion/manifests/snapshot.pp @@ -0,0 +1,21 @@ +define subversion::snapshot($source, + $refresh = '*/5', + $user = 'root') { + + include subversion::client + + exec { "/usr/bin/svn co ${source} ${name}": + creates => $name, + user => $user, + require => Package['subversion'], + } + + if ($refresh != '0') { + cron { "update ${name}": + command => "cd ${name} && /usr/bin/svn update -q", + user => $user, + minute => $refresh, + require => Exec["/usr/bin/svn co ${source} ${name}"], + } + } +} diff --git a/modules/subversion/manifests/tools.pp b/modules/subversion/manifests/tools.pp new file mode 100644 index 00000000..39d86373 --- /dev/null +++ b/modules/subversion/manifests/tools.pp @@ -0,0 +1,3 @@ +class subversion::tools { + package { 'subversion-tools': } +} diff --git a/modules/subversion/templates/converted_to_git b/modules/subversion/templates/converted_to_git new file mode 100644 index 00000000..8f137506 --- /dev/null +++ b/modules/subversion/templates/converted_to_git @@ -0,0 +1,16 @@ +#!/bin/sh + +REPOS="$1" +TXN="$2" + +if [ ! -f "$REPOS/conf/git.conf" ]; then + exit 0 +fi + +REGEX=$(cat "$REPOS/conf/git.conf" | grep -v "^#" | grep -v "^ *$" | xargs | sed 's/ /|/g') + +if (svnlook dirs-changed -t $TXN "$REPOS" | grep -qE "^($REGEX)"); then + echo "The subversion path you have attempted to commit to has been converted to git." >&2 + echo "Please see: https://wiki.mageia.org/en/Git_Migration" >&2 + exit 1 +fi diff --git a/modules/subversion/templates/create_svn_mirror.sh b/modules/subversion/templates/create_svn_mirror.sh new file mode 100644 index 00000000..ab0ada1b --- /dev/null +++ b/modules/subversion/templates/create_svn_mirror.sh @@ -0,0 +1,13 @@ +#!/bin/bash +umask 0002 +LOCAL_REPOS=$1 +REMOTE_REPOS=$2 +svnadmin create $LOCAL_REPOS +# needed, as svnsync complain otherwise : +# svnsync: Repository has not been enabled to accept revision propchanges; +# ask the administrator to create a pre-revprop-change hook +ln -s /bin/true $LOCAL_REPOS/hooks/pre-revprop-change +svnsync init file://$1 $2 +# do not sync now, +# let cron do it or puppet will complain ( especially for long sync ) +#svnsync synchronize file://$1 diff --git a/modules/subversion/templates/hook_commit.sh b/modules/subversion/templates/hook_commit.sh index 0fdfc3e5..2b1b6ff3 100644 --- a/modules/subversion/templates/hook_commit.sh +++ b/modules/subversion/templates/hook_commit.sh @@ -1,5 +1,20 @@ #!/bin/sh -for script in $0.d/*; do + +REP="$1" +TXN="$2" + +author=$(svnlook author -t "$TXN" "$REP") + +# This is here only the time we use hook_sendmail.pl +# We will be able to remove it when updating to a better send mail hook + +if [ "$author" = 'schedbot' ]; then + LIST=`ls -1 $0.d/* | grep -v send_mail` +else + LIST=`ls -1 $0.d/*` +fi + +for script in $LIST; do if [ ! 
-x "$script" ]; then continue fi @@ -10,4 +25,3 @@ for script in $0.d/*; do $script $@ || exit 1 done - diff --git a/modules/subversion/templates/hook_irker b/modules/subversion/templates/hook_irker new file mode 100644 index 00000000..8fd7a874 --- /dev/null +++ b/modules/subversion/templates/hook_irker @@ -0,0 +1,4 @@ +#!/bin/sh +REPO=$1 +REV=$2 +<%= irkerhook_path %> --repository=$REPO $REV diff --git a/modules/subversion/templates/hook_sendmail.pl b/modules/subversion/templates/hook_sendmail.pl index 1fdc381f..cf3be6a4 100644 --- a/modules/subversion/templates/hook_sendmail.pl +++ b/modules/subversion/templates/hook_sendmail.pl @@ -6,8 +6,27 @@ handler: Alternative alternative: HTML::ColorDiff with-diff: 1 + max_diff_length: 20000 + ticket_map: + '(\bmga#(\d+)\b)': 'https://bugs.mageia.org/show_bug.cgi?id=%s' + revision-url: "https://svnweb.mageia.org/packages/?revision=%s&view=revision" + subject_cx: 1 + from: subversion_noreply@ml.<%= @domain %> to: <%- commit_mail.each do |mail| -%> - <%= mail %> <%- end -%> - from: root@<%= domain %> +<%- if i18n_mail != '' -%> +'.*\.pot$': + PATH: "/usr/bin:/usr/local/bin" + handler: Alternative + alternative: HTML::ColorDiff + with-diff: 1 + max_diff_length: 20000 + ticket_map: + '(\bmga#(\d+)\b)': 'https://bugs.mageia.org/show_bug.cgi?id=%s' + revision-url: "https://svnweb.mageia.org/packages/?revision=%s&view=revision" + subject_cx: 1 + from: subversion_noreply@ml.<%= @domain %> + to: <%= i18n_mail %> +<%- end -%> diff --git a/modules/subversion/templates/irker.conf b/modules/subversion/templates/irker.conf new file mode 100644 index 00000000..d037a120 --- /dev/null +++ b/modules/subversion/templates/irker.conf @@ -0,0 +1,7 @@ +<%- + content = '' + @irker_conf.keys.sort.each {|key| + content += key + ' = ' + @irker_conf[key] + "\n" + } +-%> +<%= content %> diff --git a/modules/subversion/templates/no_binary b/modules/subversion/templates/no_binary new file mode 100644 index 00000000..284642e5 --- /dev/null +++ b/modules/subversion/templates/no_binary @@ -0,0 +1,14 @@ +#!/bin/sh + +REP="$1" +TXN="$2" + +# Filter some binary files based on common filename extensions. 
+# It does not fully prevent commit of binary files, this script is only +# here to avoid simple mistakes +if svnlook changed -t "$TXN" "$REP" | grep -qi '\.\(gz\|bz2\|xz\|lzma\|Z\|7z\|tar\|tgz\|zip\|jpg\|gif\|png\|ogg\|mp3\|wav\|rar\|pdf\)$' +then + echo 'no binary files allowed on this repository' >&2 + exit 1 +fi + diff --git a/modules/subversion/templates/pre-revprop-change b/modules/subversion/templates/pre-revprop-change new file mode 100644 index 00000000..e9b18150 --- /dev/null +++ b/modules/subversion/templates/pre-revprop-change @@ -0,0 +1,15 @@ +#!/bin/sh + +# script taken from svn example hooks + +REPOS="$1" +REV="$2" +USER="$3" +PROPNAME="$4" +ACTION="$5" + +if [ "$ACTION" = "M" -a "$PROPNAME" = "svn:log" ]; then exit 0; fi + +echo "Changing revision properties other than svn:log is prohibited" >&2 +exit 1 + diff --git a/modules/subversion/templates/restricted_to_user b/modules/subversion/templates/restricted_to_user new file mode 100644 index 00000000..98297627 --- /dev/null +++ b/modules/subversion/templates/restricted_to_user @@ -0,0 +1,12 @@ +#!/bin/sh + +REP="$1" +TXN="$2" + +author=$(svnlook author -t "$TXN" "$REP") + +if [ "$author" != '<%= restricted_to_user %>' ]; then + echo "this repository is restricted to user <%= restricted_to_user %>" >&2 + exit 1 +fi + diff --git a/modules/subversion/templates/single_word_commit b/modules/subversion/templates/single_word_commit new file mode 100644 index 00000000..1b0ff8a5 --- /dev/null +++ b/modules/subversion/templates/single_word_commit @@ -0,0 +1,12 @@ +#!/bin/sh + +REP="$1" +TXN="$2" + +LOG=$(svnlook log -t "$TXN" "$REP") + +if ! echo "$LOG" | grep -qvP '^\s*\b\S+\b\s*$'; then + echo "one word commit message not allowed" >&2 + exit 1 +fi + diff --git a/modules/subversion/templates/syntax_check.sh b/modules/subversion/templates/syntax_check.sh index 74d7bf4a..3960cdab 100644 --- a/modules/subversion/templates/syntax_check.sh +++ b/modules/subversion/templates/syntax_check.sh @@ -2,6 +2,7 @@ REPOS="$1" TXN="$2" +export PATH="/bin/:/sbin/:/usr/bin/:/usr/sbin/:/usr/local/bin:/usr/local/sbin/" changed=`svnlook changed -t "$TXN" "$REPOS"` files=`echo $changed | awk '{print $2}'` @@ -11,7 +12,7 @@ then if [ $? -ne 0 ] then echo "Syntax error in $files." 1>&2 - echo "Check it with <%= check_cmd %>" + echo "Check it with <%= check_cmd %>" 1>&2 exit 1 fi fi diff --git a/modules/subversion/templates/xinetd b/modules/subversion/templates/xinetd new file mode 100644 index 00000000..0919ae60 --- /dev/null +++ b/modules/subversion/templates/xinetd @@ -0,0 +1,14 @@ +# default: off +# description: svnserve is the server part of Subversion. 
+service svnserve +{ + disable = no + port = 3690 + socket_type = stream + protocol = tcp + wait = no + user = svn + server = /usr/bin/svnserve + server_args = -i -r <%= svn_base_path %> + flags = IPv6 +} diff --git a/modules/sudo/manifests/init.pp b/modules/sudo/manifests/init.pp index 93ebc249..7d1277ce 100644 --- a/modules/sudo/manifests/init.pp +++ b/modules/sudo/manifests/init.pp @@ -1,20 +1,13 @@ class sudo { - package { sudo: - ensure => installed; - } + package { 'sudo': } - file { "/etc/sudoers.d": + file { '/etc/sudoers.d': ensure => directory, - mode => 711, - owner => root, - group => root, + mode => '0711', } - file { "/etc/sudoers": - ensure => present, - owner => root, - group => root, - mode => 440, - content => template("sudo/sudoers") + file { '/etc/sudoers': + mode => '0440', + content => template('sudo/sudoers'), } } diff --git a/modules/sudo/manifests/sudoers_config.pp b/modules/sudo/manifests/sudoers_config.pp new file mode 100644 index 00000000..fdc38e9b --- /dev/null +++ b/modules/sudo/manifests/sudoers_config.pp @@ -0,0 +1,6 @@ +define sudo::sudoers_config($content) { + file { "/etc/sudoers.d/${name}": + mode => '0440', + content => $content, + } +} diff --git a/modules/sudo/templates/sudoers b/modules/sudo/templates/sudoers index 80f4bfd7..5ac87f78 100644 --- a/modules/sudo/templates/sudoers +++ b/modules/sudo/templates/sudoers @@ -1 +1,14 @@ +Defaults env_reset +Defaults env_keep = "COLORS DISPLAY HOSTNAME HISTSIZE LS_COLORS" +Defaults env_keep += "MAIL PS1 PS2 USERNAME LANG LC_ADDRESS LC_CTYPE" +Defaults env_keep += "LC_COLLATE LC_IDENTIFICATION LC_MEASUREMENT LC_MESSAGES" +Defaults env_keep += "LC_MONETARY LC_NAME LC_NUMERIC LC_PAPER LC_TELEPHONE" +Defaults env_keep += "LC_TIME LC_ALL LANGUAGE LINGUAS _XKB_CHARSET XAUTHORITY" + +Defaults secure_path = /sbin:/bin:/usr/sbin:/usr/bin + +## Allow root to run any commands anywhere +root ALL=(ALL) ALL + +## Read drop-in files from /etc/sudoers.d (the # here does not mean a comment) #includedir /etc/sudoers.d diff --git a/modules/sympa/files/scenari/forbidden b/modules/sympa/files/scenari/forbidden new file mode 100644 index 00000000..6c0ac7a8 --- /dev/null +++ b/modules/sympa/files/scenari/forbidden @@ -0,0 +1,2 @@ +title.gettext nobody +true() smtp,md5,smime -> reject diff --git a/modules/sympa/files/scenari/open_web_only_notify b/modules/sympa/files/scenari/open_web_only_notify new file mode 100644 index 00000000..621e425c --- /dev/null +++ b/modules/sympa/files/scenari/open_web_only_notify @@ -0,0 +1,5 @@ +title.gettext anyone on the web, notification is sent to list owner + +# do not notify if it is just an update +is_subscriber([listname],[sender]) smtp,smime,md5 -> do_it +true() md5 -> do_it,notify diff --git a/modules/sympa/files/topics.conf b/modules/sympa/files/topics.conf new file mode 100644 index 00000000..92e1809c --- /dev/null +++ b/modules/sympa/files/topics.conf @@ -0,0 +1,32 @@ +bugsquad +title Bug triaging + +sysadmin +title System administration, infrastructure + +i18n +title Internationalization and translation + +developers +title Development + +qa +title Quality Assurance + +governance +title Board, Council and others governance group + +forums +title Forums + +doc +title Documentation + +local +title Local Community + +atelier +title Atelier (Artwork, Web, Marketing, Communication) + +users +title Users discussions diff --git a/modules/sympa/manifests/datasource/ldap_group.pp b/modules/sympa/manifests/datasource/ldap_group.pp new file mode 100644 index 00000000..6060bec4 --- /dev/null +++ 
b/modules/sympa/manifests/datasource/ldap_group.pp @@ -0,0 +1,5 @@ +define sympa::datasource::ldap_group { + file { "/etc/sympa/data_sources/${name}.incl": + content => template('sympa/data_sources/ldap_group.incl') + } +} diff --git a/modules/sympa/manifests/init.pp b/modules/sympa/manifests/init.pp index 3a68ddcd..7f6fcfe6 100644 --- a/modules/sympa/manifests/init.pp +++ b/modules/sympa/manifests/init.pp @@ -1,40 +1 @@ -class sympa { - - $package_list = ['sympa', 'sympa-www'] - - package { $package_list: - ensure => installed; - } - - $password = extlookup("sympa_password") - $ldappass = extlookup("sympa_ldap") - - file { '/etc/sympa/sympa.conf': - ensure => present, - # should be cleaner to have it root owned, but puppet do not support acl - # and in any case, config will be reset if it change - owner => sympa, - group => apache, - mode => 640, - content => template("sympa/sympa.conf") - } - - file { '/etc/sympa/auth.conf': - ensure => present, - owner => root, - group => root, - mode => 644, - content => template("sympa/auth.conf") - } - - - include apache::mod_fcgid - apache::webapp_other{"sympa": - webapp_file => "sympa/webapp_sympa.conf", - } - - apache::vhost_other_app { "ml.$domain": - vhost_file => "sympa/vhost_ml.conf", - } -} - +class sympa { } diff --git a/modules/sympa/manifests/list.pp b/modules/sympa/manifests/list.pp new file mode 100644 index 00000000..205d2719 --- /dev/null +++ b/modules/sympa/manifests/list.pp @@ -0,0 +1,57 @@ +define sympa::list( $subject, + $language = 'en', + $topics = false, + $reply_to = false, + $sender_subscriber = false, + $sender_email = false, + $sender_ldap_group = false, + $subscriber_ldap_group = false, + $public_archive = true, + $subscription_open = false, + $critical = false) { + + include sympa::variable + $ldap_password = extlookup('sympa_ldap','x') + $custom_subject = $name + + $xml_file = "/etc/sympa/lists_xml/${name}.xml" + + file { $xml_file: + content => template('sympa/list.xml'), + require => Package[sympa], + } + + exec { "sympa.pl --create_list --robot=${sympa::variable::vhost} --input_file=${xml_file}": + require => File[$xml_file], + creates => "/var/lib/sympa/expl/${name}", + before => File["/var/lib/sympa/expl/${name}/config"], + } + + file { "/var/lib/sympa/expl/${name}/config": + owner => 'sympa', + group => 'sympa', + mode => '0750', + content => template('sympa/config'), + notify => Service['sympa'], + } + + sympa::scenario::sender_restricted { $name: + ldap_group => $sender_ldap_group, + email => $sender_email, + allow_subscriber => $sender_subscriber, + } + + if $subscriber_ldap_group { + if ! defined(Sympa::Search_filter::Ldap[$subscriber_ldap_group]) { + sympa::search_filter::ldap { $subscriber_ldap_group: } + } + } + + if $sender_ldap_group { + if ! 
defined(Sympa::Search_filter::Ldap[$sender_ldap_group]) { + sympa::search_filter::ldap { $sender_ldap_group: } + } + } +} + + diff --git a/modules/sympa/manifests/list/announce.pp b/modules/sympa/manifests/list/announce.pp new file mode 100644 index 00000000..2dd1c647 --- /dev/null +++ b/modules/sympa/manifests/list/announce.pp @@ -0,0 +1,21 @@ +# list where announce are sent by $email or $ldap_group only +# reply_to is set to $reply_to +define sympa::list::announce($subject, + $reply_to, + $sender_email = false, + $sender_ldap_group = false, + $subscriber_ldap_group = false, + $language = 'en', + $topics = false, + $critical = false) { + list { $name: + subject => $subject, + language => $language, + topics => $topics, + reply_to => $reply_to, + sender_email => $sender_email, + sender_ldap_group => $sender_ldap_group, + subscriber_ldap_group => $subscriber_ldap_group, + critical => $critical + } +} diff --git a/modules/sympa/manifests/list/private.pp b/modules/sympa/manifests/list/private.pp new file mode 100644 index 00000000..c8d9b38e --- /dev/null +++ b/modules/sympa/manifests/list/private.pp @@ -0,0 +1,16 @@ +# list with private archive, restricted to member of $ldap_group +define sympa::list::private($subject, + $subscriber_ldap_group, + $sender_email = false, + $language ='en', + $topics = false) { + list { $name: + subject => $subject, + language => $language, + topics => $topics, + subscriber_ldap_group => $subscriber_ldap_group, + sender_ldap_group => $subscriber_ldap_group, + sender_email => $sender_email, + public_archive => false, + } +} diff --git a/modules/sympa/manifests/list/public.pp b/modules/sympa/manifests/list/public.pp new file mode 100644 index 00000000..7b97534a --- /dev/null +++ b/modules/sympa/manifests/list/public.pp @@ -0,0 +1,16 @@ +# public discussion list +# reply_to is set to the list +define sympa::list::public($subject, + $language = 'en', + $topics = false, + $sender_email = false) { + include sympa::variable + list { $name: + subject => $subject, + language => $language, + topics => $topics, + sender_email => $sender_email, + sender_subscriber => true, + reply_to => "${name}@${sympa::variable::vhost}", + } +} diff --git a/modules/sympa/manifests/list/public_restricted.pp b/modules/sympa/manifests/list/public_restricted.pp new file mode 100644 index 00000000..5c316368 --- /dev/null +++ b/modules/sympa/manifests/list/public_restricted.pp @@ -0,0 +1,17 @@ +# list where only people from the ldap_group can post, and where +# they are subscribed by default, but anybody else can subscribe +# to read and receive messages +define sympa::list::public_restricted($subject, + $subscriber_ldap_group, + $language = 'en', + $topics = false) { + list { $name: + subject => $subject, + topics => $topics, + language => $language, + subscriber_ldap_group => $subscriber_ldap_group, + sender_ldap_group => $subscriber_ldap_group, + subscription_open => true, + reply_to => "${name}@${sympa::variable::vhost}", + } +} diff --git a/modules/sympa/manifests/scenario/sender_restricted.pp b/modules/sympa/manifests/scenario/sender_restricted.pp new file mode 100644 index 00000000..c69d3669 --- /dev/null +++ b/modules/sympa/manifests/scenario/sender_restricted.pp @@ -0,0 +1,9 @@ +define sympa::scenario::sender_restricted( + $email = false, + $ldap_group = false, + $allow_subscriber = false +) { + file { "/etc/sympa/scenari/send.restricted_${name}": + content => template('sympa/scenari/sender.restricted') + } +} diff --git a/modules/sympa/manifests/search_filter/ldap.pp 
b/modules/sympa/manifests/search_filter/ldap.pp new file mode 100644 index 00000000..5cbc84f8 --- /dev/null +++ b/modules/sympa/manifests/search_filter/ldap.pp @@ -0,0 +1,5 @@ +define sympa::search_filter::ldap { + file { "/etc/sympa/search_filters/$name.ldap": + content => template('sympa/search_filters/group.ldap') + } +} diff --git a/modules/sympa/manifests/server.pp b/modules/sympa/manifests/server.pp new file mode 100644 index 00000000..bcdda789 --- /dev/null +++ b/modules/sympa/manifests/server.pp @@ -0,0 +1,103 @@ +class sympa::server( + $authentication_info_url = 'https://wiki.mageia.org/en/Mageia.org_user_account' + ) { + include sympa::variable + # perl-CGI-Fast is needed for fast cgi + # perl-Socket6 is required by perl-IO-Socket-SSL + # (optional requirement) + package {['sympa', + 'sympa-www', + 'perl-CGI-Fast', + 'perl-Socket6']: } + + # sympa script starts 5 different scripts; I am not + # sure that puppet will correctly handle this + service { 'sympa': + subscribe => [ Package['sympa'], File['/etc/sympa/sympa.conf']] + } + + service { 'sympa-outgoing': + ensure => running, + require => Service['sympa'] + } + + $pgsql_password = extlookup('sympa_pgsql','x') + $ldap_password = extlookup('sympa_ldap','x') + + postgresql::remote_db_and_user { 'sympa': + password => $pgsql_password, + description => 'Sympa database', + } + + File { + require => Package['sympa'], + } + + $vhost = $sympa::variable::vhost + file { '/etc/sympa/sympa.conf': + # should be cleaner to have it root owned, but puppet does not support acls + # and in any case, config will be reset if it changes + owner => 'sympa', + group => 'apache', + mode => '0640', + content => template('sympa/sympa.conf'), + } + + file { '/etc/sympa/auth.conf': + content => template('sympa/auth.conf'), + notify => Service['httpd'], + } + + + include apache::mod::fcgid + apache::webapp_other { 'sympa': + webapp_file => 'sympa/webapp_sympa.conf', + } + + apache::vhost::redirect_ssl { $sympa::variable::vhost: } + + apache::vhost::base { $sympa::variable::vhost: + use_ssl => true, + content => template('sympa/vhost_ml.conf'), + } + +# git::snapshot { '/etc/sympa/web_tt2': +# source => "git://git.${::domain}/web/templates/sympa", +# } + + file { ['/etc/sympa/lists_xml/', + '/etc/sympa/scenari/', + '/etc/sympa/data_sources/', + '/etc/sympa/search_filters/']: + ensure => directory, + purge => true, + recurse => true, + force => true, + } + + file { + '/etc/sympa/scenari/subscribe.open_web_only_notify': + source => 'puppet:///modules/sympa/scenari/open_web_only_notify'; + '/etc/sympa/scenari/unsubscribe.open_web_only_notify': + source => 'puppet:///modules/sympa/scenari/open_web_only_notify'; + '/etc/sympa/scenari/create_list.forbidden': + source => 'puppet:///modules/sympa/scenari/forbidden'; + '/etc/sympa/topics.conf': + source => 'puppet:///modules/sympa/topics.conf'; + } + + # add each group that could be used in a sympa ml either as + # - owner + # - editor ( moderation ) + sympa::datasource::ldap_group { 'mga-sysadmin': } + sympa::datasource::ldap_group { 'mga-ml_moderators': } + + + # directory that will hold the list data + # i am not sure of the name ( misc, 09/12/10 ) + file { '/var/lib/sympa/expl/': + ensure => directory, + owner => 'sympa', + } + +} diff --git a/modules/sympa/manifests/variable.pp b/modules/sympa/manifests/variable.pp new file mode 100644 index 00000000..26f60294 --- /dev/null +++ b/modules/sympa/manifests/variable.pp @@ -0,0 +1,3 @@ +class sympa::variable { + $vhost = "ml.${::domain}" +} diff --git 
a/modules/sympa/templates/auth.conf b/modules/sympa/templates/auth.conf index 220118b5..854fdf9c 100644 --- a/modules/sympa/templates/auth.conf +++ b/modules/sympa/templates/auth.conf @@ -1,13 +1,15 @@ ldap - host ldap.<%= domain %>:389 + host ldap.<%= domain %> timeout 30 suffix <%= dc_suffix %> get_dn_by_uid_filter (uid=[sender]) - get_dn_by_email (|(mail=[sender])(mailalternateaddress=[sender])) + get_dn_by_email_filter (|(mail=[sender])(mailalternateaddress=[sender])) email_attribute mail scope sub - use_ssl 1 - -user_table - regexp .* + use_tls ldaps + ssl_version tlsv1_2 + ca_verify none + bind_dn cn=sympa-<%= hostname %>,ou=System Accounts,<%= dc_suffix %> + bind_password <%= scope.lookupvar("sympa::server::ldap_password") %> + authentication_info_url <%= authentication_info_url %> diff --git a/modules/sympa/templates/config b/modules/sympa/templates/config new file mode 100644 index 00000000..4262f3ca --- /dev/null +++ b/modules/sympa/templates/config @@ -0,0 +1,103 @@ + +archive +period month +mail_access owner +<%- if public_archive and not @critical -%> +web_access public +<%- else -%> +web_access private +<%- end -%> + +visibility noconceal + +digest 1,4 13:26 + +<% if subscriber_ldap_group and not subscription_open %> +# TODO check scenari +subscribe closed + +unsubscribe closed +<% else %> +subscribe open_web_only_notify + +unsubscribe open_web_only_notify +<% end %> + +editor +email listmaster@<%= domain %> +reception nomail +gecos Moderator team +visibility conceal + +editor_include +reception nomail +source mga-ml_moderators +visibility conceal + +subject <%= subject %> + +custom_subject <%= custom_subject %> + +<%- if @critical -%> +info conceal + +subscribe auth owner + +unsubscribe auth_notify + +invite owner +<% end %> + +lang <%= language %> + +owner +gecos Sysadmin team +reception nomail +email postmaster@<%= domain %> +visibility noconceal +profile normal + +owner_include +profile normal +visibility conceal +source mga-sysadmin +reception nomail + + +<%- if @reply_to -%> +reply_to_header +value other_email +other_email <%= reply_to %> +apply forced +<%- end -%> + + +review owner + +<% if topics %> +topics <%= topics %> +<% end %> + +send restricted_<%= @name %> + +<% if subscriber_ldap_group %> +include_ldap_query + timeout 10 + scope one + select first + ssl_version tlsv1_2 + ca_verify none + use_tls ldaps + attrs mail + ssl_ciphers ALL + passwd <%= scope.lookupvar("sympa::server::ldap_password") %> + user cn=sympa-<%= hostname %>,ou=System Accounts,<%= dc_suffix %> + suffix ou=People,<%= dc_suffix %> + filter (memberOf=cn=<%= subscriber_ldap_group %>,ou=Group,<%= dc_suffix %>) + host ldap.<%= domain %> + +<% end %> + +process_archive on + +status open diff --git a/modules/sympa/templates/data_sources/ldap_group.incl b/modules/sympa/templates/data_sources/ldap_group.incl new file mode 100644 index 00000000..609a7e42 --- /dev/null +++ b/modules/sympa/templates/data_sources/ldap_group.incl @@ -0,0 +1,17 @@ +include_ldap_2level_query + host ldap.<%= domain %> + use_tls ldaps + ssl_version tlsv1_2 + ca_verify none + user cn=sympa-<%= hostname %>,ou=System Accounts,<%= dc_suffix %> + passwd <%= scope.lookupvar("sympa::server::ldap_password") %> + suffix1 ou=Group,<%= dc_suffix %> + scope1 one + filter1 (&(objectClass=groupOfNames)(cn=<%= name %>)) + attrs1 member + select1 all + suffix2 [attrs1] + scope2 base + filter2 (objectClass=inetOrgPerson) + attrs2 mail + select2 first diff --git a/modules/sympa/templates/list.xml b/modules/sympa/templates/list.xml new file mode 
100644 index 00000000..74e4f07f --- /dev/null +++ b/modules/sympa/templates/list.xml @@ -0,0 +1,16 @@ +<?xml version="1.0" ?> +<list> + <listname><%= name %></listname> + <type>discussion_list</type> + <subject><%= subject %></subject> + <description/> + <status>open</status> + <language><%= language %></language> + <owner_include multiple="1"> + <source>mga-sysadmin</source> + </owner_include> + <editor_include multiple="1"> + <source>mga-ml_moderators</source> + </editor_include> + <topic><%= topics %></topic> +</list> diff --git a/modules/sympa/templates/scenari/sender.restricted b/modules/sympa/templates/scenari/sender.restricted new file mode 100644 index 00000000..66139e6c --- /dev/null +++ b/modules/sympa/templates/scenari/sender.restricted @@ -0,0 +1,17 @@ +title.gettext restricted list + +<%- if @ldap_group -%> +search(<%= @ldap_group %>.ldap) smtp,md5,smime -> do_it +<%- end -%> +<%- if @email -%> + <%- for e in @email -%> +equal([sender], '<%= e %>') smtp,md5,smime -> do_it + <%- end -%> +<%- end -%> +<%- if allow_subscriber -%> +equal([sender], 'sysadmin@group.mageia.org') smtp,smime,md5 -> do_it +match([sender], /@mageia\.org$/) smtp,smime,md5 -> do_it +is_subscriber([listname],[sender]) smtp,smime,md5 -> do_it +true() smime,md5 -> do_it +<%- end -%> +true() smtp,md5,smime -> reject(reason='send_subscriber') diff --git a/modules/sympa/templates/search_filters/group.ldap b/modules/sympa/templates/search_filters/group.ldap new file mode 100644 index 00000000..884e0db1 --- /dev/null +++ b/modules/sympa/templates/search_filters/group.ldap @@ -0,0 +1,9 @@ +host ldap.<%= domain %>:636 +bind_dn cn=sympa-<%= hostname %>,ou=System Accounts,<%= dc_suffix %> +bind_password <%= scope.lookupvar("sympa::server::ldap_password") %> +use_tls ldaps +ssl_version tlsv1_2 +ca_verify none +suffix ou=People,<%= dc_suffix %> +filter (&(mail=[sender])(memberOf=cn=<%= name %>,ou=Group,<%= dc_suffix %>)) +scope sub diff --git a/modules/sympa/templates/sympa.conf b/modules/sympa/templates/sympa.conf index a031da03..edfaba15 100644 --- a/modules/sympa/templates/sympa.conf +++ b/modules/sympa/templates/sympa.conf @@ -1,293 +1,627 @@ -###\\\\ Directories and file location ////### +###\\\\ Service description ////### -## Directory containing mailing lists subdirectories -home /var/lib/sympa +## Primary mail domain name +domain <%= vhost %> -## Directory for configuration files ; it also contains scenari/ and templates/ directories -etc /etc/sympa +## Email addresses of listmasters +## Email addresses of the listmasters (users authorized to perform global +## server commands). Some error reports may also be sent to these addresses. +## Listmasters can be defined for each virtual host, however, the default +## listmasters will have privileges to manage all virtual hosts. +listmaster listmaster@<%= vhost %> -## File containing Sympa PID while running. -## Sympa also locks this file to ensure that it is not running more than once. Caution : user sympa need to write access without special privilegee. -pidfile /var/run/sympa/sympa.pid +## Default language +## This is the default language used by Sympa. One of supported languages +## should be chosen. 
+lang en-US -pidfile_distribute /var/run/sympa/sympa-distribute.pid - -pidfile_creation /var/run/sympa/sympa-creation.pid - -pidfile_bulk /var/run/sympa/bulk.pid - -## Umask used for file creation by Sympa -umask 027 - -## Directory containing available NLS catalogues (Message internationalization) -localedir /usr/share/locale - -## The main spool containing various specialized spools -## All spool are created at runtime by sympa.pl -spool /var/spool/sympa - -## Incoming spool -queue /var/spool/sympa/msg - -## Bounce incoming spool -queuebounce /var/spool/sympa/bounce - -## Automatic list creation spool -queueautomatic /var/spool/sympa/automatic - -## -queuedigest /var/spool/sympa/digest - -## -queuemod /var/spool/sympa/moderation - -## -queuetopic /var/spool/sympa/topic - -## -queueauth /var/spool/sympa/auth - -## -queueoutgoing /var/spool/sympa/outgoing - -## -queuetask /var/spool/sympa/task - -## -queuesubscribe /var/spool/sympa/subscribe - -## URL to a virtual host. -http_host http://domain.tld - -## The directory where Sympa stores static contents (CSS, members pictures, documentation) directly delivered by Apache -static_content_path /var/lib/sympa/static_content - -## The URL mapped with the static_content_path directory defined above -static_content_url /static-sympa - -###\\\\ Syslog ////### - -## The syslog facility for sympa -## Do not forget to edit syslog.conf -syslog mail +## Supported languages +## All supported languages for the user interface. Languages proper locale +## information not installed are ignored. +supported_lang en_US + +## Title of service +## The name of your mailing list service. It will appear in the header of web +## interface and subjects of several service messages. +title Mageia Mailing lists service + +## Display name of Sympa +## This parameter is used for display name in the "From:" header field for the +## messages sent by Sympa itself. +gecos SYMPA + +## Support of legacy character set +## If set to "on", enables support of legacy character set according to +## charset.conf(5) configuration file. +## In some language environments, legacy encoding (character set) can be +## preferred for e-mail messages: for example iso-2022-jp in Japanese +## language. +legacy_character_support_feature off + +###\\\\ Database related ////### + +## Type of the database +## Possible types are "MySQL", "PostgreSQL", "Oracle", "Sybase" and "SQLite". +db_type PostgreSQL + +## Hostname of the database server +## With PostgreSQL, you can also use the path to Unix Socket Directory, e.g. +## "/var/run/postgresql" for connection with Unix domain socket. +db_host pg.<%= domain %> + +## Port of the database server +db_port 5432/tcp -## Communication mode with syslogd is either unix (via Unix sockets) or inet (use of UDP) -log_socket_type unix +## Name of the database +## With SQLite, this must be the full path to database file. With Oracle +## Database, this must be Oracle SID. +db_name sympa + +## User for the database connection +db_user sympa + +## Password for the database connection +## What ever you use a password or not, you must protect the SQL server (is it +## not a public internet service ?) +db_passwd <%= scope.lookupvar("sympa::server::pgsql_password") %> + +## Environment variables setting for database +## With Oracle Database, this is useful for defining ORACLE_HOME and NLS_LANG. 
+# db_env NLS_LANG=American_America.AL32UTF8;ORACLE_HOME=/u01/app/oracle/product/11.2.0/server + +## Database private extension to subscriber table +## Adds more fields to "subscriber_table" table. Sympa recognizes fields +## defined with this parameter. You will then be able to use them from within +## templates and scenarios: +## * for scenarios: [subscriber->field] +## * for templates: [% subscriber.field %] +## These fields will also appear in the list members review page and will be +## editable by the list owner. This parameter is a comma-separated list. +## You need to extend the database format with these fields +# db_additional_subscriber_fields billing_delay,subscription_expiration -## Log intensity -## 0 : normal, 2,3,4 for debug -log_level 0 +## Database private extension to user table +## Adds more fields to "user_table" table. Sympa recognizes fields defined +## with this parameter. You will then be able to use them from within +## templates: [% subscriber.field %] +## This parameter is a comma-separated list. +## You need to extend the database format with these fields +# db_additional_user_fields age,address -log_smtp off +###\\\\ System log ////### -## Number of months that elapse before a log is expired. -logs_expiration_period 3 +## System log facility for Sympa +## Do not forget to configure syslog server. +syslog mail -###\\\\ General definition ////### +## Communication mode with syslog server +log_socket_type unix -## Main robot hostname -domain ml.<%= domain %> +## Log verbosity +## Sets the verbosity of logs. +## 0: Only main operations are logged +## 3: Almost everything is logged. +log_level 0 -## Listmasters email list comma separated -## Sympa will associate listmaster privileges to these email addresses (mail and web interfaces). Some error reports may also be sent to these addresses. -listmaster listmaster@ml.<%= domain %> +###\\\\ Receiving ////### -## Local part of sympa email adresse -## Effective address will be \[EMAIL\]@\[HOST\] -email sympa +## Default maximum number of list members +## Default limit for the number of subscribers per list (0 means no limit). +default_max_list_members 0 -## Who is able to create lists -## This parameter is a scenario, check sympa documentation about scenarios if you want to define one -create_list public_listmaster +## Maximum size of messages +## Incoming messages smaller than this size is allowed distribution by Sympa. +max_size 5242880 -edit_list owner +## Reject mail sent from automated services to list +## Rejects messages that seem to be from automated services, based on a few +## header fields ("Content-Identifier:", "Auto-Submitted:"). +## Sympa also can be configured to reject messages based on the "From:" header +## field value (see "loop_prevention_regex"). +reject_mail_from_automates_feature off -###\\\\ Tuning ////### +## Priority for command messages +## Priority applied to messages sent to Sympa command address. +sympa_priority 1 -## Use of binary version of the list config structure on disk: none | binary_file -## Set this parameter to "binary_file" if you manage a big amount of lists (1000+) ; it should make the web interface startup faster -cache_list_config none +## Priority for messages bound for list owners +## Priority for processing of messages bound for "LIST-request" address, i.e. +## owners of the list +request_priority 0 -## Sympa commands priority -sympa_priority 1 +## Priority for non-VERP bounces +## Priority for processing of messages bound for "LIST-owner" address, i.e. 
+## non-delivery reports (bounces). +owner_priority 9 ## Default priority for list messages -default_list_priority 5 - -## Default timeout between two scheduled synchronizations of list members with data sources. -default_ttl 3600 - -## Default timeout between two action-triggered synchronizations of list members with data sources. -default_distribution_ttl 300 - -## Default priority for a packet to be sent by bulk. -sympa_packet_priority 5 - -request_priority 0 - -owner_priority 9 - -## The minimum number of packets in database before the bulk forks to increase sending rate -## -bulk_fork_threshold 1 - -## The max number of bulks that will run on the same server. -## -bulk_max_count 3 - -## the number of seconds a slave bulk will remain running without processing a message before it spontaneously dies. -## -bulk_lazytime 600 - -## The number of seconds a master bulk waits between two packets number checks. -## Keep it small if you expect brutal increases in the message sending load. -bulk_wait_to_fork 10 - -## the number of seconds a bulk sleeps between starting a new loop if it didn't find a message to send. +## Priority for processing of messages posted to list addresses. +default_list_priority 5 + +###\\\\ Sending related ////### + +## Header fields to be removed from incoming messages +## Use it, for example, to ensure some privacy for your users in case that +## "anonymous_sender" mode is inappropriate. +## The removal of these header fields is applied before Sympa adds its own +## header fields ("rfc2369_header_fields" and "custom_header"). +# was remove_headers ARRAY(0x4116e50) +remove_headers X-Sympa-To,X-Family-To,Return-Receipt-To,Precedence,X-Sequence,Disposition-Notification-To + +## RFC 2369 header fields +## Specify which RFC 2369 mailing list header fields to be added. +## "List-Id:" header field defined in RFC 2919 is always added. Sympa also +## adds "Archived-At:" header field defined in RFC 5064. +# was rfc2369_header_fields ARRAY(0x4116c88) +rfc2369_header_fields help,subscribe,unsubscribe,post,owner,archive + +## Default priority for a packet +## The default priority set to a packet to be sent by the bulk. +sympa_packet_priority 5 + +## Fork threshold of bulk daemon +## The minimum number of packets before bulk daemon forks the new worker to +## increase sending rate. +bulk_fork_threshold 1 + +## Maximum number of bulk workers +bulk_max_count 3 + +## Idle timeout of bulk workers +## The number of seconds a bulk worker will remain running without processing +## a message before it spontaneously exists. +bulk_lazytime 600 + +## Sleep time of bulk workers +## The number of seconds a bulk worker sleeps between starting a new loop if +## it didn't find a message to send. ## Keep it small if you want your server to be reactive. -bulk_sleep 1 - -## Secret used by Sympa to make MD5 fingerprint in web cookies secure -## Should not be changed ! 
May invalid all user password -#cookie 123456789 - -## If set to "on", enables support of legacy characters -## -legacy_character_support_feature off - -## The default maximum size (in bytes) for messages (can be re-defined for each list) -max_size 5242880 - -## comma separated list of operations for which blacklist filter is applied -## Setting this parameter to "none" will hide the blacklist feature -use_blacklist send,create_list - -## Specify which rfc2369 mailing list headers to add -rfc2369_header_fields help,subscribe,unsubscribe,post,owner,archive - -## Specify header fields to be removed before message distribution -remove_headers X-Sympa-To,X-Family-To,Return-Receipt-To,Precedence,X-Sequence,Disposition-Notification-To - -bounce_warn_rate 30 +bulk_sleep 1 -bounce_halt_rate 50 - -###\\\\ Internationalization ////### - -## Default lang (ca | cs | de | el | es | et_EE | en_US | fr | fi | hu | it | ja_JP | ko | nl | nb_NO | oc | pl | pt_BR | ru | sv | tr | vi | zh_CN | zh_TW) -## This is the default language used by Sympa -lang en_US - -## Supported languages -## This is the set of language that will be proposed to your users for the Sympa GUI. Don't select a language if you don't have the proper locale packages installed. -supported_lang ca,cs,de,el,es,et_EE,en_US,fr,fi,hu,it,ja_JP,ko,nl,nb_NO,oc,pl,pt_BR,ru,sv,tr,vi,zh_CN,zh_TW +## Interval between checks of packet numbers +## Number of seconds a master bulk daemon waits between two packets number +## checks. +## Keep it small if you expect brutal increases in the message sending load. +bulk_wait_to_fork 10 + +## Path to sendmail +## Absolute path to sendmail command line utility (e.g.: a binary named +## "sendmail" is distributed with Postfix). +## Sympa expects this binary to be sendmail compatible (exim, Postfix, qmail +## and so on provide it). Sympa also bundles "sympa_smtpc" program which may +## be a replacement to sendmail binary. +sendmail /usr/sbin/sendmail + +## Log invocation of sendmail +## This can be overwritten by "-m" option for sympa.pl. +log_smtp off + +## Maximum number of sendmail processes +## Maximum number of simultaneous child processes spawned by Sympa. This is +## the main load control parameter. +## Proposed value is quite low, but you can rise it up to 100, 200 or even 300 +## with powerful systems. +maxsmtp 40 + +## Maximum number of recipients per call to sendmail +## This grouping factor makes it possible for the sendmail processes to +## optimize the number of SMTP sessions for message distribution. If needed, +## you can limit the number of recipients for a particular domain. Check the +## "nrcpt_by_domain.conf" configuration file. +nrcpt 25 + +## Maximum number of different mail domains per call to sendmail +avg 10 + +###\\\\ Privileges ////### -###\\\\ Errors management ////### +## Who is able to create lists +## Defines who can create lists (or request list creation) by creating new +## lists or by renaming or copying existing lists. +create_list forbidden + +## Use blacklist +## List of operations separated by comma for which blacklist filter is +## applied. Setting this parameter to "none" will hide the blacklist feature. +use_blacklist send,create_list + +## List of required domains for list owner addresses +## Restrict list ownership to addresses in the specified domains. This can be +## used to reserve list ownership to a group of trusted users from a set of +## domains associated with an organization, while allowing editors and +## subscribers from the Internet at large. 
+# owner_domain domain1.tld domain2.tld + +## Minimum number of list owners that must match the owner_domain restriction +## Minimum number of list owners that must satisfy the owner_domain +## restriction. The default of zero (0) means *all* list owners must match. +## Setting to 1 requires only one list owner to match owner_domain; all other +## owners can be from any domain. This setting can be used to ensure that +## there is always at least one known contact point for a mailing list. +owner_domain_min 0 + +###\\\\ Archives ////### + +## Store distributed messages into archive +## If enabled, messages distributed via lists will be archived. Otherwise +## archiving is disabled. +## Note that even if this parameter is disabled, past archives will not +## be removed and will remain accessible according to the access settings of +## each list. +process_archive on + +## Path to MHonArc mail-to-HTML converter +## This is required for HTML mail archiving. +mhonarc /usr/bin/mhonarc + +# The Sympa website needs to be protected against spambots +spam_protection javascript + +# The same as spam_protection, but restricted to the web archive. +web_archive_spam_protection cookie + +###\\\\ Bounce management and tracking ////### + +## Default bounce warn rate +## The list owner receives a warning whenever a message is distributed and the +## number (percentage) of bounces exceeds this value. +bounce_warn_rate 30 + +## Default bounce halt rate +## NOT USED YET. If the bounce rate reaches the halt_rate, messages for the list +## will be halted, i.e. they are retained for subsequent moderation. +bounce_halt_rate 50 + +## Remove bouncing new subscribers +## If set to unique, the welcome message is sent using a unique return path in +## order to remove the subscriber immediately in the case of a bounce. +welcome_return_path owner + +## Remove subscribers bouncing remind message +## Same as welcome_return_path, but applied to remind messages. +remind_return_path owner + +## Task for expiration of old bounces +## This task resets bouncing information for addresses not bouncing in the +## last 10 days after the latest message distribution. +expire_bounce_task daily + +###\\\\ Automatic lists ////### + +## Definition of automatic list families +## Defines the families the automatic lists are based on. It is a character +## string structured as follows: +## * each family is separated from the others by a semicolon (;) +## * inside a family definition, each field is separated from the others by a +## colon (:) +## * each field has the structure: "<field name>=<field value>" +## Basically, each time Sympa uses the automatic list families, the values +## defined in this parameter will be available in the family object.
+## * for scenarios: [family->name] +## * for templates: [% family.name %] +# automatic_list_families name=family_one:prefix=f1:display=My automatic lists:prefix_separator=+:classes separator=-:family_owners_list=alist@domain.tld;name=family_two:prefix=f2:display=My other automatic lists:prefix_separator=+:classes separator=-:family_owners_list=anotherlist@domain.tld; + +## Parsed files for families +## comma-separated list of files that will be parsed by Sympa when +## instantiating a family (no space allowed in file names) +parsed_family_files message.footer,message.header,message.footer.mime,message.header.mime,info + +###\\\\ Tag based spam filtering ////### + +## Header field to tag spams +## If a spam filter (like spamassassin or j-chkmail) add a header field to tag +## spams, name of this header field (example X-Spam-Status) +antispam_tag_header_name X-Spam-Status + +## Regular expression to check header field to tag spams +## Regular expression applied on this header to verify message is a spam +## (example Yes) +antispam_tag_header_spam_regexp ^\s*Yes + +## Regular expression to determine spam or ham. +## Regular expression applied on this header field to verify message is NOT a +## spam (example No) +antispam_tag_header_ham_regexp ^\s*No + +## Name of header field to inform +## Messages are supposed to be filtered by an spam filter that add one more +## headers to messages. This parameter is used to select a special scenario in +## order to decide the message spam status: ham, spam or unsure. This +## parameter replace antispam_tag_header_name, antispam_tag_header_spam_regexp +## and antispam_tag_header_ham_regexp. +spam_status x-spam-status + +###\\\\ Directories ////### + +## List home +## Base directory of list configurations. +home /var/lib/sympa/expl + +## Directory for configuration files +## Base directory of global configuration (except "sympa.conf"). +etc /etc/sympa + +## Base directory of spools +## Base directory of all spools which are created at runtime. This directory +## must be writable by Sympa user. +spool /var/spool/sympa + +## Directory for message incoming spool +## This spool is used both by "queue" program and "sympa_msg.pl" daemon." +queue /var/spool/sympa/msg + +## Directory for moderation spool +queuemod /var/spool/sympa/moderation + +## Directory for digest spool +queuedigest /var/spool/sympa/digest + +## Directory for held message spool +## This parameter is named such by historical reason. +queueauth /var/spool/sympa/auth + +## Directory for archive spool +## This parameter is named such by historical reason. +queueoutgoing /var/spool/sympa/outgoing + +## Directory for held request spool +## This parameter is named such by historical reason. +queuesubscribe /var/spool/sympa/subscribe + +## Directory for topic spool +queuetopic /var/spool/sympa/topic + +## Directory for bounce incoming spool +## This spool is used both by "bouncequeue" program and "bounced.pl" daemon. +queuebounce /var/spool/sympa/bounce + +## Directory for task spool +queuetask /var/spool/sympa/task + +## Directory for automatic list creation spool +## This spool is used both by "familyqueue" program and "sympa_automatic.pl" +## daemon. +queueautomatic /var/spool/sympa/automatic + +## Directory for message outgoing spool +## This parameter is named such by historical reason. +queuebulk /var/spool/sympa/bulk + +## Directory to cache formatted messages +## Base directory path of directories where HTML view of messages are cached. 
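The spool parameters above all point below /var/spool/sympa. Sympa's packages normally create these directories themselves; the sketch below is purely illustrative (this resource is not part of the commit, and the ownership and mode shown are assumptions), showing how a few of them could be managed explicitly from Puppet:

# Illustrative sketch only -- not part of this commit. The sympa package
# normally ships these directories; owner/group/mode are assumptions.
$sympa_spool_dirs = [
  '/var/spool/sympa',
  '/var/spool/sympa/msg',
  '/var/spool/sympa/bulk',
  '/var/spool/sympa/bounce',
]

file { $sympa_spool_dirs:
  ensure  => directory,
  owner   => 'sympa',
  group   => 'sympa',
  mode    => '0750',
  require => Package['sympa'],
}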
+viewmail_dir /var/spool/sympa/viewmail + +## Directory for storing bounces +## The directory where bounced.pl daemon will store the last bouncing message +## for each user. A message is stored in the file: <bounce_path>/<list +## name>@<mail domain name>/<email address>, or, if tracking is enabled: +## <bounce_path>/<list name>@<mail domain name>/<email address>_<envelope ID>. +## Users can access to these messages using web interface in the bounce +## management page. +## Don't confuse with "queuebounce" parameter which defines the spool where +## incoming error reports are stored and picked by bounced.pl daemon. +bounce_path /var/lib/sympa/bounce + +## Directory for storing archives +## Where to store HTML archives. This parameter is used by the "archived.pl" +## daemon. It is a good idea to install the archive outside the web document +## hierarchy to ensure accesses passing WWSympa's access control will be +## prevented. +arc_path /var/lib/sympa/arc + +###\\\\ Miscellaneous ////### + +## Local part of Sympa email address +## Local part (the part preceding the "@" sign) of the address by which mail +## interface of Sympa accepts mail commands. +## If you change the default value, you must modify the mail aliases too. +email sympa + +## Custom robot parameter +## Used to define a custom parameter for your server. Do not forget the +## semicolon between the parameter name and the parameter value. +## You will be able to access the custom parameter value in web templates by +## variable "conf.custom_robot_parameter.<param_name>" +# custom_robot_parameter param_name ; param_value + +## Use of binary cache of list configuration +## binary_file: Sympa processes will maintain a binary version of the list +## configuration, "config.bin" file on local disk. If you manage a big amount +## of lists (1000+), it should make the web interface startup faster. +## You can recreate cache by running "sympa.pl --reload_list_config". +cache_list_config none + +## Max age of logs in database +## Number of months that elapse before a log is expired +logs_expiration_period 3 + +## Umask +## Default mask for file creation (see umask(2)). Note that it will be +## interpreted as an octal value. +umask 027 + +## Secret string for generating unique keys +## This allows generated authentication keys to differ from a site to another. +## It is also used for encryption of user passwords stored in the database. +## The presence of this string is one reason why access to "sympa.conf" needs +## to be restricted to the "sympa" user. +## Note that changing this parameter will break all HTTP cookies stored in +## users' browsers, as well as all user passwords and lists X509 private keys. +## To prevent a catastrophe, Sympa refuses to start if this "cookie" parameter +## was changed. +# cookie 123456789 + +###\\\\ Web interface parameters ////### + +## URL prefix of web interface +## This is used to construct URLs of web interface. +wwsympa_url https://<%= vhost %>/l + +## URL prefix of WWSympa behind proxy +#http_host http://domain.tld + +## URL for static contents +## HTTP server have to map it with "static_content_path" directory. +static_content_url /static-sympa +css_url /static-sympa/css +pictures_url /static-sympa/pictures + +## Directory for static contents +static_content_path /var/lib/sympa/static_content +css_path /var/lib/sympa/static_content/css +pictures_path /var/lib/sympa/static_content/pictures + +## System log facility for web interface +## System log facility for WWSympa, archived.pl and bounced.pl. 
Default is to +## use value of "syslog" parameter. +log_facility LOCAL1 + +###\\\\ Web interface parameters: Appearances ////### + +## Type of main web page +## "lists" for the page of list of lists. "home" for home page. +default_home lists + +## Default index organization of web archive +## thrd: Threaded index. +## mail: Chronological index. +archive_default_index thrd + +## Size of review page +## Default number of lines of the array displaying users in the review page +review_page_size 25 + +## Size of viewlogs page +## Default number of lines of the array displaying the log entries in the logs +## page. +viewlogs_page_size 25 + +###\\\\ Web interface parameters: Miscellaneous ////### + +## HTTP cookies validity domain +## If beginning with a dot ("."), the cookie is available within the specified +## Internet domain. Otherwise, for the specified host. The only reason for +## replacing the default value would be where WWSympa's authentication process +## is shared with an application running on another host. +cookie_domain <%= vhost %> + +## HTTP cookies lifetime +## This is the default value when not set explicitly by users. "0" means the +## cookie may be retained during browser session. +cookie_expire 0 + +## Average interval to refresh HTTP session ID. +cookie_refresh 60 + +## Use HTML editor +## If set to "on", users will be able to post messages in HTML using a +## javascript WYSIWYG editor. +use_html_editor 0 + +## URL of HTML editor +## URL path to the javascript file making the WYSIWYG HTML editor available. +## Relative path under <static_content_url> or absolute path. +## Example is for TinyMCE 4 installed under <static_content_path>/js/tinymce/. +# html_editor_url js/tinymce/tinymce.min.js + +## HTML editor initialization +## Javascript excerpt that enables and configures the WYSIWYG HTML editor. +# html_editor_init tinymce.init({selector:"#body",language:lang.split(/[^a-zA-Z]+/).join("_")}); + +## Count limit of wrong password submission +## If this limit is reached, the account is locked until the user renews their +## password. The default value is chosen in order to block bots trying to log +## in using brute force strategy. This value should never be reached by real +## users that will probably uses the renew password service before they +## performs so many tries. +max_wrong_password 19 + +## Password case +## "insensitive" or "sensitive". +## If set to "insensitive", WWSympa's password check will be insensitive. This +## only concerns passwords stored in the Sympa database, not the ones in LDAP. +## Should not be changed! May invalid all user password. +password_case insensitive + +###\\\\ S/MIME and TLS ////### -## Bouncing email rate for warn list owner -#bounce_warn_rate 20 +## Password used to crypt lists private keys +## If not defined, Sympa assumes that list private keys are not encrypted. +# key_passwd your_password -## Bouncing email rate for halt the list (not implemented) -## Not yet used in current version, Default is 50 -#bounce_halt_rate 50 +## Directory containing user certificates +ssl_cert_dir /var/lib/sympa/X509-user-certs -## Task name for expiration of old bounces -#expire_bounce_task daily +###\\\\ Data sources setup ////### -## Welcome message return-path -## If set to unique, new subcriber is removed if welcome message bounce -#welcome_return_path unique +## Default of SQL fetch timeout +## Default timeout while performing a fetch with include_sql_query. 
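Values such as wwsympa_url, cookie_domain and (further down) dkim_signer_domain interpolate an ERB variable named vhost, and the whole file is rendered as a template by a manifest that is not part of this excerpt. The following Puppet sketch is therefore only an assumption of the usual wiring; the class, template path, package and service names are guesses:

# Hypothetical sketch -- the real modules/sympa manifest is not shown in this
# diff; class, template path, package and service names are assumptions.
class sympa {
  $vhost = "ml.${::domain}"   # value picked up by <%= vhost %> in the template

  package { 'sympa': }

  file { '/etc/sympa/sympa.conf':
    owner   => 'root',
    group   => 'sympa',
    mode    => '0640',
    content => template('sympa/sympa.conf'),
    require => Package['sympa'],
    notify  => Service['sympa'],
  }

  service { 'sympa':
    ensure  => running,
    require => Package['sympa'],
  }
}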
+default_sql_fetch_timeout 300 -###\\\\ MTA related ////### +###\\\\ DKIM ////### -## Path to the MTA (sendmail, postfix, exim or qmail) -## should point to a sendmail-compatible binary (eg: a binary named "sendmail" is distributed with Postfix) -sendmail /usr/sbin/sendmail +## Enable DKIM +## If set to "on", Sympa may verify DKIM signatures of incoming messages and/ +## or insert DKIM signature to outgoing messages. +dkim_feature off -## Maximum number of recipients per call to Sendmail. The nrcpt_by_domain.conf file allows a different tuning per destination domain. -nrcpt 25 +## Which service messages to be signed +## Inserts a DKIM signature to service messages in context of robot, list or +## both +dkim_add_signature_to robot,list -## Max. number of different domains per call to Sendmail -avg 10 +## The "d=" tag as defined in rfc 4871 +## The DKIM "d=" tag, is the domain of the signing entity. Default is virtual +## host domain name +dkim_signer_domain <%= vhost %> -## Max. number of Sendmail processes (launched by Sympa) running simultaneously -## Proposed value is quite low, you can rise it up to 100, 200 or even 300 with powerfull systems. -maxsmtp 40 +## Rewrite header for DKIM signed messages and DMARC rejecting domains +dmarc_protection_mode dkim_signature,dmarc_reject -###\\\\ Plugin ////### +###\\\\ Antivirus plug-in ////### ## Path to the antivirus scanner engine -## supported antivirus : McAfee/uvscan, Fsecure/fsav, Sophos, AVP and Trend Micro/VirusWall -#antivirus_path /usr/local/uvscan/uvscan - -## Antivirus pluggin command argument -#antivirus_args --secure --summary --dat /usr/local/uvscan +## Supported antivirus: Clam AntiVirus/clamscan & clamdscan, McAfee/uvscan, +## Fsecure/fsav, Sophos, AVP and Trend Micro/VirusWall +# antivirus_path /usr/local/bin/clamscan -###\\\\ S/MIME pluggin ////### +## Antivirus plugin command line arguments +# antivirus_args --no-summary --database /usr/local/share/clamav -## Path to OpenSSL -## Sympa knowns S/MIME if openssl is installed -#openssl /usr/bin/ssl +###\\\\ Password validation ////### -## The directory path use by OpenSSL for trusted CA certificates -#capath /etc/sympa/ssl.crt +## Password validation +## The password validation techniques to be used against user passwords that +## are added to mailing lists. Options come from Data::Password +## (https://search.cpan.org/~razinf/Data-Password-1.07/Password.pm#VARIABLES) +# password_validation MINLEN=8,GROUPS=3,DICTIONARY=4,DICTIONARIES=/pentest/dictionaries -## This parameter sets the all-in-one file where you can assemble the Certificates of Certification Authorities (CA) -#cafile /usr/local/apache/conf/ssl.crt/ca-bundle.crt - -## User CERTs directory -ssl_cert_dir /var/lib/sympa/X509-user-certs - -crl_dir /var/lib/sympa/crl - -## Password used to crypt lists private keys -#key_passwd your_password - -###\\\\ Database ////### - -## Database type (mysql | Pg | Oracle | Sybase | SQLite) -## be carefull to the case -db_type Pg - -## Name of the database -## with SQLite, the name of the DB corresponds to the DB file -db_name sympa - -## The host hosting your sympa database -db_host pgsql.<%= domain %> - -## The database port -db_port 5432/tcp - -## Database user for connexion -db_user sympa - -## Database password (associated to the db_user) -## What ever you use a password or not, you must protect the SQL server (is it a not a public internet service ?) 
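The database block being removed here (db_type, db_name, db_host, db_user, db_passwd) used to carry the connection settings inline. For comparison only: the transifex manifest later in this diff provisions its database through the postgresql::remote_db_and_user defined type, and a purely hypothetical equivalent for Sympa (not something this commit adds, and the extlookup key is made up) could look like:

# Hypothetical only -- this commit does not add such a resource for Sympa.
# Mirrors the pattern used by the transifex manifest further down; the
# 'sympa_pgsql' extlookup key is invented for the example.
$sympa_pgsql_password = extlookup('sympa_pgsql', 'x')

postgresql::remote_db_and_user { 'sympa':
  description => 'Sympa database',
  password    => $sympa_pgsql_password,
}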
-db_passwd <%= password %> - -## Database private extention to user table -## You need to extend the database format with these fields -#db_additional_user_fields age,address - -## Database private extention to subscriber table -## You need to extend the database format with these fields -#db_additional_subscriber_fields billing_delay,subscription_expiration +###\\\\ Authentication with LDAP ////### -###\\\\ Web interface ////### +## Use canonical email address for LDAP authentication +## When using LDAP authentication, if the identifier provided by the user was +## a valid email, if this parameter is set to false, then the provided email +## will be used to authenticate the user. Otherwise, use of the first email +## returned by the LDAP server will be used. +ldap_force_canonical_email 1 -## Sympa's main page URL -wwsympa_url http://ml.<%= domain %>/ +###\\\\ Obsoleted parameters ////### -## If a spam filter (like spamassassin or j-chkmail) add a smtp headers to tag spams, name of this header (example X-Spam-Status) -antispam_tag_header_name X-Spam-Status +## Default timeout between two scheduled synchronizations of list members with +## data sources. +default_ttl 3600 -## The regexp applied on this header to verify message is a spam (example \s*Yes) -antispam_tag_header_spam_regexp ^\s*Yes +## Default timeout between two action-triggered synchronizations of list +## members with data sources. +default_distribution_ttl 300 -## The regexp applied on this header to verify message is NOT a spam (example \s*No) -antispam_tag_header_ham_regexp ^\s*No +edit_list owner -# Disable alias management, already managed in postfix -sendmail_aliases none +## Enable FastCGI +## Is FastCGI module for HTTP server installed. This module provide much +## faster web interface. +use_fast_cgi 1 +# Upgrade from 6.2.40 to 6.2.42 +# 22 May 2019 at 21:22:06 +shared_feature on diff --git a/modules/sympa/templates/vhost_ml.conf b/modules/sympa/templates/vhost_ml.conf index bd98b175..11aa7ae5 100644 --- a/modules/sympa/templates/vhost_ml.conf +++ b/modules/sympa/templates/vhost_ml.conf @@ -1,10 +1,20 @@ -<VirtualHost *:80> - ServerName ml.<%= domain %> -<%- -path_cgi_directory = "/usr/lib" + ( architecture == "x86_64" ? '64' : '') + "/sympa/cgi" --%> - DocumentRoot <%= path_cgi_directory %> - <Location /> - Allow from all - </Location> -</VirtualHost> + RewriteEngine On + RewriteRule ^/?$ /l/home [R] + RewriteRule ^/l$ /l/ + RewriteRule ^/l/(.*)$ /wwsympa-wrapper.fcgi/$1 + + DocumentRoot <%= lib_dir + "/sympa/cgi" %> + + Alias /static-sympa /var/lib/sympa/static_content + + <Directory /var/lib/sympa/static_content> + <IfModule mod_authz_core.c> + # Apache 2.4 + Require all granted + </IfModule> + <IfModule !mod_authz_core.c> + # Apache 2.2 + Order allow,deny + Allow from all + </IfModule> + </Directory> diff --git a/modules/sympa/templates/webapp_sympa.conf b/modules/sympa/templates/webapp_sympa.conf index 84debe38..1a508199 100644 --- a/modules/sympa/templates/webapp_sympa.conf +++ b/modules/sympa/templates/webapp_sympa.conf @@ -1,11 +1,16 @@ -<%- -path_cgi_directory = "/usr/lib" + ( architecture == "x86_64" ? 
'64' : '') + "/sympa/cgi" --%> -<Directory <%= path_cgi_directory %> > - Options ExecCGI - AddHandler fastcgi-script .fcgi +<Directory <%= lib_dir + "/sympa/cgi" %> > + SetHandler fcgid-script + Options +ExecCGI + AddHandler cgi-script .fcgi DirectoryIndex wwsympa-wrapper.fcgi - Order allow,deny - Allow from all + <IfModule mod_authz_core.c> + # Apache 2.4 + Require all granted + </IfModule> + <IfModule !mod_authz_core.c> + # Apache 2.2 + Order allow,deny + Allow from all + </IfModule> </Directory> diff --git a/modules/testvm/manifests/init.pp b/modules/testvm/manifests/init.pp index 93376e45..d8ca9564 100644 --- a/modules/testvm/manifests/init.pp +++ b/modules/testvm/manifests/init.pp @@ -1,33 +1,40 @@ class testvm { - $testvm_login = "testvm" - $testvmdir = "/home/testvm" + $testvm_login = 'testvm' + $testvmdir = '/home/testvm' - group {"$testvm_login": - ensure => present, + group {"${testvm_login}": + ensure => present, } - user {"$testvm_login": - ensure => present, - comment => "System user used to run test VMs", - managehome => true, - gid => $vmtest_login, - shell => "/bin/bash", + user {"${testvm_login}": + ensure => present, + comment => "System user used to run test VMs", + managehome => true, + gid => $vmtest_login, + shell => '/bin/bash', } - file { "$testvmdir/bin/_vm": - ensure => present, - owner => root, - group => root, - mode => 644, - source => "puppet:///modules/testvm/_vm", + file { "${testvmdir}/bin/": + ensure => directory, + require => User[$testvm_login], } - file { "$testvmdir/bin/vm-jonund": - ensure => present, - owner => root, - group => $testvm_login, - mode => 750, - source => "puppet:///modules/testvm/vm-jonund", + file { "${testvmdir}/bin/_vm": + ensure => present, + owner => root, + group => root, + mode => '0644', + source => "puppet:///modules/testvm/_vm", + require => File["${testvmdir}/bin"], + } + + file { "${testvmdir}/bin/vm-jonund": + ensure => present, + owner => root, + group => $testvm_login, + mode => '0750', + source => "puppet:///modules/testvm/vm-jonund", + require => File["${testvmdir}/bin"], } } diff --git a/modules/timezone/manifests/init.pp b/modules/timezone/manifests/init.pp index 0f33093a..67682f49 100644 --- a/modules/timezone/manifests/init.pp +++ b/modules/timezone/manifests/init.pp @@ -1,8 +1 @@ - -class timezone { - define timezone() { - file { "/etc/localtime": - ensure => "/usr/share/zoneinfo/$name" - } - } -} +class timezone {} diff --git a/modules/timezone/manifests/timezone.pp b/modules/timezone/manifests/timezone.pp new file mode 100644 index 00000000..8f3298a2 --- /dev/null +++ b/modules/timezone/manifests/timezone.pp @@ -0,0 +1,6 @@ +define timezone::timezone() { + file { '/etc/localtime': + ensure => link, + target => "/usr/share/zoneinfo/${name}" + } +} diff --git a/modules/transifex/manifests/init.pp b/modules/transifex/manifests/init.pp index 32069430..282b3f9a 100644 --- a/modules/transifex/manifests/init.pp +++ b/modules/transifex/manifests/init.pp @@ -1,28 +1,89 @@ class transifex { - package { 'transifex': - ensure => installed - } - - $password = extlookup("transifex_password") - file { "20-engines.conf": - path => "/etc/transifex/20-engines.conf", - ensure => present, - owner => root, - group => apache, - mode => 640, - content => template("transifex/20-engines.conf") - } - - file { "30-site.conf": - path => "/etc/transifex/30-site.conf", - ensure => present, - owner => root, - group => root, - mode => 644, - content => template("transifex/30-site.conf") - } - -# apache::vhost_django_app { "transifex.$domain": 
-# module => "transifex" -# } + include django_application + + package { 'transifex': } + + $pgsql_password = extlookup('transifex_pgsql','x') + $ldap_password = extlookup('transifex_ldap','x') + + $templates_dir = '/var/lib/transifex/templates' + + postgresql::remote_db_and_user { 'transifex': + description => 'Transifex database', + password => $pgsql_password, + } + + define config() { + $filename = $name + + file { "/etc/transifex/${filename}": + group => 'apache', + mode => '0640', + require => Package['transifex'], + notify => Service['apache'], + content => template("transifex/${filename}"), + } + } + + config { ['20-engines.conf', + '30-site.conf', + '40-apps.conf', + '45-ldap.conf', + '50-project.conf']: } + + git::snapshot { $templates_dir: + source => "git://git.${::domain}/web/templates/transifex" + } + + apache::vhost::django_app { "transifex.${::domain}": + module => 'transifex', + use_ssl => true, + module_path => ['/usr/share/transifex','/usr/share','/usr/local/lib/'], + aliases => { '/site_media/static/admin/' => '/usr/lib/python2.6/site-packages/django/contrib/admin/media/', }, + } + + # tx need write access there when running in apache + file { '/var/lib/transifex/scratchdir/storage_files': + ensure => directory, + owner => 'apache', + group => 'apache', + require => Package['transifex'], + } + + apache::vhost::redirect_ssl { "transifex.${::domain}": } + + # the group are mapped from ldap, since AUTH_LDAP_FIND_GROUP_PERMS is set to yes + # but the group need to exist in django first + django_application::create_group { ['mga-i18n','mga-i18n-committers']: + module => 'transifex', + path => '/usr/share/transifex:/usr/share', + } + + define committers_permission($app='') + { + # using django_application::add_permission_to_group may cause problem + # if we install a 2nd django application with the same permission name ( as it need + # to be unique ) + django_application::add_permission_to_group { $name: + app => $app, + group => 'mga-i18n-committers', + module => 'transifex', + path => '/usr/share/transifex:/usr/share', + require => Django_application::Create_group['mga-i18n-committers'], + } + } + + committers_permission {['add_project', + 'change_project', + 'delete_project']: } + + committers_permission {['add_release', + 'change_release', + 'delete_release']: } + + committers_permission {['add_resource', + 'change_resource', + 'delete_resource']: + app => 'resources', + } } diff --git a/modules/transifex/templates/20-engines.conf b/modules/transifex/templates/20-engines.conf index 1906a438..620a9556 100644 --- a/modules/transifex/templates/20-engines.conf +++ b/modules/transifex/templates/20-engines.conf @@ -3,14 +3,14 @@ ## Database configuration -# http://docs.djangoproject.com/en/dev/ref/settings/#database-engine +# https://docs.djangoproject.com/en/dev/ref/settings/#database-engine DATABASE_ENGINE = 'postgresql_psycopg2' # Use file path for sqlite3 DATABASE_NAME = 'transifex' # The following are not used for sqlite3 DATABASE_USER = 'transifex' -DATABASE_PASSWORD = '<%= password %>' -DATABASE_HOST = 'pgsql.<%= domain %>' # Set to empty string for local socket +DATABASE_PASSWORD = '<%= @pgsql_password %>' +DATABASE_HOST = 'pgsql.<%= @domain %>' # Set to empty string for local socket DATABASE_PORT = '' # Set to empty string for default ## Caching (optional) diff --git a/modules/transifex/templates/30-site.conf b/modules/transifex/templates/30-site.conf index 4d4e9e4c..3c386354 100644 --- a/modules/transifex/templates/30-site.conf +++ 
b/modules/transifex/templates/30-site.conf @@ -1,7 +1,7 @@ # Sites SITE_ID = 1 # Your site's domain. This is used only in this file. -SITE_DOMAIN = '<%= domain %>' +SITE_DOMAIN = '<%= @domain %>' ADMINS = ( # ('Your Name', 'your_email@domain.com'), diff --git a/modules/transifex/templates/40-apps.conf b/modules/transifex/templates/40-apps.conf new file mode 100644 index 00000000..dd92fb1c --- /dev/null +++ b/modules/transifex/templates/40-apps.conf @@ -0,0 +1,58 @@ +# Enable actionlog application +ACTIONLOG_ENABLED = True + +# Notifications +# Enable notifications (requires working email settings) +# TODO: Make notifications not crash the app if email sending doesn't work. +# To enable notices you also need to enable the context processor and +# application below. +ENABLE_NOTICES = True + +# If True it requires a `./manage.py emit_notices` from the command line to +# send the notifications/emails. +NOTIFICATION_QUEUE_ALL = True + +# Tagging +FORCE_LOWERCASE_TAGS = True + +# Registration - OpenID (Currently not used) +# Requires respective middleware and application +ENABLE_OPENID=False + +# Useful to work with another authentication backends +# When True the registration system (django-profile) is disabled +ENABLE_SIMPLEAUTH=True + +# Enable/Disable django-contact app URL. +ENABLE_CONTACT_FORM = True + +# Django-profile +AUTH_PROFILE_MODULE = 'txcommon.profile' +DEFAULT_AVATAR_WIDTH = 96 +AVATAR_WEBSEARCH = False +GOOGLE_MAPS_API_KEY = "ABQIAAAAfLle-Q79W6zCD3xcdCPsABQCULP4XOMyhPd8d_NrQQEO8sT8XBRbfo_kvrGWYPqQ7PnWFWJbDj4bQQ" +REQUIRE_EMAIL_CONFIRMATION = False + +ugettext = lambda s: s +LOGIN_URL = '/%s%s' % ('accounts/', 'login/') + +# Default timeout duration in days +# How many days should the user stay logged in if he selects "Stay signed in"? +LOGIN_DAYS = 21 + +# URL used to access the Django Admin Panel +# Ex. http://domain.com/admin/ +DJANGO_ADMIN_PANEL_URL = 'admin' + +# The directory where the vcs app will checkout stuff and play around. +# Warning: On production systems this should be a place outside of the source +# and with enough disk space. Eg. /var/lib/transifex. +# WARNING: Kept only for migration purposes. It will be removed in 1.1. +SCRATCH_DIR = os.path.join('/var/lib/transifex', 'scratchdir') + +AJAX_LOOKUP_CHANNELS = { + # the simplest case, pass a DICT with the model and field to search against : + 'users' : ('txcommon.lookups', 'UsersLookup'), + 'projects' : ('projects.lookups', 'ProjectsLookup'), + 'resources' : ('resources.lookups', 'ResourcesLookup'), +} diff --git a/modules/transifex/templates/45-ldap.conf b/modules/transifex/templates/45-ldap.conf new file mode 100644 index 00000000..2532edf5 --- /dev/null +++ b/modules/transifex/templates/45-ldap.conf @@ -0,0 +1,48 @@ +AUTHENTICATION_BACKENDS = ( + 'custom_backend.ForceUidLDAPBackend', + 'django.contrib.auth.backends.ModelBackend', +) + +# Use LDAP group membership to calculate group permissions. +AUTH_LDAP_FIND_GROUP_PERMS = True + +AUTH_LDAP_START_TLS = True + +# Cache group memberships for an hour to minimize LDAP traffic +AUTH_LDAP_CACHE_GROUPS = True +AUTH_LDAP_GROUP_CACHE_TIMEOUT = 3600 + +import ldap +from django_auth_ldap.config import LDAPSearch, GroupOfNamesType + + +# Baseline configuration. 
+AUTH_LDAP_SERVER_URI = "ldap://ldap.<%= @domain %> ldap://ldap-slave-1.<%= @domain %>" + +AUTH_LDAP_BIND_DN = "cn=transifex-<%= @hostname %>,ou=System Accounts,<%= @dc_suffix %>" +AUTH_LDAP_BIND_PASSWORD = "<%= @ldap_password %>" + +AUTH_LDAP_USER_SEARCH = LDAPSearch("ou=People,<%= @dc_suffix %> ", + ldap.SCOPE_SUBTREE, "(|(uid=%(user)s)(mail=%(user)s))") + +# Set up the basic group parameters. +AUTH_LDAP_GROUP_SEARCH = LDAPSearch("ou=Group,<%= @dc_suffix %>", + ldap.SCOPE_SUBTREE, "(objectClass=groupOfNames)" +) +AUTH_LDAP_GROUP_TYPE = GroupOfNamesType(name_attr="cn") + +# Only users in this group can log in. +#AUTH_LDAP_REQUIRE_GROUP = "cn=enabled,ou=groups,dc=example,dc=com" + +# Populate the Django user from the LDAP directory. +AUTH_LDAP_USER_ATTR_MAP = { + "first_name": "givenName", + "last_name": "sn", + "email": "mail" +} + +AUTH_LDAP_USER_FLAGS_BY_GROUP = { + "is_active": "cn=mga-i18n,ou=Group,<%= @dc_suffix %>", + "is_staff": "cn=mga-i18n-committers,ou=Group,<%= @dc_suffix %>", + "is_superuser": "cn=mga-sysadmin,ou=Group,<%= @dc_suffix %>" +} diff --git a/modules/transifex/templates/50-project.conf b/modules/transifex/templates/50-project.conf new file mode 100644 index 00000000..013741b2 --- /dev/null +++ b/modules/transifex/templates/50-project.conf @@ -0,0 +1,85 @@ +# List of callables that know how to import templates from various sources. +TEMPLATE_LOADERS = ( + 'django.template.loaders.filesystem.load_template_source', + 'django.template.loaders.app_directories.load_template_source', +# 'django.template.loaders.eggs.load_template_source', +) + +TEMPLATE_CONTEXT_PROCESSORS = [ + "django.core.context_processors.auth", + "django.core.context_processors.debug", + "django.core.context_processors.i18n", + "django.core.context_processors.media", + "django.core.context_processors.request", + "notification.context_processors.notification", + "staticfiles.context_processors.static_url", +] + +TEMPLATE_CONTEXT_PROCESSORS += ( + 'userprofile.context_processors.css_classes', + 'txcommon.context_processors.site_section', + 'txcommon.context_processors.bidi', +) + +MIDDLEWARE_CLASSES = [ + # Enable GZIP compression + 'django.middleware.gzip.GZipMiddleware', + 'django.middleware.common.CommonMiddleware', + # Enable protection against Cross Site Request Forgeries + # FIXME: Enable CSRF! 
+ 'django.contrib.sessions.middleware.SessionMiddleware', + 'django.contrib.auth.middleware.AuthenticationMiddleware', + 'django.middleware.locale.LocaleMiddleware', + 'django.middleware.doc.XViewMiddleware', + 'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware', + 'django_sorting.middleware.SortingMiddleware', +# 'django.middleware.transaction.TransactionMiddleware', + 'pagination.middleware.PaginationMiddleware', +] + +ROOT_URLCONF = 'urls' + +TEMPLATE_DIRS = [ + '<%= @templates_dir %>', + os.path.join(TX_ROOT, 'templates'), +] + +INSTALLED_APPS = [ + 'django.contrib.admin', + 'django.contrib.auth', + 'django.contrib.comments', + 'django.contrib.contenttypes', + 'django.contrib.flatpages', + 'django.contrib.markup', + 'django.contrib.sessions', + 'django.contrib.sites', + 'django.contrib.admindocs', + 'notification', + 'django_filters', + 'django_sorting', + 'south', + 'tagging', + 'pagination', + 'piston', + 'contact_form', + 'ajax_select', + 'threadedcomments', + 'staticfiles', + 'authority', + # Transifex specific apps: + 'transifex.txcommon', + # It's coming here due https://trac.transifex.org/ticket/596 + 'userprofile', + 'transifex.languages', + 'transifex.projects', + 'transifex.releases', + 'transifex.actionlog', + 'transifex.txpermissions', + 'transifex.teams', + 'transifex.resources', + 'transifex.storage', + # Must come in the end + 'django_addons', +] + +COMMENTS_APP = 'threadedcomments' diff --git a/modules/viewvc/files/robots.txt b/modules/viewvc/files/robots.txt new file mode 100644 index 00000000..dbb13834 --- /dev/null +++ b/modules/viewvc/files/robots.txt @@ -0,0 +1,29 @@ +User-agent: Googlebot +User-agent: Baiduspider +User-agent: bingbot +User-agent: YandexBot +User-agent: Mail.RU_Bot +User-agent: MJ12bot +User-agent: ClaudeBot +User-agent: Amazonbot +User-agent: PetalBot +User-agent: Bytespider +User-agent: facebookexternalhit +Disallow: /*/tags/ +Disallow: *?view=annotate* +Disallow: *?annotate=* +Disallow: *?view=diff* +Disallow: *?r1=* +Disallow: *sortby=* +Disallow: *sortdir=* +Disallow: *?revision=*&view=markup&* +Disallow: *pathrev=* +Disallow: *?*&view=log* +Disallow: *view=log&* +Disallow: *diff_format=* +User-agent: AhrefsBot +Disallow: / +User-agent: Sogou web spider +Disallow: / +User-agent: * +Crawl-delay: 30 diff --git a/modules/viewvc/files/setcookieredirect.html b/modules/viewvc/files/setcookieredirect.html new file mode 100644 index 00000000..fe98b9dc --- /dev/null +++ b/modules/viewvc/files/setcookieredirect.html @@ -0,0 +1,28 @@ +<!DOCTYPE html> +<html> + <head> + <title>User check</title> + <script type="text/javascript" defer> + const randomValue = "6436"; // Chosen by fair dice roll. Guaranteed to be random. + document.cookie = `session=${randomValue}; path=/; expires=${new Date(Date.now() + 24*3600*1000).toUTCString()}`; + const params = new Proxy(new URLSearchParams(window.location.search), { + get: (searchParams, prop) => searchParams.get(prop), + }); + let path = params.to; + // Sanitize redirect path to avoid malicious arbitrary redirects + if (/^\/[-a-zA-Z0-9~_.?&=/+]*$/.test(decodeURIComponent(path))) { + const current = new URL(window.location.toLocaleString()); + window.location.href = encodeURI(current.origin + decodeURIComponent(path)); + } else { + window.onload = function() { + document.getElementById('error').innerHTML = 'Error! Bad redirect location!'; + } + } + </script> + </head> + <body> + Redirecting back... 
+ <br> + <p id="error"><!-- space for error message --></p> + </body> +</html> diff --git a/modules/viewvc/manifests/init.pp b/modules/viewvc/manifests/init.pp new file mode 100644 index 00000000..bd676f29 --- /dev/null +++ b/modules/viewvc/manifests/init.pp @@ -0,0 +1,74 @@ +class viewvc { + include apache::mod::fcgid + include viewvc::var + package {['viewvc', + 'python2-svn', + 'python-flup']: } + + # http_expiration_time = 600 + # svn_roots = admin: svn://svn.mageia.org/svn/adm/ + + file { '/etc/viewvc/viewvc.conf': + content => template($viewvc::var::tmpl_viewvc_conf), + notify => Service['apache'], + require => Package['viewvc'], + } + + apache::webapp_other { 'viewvc': + webapp_file => 'viewvc/webapp.conf', + } + + mga_common::local_script { 'kill_viewvc': + content => template('viewvc/kill_viewvc.sh'), + } + + cron { 'kill_viewvc': + command => '/usr/local/bin/kill_viewvc', + hour => '*', + minute => '*/5', + user => 'apache', + environment => 'MAILTO=root', + } + + $viewvc_docroot = '/usr/share/viewvc/templates/docroot' + $robotsfile = "$viewvc_docroot/robots.txt" + file { $robotsfile: + ensure => present, + mode => '0644', + owner => root, + group => root, + source => 'puppet:///modules/viewvc/robots.txt', + } + + file { "$viewvc_docroot/setcookieredirect.html": + ensure => present, + mode => '0644', + owner => root, + group => root, + source => 'puppet:///modules/viewvc/setcookieredirect.html', + } + + $vhost_aliases = { + '/viewvc' => $viewvc_docroot, + '/robots.txt' => $robotsfile, + '/_check' => "$viewvc_docroot/setcookieredirect.html", + } + + $script_aliases = { + '/' => '/usr/share/viewvc/bin/wsgi/viewvc.fcgi/', + } + + $process = 4 + + apache::vhost::base { $viewvc::var::hostname: + aliases => $vhost_aliases, + content => template('apache/vhost_fcgid_norobot.conf'), + } + + apache::vhost::base { "ssl_${viewvc::var::hostname}": + vhost => $viewvc::var::hostname, + use_ssl => true, + aliases => $vhost_aliases, + content => template('apache/vhost_fcgid_norobot.conf'), + } +} diff --git a/modules/viewvc/manifests/var.pp b/modules/viewvc/manifests/var.pp new file mode 100644 index 00000000..9027d808 --- /dev/null +++ b/modules/viewvc/manifests/var.pp @@ -0,0 +1,9 @@ +# $hostname: +# vhost used by viewvc +# $tmpl_viewvc_conf: +# path to /etc/viewvc.conf template file +class viewvc::var( + $hostname = "svnweb.${::domain}", + $tmpl_viewvc_conf = 'viewvc/viewvc.conf' +) { +} diff --git a/modules/viewvc/templates/kill_viewvc.sh b/modules/viewvc/templates/kill_viewvc.sh new file mode 100755 index 00000000..7283a10c --- /dev/null +++ b/modules/viewvc/templates/kill_viewvc.sh @@ -0,0 +1,12 @@ +#!/bin/sh +# Kill viewvc if the process gets too large +max_memory=1000000 # size in KiB + +for process in $(pgrep viewvc.fcgi) +do + process_mem=$(pmap "$process" | grep total | sed 's/ \+total \+\([[:digit:]]\+\)K/\1/') + if [ -n "$process_mem" ] && [ "$process_mem" -gt "$max_memory" ] + then + kill -15 "$process" + fi +done diff --git a/modules/viewvc/templates/viewvc.conf b/modules/viewvc/templates/viewvc.conf new file mode 100644 index 00000000..dec74771 --- /dev/null +++ b/modules/viewvc/templates/viewvc.conf @@ -0,0 +1,1002 @@ +##--------------------------------------------------------------------------- +## +## Configuration file for ViewVC +## +## Information on ViewVC is located at the following web site: +## http://viewvc.org/ +## +##--------------------------------------------------------------------------- + +## THE FORMAT OF THIS CONFIGURATION FILE +## +## This file is delineated 
by sections, specified in [brackets]. Within +## each section, are a number of configuration settings. These settings +## take the form of: name = value. Values may be continued on the +## following line by indenting the continued line. +## +## WARNING: Indentation *always* means continuation. Name=value lines +## should always start in column zero. +## +## Comments should always start in column zero, and are identified +## with "#". By default each of the configuration items is +## commented out, with the default value of the option shown. +## You'll need to remove the '#' that precedes configuration +## options whose values you wish to modify. +## +## Certain configuration settings may have multiple values. These should +## be separated by a comma. The settings where this is allowed are noted +## below. Any other setting that requires special syntax is noted at that +## setting. +## +## +## SOME TERMINOLOGY USED HEREIN +## +## "root" - This is a CVS or Subversion repository. For Subversion, the +## meaning is pretty clear, as the virtual, versioned directory tree +## stored inside a Subversion repository looks nothing like the actual +## tree visible with shell utilities that holds the repository. For +## CVS, this is more confusing, because CVS's repository layout mimics +## (actually, defines) the layout of the stuff housed in the repository. +## But a CVS repository can be identified by the presence of a CVSROOT +## subdirectory in its root directory. +## +## "module" - A module is a top-level subdirectory of a root, usually +## associated with the concept of a single "project" among many housed +## within a single repository. +## +## +## BASIC VIEWVC CONFIGURATION HINTS +## +## While ViewVC has quite a few configuration options, you generally +## only need to change a small subset of them to get your ViewVC +## installation working properly. Here are some options that we +## recommend you pay attention to. Of course, don't try to change the +## options here -- do so in the relevant section of the configuration +## file below. +## +## For correct operation, you will probably need to change the following +## configuration variables: +## +## cvs_roots (for CVS) +## svn_roots (for Subversion) +## root_parents (for CVS or Subversion) +## default_root +## root_as_url_component +## rcs_dir +## mime_types_files +## the many options in the [utilities] section +## +## It is usually desirable to change the following variables: +## +## address +## forbidden +## +## To optimize delivery of ViewVC static files: +## +## docroot +## +## To customize the display of ViewVC for your site: +## +## template_dir +## the [templates] override section +## + +##--------------------------------------------------------------------------- +[general] + +## cvs_roots: Specifies each of the CVS roots on your system and +## assigns names to them. Each root should be given by a "name: path" +## value. Multiple roots should be separated by commas and can be +## placed on separate lines. +## +## Example: +## cvs_roots = cvsroot: /opt/cvs/repos1, +## anotherroot: /usr/local/cvs/repos2 +## +#cvs_roots = + +## svn_roots: Specifies each of the Subversion roots (repositories) on +## your system and assigns names to them. Each root should be given by +## a "name: path" value. Multiple roots should be separated by commas +## and can be placed on separate lines. 
+## +## Example: +## svn_roots = svnrepos: /opt/svn/, +## anotherrepos: /usr/local/svn/repos2 +## +#svn_roots = adm: /svn/adm/ +# web: /svn/web/ +# packages: /svn/packages/ +# soft: /svn/soft/ + +## root_parents: Specifies a list of directories in which any number of +## repositories may reside. Rather than force you to add a new entry +## to 'cvs_roots' or 'svn_roots' each time you create a new repository, +## ViewVC rewards you for organising all your repositories under a few +## parent directories by allowing you to simply specify just those +## parent directories. ViewVC will then notice each repository in that +## directory as a new root whose name is the subdirectory of the parent +## path in which that repository lives. +## +## You can specify multiple parent paths separated by commas or new lines. +## +## WARNING: these names can, of course, clash with names you have +## defined in your cvs_roots or svn_roots configuration items. If this +## occurs, you can either rename the offending repository on disk, or +## grant new names to the clashing item in cvs_roots or svn_roots. +## Each parent path is processed sequentially, so repositories under +## later parent paths may override earlier ones. +## +## Example: +## root_parents = /opt/svn : svn, +## /opt/cvs : cvs +## +root_parents = /svn : svn + +## default_root: This is the name of the default root. Valid names +## include those explicitly listed in the cvs_roots and svn_roots +## configuration options, as well as those implicitly indicated by +## virtue of being the basenames of repositories found in the +## root_parents option locations. +## +## NOTE: This setting is ignored when root_as_url_component is enabled. +## +## Example: +## default_root = cvsroot +## +#default_root = + +## mime_types_files: This is a list of pathnames to a set of MIME type +## mapping files to help ViewVC guess the correct MIME type of a +## versioned file. The pathnames listed here are specified in order of +## authoritativeness either as absolute paths or relative to this +## configuration file. +## +## As a convenience, ViewVC provides a MIME type mapping file +## (mimetypes.conf) which is, by default, the preferred provider of +## MIME type mapping answers, but which is also empty. If you find +## that ViewVC is unable to accurately guess MIME types based on the +## extensions of some of your versioned files, you can add records of +## your preferred mappings to the provided mimetypes.conf file (or to +## your system's mapping files, if you wish). +## +## You might, for example, wish to have ViewVC also consult the mapping +## files provided by your operating system and Apache. +## +## Example: +## mime_types_files = mimetypes.conf, +## /etc/mime.types, +## /usr/local/apache2/conf/mime.types +## +mime_types_files = /etc/viewvc/mimetypes.conf, /etc/httpd/conf/mime.types + +## address: The address of the local repository maintainer. (This +## option is provided only as a convenience for ViewVC installations +## which are using the default template set, where the value of this +## option will be displayed in the footer of every ViewVC page.) +## +## Example: +## address = admin@server.com +## +#address = + +## kv_files: Provides a mechanism for custom key/value pairs to be +## available to templates. These are stored in key/value (KV) files. +## +## The paths of the KV files are listed here, specified either as +## absolute paths or relative to this configuration file. 
The files +## use the same format as this configuration file, containing one or +## more user-defined sections, and user-defined options in those +## sections. ViewVC makes these options available to template authors +## as: +## +## kv.SECTION.OPTION +## +## Note that an option name can be dotted. For example: +## +## [my_images] +## logos.small = /images/small-logo.png +## logos.big = /images/big-logo.png +## +## Templates can use these with a directive like: [kv.my_images.logos.small] +## +## Note that section names which are common to multiple KV files will +## be merged. If two files have a [my_images] section, then the +## options in those two like-named sections will be merged together. +## If two files have the same option name in a section, then one will +## overwrite the other (and which one "wins" is unspecified). +## +## To further categorize the KV files, and how the values are provided to +## the templates, a KV file name may be annotated with an additional level +## of dotted naming. For example: +## +## kv_files = [asf]kv/images.conf +## +## Assuming the same section as above, the template would refer to an image +## using [kv.asf.my_images.logos.small] +## +## Lastly, it is possible to use %lang% in the filenames to specify a +## substitution of the selected language-tag. +## +## Example: +## kv_files = kv/file1.conf, kv/file2.conf, [i18n]kv/%lang%_data.conf +## +#kv_files = + +## This option is a comma-separated list of language-tag values +## available to ViewVC. The first language-tag listed is the default +## language, and will be used if an Accept-Language header is not +## present in the request, or none of the user's requested languages +## are available. If there are ties on the selection of a language, +## then the first to appear in the list is chosen. +## +## Example: +## languages = en-us, en-gb, de +## +#languages = en-us + + +##--------------------------------------------------------------------------- +[utilities] + +## ViewVC uses (sometimes optionally) various third-party programs to do some +## of the heavy lifting. Generally, it will attempt to execute those utility +## programs in such a way that if they are found in ViewVC's executable +## search path ($PATH, %PATH%, etc.) all is well. But sometimes these tools +## aren't installed in the executable search path, so here's where you can +## tell ViewVC where to find them. +## +## NOTE: Options with a "_dir" suffix are for configuring the +## directories in which certain programs live. Note that this might +## not be the same directory into which the program's installer dumped +## the whole program package -- we want the deepest directory in which +## the executable program itself resides ("C:\rcstools\bin\win32" +## rather than just "C:\rcstools", for example). The values of options +## whose names lack the "_dir" suffix should point to the actual +## program itself (such as "C:\Program Files\cvsnt\cvs.exe"). + + +## rcs_dir: Directory in which the RCS utilities are installed, used +## for viewing CVS repositories. +## +## Example: +## rcs_dir = /usr/bin/ +## +#rcs_dir = + +## cvsnt: Location of cvsnt program. ViewVC can use CVSNT (www.cvsnt.org) +## instead of the RCS utilities to retrieve information from CVS +## repositories. To enable use of CVSNT, set the "cvsnt" value to the +## path of the CVSNT executable. (If CVSNT is on the standard path, you +## can also set it to the name of the CVSNT executable). By default +## "cvsnt" is set to "cvs" on Windows and is not set on other platforms. 
+## +## Examples: +## cvsnt = K:\Program Files\cvsnt\cvs.exe +## cvsnt = /usr/bin/cvs +## cvsnt = cvs +## +#cvsnt = + +## svn: Location of the Subversion command-line client, used for +## viewing Subversion repositories. +## +## Example: +## svn = /usr/bin/svn +## +#svn = + +## diff: Location of the GNU diff program, used for showing file +## version differences. +## +## Example: +## diff = /usr/bin/diff +## +#diff = + +## cvsgraph: Location of the CvsGraph program, a graphical CVS version +## graph generator (see options.use_cvsgraph). +## +## Example: +## cvsgraph = /usr/local/bin/cvsgraph +## +#cvsgraph = + + +##--------------------------------------------------------------------------- +[options] + +## root_as_url_component: Interpret the first path component in the URL +## after the script location as the root to use. This is an +## alternative to using the "root=" query key. If ViewVC is configured +## with multiple repositories, this results in more natural looking +## ViewVC URLs. +## +## NOTE: Enabling this option will break backwards compatibility with +## any old ViewCVS URL which doesn't have an explicit "root" parameter. +## +#root_as_url_component = 1 + +## checkout_magic: Use checkout links with magic /*checkout*/ prefixes so +## checked out HTML pages can have working links to other repository files +## +## NOTE: This option is DEPRECATED and should not be used in new ViewVC +## installations. Setting "default_file_view = co" achieves the same effect +## +#checkout_magic = 0 + +## allowed_views: List the ViewVC views which are enabled. Views not +## in this comma-delimited list will not be served (or, will return an +## error on attempted access). +## Possible values: "annotate", "co", "diff", "markup", "roots", "tar" +## +allowed_views = annotate, diff, markup, roots, co + +## authorizer: The name of the ViewVC authorizer plugin to use when +## authorizing access to repository contents. This value must be the +## name of a Python module addressable as vcauth.MODULENAME (most +## easily accomplished by placing it in ViewVC's lib/vcauth/ directory) +## and which implements a ViewVCAuthorizer class (as a subclass of +## vcauth.GenericViewVCAuthorizer). You can provide custom parameters +## to the authorizer module by defining configuration sections named +## authz-MODULENAME and adding the parameter keys and values there. +## +## ViewVC provides the following modules: +## svnauthz - based on Subversion authz files +## forbidden - simple path glob matches against top-level root directories +## forbiddenre - root and path matches against regular expressions +## +## NOTE: Only one authorizer may be in use for a given ViewVC request. +## It doesn't matter if you configure the parameters of multiple +## authorizer plugins -- only the authorizer whose name is configured +## here (or effectively configured here via per-vhost or per-root +## configuration) will be activated. +## +#authorizer = + +## hide_cvsroot: Don't show the CVSROOT directory +## 1 Hide CVSROOT directory +## 0 Show CVSROOT directory +## +## NOTE: Someday this option may be removed in favor of letting +## individual authorizer plugin hide the CVSROOT. +## +#hide_cvsroot = 1 + +## mangle_email_addresses: Mangle email addresses in marked-up output. 
+## There are various levels of mangling available: +## 0 - No mangling; markup un-mangled email addresses as hyperlinks +## 1 - Obfuscation (using entity encoding); no hyperlinking +## 2 - Data-dropping address truncation; no hyperlinking +## +## NOTE: this will not effect the display of versioned file contents, only +## addresses that appear in version control metadata (e.g. log messages). +## +#mangle_email_addresses = 0 + +## default_file_view: "log", "co", or "markup" +## Controls whether the default view for file URLs is a checkout view or +## a log view. "log" is the default for backwards compatibility with old +## ViewCVS URLs, but "co" has the advantage that it allows ViewVC to serve +## static HTML pages directly from a repository with working links +## to other repository files +## +## NOTE: Changing this option may break compatibility with existing +## bookmarked URLs. +## +## ALSO NOTE: If you choose one of the "co" or "markup" views, be sure +## to enable it (via the allowed_views option) +## +#default_file_view = log + +## http_expiration_time: Expiration time (in seconds) for cacheable +## pages served by ViewVC. Note that in most cases, a cache aware +## client will only revalidate the page after it expires (using the +## If-Modified-Since and/or If-None-Match headers) and that browsers +## will also revalidate the page when the reload button is pressed. +## Set to 0 to disable the transmission of these caching headers. +## +http_expiration_time = 600 + +## generate_etags: Generate Etag headers for relevant pages to assist +## in browser caching. +## 1 Generate Etags +## 0 Don't generate Etags +## +#generate_etags = 1 + +## svn_ignore_mimetype: Don't consult the svn:mime-type property to +## determine how to display a file in the markup view. This is +## especially helpful when versioned images carry the default +## Subversion-calculated MIME type of "application/octet-stream" (which +## isn't recognized as viewable type by browsers). +## +#svn_ignore_mimetype = 0 + +## svn_config_dir: Path of the Subversion runtime configuration +## directory ViewVC should consult for various things, including cached +## remote authentication credentials. If unset, Subversion will use +## the default location(s) ($HOME/.subversion, etc.) +## +#svn_config_dir = + +## use_rcsparse: Use the rcsparse Python module to retrieve CVS +## repository information instead of invoking rcs utilities [EXPERIMENTAL] +## +#use_rcsparse = 0 + +## sort_by: File sort order +## file Sort by filename +## rev Sort by revision number +## date Sort by commit date +## author Sort by author +## log Sort by log message +## +#sort_by = file + +## sort_group_dirs: Group directories when sorting +## 1 Group directories together +## 0 No grouping -- sort directories as any other item would be sorted +## +#sort_group_dirs = 1 + +## hide_attic: Hide or show the contents of the Attic subdirectory +## 1 Hide dead files inside Attic subdir +## 0 Show the files which are inside the Attic subdir +## +#hide_attic = 1 + +## hide_errorful_entries: Hide or show errorful directory entries +## (perhaps due to not being readable, or some other rlog parsing +## error, etc.) 
+## 1 Hide errorful entries from the directory display +## 0 Show errorful entries (with their errors) in the directory display +## +#hide_errorful_entries = 0 + +## log_sort: Sort order for log messages +## date Sort revisions by date +## rev Sort revision by revision number +## none Use the version control system's ordering +## +#log_sort = date + +## diff_format: Default diff format +## h Human readable +## u Unified diff +## c Context diff +## s Side by side +## l Long human readable (more context) +## f Full human readable (entire file) +## +#diff_format = h + +## hr_breakable: Diff view line breaks +## 1 lines break at spaces +## 0 no line breaking +## Or, use a positive integer > 1 to cut lines after that many characters +## +#hr_breakable = 1 + +## hr_funout: Give out function names in human readable diffs. +## (Only works well for C source files, otherwise diff's heuristic falls short.) +## ('-p' option to diff) +## +#hr_funout = 0 + +## hr_ignore_white: Ignore whitespace (indentation and stuff) for human +## readable diffs. +## ('-w' option to diff) +## +#hr_ignore_white = 0 + +## hr_ignore_keyword_subst: Ignore diffs which are caused by keyword +## substitution (such as "$Id - Stuff"). +## ('-kk' option to rcsdiff) +## +#hr_ignore_keyword_subst = 1 + +## hr_intraline: Enable highlighting of intraline changes in human +## readable diffs. [Requires Python 2.4] +## +#hr_intraline = 0 + +## allow_compress: Allow compression via gzip of output if the Browser +## accepts it (HTTP_ACCEPT_ENCODING contains "gzip"). +## +## NOTE: this relies on Python's gzip module, which has proven to be +## not-so-performant. Enabling this feature should reduce the overall +## transfer size of ViewVC's responses to the client's request, but +## will do so with a speed penalty. +## +#allow_compress = 0 + +## template_dir: The directory which contains the EZT templates used by +## ViewVC to customize the display of the various output views. ViewVC +## looks in this directory for files with names that match the name of +## the view ("log", "directory", etc.) plus the ".ezt" extension. If +## specified as a relative path, it is relative to the directory where +## this config file resides; absolute paths may be used as well. If +## %lang% occurs in the pathname, then the selected language will be +## substituted. +## +## SEE ALSO: the [templates] configuration section, where you can +## override templates on a per-view basis. +## +template_dir = /usr/share/viewvc/templates/ + +## docroot: Web path to a directory that contains ViewVC static files +## (stylesheets, images, etc.) If set, static files will get +## downloaded directory from this location. If unset, static files +## will be served by the ViewVC script (at a likely performance +## penalty, and from the "docroot" subdirectory of the directory +## specified by the "template_dir" option). +## +## NOTE: This option is evaluated outside the context of a particular +## root. Be careful when using per-root configuration to select an +## alternate template set as the default value for this option will +## still be based on the global default template set per 'template_dir' +## above, not on 'template_dir' as overridden for a given root. +## +docroot = /viewvc + +## show_subdir_lastmod: Show last changelog message for CVS subdirectories +## +## NOTE: The current implementation makes many assumptions and may show +## the incorrect file at some times. The main assumption is that the +## last modified file has the newest filedate. 
But some CVS operations +## touch the file even when a new version is not checked in, +## and TAG based browsing essentially puts this out of order, unless +## the last checkin was on the same tag as you are viewing. Enable +## this if you like the feature, but don't rely on correct results. +## +## SECURITY WARNING: Enabling this will currently leak unauthorized +## path names. +## +#show_subdir_lastmod = 0 + +## show_logs: Show the most recent log entry in directory listings. +## +#show_logs = 1 + +## show_log_in_markup: Show log when viewing file contents. +## +#show_log_in_markup = 1 + +## cross_copies: Cross filesystem copies when traversing Subversion +## file revision histories. +## +#cross_copies = 1 + +## use_localtime: Display dates as UTC or in local time zone. +## +#use_localtime = 0 + +## short_log_len: The length (in characters) to which the most recent +## log entry should be truncated when shown in the directory view. +## +#short_log_len = 80 + +## enable_syntax_coloration: Should we colorize known file content +## syntaxes? [Requires Pygments Python module] +## +#enable_syntax_coloration = 1 + +## tabsize: The number of spaces into which tabstops are converted +## when viewing file contents. +## +#tabsize = 8 + +## detect_encoding: Should we attempt to detect versioned file +## character encodings? [Requires 'chardet' module, and is currently +## used only by the syntax coloration logic -- if enabled -- for the +## 'markup' and 'annotate' views; see 'enable_syntax_coloration'.] +## +#detect_encoding = 0 + +## use_cvsgraph: Use CvsGraph to offer visual graphs of CVS revision history. +## +#use_cvsgraph = 0 + +## cvsgraph_conf: Location of the customized cvsgraph configuration file. +## May be specified as an absolute path or as a path relative to this +## configuration file. +## +cvsgraph_conf = /etc/viewvc/cvsgraph.conf + +## use_re_search: Enable regular expression search of files in a directory. +## +## WARNING: Enabling this option can consume HUGE amounts of server +## time. A "checkout" must be performed on *each* file in a directory, +## and the result needs to be searched for a match against the regular +## expression. +## +## SECURITY WARNING: Since a user can enter the regular expression, it +## is possible for them to enter an expression with many alternatives +## and a lot of backtracking. Executing that search over thousands of +## lines over dozens of files can easily tie up a server for a long +## period of time. This option should only be used on sites with +## trusted users. It is highly inadvisable to use this on a public site. +## +#use_re_search = 0 + +## dir_pagesize: Maximum number of directory entries on a given page. +## This allows ViewVC to present discrete pages to the users instead of +## the entire directory. Set to 0 to disable pagination. +## +dir_pagesize = 100 + +## log_pagesize: Maximum number of revision log entries on a given page. +## This allows ViewVC to present discrete pages to the users instead of +## the entire revision log. Set to 0 to disable pagination. +## +log_pagesize = 100 + +## limit_changes: Maximum number of changed paths shown per commit in +## the Subversion revision view and in query results. This is not a +## hard limit (the UI provides options to show all changed paths), but +## it prevents ViewVC from generating enormous and hard to read pages +## by default when they happen to contain import or merge commits +## affecting hundreds or thousands of files. Set to 0 to disable the +## limit.
+## +#limit_changes = 100 + +##--------------------------------------------------------------------------- +[templates] + +## You can override the templates used by various ViewVC views in this +## section. By default, ViewVC will look for templates in the +## directory specified by the "template_dir" configuration option (see +## the documentation for that option for details). But if you want to +## use a different template for a particular view, simply uncomment the +## appropriate option below and specify the correct location of the EZT +## template file you wish to use for that view. +## +## Templates are specified relative to the configured template +## directory (see the "template_dir" option), but absolute paths may +## also be used as well. +## +## If %lang% occurs in the pathname, then the selected language will be +## substituted. +## +## NOTE: the selected language is defined by the "languages" item in the +## [general] section, and based on the request's Accept-Language +## header. +## + +## diff: Template used for the file differences view. +## +#diff = + +## directory: Template used for the directory listing view. +## +#directory = + +## error: Template used for the ViewVC error display view. +## +#error = + +## file: Template used for the file contents/annotation view. +## +#file = + +## graph: Template used for the revision graph view. +## +#graph = + +## log: Template used for the revision log view. +## +#log = + +## query: Template used for the non-integrated query interface. +## +#query = + +## query_form: Template used for the query form view. +## +#query_form = + +## query_results: Template used for the query results view. +## +#query_results = + +## revision: Template used for the revision/changeset view. +## +#revision = + +## roots: Template used for the root listing view. +## +#roots = + +##--------------------------------------------------------------------------- +[cvsdb] + +## enabled: Enable database integration feature. +## +#enabled = 0 + +## host: Database hostname. Leave unset to use a local Unix socket +## connection. +## +#host = + +## post: Database listening port. +## +#port = 3306 + +## database_name: ViewVC database name. +## +#database_name = ViewVC + +## user: Username of user with read/write privileges to the database +## specified by the 'database_name' configuration option. +## +#user = + +## passwd: Password of user with read/write privileges to the database +## specified by the 'database_name' configuration option. +## +#passwd = + +## readonly_user: Username of user with read privileges to the database +## specified by the 'database_name' configuration option. +## +#readonly_user = + +## readonly_passwd: Password of user with read privileges to the database +## specified by the 'database_name' configuration option. +## +#readonly_passwd = + +## row_limit: Maximum number of rows returned by a given normal query +## to the database. +## +#row_limit = 1000 + +## rss_row_limit: Maximum number of rows returned by a given query to +## the database made as part of an RSS feed request. (Keeping in mind +## that RSS readers tend to poll regularly for new data, you might want +## to keep this set to a conservative number.) +## +#rss_row_limit = 100 + +## check_database_for_root: Check if the repository is found in the +## database before showing the query link and RSS feeds. +## +## WARNING: Enabling this check adds the cost of a database connection +## and query to most ViewVC requests. 
If all your roots are represented +## in the commits database, or if you don't care about the creation of +## RSS and query links that might lead ultimately to error pages for +## certain of your roots, or if you simply don't want to add this extra +## cost to your ViewVC requests, leave this disabled. +## +#check_database_for_root = 0 + +##--------------------------------------------------------------------------- +[vhosts] + +## Virtual hosts are individual logical servers accessible via +## different hostnames, but which are all really the same physical +## computer. For example, you might have your web server configured to +## accept incoming traffic for both http://www.yourdomain.com/ and +## http://viewvc.yourdomain.com/. Users pointing their web browsers at +## each of those two URLs might see entirely different content via one +## URL versus the other, but all that content actually lives on the +## same computer, is served up via the same web server, and so +## on. It just *looks* like its coming from multiple servers. +## +## ViewVC allows you to customize its configuration options for +## individual virtual hosts. You might, for example, wish to expose +## all of your Subversion repositories at http://svn.yourdomain.com/viewvc/ +## and all your CVS ones at http://cvs.yourdomain.com/viewvc/, with no +## cross-exposure. Using ViewVC's virtual host (vhost) configuration +## support, you can do this. Simply create two vhost configurations +## (one for each of your hostnames), then configure the cvs_roots +## option only for the vhost associated with cvs.yourdomain.com, and +## configure the svn_roots option only for the vhost associated with +## svn.yourdomain.com. +## +## This section is a freeform configuration section, where you create +## both the option names and their values. The names of the options +## are then treated as canonical names of virtual hosts, and their +## values are defined to be comma-delimited lists of hostname globs +## against which incoming ViewVC requests will be matched to figure out +## which vhost they apply to. +## +## After you've named and defined your vhosts, you may then create new +## configuration sections whose names are of the form +## vhost-VHOSTNAME/CONFIGSECTION. VHOSTNAME here is the canonical name +## of one of the virtual hosts you defined under the [vhosts] section. +## Inside those configuration sections, you override the standard +## ViewVC options typically found in the base configuration section +## named CONFIGSECTION ("general", "option", etc.) +## +## NOTE: Per-vhost overrides may only be applied to the following +## sections: +## +## general +## options +## utilities +## templates +## cvsdb +## authz-* +## +## Here is an example: +## +## [vhosts] +## libs = libs.yourdomain.*, *.yourlibs.* +## gui = guiproject.yourdomain.* +## +## [vhost-libs/general] +## cvs_roots = +## svn_roots = svnroot: /var/svn/libs-repos +## default_root = svnroot +## +## [vhost-libs/options] +## show_logs = 1 +## +## [vhost-gui/general] +## cvs_roots = cvsroot: /var/cvs/guiproject +## svn_roots = +## default_root = cvsroot +## + +##--------------------------------------------------------------------------- +## ViewVC recognizes per-root configuration overrides, too. To +## override the value of a configuration parameter only for a single +## root, create a configuration section whose names is of the form +## root-ROOTNAME/CONFIGSECTION. 
ROOTNAME here is the name of the root +## as defined explicitly in cvs_roots or svn_roots or implicitly as the +## basename of a root path in root_parents. Options found in this new +## configuration section override for this one root the corresponding +## options found in the base configuration section CONFIGSECTION +## ("options", "authz-*", etc.) as interpreted after per-vhost +## overrides (if any) have been applied. +## +## NOTE: Per-root overrides may only be applied to the following +## sections: +## +## options +## utilities +## authz-* +## +## WARNING: Do not use per-root overrides if your ViewVC instance is +## served via the standalone.py server option! Doing so could cause +## ViewVC to be unable to function properly (or at all). +## +## Here is an example showing how to enable Subversion authz-based +## authorization for only the single root named "svnroot": +## +## [root-svnroot/options] +## authorizer = svnauthz +## +## [root-svnroot/authz-svnauthz] +## authzfile = /path/to/authzfile +## + +##--------------------------------------------------------------------------- +[authz-forbidden] + +## The "forbidden" authorizer forbids access to repository modules, +## defined to be top-level subdirectories in a repository. +## +## NOTE: The options in this section apply only when the 'authorizer' +## option (in the [options] section) is set to 'forbidden'. + +## forbidden: A comma-delimited list of patterns which match modules +## that ViewVC should hide from users. +## +## You can use a simple list of modules, or something more complex: +## +## *) The "!" can be used before a module to explicitly state that it +## is NOT forbidden. Whenever this form is seen, then all modules will +## be forbidden unless one of the "!" modules match. +## +## *) Shell-style "glob" expressions may be used. "*" will match any +## sequence of zero or more characters, "?" will match any single +## character, "[seq]" will match any character in seq, and "[!seq]" +## will match any character not in seq. +## +## *) Tests are performed in sequence. The first match will terminate the +## testing. This allows for more complex allow/deny patterns. +## +## Tests are case-sensitive. +## +## NOTE: Again, this is for the hiding of modules within repositories, *not* +## for the hiding of repositories (roots) themselves. +## +## Some examples: +## +## Disallow "example" but allow all others: +## forbidden = example +## +## Disallow "example1" and "example2" but allow all others: +## forbidden = example1, example2 +## +## Allow *only* "example1" and "example2": +## forbidden = !example1, !example2 +## +## Forbid modules starting with "x": +## forbidden = x* +## +## Allow modules starting with "x" but no others: +## forbidden = !x* +## +## Allow "xml", forbid other modules starting with "x", and allow the rest: +## forbidden = !xml, x*, !* +## +#forbidden = + +##--------------------------------------------------------------------------- +[authz-forbiddenre] + +## The "forbiddenre" authorizer forbids access to repositories and +## repository paths by comparing a list of regular expressions +## (separated by commas) against paths consisting of the repository (or +## root) name plus the path of the versioned file or directory to be +## tested. For example, to see if the user is authorized to see the +## path "/trunk/www/index.html" in the repository whose root name is +## "svnrepos", this authorizer will check the path +## "svnrepos/trunk/www/index.html" against the list of forbidden +## regular expressions. 
Directory paths will be terminated by a forward +## slash. +## +## NOTE: The options in this section apply only when the 'authorizer' +## option (in the [options] section) is set to 'forbiddenre'. + +## forbiddenre: A comma-delimited list of regular expressions which +## match paths that ViewVC should hide from users. +## +## Like the "forbidden" authorizer... +## +## *) The "!" can be used before a module to explicitly state that it +## is NOT forbidden. Whenever this form is seen, then all modules will +## be forbidden unless one of the "!" modules match. +## +## *) Tests are performed in sequence. The first match will terminate the +## testing. This allows for more complex allow/deny patterns. +## +## Unlike the "forbidden" authorizer, you can can use this to hide roots, too. +## +## Some examples: +## +## Disallow files named "PRIVATE", but allow all others: +## forbiddenre = /PRIVATE$ +## +## Disallow the "hidden" repository, allowing all others: +## forbiddenre = ^hidden(/|$) +## +## Allow only the "example1" and "example2" roots and the paths inside them, +## disallowing all others (which can be done in multiple ways): +## forbiddenre = !^example1(/|$), !^example2(/|$)/ +## forbiddenre = !^example[12](/|$) +## +## Only allow visibility of HTML files and the directories that hold them: +## forbiddenre = !^([^/]+|.*(/|\.html))$ +## +#forbiddenre = + +##--------------------------------------------------------------------------- +[authz-svnauthz] + +## The "svnauthz" authorizer uses a Subversion authz configuration file +## to determine access to repository paths. +## +## NOTE: The options in this section apply only when the 'authorizer' +## option (in the [options] section) is set to 'svnauthz'. + +## authzfile: Specifies the location of the authorization rules file +## (using an absolute path). +## +#authzfile = + +## force_username_case: Like the AuthzForceUsernameCase httpd.conf +## directive, set this to "upper" or "lower" to force the normalization +## to upper- or lower-case, respectively, of incoming usernames prior +## to comparison against the authorization rules files. Leave the +## option unset to preserve the username case. 
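+## +## For example (illustrative only; "lower" is one of the two documented +## values), to lower-case incoming usernames before they are matched +## against the rules file: +##   force_username_case = lower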
+## +#force_username_case = + +##--------------------------------------------------------------------------- diff --git a/modules/viewvc/templates/webapp.conf b/modules/viewvc/templates/webapp.conf new file mode 100644 index 00000000..de257cc0 --- /dev/null +++ b/modules/viewvc/templates/webapp.conf @@ -0,0 +1,2 @@ +# this file must be empty for now +# signed $fqdn admin diff --git a/modules/xinetd/manifests/init.pp b/modules/xinetd/manifests/init.pp new file mode 100644 index 00000000..a86aaeee --- /dev/null +++ b/modules/xinetd/manifests/init.pp @@ -0,0 +1,7 @@ +class xinetd { + package { 'xinetd': } + + service { 'xinetd': + subscribe => Package['xinetd'] + } +} diff --git a/modules/xinetd/manifests/port_forward.pp b/modules/xinetd/manifests/port_forward.pp new file mode 100644 index 00000000..2717466e --- /dev/null +++ b/modules/xinetd/manifests/port_forward.pp @@ -0,0 +1,8 @@ +define xinetd::port_forward($target_ip, $target_port, $port, $proto = 'tcp') { + include xinetd + file { "/etc/xinetd.d/${name}": + require => Package['xinetd'], + content => template('xinetd/port_forward'), + notify => Service['xinetd'] + } +} diff --git a/modules/xinetd/manifests/service.pp b/modules/xinetd/manifests/service.pp new file mode 100644 index 00000000..24caafd9 --- /dev/null +++ b/modules/xinetd/manifests/service.pp @@ -0,0 +1,9 @@ +define xinetd::service($content) { + include xinetd + file { "/etc/xinetd.d/${name}": + require => Package['xinetd'], + content => $content, + notify => Service['xinetd'] + } +} + diff --git a/modules/xinetd/templates/port_forward b/modules/xinetd/templates/port_forward new file mode 100644 index 00000000..99518dcd --- /dev/null +++ b/modules/xinetd/templates/port_forward @@ -0,0 +1,15 @@ +service <%= @name %> +{ + disable = no + type = UNLISTED +<%- if @proto == 'tcp' -%> + socket_type = stream +<%- else -%> + socket_type = dgram +<%- end -%> + protocol = <%= @proto %> + user = nobody + wait = no + redirect = <%= @target_ip %> <%= @target_port %> + port = <%= @port %> +} diff --git a/modules/xymon/manifests/client.pp b/modules/xymon/manifests/client.pp new file mode 100644 index 00000000..cfde8134 --- /dev/null +++ b/modules/xymon/manifests/client.pp @@ -0,0 +1,19 @@ +class xymon::client { + package { 'xymon-client': } + + $service = 'xymon' + + service { $service: + hasstatus => false, + status => "${::lib_dir}/xymon/client/runclient.sh status", + require => Package['xymon-client'], + } + + # TODO replace with a exported resource + $server = extlookup('hobbit_server','x') + file { '/etc/sysconfig/xymon-client': + content => template('xymon/xymon-client'), + notify => Service[$service], + require => Package['xymon-client'], + } +} diff --git a/modules/xymon/manifests/init.pp b/modules/xymon/manifests/init.pp new file mode 100644 index 00000000..9b609048 --- /dev/null +++ b/modules/xymon/manifests/init.pp @@ -0,0 +1,2 @@ +class xymon { +} diff --git a/modules/xymon/manifests/server.pp b/modules/xymon/manifests/server.pp new file mode 100644 index 00000000..b6c269cf --- /dev/null +++ b/modules/xymon/manifests/server.pp @@ -0,0 +1,45 @@ +class xymon::server { + package { ['xymon','fping']: } + + File { + group => 'xymon', + require => Package['xymon'], + notify => Exec['service xymon reload'], + } + + file { + # Define hosts and web view layout, and lists tests to be run against + # host by e.g. 
network tests from the xymon server + '/etc/xymon/hosts.cfg': + content => template('xymon/bb-hosts'); + + # Environment variables used by hobbitd, hobbitlaunch, hobbitd_rrd, CGIs + # and bbgen (which generates the static html pages) + # hobbitlaunch (started by init script) may need to be restarted for + # changes here; for hobbitd_rrd (e.g. TEST2RRD) it is sufficient to + # kill hobbitd_rrd, and hobbitlaunch will respawn it + '/etc/xymon/hobbitserver.cfg': + content => template('xymon/hobbitserver.cfg'); + + # Defines thresholds for test data reported by clients, e.g. load, + # disk, procs, ports, memory, as well as those which require some + # server-side configuration in addition to the client: files, msgs + '/etc/xymon/hobbit-clients.cfg': + content => template('xymon/hobbit-clients.cfg'); + + # Configuration for the xymon clients, which log files to process etc. + '/etc/xymon/client-local.cfg': + content => template('xymon/client-local.cfg'); + + # Used for alerting; changes should take effect immediately + '/etc/xymon/hobbit-alerts.cfg': + content => template('xymon/hobbit-alerts.cfg'); + } + + # Most changes should take effect immediately, but sometimes threshold + # changes take effect sooner if hobbit is HUPd + exec { 'service xymon reload': + refreshonly => true, + require => Package['xymon'], + } +} diff --git a/modules/xymon/templates/bb-hosts b/modules/xymon/templates/bb-hosts new file mode 100644 index 00000000..140932b5 --- /dev/null +++ b/modules/xymon/templates/bb-hosts @@ -0,0 +1,52 @@ +# +# Master configuration file for Xymon +# +# This file defines several things: +# +# 1) By adding hosts to this file, you define hosts that are monitored by Xymon +# 2) By adding "page", "subpage", "group" definitions, you define the layout +# of the Xymon webpages, and how hosts are divided among the various webpages +# that Xymon generates. +# 3) Several other definitions can be made for each host; see the bb-hosts(5) +# man-page. +# +# You need to define at least the Xymon server itself here.
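+# +# For illustration only (hypothetical host and address, not part of this +# setup), an entry combines the IP, the hostname and the tests to run, e.g.: +# 192.0.2.10 build1.example.org # testip ssh http://build1.example.org/ +#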
+ +page visible Visible Services +0.0.0.0 blog.<%= domain %> # sni https://blog.<%= domain %>/en/ +0.0.0.0 identity.<%= domain %> # https://identity.<%= domain %> +0.0.0.0 bugs.<%= domain %> # https://bugs.<%= domain %> +0.0.0.0 ml.<%= domain %> # https://ml.<%= domain %> +0.0.0.0 www.<%= domain %> # https://www.<%= domain %> +0.0.0.0 svnweb.<%= domain %> # https://svnweb.<%= domain %> +0.0.0.0 epoll.<%= domain %> # https://epoll.<%= domain %> +0.0.0.0 planet.<%= domain %> # sni https://planet.<%= domain %>/en/ +# This checks the public reverse proxy +0.0.0.0 forums.<%= domain %> # sni https://forums.<%= domain %>=<%= @nodes_ipaddr['sucuk']['ipv4'] %>/ +0.0.0.0 check.<%= domain %> # https://check.<%= domain %> +0.0.0.0 madb.<%= domain %> # https://madb.mageia.org +0.0.0.0 pkgsubmit.<%= domain %> # sni https://pkgsubmit.<%= domain %> +#0.0.0.0 bcd.<%= domain %> # http://bcd.<%= domain %> +0.0.0.0 hugs.<%= domain %> # http://hugs.<%= domain %> +0.0.0.0 dashboard.<%= domain %> # http://dashboard.<%= domain %> +0.0.0.0 meetbot.<%= domain %> # sni https://meetbot.<%= domain %> + + +page servers Servers +group-compress Marseille +212.85.158.151 sucuk.<%= domain %> # testip bbd dns smtp ssh CLIENT:xymon.<%= domain %> http://xymon.<%= domain %> +212.85.158.148 ecosse.<%= domain %> # testip ssh +212.85.158.150 fiona.<%= domain %> # testip ssh +212.85.158.152 rabbit.<%= domain %> # testip ssh +212.85.158.153 duvel.<%= domain %> # testip ssh rsync svn git ldapssl ldap + +group-compress VM Sucuk +192.168.122.131 friteuse.<%= domain %> # testip ssh http://forums.<%= domain %>=<%= @nodes_ipaddr['friteuse']['ipv4'] %>/ + +group-compress Scaleway +163.172.148.228 neru.mageia.org # testip ssh dns ldap ldapssl smtp +163.172.201.211 madb.mageia.org # testip + +# NOTE: lines with IPv6 addresses are ignored in xymon versions before 4.4 or 5.0 +group-compress Oracle cloud +2603:c026:c101:f00::1:1 ociaa1.<%= domain %> # testip ssh diff --git a/modules/xymon/templates/client-local.cfg b/modules/xymon/templates/client-local.cfg new file mode 100644 index 00000000..44428778 --- /dev/null +++ b/modules/xymon/templates/client-local.cfg @@ -0,0 +1,131 @@ +# The client-local.cfg file contains configuration for +# the Xymon clients running on monitored systems. When +# clients contact the Xymon server, they get the section +# from this file which matches their hostname or operating +# system. +# +# The following configuration items are currently possible: +# "log:FILENAME:MAXDATA" +# Monitor the text-based logfile FILENAME, and report +# back at most MAXDATA bytes. The Xymon client will +# only report back entries generated during the past +# 30 minutes, so MAXDATA is an upper limit. +# "ignore EXPRESSION" +# Must follow a "log:..." entry. Lines matching the +# regular EXPRESSION are not sent to the Xymon server. +# "trigger EXPRESSION" +# Must follow a "log:..." entry. Lines matching the +# regular EXPRESSION are always sent to the Xymon server. +# Use this for extremely critical errors that must be +# reported. +# +# "linecount:FILENAME" +# Monitor the text-based logfile FILENAME, but just +# count the number of times certain expressions appear. +# This processes the entire file every time. It must +# be followed by one or more lines with +# "KEYWORD PATTERN" +# KEYWORD identifies this count. You can use any string +# except whitespace. PATTERN is a regular expression +# that you want to search for in the file. +# +# "file:FILENAME[:hash]" +# Monitor the file FILENAME by reporting file metadata.
+# The Xymon client will report back all of the file +# meta-data, e.g. size, timestamp, filetype, permissions +# etc. The optional "hash" setting is "md5", "sha1" or +# "rmd160", and causes the Xymon client to compute a +# file hash using the MD5, SHA-1 or RMD160 algorithm. +# Note: Computing the hash value may be CPU-intensive, +# so You should use this sparingly. For large-scale +# file integrity monitoring, use a real host-based +# IDS (Tripwire, AIDE or similar). +# +# "dir:DIRECTORY" +# Monitor the size of DIRECTORY, including sub-directories. +# This causes the Xymon client to run a "du" on DIRECTORY +# and send this back to the Xymon server. +# Note: Running "du" on large/deep directory structures can +# cause a significant system load. +# +# NB: If FILENAME and/or DIRECTORY are of the form `COMMAND`, +# then COMMAND is run on the client, and the lines output +# by the command are used as the file- or directory-names. +# This allows you to monitor files where the names change, +# as long as you can script some way of determining the +# interesting filenames. + +[sunos] +log:/var/adm/messages:10240 + +[osf1] +log:/var/adm/messages:10240 + +[aix] +log:/var/adm/syslog/syslog.log:10240 + +[hp-ux] +log:/var/adm/syslog/syslog.log:10240 + +[win32] + +[freebsd] +log:/var/log/messages:10240 + +[netbsd] +log:/var/log/messages:10240 + +[openbsd] +log:/var/log/messages:10240 + +[linux] +log:/var/log/messages:10240 +ignore MARK +file:/var/lib/puppet/state/state.yaml + +[linux22] +log:/var/log/messages:10240 +ignore MARK + +[redhat] +log:/var/log/messages:10240 +ignore MARK + +[debian] +log:/var/log/messages:10240 +ignore MARK + +[suse] +log:/var/log/messages:10240 +ignore MARK + +[mageia] +log:/var/log/messages:10240 +ignore MARK + +[mandrivalinux] +log:/var/log/messages:10240 +#log:/var/log/secure:10240 +ignore MARK + +[redhatAS] +log:/var/log/messages:10240 +ignore MARK + +[redhatES] +log:/var/log/messages:10240 +ignore MARK + +[rhel3] +log:/var/log/messages:10240 +ignore MARK + +[irix] +log:/var/adm/SYSLOG:10240 + +[darwin] +log:/var/log/system.log:10240 + +[sco_sv] +log:/var/adm/syslog:10240 + diff --git a/modules/xymon/templates/hobbit-alerts.cfg b/modules/xymon/templates/hobbit-alerts.cfg new file mode 100644 index 00000000..763e253d --- /dev/null +++ b/modules/xymon/templates/hobbit-alerts.cfg @@ -0,0 +1,128 @@ +# +# The hobbit-alerts.cfg file controls who receives alerts +# when a status in the BB system goes into a critical +# state (usually: red, yellow or purple). +# +# This file is made up from RULES and RECIPIENTS. +# +# A RULE is a filter made from the PAGE where a host +# is located in BB; the HOST name, the SERVICE name, +# the COLOR of the status, the TIME of day, and the +# DURATION of the event. +# +# A RECIPIENT can be a MAIL address, or a SCRIPT. +# +# Recipients can also have rules associated with them, +# that modify the rules for a single recipient, e.g. +# you can define a rule for alerting, then add an +# extra criteria e.g. so a single recipient does not get +# alerted until after 20 minutes. +# +# A sample rule: +# +# HOST=www.foo.com SERVICE=http +# MAIL webadmin@foo.com REPEAT=20 RECOVERED +# MAIL cio@foo.com DURATION>60 COLOR=red +# SCRIPT /usr/local/bin/sendsms 1234567890 FORMAT=SMS +# +# The first line sets up a rule that catches alerts +# for the host "www.foo.com" and the "http" service. 
+# There are three recipients for these alerts: The first +# one is "webadmin@foo.com" - they get alerted +# immediately when the status goes into an alert state, +# and the alert is repeated every 20 minutes until it +# recovers. When it recovers, a message is sent about +# the recovery. +# +# The second recipient is "cio@foo.com". He gets alerted +# only when the service goes "red" for more than 60 minutes. +# +# The third recipient is a script, "/usr/local/bin/sendsms". +# The real recipient is "1234567890", but it is handled +# by the script - the script receives a set of environment +# variables with the details about the alert, including the +# real recipient. The alert message is preformatted for +# an SMS recipient. +# +# You can use Perl-compatible "regular expressions" for +# the PAGE, HOST and SERVICE definitions, by putting a "%" +# in front of the regex. E.g. +# +# HOST=%^www.* +# MAIL webadmin@foo.com EXHOST=www.testsite.foo.com +# +# This sets up a rule so that alerts from any hostname +# beginning with "www" go to "webadmin@foo.com", EXCEPT +# alerts from "www.testsite.foo.com" +# +# The following keywords are recognized: +# PAGE - rule matching an alert by the name of the +# page in BB. This is the name following +# the "page", "subpage" or "subparent" keyword +# in the bb-hosts file. +# EXPAGE - rule excluding an alert if the pagename matches. +# HOST - rule matching an alert by the hostname. +# EXHOST - rule excluding an alert by matching the hostname. +# SERVICE - rule matching an alert by the service name. +# EXSERVICE - rule excluding an alert by matching the service name. +# GROUP - rule matching an alert by the group ID. +# (Group IDs are associated with a status through the +# hobbit-clients.cfg configuration). +# EXGROUP - rule excluding an alert by matching the group ID. +# COLOR - rule matching an alert by color. Can be "red", +# "yellow", or "purple". +# TIME - rule matching an alert by the time-of-day. This +# is specified as the DOWNTIME timespecification +# in the bb-hosts file (see bb-hosts(5)). +# DURATION - Rule matching an alert if the event has lasted +# longer/shorter than the given duration. E.g. +# DURATION>10 (lasted longer than 10 minutes) or +# DURATION<30 (only sends alerts the first 30 minutes). +# RECOVERED - Rule matches if the alert has recovered from an +# alert state. +# NOTICE - Rule matches if the message is a "notify" message +# (typically sent when a status is enabled or disabled). +# MAIL - Recipient who receives an e-mail alert. This takes +# one parameter, the e-mail address. +# SCRIPT - Recipient that invokes a script. This takes two +# parameters: The script filename, and the recipient +# that gets passed to the script. +# FORMAT - format of the text message with the alert. Default +# is "TEXT" (suitable for e-mail alerts). "SMS" is +# a short message with no subject for SMS alerts. +# "SCRIPT" is a brief message template for scripts. +# REPEAT - How often an alert gets repeated, in minutes. +# STOP - Valid for a recipient: If this recipient gets an +# alert, recipients further down in hobbit-alerts.cfg +# are ignored. +# UNMATCHED - Matches if no alerts have been sent so far.
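+# +# For illustration only (hypothetical address), a catch-all recipient that +# is used only if nobody else has received the alert, and that stops +# further processing; it reuses the documented "%" regex form for HOST: +# +# HOST=%.* +# MAIL oncall@foo.com UNMATCHED STOP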
+# +# +# Script get the following environment variables pre-defined so +# that they can send a meaningful alert: +# +# BBCOLORLEVEL - The color of the alert: "red", "yellow" or "purple" +# BBALPHAMSG - The full text of the status log triggering the alert +# ACKCODE - The "cookie" that can be used to acknowledge the alert +# RCPT - The recipient, from the SCRIPT entry +# BBHOSTNAME - The name of the host that the alert is about +# MACHIP - The IP-address of the host that has a problem +# BBSVCNAME - The name of the service that the alert is about +# BBSVCNUM - The numeric code for the service. From SVCCODES definition. +# BBHOSTSVC - HOSTNAME.SERVICE that the alert is about. +# BBHOSTSVCCOMMAS - As BBHOSTSVC, but dots in the hostname replaced with commas +# BBNUMERIC - A 22-digit number made by BBSVCNUM, MACHIP and ACKCODE. +# RECOVERED - Is "1" if the service has recovered. +# DOWNSECS - Number of seconds the service has been down. +# DOWNSECSMSG - When recovered, holds the text "Event duration : N" where +# N is the DOWNSECS value. + +<% +builder = ['ecosse','rabbit'] +builders = builder.map{|x| x + "." + domain }.join(',') +%> +HOST=<%= builders %> SERVICE=cpu + MAIL=sysadmin-reports@ml.<%= domain %> DURATION>6h RECOVERED NOTICE REPEAT=3h STOP + +HOST=%.*.<%= domain %> + MAIL=sysadmin-reports@ml.<%= domain %> DURATION>5 RECOVERED NOTICE REPEAT=3h diff --git a/modules/xymon/templates/hobbit-clients.cfg b/modules/xymon/templates/hobbit-clients.cfg new file mode 100644 index 00000000..ff010681 --- /dev/null +++ b/modules/xymon/templates/hobbit-clients.cfg @@ -0,0 +1,380 @@ +# hobbit-clients.cfg - configuration file for clients reporting to Xymon +# +# This file is used by the hobbitd_client module, when it builds the +# cpu, disk, files, memory, msgs and procs status messages from the +# information reported by clients running on the monitored systems. +# +# This file must be installed on the Xymon server - client installations +# do not need this file. +# +# The file defines a series of rules: +# UP : Changes the "cpu" status when the system has rebooted recently, +# or when it has been running for too long. +# LOAD : Changes the "cpu" status according to the system load. +# CLOCK : Changes the "cpu" status if the client system clock is +# not synchronized with the clock of the Xymon server. +# DISK : Changes the "disk" status, depending on the amount of space +# used of filesystems. +# MEMPHYS: Changes the "memory" status, based on the percentage of real +# memory used. +# MEMACT : Changes the "memory" status, based on the percentage of "actual" +# memory used. Note: Not all systems report an "actual" value. +# MEMSWAP: Changes the "memory" status, based on the percentage of swap +# space used. +# PROC : Changes the "procs" status according to which processes were found +# in the "ps" listing from the client. +# LOG : Changes the "msgs" status according to entries in text-based logfiles. +# Note: The "client-local.cfg" file controls which logfiles the client will report. +# FILE : Changes the "files" status according to meta-data for files. +# Note: The "client-local.cfg" file controls which files the client will report. +# DIR : Changes the "files" status according to the size of a directory. +# Note: The "client-local.cfg" file controls which directories the client will report. +# PORT : Changes the "ports" status according to which tcp ports were found +# in the "netstat" listing from the client. +# DEFAULT: Set the default values that apply if no other rules match. 
+# +# All rules can be qualified so they apply only to certain hosts, or on certain +# times of the day (see below). +# +# Each type of rule takes a number of parameters: +# UP bootlimit toolonglimit +# The cpu status goes yellow if the system has been up for less than +# "bootlimit" time, or longer than "toolonglimit". The time is in +# minutes, or you can add h/d/w for hours/days/weeks - eg. "2h" for +# two hours, or "4w" for 4 weeks. +# Defaults: bootlimit=1h, toolonglimit=-1 (infinite). +# +# LOAD warnlevel paniclevel +# If the system load exceeds "warnlevel" or "paniclevel", the "cpu" +# status will go yellow or red, respectively. These are decimal +# numbers. +# Defaults: warnlevel=5.0, paniclevel=10.0 +# +# CLOCK maximum-offset +# If the system clock of the client differs from that of the Xymon +# server by more than "maximum-offset" seconds, then the CPU status +# column will go yellow. Note that the accuracy of this test is limited, +# since it is affected by the time it takes a client status report to +# go from the client to the Xymon server and be processed. You should +# therefore allow for a few seconds (5-10) of slack when you define +# your max. offset. +# It is not wise to use this test, unless your servers are synchronized +# to a common clock, e.g. through NTP. +# +# DISK filesystem warnlevel paniclevel +# DISK filesystem IGNORE +# If the utilization of "filesystem" is reported to exceed "warnlevel" +# or "paniclevel", the "disk" status will go yellow or red, respectively. +# "warnlevel" and "paniclevel" are either the percentage used, or the +# space available as reported by the local "df" command on the host. +# For the latter type of check, the "warnlevel" must be followed by the +# letter "U", e.g. "1024U". +# The special keyword "IGNORE" causes this filesystem to be ignored +# completely, i.e. it will not appear in the "disk" status column and +# it will not be tracked in a graph. This is useful for e.g. removable +# devices, backup-disks and similar hardware. +# "filesystem" is the mount-point where the filesystem is mounted, e.g. +# "/usr" or "/home". A filesystem-name that begins with "%" is interpreted +# as a Perl-compatible regular expression; e.g. "%^/oracle.*/" will match +# any filesystem whose mountpoint begins with "/oracle". +# Defaults: warnlevel=90%, paniclevel=95% +# +# MEMPHYS warnlevel paniclevel +# MEMACT warnlevel paniclevel +# MEMSWAP warnlevel paniclevel +# If the memory utilization exceeds the "warnlevel" or "paniclevel", the +# "memory" status will change to yellow or red, respectively. +# Note: The words "PHYS", "ACT" and "SWAP" are also recognized. +# Defaults: MEMPHYS warnlevel=100 paniclevel=101 (i.e. it will never go red) +# MEMSWAP warnlevel=50 paniclevel=80 +# MEMACT warnlevel=90 paniclevel=97 +# +# PROC processname minimumcount maximumcount color [TRACK=id] [TEXT=displaytext] +# The "ps" listing sent by the client will be scanned for how many +# processes containing "processname" are running, and this is then +# matched against the min/max settings defined here. If the running +# count is outside the thresholds, the color of the "procs" status +# changes to "color". +# To check for a process that must NOT be running: Set minimum and +# maximum to 0. +# +# "processname" can be a simple string, in which case this string must +# show up in the "ps" listing as a command. The scanner will find +# a ps-listing of e.g. "/usr/sbin/cron" if you only specify "processname" +# as "cron". 
+# "processname" can also be a Perl-compatible regular expression, e.g. +# "%java.*inst[0123]" can be used to find entries in the ps-listing for +# "java -Xmx512m inst2" and "java -Xmx256 inst3". In that case, +# "processname" must begin with "%" followed by the reg.expression. +# If "processname" contains whitespace (blanks or TAB), you must enclose +# the full string in double quotes - including the "%" if you use regular +# expression matching. E.g. +# PROC "%hobbitd_channel --channel=data.*hobbitd_rrd" 1 1 yellow +# or +# PROC "java -DCLASSPATH=/opt/java/lib" 2 5 +# +# You can have multiple "PROC" entries for the same host, all of the +# checks are merged into the "procs" status and the most severe +# check defines the color of the status. +# +# The TRACK=id option causes the number of processes found to be recorded +# in an RRD file, with "id" as part of the filename. This graph will then +# appear on the "procs" page as well as on the "trends" page. Note that +# "id" must be unique among the processes tracked for each host. +# +# The TEXT=displaytext option affects how the process appears on the +# "procs" status page. By default, the process is listed with the +# "processname" as identification, but if this is a regular expression +# it may be a bit difficult to understand. You can then use e.g. +# "TEXT=Apache" to make these processes appear with the name "Apache" +# instead. +# +# Defaults: mincount=1, maxcount=-1 (unlimited), color="red". +# Note: No processes are checked by default. +# +# Example: Check that "cron" is running: +# PROC cron +# Example: Check that at least 5 "httpd" processes are running, but +# not more than 20: +# PROC httpd 5 20 +# +# LOG filename match-pattern [COLOR=color] [IGNORE=ignore-pattern] [TEXT=displaytext] +# In the "client-local.cfg" file, you can list any number of files +# that the client will collect log data from. These are sent to the +# Xymon server together with the other client data, and you can then +# choose how to analyze the log data with LOG entries. +# +# ************ IMPORTANT *************** +# To monitor a logfile, you *MUST* configure both client-local.cfg +# and hobbit-clients.cfg. If you configure only the client-local.cfg +# file, the client will collect the log data and you can view it in +# the "client data" display, but it will not affect the color of the +# "msgs" status. On the other hand, if you configure only the +# hobbit-clients.cfg file, then there will be no log data to inspect, +# and you will not see any updates of the "msgs" status either. +# +# "filename" is a filename or pattern. The set of files reported by +# the client is matched against "filename", and if they match then +# this LOG entry is processed against the data from a file. +# +# "match-pattern": The log data is matched against this pattern. If +# there is a match, this log file causes a status change to "color". +# +# "ignore-pattern": The log data that matched "match-pattern" is also +# matched against "ignore-pattern". If the data matches the "ignore-pattern", +# this line of data does not affect the status color. In other words, +# the "ignore-pattern" can be used to refine the strings which cause +# a match. +# Note: The "ignore-pattern" is optional. +# +# "color": The color which this match will trigger. +# Note: "color" is optional, if omitted then "red" will be used. +# +# Example: Go yellow if the text "WARNING" shows up in any logfile. +# LOG %.* WARNING COLOR=yellow +# +# Example: Go red if the text "I/O error" or "read error" appears. 
+# LOG %/var/(adm|log)/messages %(I/O|read).error COLOR=red +# +# FILE filename [color] [things to check] [TRACK] +# NB: The files you wish to monitor must be listed in a "file:..." +# entry in the client-local.cfg file, in order for the client to +# report any data about them. +# +# "filename" is a filename or pattern. The set of files reported by +# the client is matched against "filename", and if they match then +# this FILE entry is processed against the data from that file. +# +# [things to check] can be one or more of the following: +# - "NOEXIST" triggers a warning if the file exists. By default, +# a warning is triggered for files that have a FILE entry, but +# which do not exist. +# - "TYPE=type" where "type" is one of "file", "dir", "char", "block", +# "fifo", or "socket". Triggers warning if the file is not of the +# specified type. +# - "OWNERID=owner" and "GROUPID=group" triggers a warning if the owner +# or group does not match what is listed here. "owner" and "group" is +# specified either with the numeric uid/gid, or the user/group name. +# - "MODE=mode" triggers a warning if the file permissions are not +# as listed. "mode" is written in the standard octal notation, e.g. +# "644" for the rw-r--r-- permissions. +# - "SIZE<max.size" and "SIZE>min.size" triggers a warning it the file +# size is greater than "max.size" or less than "min.size", respectively. +# You can append "K" (KB), "M" (MB), "G" (GB) or "T" (TB) to the size. +# If there is no such modifier, KB is assumed. +# E.g. to warn if a file grows larger than 1MB (1024 KB): "SIZE<1M". +# - "SIZE=size" triggers a warning it the file size is not what is listed. +# - "MTIME>min.mtime" and "MTIME<max.mtime" checks how long ago the file +# was last modified (in seconds). E.g. to check if a file was updated +# within the past 10 minutes (600 seconds): "MTIME<600". Or to check +# that a file has NOT been updated in the past 24 hours: "MTIME>86400". +# - "MTIME=timestamp" checks if a file was last modified at "timestamp". +# "timestamp" is a unix epoch time (seconds since midnight Jan 1 1970 UTC). +# - "CTIME>min.ctime", "CTIME<max.ctime", "CTIME=timestamp" acts as the +# mtime checks, but for the ctime timestamp (when the files' directory +# entry was last changed, eg. by chown, chgrp or chmod). +# - "MD5=md5sum", "SHA1=sha1sum", "RMD160=rmd160sum" trigger a warning +# if the file checksum using the MD5, SHA1 or RMD160 message digest +# algorithms do not match the one configured here. Note: The "file" +# entry in the client-local.cfg file must specify which algorithm to use. +# +# "TRACK" causes the size of this file to be tracked in an RRD file, and +# shown on the graph on the "files" display. +# +# Example: Check that the /var/log/messages file is not empty and was updated +# within the past 10 minutes, and go yellow if either fails: +# FILE /var/log/messages SIZE>0 MTIME<600 yellow +# +# Example: Check the timestamp, size and SHA-1 hash of the /bin/sh program: +# FILE /bin/sh MTIME=1128514608 SIZE=645140 SHA1=5bd81afecf0eb93849a2fd9df54e8bcbe3fefd72 +# +# DIR directory [color] [SIZE<maxsize] [SIZE>minsize] [TRACK] +# NB: The directories you wish to monitor must be listed in a "dir:..." +# entry in the client-local.cfg file, in order for the client to +# report any data about them. +# +# "directory" is a filename or pattern. The set of directories reported by +# the client is matched against "directory", and if they match then +# this DIR entry is processed against the data for that directory. 
+# +# "SIZE<maxsize" and "SIZE>minsize" defines the size limits that the +# directory must stay within. If it goes outside these limits, a warning +# will trigger. Note the Xymon uses the raw number reported by the +# local "du" command on the client. This is commonly KB, but it may be +# disk blocks which are often 512 bytes. +# +# "TRACK" causes the size of this directory to be tracked in an RRD file, +# and shown on the graph on the "files" display. +# +# PORT [LOCAL=addr] [EXLOCAL=addr] [REMOTE=addr] [EXREMOTE=addr] [STATE=state] [EXSTATE=state] [MIN=mincount] [MAX=maxcount] [COLOR=color] [TRACK=id] [TEXT=displaytext] +# The "netstat" listing sent by the client will be scanned for how many +# sockets match the criteria listed. +# "addr" is a (partial) address specification in the format used on +# the output from netstat. This is typically "10.0.0.1:80" for the IP +# 10.0.0.1, port 80. Or "*:80" for any local address, port 80. +# NB: The Xymon clients normally report only the numeric data for +# IP-addresses and port-numbers, so you must specify the port +# number (e.g. "80") instead of the service name ("www"). +# "state" causes only the sockets in the specified state to be included; +# it is usually LISTEN or ESTABLISHED. +# The socket count is then matched against the min/max settings defined +# here. If the count is outside the thresholds, the color of the "ports" +# status changes to "color". +# To check for a socket that must NOT exist: Set minimum and +# maximum to 0. +# +# "addr" and "state" can be a simple strings, in which case these string must +# show up in the "netstat" at the appropriate column. +# "addr" and "state" can also be a Perl-compatible regular expression, e.g. +# "LOCAL=%(:80|:443)" can be used to find entries in the netstat local port for +# both http (port 80) and https (port 443). In that case, portname or state must +# begin with "%" followed by the reg.expression. +# +# The TRACK=id option causes the number of sockets found to be recorded +# in an RRD file, with "id" as part of the filename. This graph will then +# appear on the "ports" page as well as on the "trends" page. Note that +# "id" must be unique among the ports tracked for each host. +# +# The TEXT=displaytext option affects how the port appears on the +# "ports" status page. By default, the port is listed with the +# local/remote/state rules as identification, but this may be somewhat +# difficult to understand. You can then use e.g. "TEXT=Secure Shell" to make +# these ports appear with the name "Secure Shell" instead. +# +# Defaults: state="LISTEN", mincount=1, maxcount=-1 (unlimited), color="red". +# Note: No ports are checked by default. +# +# Example: Check that there is someone listening on the https port: +# PORT "LOCAL=%([.:]443)$" state=LISTEN TEXT=https +# +# Example: Check that at least 5 "ssh" connections are established, but +# not more than 10; warn but do not error; graph the connection count: +# PORT "LOCAL=%([.:]22)$" state=ESTABLISHED min=5 max=20 color=yellow TRACK=ssh "TEXT=SSH logins" +# +# Example: Check that ONLY ports 22, 80 and 443 are open for incoming connections: +# PORT STATE=LISTEN LOCAL=%0.0.0.0[.:].* EXLOCAL=%[.:](22|80|443)$ MAX=0 "TEXT=Bad listeners" +# +# +# To apply rules to specific hosts, you can use the "HOST=", "EXHOST=", "PAGE=" +# "EXPAGE=", "CLASS=" or "EXCLASS=" qualifiers. (These act just as in the +# hobbit-alerts.cfg file). 
+# +# Hostnames are either a comma-separated list of hostnames (from the bb-hosts file), +# "*" to indicate "all hosts", or a Perl-compatible regular expression. +# E.g. "HOST=dns.foo.com,www.foo.com" identifies two specific hosts; +# "HOST=%www.*.foo.com EXHOST=www-test.foo.com" matches all hosts with a name +# beginning with "www", except the "www-test" host. +# "PAGE" and "EXPAGE" match the hostnames against the page on where they are +# located in the bb-hosts file, via the bb-hosts' page/subpage/subparent +# directives. This can be convenient to pick out all hosts on a specific page. +# +# Rules can be dependant on time-of-day, using the standard Xymon syntax +# (the bb-hosts(5) about the NKTIME parameter). E.g. "TIME=W:0800:2200" +# applied to a rule will make this rule active only on week-days between +# 8AM and 10PM. +# +# You can also associate a GROUP id with a rule. The group-id is passed to +# the alert module, which can then use it to control who gets an alert when +# a failure occurs. E.g. the following associates the "httpd" process check +# with the "web" group, and the "sshd" check with the "admins" group: +# PROC httpd 5 GROUP=web +# PROC sshd 1 GROUP=admins +# In the hobbit-alerts.cfg file, you could then have rules like +# GROUP=web +# MAIL webmaster@foo.com +# GROUP=admins +# MAIL root@foo.com +# +# Qualifiers must be placed after each rule, e.g. +# LOAD 8.0 12.0 HOST=db.foo.com TIME=*:0800:1600 +# +# If you have multiple rules that you want to apply the same qualifiers to, +# you can write the qualifiers *only* on one line, followed by the rules. E.g. +# HOST=%db.*.foo.com TIME=W:0800:1600 +# LOAD 8.0 12.0 +# DISK /db 98 100 +# PROC mysqld 1 +# will apply the three rules to all of the "db" hosts on week-days between 8AM +# and 4PM. This can be combined with per-rule qualifiers, in which case the +# per-rule qualifier overrides the general qualifier; e.g. +# HOST=%.*.foo.com +# LOAD 7.0 12.0 HOST=bax.foo.com +# LOAD 3.0 8.0 +# will result in the load-limits being 7.0/12.0 for the "bax.foo.com" host, +# and 3.0/8.0 for all other foo.com hosts. +# +# The special DEFAULT section can modify the built-in defaults - this must +# be placed at the end of the file. + +HOST=rabbit.<%= domain %> + DISK %.*stage2$ IGNORE + +# ecosse has 24 cores, is a builder, and we try to use them all +HOST=ecosse.<%= domain %> + LOAD 36.0 48.0 + +# rabbit has 12 cores and mksquashfs uses all of them +HOST=rabbit.<%= domain %> + LOAD 18.0 24.0 + +# duvel has 24 cores, dont trigger alarms too soon +HOST=duvel.<%= domain %> + LOAD 18.0 24.0 + DISK /var/lib/binrepo 95 98 + DISK /var/www 95 98 + +DEFAULT + # These are the built-in defaults. + UP 1h + LOAD 5.0 10.0 + DISK %^/mnt/cdrom 101 101 + DISK * 90 95 + MEMPHYS 100 101 + MEMSWAP 50 80 + MEMACT 90 97 + CLOCK 60 + FILE /var/lib/puppet/state/state.yaml yellow mtime<5400 + PORT state=LISTEN "LOCAL=%([.:]22)$" MIN=1 TEXT=ssh + PROC puppetd 0 3 red + # 10 , just in case something goes wrong + PROC crond 1 10 red diff --git a/modules/xymon/templates/hobbitserver.cfg b/modules/xymon/templates/hobbitserver.cfg new file mode 100644 index 00000000..a5a7aacf --- /dev/null +++ b/modules/xymon/templates/hobbitserver.cfg @@ -0,0 +1,230 @@ +# NB : Even though it might look like a shell-script, it is NOT. +# +BBSERVERROOT="<%= lib_dir %>/xymon" # Where Xymon is installed - holds the server and bbvar sub-dirs. +BBSERVERLOGS="/var/log/xymon" # Directory for server logs. The hobbit user must have write-access here. 
+HOBBITCLIENTHOME="<%= lib_dir %>/xymon/client" # BBHOME directory for the client + + +BBSERVERHOSTNAME="sucuk.<%= domain %>" # The hostname of your server +BBSERVERIP="<%= ipaddress %>" # The IP-address of your server. Use the real one, not 127.0.0.1 . +BBSERVEROS="linux" # The operating system of your server. linux,freebsd,solaris,hpux,aix,osf + +BBSERVERWWWNAME="xymon.<%= domain %>" # The name used for this hosts' webserver +BBSERVERWWWURL="/xymon" # The top URL for the Xymon webpages +BBSERVERCGIURL="/xymon-cgi" # The URL for the Xymon CGI scripts. +BBSERVERSECURECGIURL="/xymon-seccgi" # The URL for the secured Xymon CGI scripts. + +# BBLOCATION="foo" # The network location, makes bbtest-net test only hosts with NET:foo + # You only need to set this if you have multiple network test servers with + # a shared bb-hosts file. + +# Make sure the path includes the directories where you have fping, mail and (optionally) ntpdate installed, +# as well as the BBHOME/bin directory where all of the Xymon programs reside. +PATH="/bin:/usr/bin:/sbin:/usr/sbin:/usr/local/bin:/usr/local/sbin:/usr/lib64/xymon/server/bin" + +# Some systems need extra settings e.g. to locate run-time libraries. +# You can add these extra settings here: + +# fix error message from jonund : +# 2011-07-17 15:32:54 Oversize status msg from +# 212.85.158.149 for jonund.mageia.org:procs truncated (n=350049, limit=262144) +# +# https://en.wikibooks.org/wiki/System_Monitoring_with_Xymon/Other_Docs/FAQ#Q._How_do_I_fix_.22Oversize_status_msg_from_192.168.1.31_for_test.my.com:ports_truncated_.28n.3D508634.2C_limit.3D262144.29.22 +MAXMSG_STATUS="496" + +##### Normally you do not need to modify anything below this point ##### + +# General settings +BBPORT="1984" # Portnumber where hobbitd/bbd listens +BBDISP="$BBSERVERIP" # IP of a single hobbit/bbd server +BBDISPLAYS="" # IP of multiple hobbit/bbd servers. If used, BBDISP must be 0.0.0.0 +FQDN="TRUE" # Use fully-qualified hostnames internally. Keep it TRUE unless you know better. +BBGHOSTS="1" # How to handle status messages from unknown hosts. + # 0=accept message, 1=discard message, 2=discard message and log the event +BBLOGSTATUS="DYNAMIC" # Are HTML status logs statically or dynamically generated? + # Use DYNAMIC with Xymon, unless you run hobbitd_filestore --status --html + +PINGCOLUMN="conn" # Column where the ping-test reports results. +INFOCOLUMN="info" # Column where the info-pages are reported. +TRENDSCOLUMN="trends" # Column where the RRD graphs are reported. + +BBMAXMSGSPERCOMBO="100" # How many individual messages to combine in a combo-message. 0=unlimited. +BBSLEEPBETWEENMSGS="0" # Delay between sending each combo message, in milliseconds. + + +# Specific to this host +BBOSTYPE="$BBSERVEROS" # Hosttype (operating system). Not used by server-side, but clients use this. +MACHINEDOTS="$BBSERVERHOSTNAME" # This systems hostname +MACHINEADDR="$BBSERVERIP" # This systems IP-address + +# URL's generated/used by bbgen +BBWEBHOST="https://$BBSERVERWWWNAME" # Just the host part of the URL - http://www.foo.com +BBWEBHOSTURL="$BBWEBHOST$BBSERVERWWWURL" # Prefix for all static Xymon pages - http://www.foo.com/bb +BBWEBHTMLLOGS="$BBWEBHOSTURL/html" # Prefix for the Xymon HTML logs (only if BBLOGSTATUS=STATIC) +BBWEB="$BBSERVERWWWURL" # Xymon URL prefix without the host part +BBSKIN="$BBSERVERWWWURL/gifs" # Xymon URL prefix for the GIF files +BBHELPSKIN="$BBSERVERWWWURL/help" # Xymon URL prefix for the online help files. 
+BBNOTESSKIN="$BBSERVERWWWURL/notes" # Xymon URL prefix for the online notes-files. +BBMENUSKIN="$BBSERVERWWWURL/menu" # Xymon URL prefix for the webpage menu files. +BBREPURL="$BBSERVERWWWURL/rep" # Xymon URL prefix for the Xymon availability reports +BBSNAPURL="$BBSERVERWWWURL/snap" # Xymon URL prefix for the Xymon snapshots +BBWAP="$BBSERVERWWWURL/wml" # Xymon URL prefix for the WAP/WML files. +CGIBINURL="$BBSERVERCGIURL" # URL prefix for the Xymon CGI-scripts - /cgi-bin +SECURECGIBINURL="$BBSERVERSECURECGIURL" # URL prefix for the secured Xymon CGI-scripts - /cgi-secure + +# Locations of system-wide files and directories +BBHOME="<%= lib_dir %>/xymon/server" # The Xymon server directory, where programs and configurations go. +BBTMP="$BBHOME/tmp" # Directory used for temporary files. +BBHOSTS="$BBHOME/etc/bb-hosts" # The bb-hosts file +BB="$BBHOME/bin/bb" # The 'bb' client program +BBGEN="$BBHOME/bin/bbgen" # The bbgen program + +# Server specific directories +BBVAR="/var/lib/xymon" # The bbvar directory holds all monitoring data +BBACKS="$BBVAR/acks" # Acknowledge event info stored here (hobbitd_alert) +BBDATA="$BBVAR/data" # Data files go here (hobbitd_filestore --data) +BBDISABLED="$BBVAR/disabled" # Enabled/disabled flags are stored here (hobbitd_filestore --enadis) +BBHIST="$BBVAR/hist" # History logs are stored here (hobbitd_history) +BBHISTLOGS="$BBVAR/histlogs" # Historical detail status-loge are stored here (hobbitd_history) +BBLOGS="$BBVAR/logs" # Status logs go here (hobbitd_filestore --status). Not needed by Xymon. +BBWWW="$BBHOME/www" # The directory for Xymon webpage files. +BBHTML="$BBWWW/html" # HTML status logs go here (hobbitd_filestore --status --html) +BBNOTES="$BBWWW/notes" # For notes-files (hobbitd_filestore --notes) +BBREP="$BBWWW/rep" # Top-level directory for Xymon reports. +BBSNAP="$BBWWW/snap" # Top-level directory for Xymon snapshots. + +# For the hobbitd_history module +BBALLHISTLOG="TRUE" # Save a common log of all events (used for the bb2 webpage) +BBHOSTHISTLOG="TRUE" # Save a log of all events for a host (not used by any tool currently) +SAVESTATUSLOG="TRUE" # Save the detailed status log each time the status changes. + +# For the hobbitd_alert module +FROM="root@<%= domain %>" +MAILC="mail -r $FROM" # Command used to send an e-mail with no subject +MAIL="$MAILC -s" # Command used to send an e-mail with a subject +SVCCODES="disk:100,cpu:200,procs:300,svcs:350,msgs:400,conn:500,http:600,dns:800,smtp:725,telnet:723,ftp:721,pop:810,pop3:810,pop-3:810,ssh:722,imap:843,ssh1:722,ssh2:722,imap2:843,imap3:843,imap4:843,pop2:809,pop-2:809,nntp:819,test:901" +ALERTCOLORS="red,yellow,purple" # Colors that may trigger an alert message +OKCOLORS="green,blue,clear" # Colors that may trigger a recovery message +ALERTREPEAT="30" # The default interval between repeated alert-messages (in minutes) + +# For bbtest-net +CONNTEST="TRUE" # Should we 'ping' hosts ? +IPTEST_2_CLEAR_ON_FAILED_CONN="TRUE" # If TRUE, then failing network tests go CLEAR if conn-test fails. +NONETPAGE="" # Network tests that go YELLOW upon failure +FPING="/bin/fping -Ae" # Path and options for the ping program. +NTPDATE="ntpdate" # Path to the 'ntpdate' program +TRACEROUTE="traceroute" # How to do traceroute on failing ping tests. Requires "trace" in bb-hosts. +BBROUTERTEXT="router" # What to call a failing intermediate network device. 
+NETFAILTEXT="not OK" # Text indicating a network test failed + + +# Settings for the RRD graphs + +# Top level directory for the RRD files +BBRRDS="$BBVAR/rrd" + +# Size of the generated graph images +RRDHEIGHT="120" +RRDWIDTH="576" # The RRD's contain 576 data points, so this is a good value + +# TEST2RRD defines the status- and data-messages you want to collect RRD data +# about. You will normally not need to modify this, unless you have added a +# script to pick up RRD data from custom tests (the hobbitd_larrd --extra-script +# and --extra-tests options). +# Note that network tests defined in the bb-services file are automatically +# included. +# The format here is "COLUMN=RRDSERVICE". If you leave out the "=RRDSERVICE" +# part, it is assumed to be the same as the COLUMN value. +# +# This is also used by the bb-hostsvc.cgi script to determine if the detailed +# status view of a test should include a graph. +TEST2RRD="cpu=la,disk,inode,qtree,memory,$PINGCOLUMN=tcp,http=tcp,dns=tcp,dig=tcp,time=ntpstat,vmstat,iostat,netstat,temperature,apache,bind,sendmail,mailq,nmailq=mailq,socks,bea,iishealth,citrix,bbgen,bbtest,bbproxy,hobbitd,files,procs=processes,ports,clock,lines,ops,stats,cifs,JVM,JMS,HitCache,Session,JDBCConn,ExecQueue,JTA,TblSpace,RollBack,MemReq,InvObj,snapmirr,snaplist,snapshot,if_load=devmon,temp=devmon" + +# This defines which RRD files to include on the "trends" column webpage, +# and the order in which they appear. +GRAPHS="la,disk,inode,qtree,files,processes,memory,users,vmstat,iostat,tcp.http,tcp,ncv,netstat,ifstat,mrtg::1,ports,temperature,ntpstat,apache,bind,sendmail,mailq,socks,bea,iishealth,citrix,bbgen,bbtest,bbproxy,hobbitd,clock,lines,ops,stats,cifs,JVM,JMS,HitCache,Session,JDBCConn,ExecQueue,JTA,TblSpace,RollBack,MemReq,InvObj,snapmirr,snaplist,snapshot,devmon::1,if_load::1,temp" + +# These two settings can be used to restrict what filesystems are being +# tracked (i.e. have their utilisation graphed) by Xymon. +# NORRDDISKS="" # Filesystems that will NOT be tracked +# RRDDISKS="" # Only track these filesystems + + +############################################################ +# These determine some parts of how bbgen generates webpages +############################################################ +BBGENOPTS="--recentgifs --subpagecolumns=2" # Standard options for bbgen. +SUMMARY_SET_BKG="FALSE" # Do summaries affect the background color of the BB webpage ? +BBMKBB2EXT="eventlog.sh acklog.sh" # What extensions to have on the BB2 page. +DOTHEIGHT="16" # Height (in pixels) of the color GIF's +DOTWIDTH="16" # Width (in pixels) of the color GIF's +COLUMNDOCURL="$CGIBINURL/hobbitcolumn.sh?%s" # URL formatting string for column-links + +# HTML content +HTMLCONTENTTYPE="text/html" # You can add charset options here. + +# Fonts and texts +HOBBITLOGO="Mageia monitoring" # HTML inserted on all header pages at top-left corner. +MKBBLOCAL="<B><I>Pages Hosted Locally</I></B>" +MKBBREMOTE="<B><I>Remote Status Display</I></B>" +MKBBSUBLOCAL="<B><I>Subpages Hosted Locally</I></B>" +MKBBACKFONT="COLOR=\"#33ebf4\" SIZE=\"-1\"" # Size and color of the 'Current acknowledgement...' text in the html log. 
+MKBBCOLFONT="COLOR=\"#87a9e5\" SIZE=\"-1\"" # Size and color of the column headings text +MKBBROWFONT="SIZE=\"+1\" COLOR=\"#FFFFCC\" FACE=\"Tahoma, Arial, Helvetica\"" # Size,color,font of text in each row (hostname) +MKBBTITLE="COLOR=\"#FFFFF0\" SIZE=\"+1\"" # Size and color of the BB titles (the old "ivory" is invalid HTML) +BBDATEFORMAT="%a %b %d %H:%M:%S %Y" # Date format +BBRSSTITLE="Xymon Alerts" # Title for the RSS and WML outputs. +ACKUNTILMSG="Next update at: %H:%M %Y-%m-%d" # strftime format for the acknowledgement status display. + +# For WML output +WMLMAXCHARS="1500" # Max number of bytes in a WAP message + +# For BB reports +BBREPWARN="97" # Default availability causing yellow status on availability report. +BBREPGREEN="99.995" # Default availability causing green status on availability report. +BBGENREPOPTS="$BBGENOPTS" # bbgen(1) options used when generating availability reports. +BBREPEXT="" # What extensions to run on report pages. + +# For BB snapshots +BBGENSNAPOPTS="$BBGENOPTS" # bbgen(1) options used when generating snapshots. + +# For the bb-hist CGI +BBHISTEXT="" # What extensions to run on history pages. + + +# The following defines a bunch of commands that BB extensions expect to be present. +# Hobbit does not use them, but they are provided here so if you use BB extension +# scripts, then they will hopefully run without having to do a lot of tweaking. + +UPTIME="/usr/bin/uptime" +AWK="/usr/bin/awk" +CAT="/bin/cat" +CP="/bin/cp" +CUT="/usr/bin/cut" +DATE="/bin/date" +EGREP="/bin/egrep" +EXPR="/usr/bin/expr" +FIND="/usr/bin/find" +GREP="/bin/grep" +HEAD="/usr/bin/head" +ID="/bin/id" +LN="/bin/ln" +LS="/bin/ls" +MV="/bin/mv" +RM="/bin/rm" +SED="/bin/sed" +SORT="/bin/sort" +TAIL="/usr/bin/tail" +TOP="/usr/bin/top" +TOUCH="/bin/touch" +TR="/usr/bin/tr" +UNIQ="/usr/bin/uniq" +WHO="/usr/bin/who" +WC="/usr/bin/wc -l" +WCC="/usr/bin/wc" +# DF,DFCMD and PS are for compatibility only, NOT USED by the Hobbit client +DF="/bin/df -Pk" +DFCMD="/bin/df -Pk" +PS="ps ax" + +MAXLINE="32768" diff --git a/modules/xymon/templates/xymon-client b/modules/xymon/templates/xymon-client new file mode 100644 index 00000000..e846d2a5 --- /dev/null +++ b/modules/xymon/templates/xymon-client @@ -0,0 +1,21 @@ +# Configure the Hobbit client settings. + +# You MUST set the list of Hobbit servers that this +# client reports to. +# It is good to use IP-addresses here instead of DNS +# names - DNS might not work if there's a problem. +# +# E.g. (a single Hobbit server) +# HOBBITSERVERS="192.168.1.1" +# or (multiple servers) +# HOBBITSERVERS="10.0.0.1 192.168.1.1" +XYMONSERVERS="<%= server %>" + +# The defaults usually suffice for the rest of this file, +# but you can tweak the hostname that the client reports +# data with, and the OS name used (typically needed only on +# RHEL or RHAS servers). 
+ +# CLIENTHOSTNAME="" +# CLIENTOS="rhel3" + diff --git a/modules/youri-check/manifests/init.pp b/modules/youri-check/manifests/init.pp new file mode 100644 index 00000000..ebdaa492 --- /dev/null +++ b/modules/youri-check/manifests/init.pp @@ -0,0 +1,133 @@ +class youri-check { + class base { + $vhost = "check.${::domain}" + $user = 'youri' + $home = '/var/lib/youri' + $home_check = '/var/www/youri-check' + $pgsql_password = extlookup('youri_pgsql','x') + + user { $user: + comment => 'Youri Check', + home => $home, + } + + file { $home: + ensure => directory, + owner => $user, + group => $user, + } + + file { $home_check: + ensure => directory, + owner => $user, + group => $user, + } + + $pgsql_server = "${vhost}" + + package { ['youri-check', 'perl-DBD-Pg', 'perl-Youri-Media']: } + + } + + + define config($version) { + include stdlib + include youri-check::base + + $config = "/etc/youri/${version}.conf" + $outdir = "/var/www/youri-check/${version}" + $pgsql_db = "youri_check_${version}" + $pgsql_server = $base::pgsql_server + $pgsql_user = "youri${version}" + $pgsql_password = extlookup('youri_pgsql','x') + # We want to alert for packages older than the cut-off for latest mass rebuild + # 1745539200 is 2025-04-25 + $max_days = (time() - 1745539200)/(24*3600) + + file { "${config}": + ensure => present, + owner => $base::user, + mode => '0640', + content => template("youri-check/${version}.conf"), + require => User[$base::user], + } + } + + + define createdb_user($version) { + $pgsql_db = "youri_check_${version}" + $pgsql_user = "youri${version}" + $pgsql_password = extlookup('youri_pgsql','x') + + postgresql::remote_user { $pgsql_user: + password => $base::pgsql_password, + } + + postgresql::remote_database { $pgsql_db: + description => "Youri Check results", + user => $pgsql_user, + } + } + + define check($version, $hour = "*", $minute = 0) { + include youri-check::base + $config = "/etc/youri/${version}.conf" + $pgsql_server = $base::pgsql_server + $pgsql_db = "youri_check_${version}" + $pgsql_user = "youri${version}" + $pgsql_password = extlookup('youri_pgsql','x') + + postgresql::remote_user { $pgsql_user: + password => $base::pgsql_password, + } + + postgresql::remote_database { $pgsql_db: + description => "Youri Check results", + user => $pgsql_user, + } + cron { "check_${version}": + command => "youri-check -c ${config} --parallel test", + hour => $hour, + minute => $minute, + user => $base::user, + environment => "MAILTO=root", + require => User[$base::user], + } + } + + define report_www { + include youri-check::base + $outdir = "/var/www/youri-check/" + apache::vhost::base { $base::vhost: + location => $outdir, + content => template('youri-check/vhost_check.conf'), + } + apache::vhost::base { "ssl_${base::vhost}": + vhost => $base::vhost, + use_ssl => true, + location => $outdir, + content => template('youri-check/vhost_check.conf'), + } + } + + define report($version, $hour = "*", $minute = 20) { + include youri-check::base + + $config = "/etc/youri/${version}.conf" + + $outdir = "/var/www/youri-check/${version}" + file { "${outdir}": + ensure => directory, + owner => $base::user, + mode => '0755', + } + + cron { "check_${version}": + command => "youri-check -c ${config} report", + hour => $hour, + minute => $minute, + user => $base::user, + require => User[$base::user], + } + } +} diff --git a/modules/youri-check/templates/9.conf b/modules/youri-check/templates/9.conf new file mode 100644 index 00000000..28028080 --- /dev/null +++ b/modules/youri-check/templates/9.conf @@ -0,0 
+1,241 @@ +# vim:ft=yaml:et:sw=4 + +# helper variables +mirror: http://repository.mageia.org/distrib/9 +mirror_i586: ${mirror}/i586/media +mirror_x86_64: ${mirror}/x86_64/media + +# resultset definition +resultset: + class: Youri::Check::Resultset::DBI + options: + driver: Pg + host: <%= pgsql_server %>;sslmode=require + base: <%= pgsql_db %> + user: <%= pgsql_user %> + pass: <%= pgsql_password %> + +resolver: + class: Youri::Check::Maintainer::Resolver::CGI + options: + url: https://pkgsubmit.<%= domain %>/data/maintdb.txt + exceptions: + - nobody + + +# checks definitions +tests: + dependencies: + class: Youri::Check::Test::Dependencies + + missing: + class: Youri::Check::Test::Missing + +# reports definitions +reports: + file: + class: Youri::Check::Report::File + options: + to: <%= outdir %> + global: 1 + individual: 1 + formats: + html: + class: Youri::Check::Report::Format::HTML + text: + class: Youri::Check::Report::Format::Text + rss: + class: Youri::Check::Report::Format::RSS + +# media definitions +medias: + core.i586: + class: Youri::Media::URPM + options: + name: core + type: binary + hdlist: ${mirror_i586}/media_info/hdlist_core.cz + options: + dependencies: + allowed: + - core.i586 + missing: + allowed: + - core.sources + + core_updates.i586: + class: Youri::Media::URPM + options: + name: core_updates + type: binary + hdlist: ${mirror_i586}/media_info/hdlist_core_updates.cz + options: + dependencies: + allowed: + - core.i586 + - core_updates.i586 + missing: + allowed: + - core.sources + - core_updates.sources + + core_updates_testing.i586: + class: Youri::Media::URPM + options: + name: core_updates_testing + type: binary + hdlist: ${mirror_i586}/media_info/hdlist_core_updates_testing.cz + options: + dependencies: + allowed: + - core.i586 + - core_updates.i586 + - core_updates_testing.i586 + missing: + allowed: + - core.sources + - core_updates.sources + - core_updates_testing.sources + + core.x86_64: + class: Youri::Media::URPM + options: + name: core + type: binary + hdlist: ${mirror_x86_64}/media_info/hdlist_core.cz + options: + dependencies: + allowed: + - core.x86_64 + - core.i586 + missing: + allowed: + - core.sources + + core_updates.x86_64: + class: Youri::Media::URPM + options: + name: core_updates + type: binary + hdlist: ${mirror_x86_64}/media_info/hdlist_core_updates.cz + options: + dependencies: + allowed: + - core.i586 + - core_updates.i586 + - core.x86_64 + - core_updates.x86_64 + missing: + allowed: + - core.sources + - core_updates.sources + + core_updates_testing.x86_64: + class: Youri::Media::URPM + options: + name: core_updates_testing + type: binary + hdlist: ${mirror_x86_64}/media_info/hdlist_core_updates_testing.cz + options: + dependencies: + allowed: + - core.x86_64 + - core_updates.x86_64 + - core_updates_testing.x86_64 + - core.i586 + - core_updates.i586 + - core_updates_testing.i586 + missing: + allowed: + - core.sources + - core_updates.sources + - core_updates_testing.sources + + core.sources: + class: Youri::Media::URPM + options: + name: core + type: source + hdlist: ${mirror_i586}/media_info/hdlist_core.src.cz + options: + dependencies: + allowed: + - core.x86_64 + - core.i586 + + core_updates.sources: + class: Youri::Media::URPM + options: + name: core_updates + type: source + hdlist: ${mirror_i586}/media_info/hdlist_core_updates.src.cz + options: + dependencies: + allowed: + - core.x86_64 + - core_updates.x86_64 + - core.i586 + - core_updates.i586 + + core_updates_testing.sources: + class: Youri::Media::URPM + options: + name: 
core_updates_testing + type: source + hdlist: ${mirror_i586}/media_info/hdlist_core_updates_testing.src.cz + options: + dependencies: + allowed: + - core.x86_64 + - core_updates.x86_64 + - core_updates_testing.x86_64 + - core.i586 + - core_updates.i586 + - core_updates_testing.i586 + + nonfree.i586: + class: Youri::Media::URPM + options: + name: nonfree + type: binary + hdlist: ${mirror_i586}/media_info/hdlist_nonfree_release.cz + options: + dependencies: + allowed: + - core.i586 + - nonfree.i586 + missing: + allowed: + - nonfree.sources + + nonfree.x86_64: + class: Youri::Media::URPM + options: + name: nonfree + type: binary + hdlist: ${mirror_x86_64}/media_info/hdlist_nonfree_release.cz + options: + dependencies: + allowed: + - core.x86_64 + - core.i586 + - nonfree.x86_64 + - nonfree.i586 + missing: + allowed: + - nonfree.sources + + + nonfree.sources: + class: Youri::Media::URPM + options: + name: nonfree + type: source + hdlist: ${mirror_i586}/media_info/hdlist_nonfree_release.src.cz + options: + dependencies: + allowed: + - core.x86_64 + - nonfree.x86_64 + - core.i586 + - nonfree.i586 diff --git a/modules/youri-check/templates/cauldron.conf b/modules/youri-check/templates/cauldron.conf new file mode 100644 index 00000000..aeace447 --- /dev/null +++ b/modules/youri-check/templates/cauldron.conf @@ -0,0 +1,504 @@ +# vim:ft=yaml:et:sw=4 + +# helper variables +mirror: http://repository.mageia.org/distrib/cauldron +mirror_aarch64: ${mirror}/aarch64/media +mirror_armv7hl: ${mirror}/armv7hl/media +mirror_i686: ${mirror}/i686/media +mirror_x86_64: ${mirror}/x86_64/media + +# resultset definition +resultset: + class: Youri::Check::Resultset::DBI + options: + driver: Pg + host: <%= pgsql_server %>;sslmode=require + base: <%= pgsql_db %> + user: <%= pgsql_user %> + pass: <%= pgsql_password %> + +resolver: + class: Youri::Check::Maintainer::Resolver::CGI + options: + url: https://pkgsubmit.<%= domain %>/data/maintdb.txt + exceptions: + - nobody + + +# checks definitions +tests: + dependencies: + class: Youri::Check::Test::Dependencies + + missing: + class: Youri::Check::Test::Missing + + updates: + class: Youri::Check::Test::Updates + options: + aliases: + basesystem: ~ + drakxtools: ~ + drakx-installer-advertising: ~ + drakx-installer-binaries: ~ + drakx-installer-images: ~ + drakx-installer-rescue: ~ + drakx-installer-stage2: ~ + horde-accounts: accounts + horde-chora: chora + horde-forwards: forwards + horde-imp: imp + horde-ingo: ingo + horde-kronolith: kronolith + horde-mnemo: mnemo + horde-nag: nag + horde-passwd: passwd + horde-turba: turba + horde-vacation: vacation + freetype: freetype2 + gstreamer: ~ + gstreamer0.10: gstreamer + gnupg2: gnupg + gnupg: ~ + gnome-vfs2: gnome-vfs + gnome-vfs: ~ + ldetect: ~ + ldetect-lst: ~ + libutempter: utempter + perl-URPM: ~ + rpm: ~ + rpmdrake: ~ + rpmstats: ~ + rpmtools: ~ + urpmi: ~ + vte: ~ + vte3: vte + xine-lib: xine-lib1.2 + xine-lib-1.2: xine-lib1.2 + sources: + cpan: + order: 0 + class: Youri::Check::Test::Updates::Source::CPAN + options: + aliases: + libnet: ~ + perl-Catalyst-P-S-State-Cookie: Catalyst::Plugin::State::State::Cookie + perl-Catalyst-P-S-Store-FastMmap: Catalyst::Plugin::State::Store::FastMmap + perl-Catalyst-P-S-Store-File: Catalyst::Plugin::State::Store::File + gettext: ~ + pear: + order: 0 + class: Youri::Check::Test::Updates::Source::PEAR +# pypi: +# order: 0 +# class: Youri::Check::Test::Updates::Source::PyPI +# apache: +# order: 0 +# class: Youri::Check::Test::Updates::Source::Apache + debian: + order: 1 + class: 
Youri::Check::Test::Updates::Source::Debian + options: + aliases: + anjuta2: anjuta + anjuta: ~ + perl-Jcode: libjcode-pm-perl + makepasswd: ~ + sipp: ~ + zsnes: ~ + unclutter: ~ + python-id3: ~ + freetype: ~ + openldap2.3: ~ + git: git-core + nilfs-utils: nilfs-tools + mobile-broadband-provider-info: ~ + cpulimit: ~ + icecream: ~ + colorize: ~ + fedora: + order: 1 + class: Youri::Check::Test::Updates::Source::Fedora + options: + aliases: + authd: ~ + basesystem: ~ + bash: ~ + freetype: ~ + freetype2: freetype + gle: ~ + gtksourceview-sharp: ~ + modemmanager: ModemManager + netcat-openbsd: netcat + networkmanager: NetworkManager + networkmanager-applet: network-manager-applet + networkmanager-fortisslvpn: NetworkManager-fortisslvpn + networkmanager-l2tp: NetworkManager-l2tp + networkmanager-libreswan: NetworkManager-libreswan + networkmanager-openconnect: NetworkManager-openconnect + networkmanager-openvpn: NetworkManager-openvpn + networkmanager-pptp: NetworkManager-pptp + networkmanager-vpnc: NetworkManager-vpnc + ocaml-lablgtk: ~ + ocaml-lablgtk2: ocaml-lablgtk + OpenIPMI: OpenIPMI2 + sqlite: sqlite2 + gentoo: + order: 1 + class: Youri::Check::Test::Updates::Source::Gentoo + options: + aliases: + beagle: ~ + makepasswd: ~ + hibernate: hibernate-script + leif: ~ + sqlite3: sqlite + sqlite: ~ + cfengine3: cfengine + cfengine: ~ + kamikaze: ~ + knob: ~ + vertex: ~ + unclutter: ~ + pam-krb5: pam_krb5 + pam_krb5: ~ + akonadi: akonadi-server + attica: libattica + raptor2: raptor + raptor: ~ + libevent: ~ + wifi-radar: ~ + tuxmathscrabble: ~ + chromium: ~ + cpulimit: ~ + icecream: ~ + nodejs: ~ + gnome: + order: 1 + class: Youri::Check::Test::Updates::Source::GNOME + options: + url: https://download.gnome.org/sources/ + aliases: + acme: ~ + GConf: ~ + GConf2: GConf + gcr: ~ + gcr4: gcr + gdk-pixbuf2.0: gdk-pixbuf + glib: ~ + glib2.0: glib + glibmm2.4: ~ + goocanvas2: ~ + gtkmm-documentation3.0: ~ + gtkmm: ~ + gtkmm2.4: ~ + gtkmm3.0: ~ + gtkmm4.0: gtkmm + gtksourceviewmm3.0: ~ + gtk: ~ + gtk+2.0: ~ + gtk+3.0: ~ + gtk4.0: gtk + modemmanager: ModemManager + networkmanager: NetworkManager + networkmanager-applet: network-manager-applet + networkmanager-fortisslvpn: NetworkManager-fortisslvpn + networkmanager-l2tp: NetworkManager-l2tp + networkmanager-libreswan: NetworkManager-libreswan + networkmanager-openconnect: NetworkManager-openconnect + networkmanager-openvpn: NetworkManager-openvpn + networkmanager-pptp: NetworkManager-pptp + networkmanager-vpnc: NetworkManager-vpnc + notify-sharp: ~ + notify-sharp3: notify-sharp + pango: ~ + pango2.0: pango + netbsd: + order: 1 + class: Youri::Check::Test::Updates::Source::NetBSD +# sourceforge: +# class: Youri::Check::Test::Updates::Source::Sourceforge +# options: +# aliases: +# bigforth: ~ +# gtkmm: ~ +# hydrogen: ~ +# ltp: ~ +# pblogan: ~ +# console-tools: ~ +# maxima: ~ +# clisp: ~ + + updates_fedora: + class: Youri::Check::Test::Updates + options: + sources: + fedora: + order: 1 + class: Youri::Check::Test::Updates::Source::Fedora + options: + aliases: + authd: ~ + basesystem: ~ + bash: ~ + freetype: ~ + freetype2: freetype + gle: ~ + gtksourceview-sharp: ~ + modemmanager: ModemManager + netcat-openbsd: netcat + networkmanager: NetworkManager + networkmanager-applet: network-manager-applet + networkmanager-fortisslvpn: NetworkManager-fortisslvpn + networkmanager-l2tp: NetworkManager-l2tp + networkmanager-libreswan: NetworkManager-libreswan + networkmanager-openconnect: NetworkManager-openconnect + networkmanager-openvpn: NetworkManager-openvpn + 
networkmanager-pptp: NetworkManager-pptp + networkmanager-vpnc: NetworkManager-vpnc + ocaml-lablgtk: ~ + ocaml-lablgtk2: ocaml-lablgtk + OpenIPMI: OpenIPMI2 + sqlite: sqlite2 + updates_gnome: + class: Youri::Check::Test::Updates + options: + sources: + gnome: + order: 1 + class: Youri::Check::Test::Updates::Source::GNOME + options: + url: https://download.gnome.org/sources/ + aliases: + acme: ~ + GConf: ~ + GConf2: GConf + gcr: ~ + gcr4: gcr + gdk-pixbuf2.0: gdk-pixbuf + glib: ~ + glib2.0: glib + glibmm2.4: ~ + goocanvas2: ~ + gtkmm-documentation3.0: ~ + gtkmm: ~ + gtkmm2.4: ~ + gtkmm3.0: ~ + gtkmm4.0: gtkmm + gtksourceviewmm3.0: ~ + gtk: ~ + gtk+2.0: ~ + gtk+3.0: ~ + gtk4.0: gtk + modemmanager: ModemManager + networkmanager: NetworkManager + networkmanager-applet: network-manager-applet + networkmanager-fortisslvpn: NetworkManager-fortisslvpn + networkmanager-l2tp: NetworkManager-l2tp + networkmanager-libreswan: NetworkManager-libreswan + networkmanager-openconnect: NetworkManager-openconnect + networkmanager-openvpn: NetworkManager-openvpn + networkmanager-pptp: NetworkManager-pptp + networkmanager-vpnc: NetworkManager-vpnc + notify-sharp: ~ + notify-sharp3: notify-sharp + pango: ~ + pango2.0: pango + build: + class: Youri::Check::Test::Build + options: + sources: + iurt: + class: Youri::Check::Test::Build::Source::Iurt + options: + url: https://pkgsubmit.mageia.org/autobuild/cauldron + arches: + - x86_64 + medias: + - core + age: + class: Youri::Check::Test::Age + options: + max: <%= max_days %> days + pattern: "%d days" + +# reports definitions +reports: + file: + class: Youri::Check::Report::File + options: + to: <%= outdir %> + global: 1 + individual: 1 + formats: + html: + class: Youri::Check::Report::Format::HTML + text: + class: Youri::Check::Report::Format::Text + rss: + class: Youri::Check::Report::Format::RSS + +# media definitions +medias: + core.aarch64: + class: Youri::Media::URPM + options: + name: core + type: binary + hdlist: ${mirror_aarch64}/core/release/media_info/hdlist.cz + options: + dependencies: + allowed: + - core.aarch64 + missing: + allowed: + - core.sources + + core.armv7hl: + class: Youri::Media::URPM + options: + name: core + type: binary + hdlist: ${mirror_armv7hl}/core/release/media_info/hdlist.cz + options: + dependencies: + allowed: + - core.armv7hl + missing: + allowed: + - core.sources + + core.i686: + class: Youri::Media::URPM + options: + name: core + type: binary + hdlist: ${mirror_i686}/media_info/hdlist_core.cz + options: + dependencies: + allowed: + - core.i686 + missing: + allowed: + - core.sources + + core.x86_64: + class: Youri::Media::URPM + options: + name: core + type: binary + hdlist: ${mirror_x86_64}/media_info/hdlist_core.cz + options: + dependencies: + allowed: + - core.x86_64 + - core.i686 + missing: + allowed: + - core.sources + + + core.sources: + class: Youri::Media::URPM + options: + name: core + type: source + hdlist: ${mirror_i686}/media_info/hdlist_core.src.cz + options: + dependencies: + allowed: + - core.x86_64 + - core.i686 + + nonfree.i686: + class: Youri::Media::URPM + options: + name: nonfree + type: binary + hdlist: ${mirror_i686}/media_info/hdlist_nonfree_release.cz + options: + dependencies: + allowed: + - core.i686 + - nonfree.i686 + missing: + allowed: + - nonfree.sources + + nonfree.x86_64: + class: Youri::Media::URPM + options: + name: nonfree + type: binary + hdlist: ${mirror_x86_64}/media_info/hdlist_nonfree_release.cz + options: + dependencies: + allowed: + - core.x86_64 + - core.i686 + - nonfree.x86_64 + - 
nonfree.i686 + missing: + allowed: + - nonfree.sources + + + nonfree.sources: + class: Youri::Media::URPM + options: + name: nonfree + type: source + hdlist: ${mirror_i686}/media_info/hdlist_nonfree_release.src.cz + options: + dependencies: + allowed: + - core.x86_64 + - nonfree.x86_64 + - core.i686 + - nonfree.i686 + + tainted.i686: + class: Youri::Media::URPM + options: + name: nonfree + type: binary + hdlist: ${mirror_i686}/media_info/hdlist_tainted_release.cz + options: + dependencies: + allowed: + - core.i686 + - tainted.i686 + missing: + allowed: + - tainted.sources + + tainted.x86_64: + class: Youri::Media::URPM + options: + name: tainted + type: binary + hdlist: ${mirror_x86_64}/media_info/hdlist_tainted_release.cz + options: + dependencies: + allowed: + - core.x86_64 + - core.i686 + - tainted.x86_64 + - tainted.i686 + missing: + allowed: + - tainted.sources + + tainted.sources: + class: Youri::Media::URPM + options: + name: tainted + type: source + hdlist: ${mirror_i686}/media_info/hdlist_tainted_release.src.cz + options: + dependencies: + allowed: + - core.x86_64 + - tainted.x86_64 + - core.i686 + - tainted.i686 diff --git a/modules/youri-check/templates/vhost_check.conf b/modules/youri-check/templates/vhost_check.conf new file mode 100644 index 00000000..2cf598b5 --- /dev/null +++ b/modules/youri-check/templates/vhost_check.conf @@ -0,0 +1,2 @@ +Header set Access-Control-Allow-Origin "http://pkgsubmit.<%= domain %>" +Header set Access-Control-Allow-Origin "https://pkgsubmit.<%= domain %>" env=HTTPS |
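
The youri-check module above only ships the building blocks (the config, createdb_user, check, report_www and report defines); the node declarations that wire them together are not part of this diff. The Puppet sketch below is purely illustrative of how those defines could be instantiated for the cauldron configuration shown above — the node names, the split of the check run and the report generation across two hosts, and the cron schedule values are assumptions, not anything recorded in this commit.

    # Hypothetical node declarations -- not part of this commit; hostnames are placeholders.
    node 'checker.example.org' {
        # config computes $max_days from the 2025-04-25 mass-rebuild epoch, e.g. a run
        # where time() returns 1760000000 (2025-10-09) gives roughly
        # (1760000000 - 1745539200) / 86400 = 167 days.
        youri-check::config { 'cauldron':
            version => 'cauldron',
        }
        # youri-check::check already declares the postgresql user and database itself,
        # so createdb_user is not declared again here.
        youri-check::check { 'cauldron':
            version => 'cauldron',
            hour    => 2,
        }
    }

    node 'reports.example.org' {
        # The report cron also needs /etc/youri/cauldron.conf on this host.
        youri-check::config { 'cauldron':
            version => 'cauldron',
        }
        youri-check::report_www { 'check_vhost': }
        youri-check::report { 'cauldron':
            version => 'cauldron',
            hour    => 4,
        }
    }

One reason for keeping the two roles apart in this sketch: both the check and report defines title their cron job "check_${version}", so declaring both for the same version in a single catalog would presumably trip Puppet's duplicate-resource check.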

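
The Xymon server template and the xymon-client template shown earlier in this diff expect ERB variables such as lib_dir, domain, ipaddress and server, but the xymon manifests that supply them are not included in this excerpt. The sketch below is only an assumed illustration of how the client side could be wired up; the class name, package name, service name and config file path are guesses, not taken from this commit.

    # Sketch under stated assumptions -- the real xymon manifests are not in this
    # excerpt, so the class, package, service and file names here are guesses.
    class xymon::client($server) {
        package { 'xymon-client':
            ensure => installed,
        }

        # Feeds <%= server %> in modules/xymon/templates/xymon-client; the target
        # path is an assumption (distributions differ on where this file lives).
        file { '/etc/sysconfig/xymon-client':
            content => template('xymon/xymon-client'),
            require => Package['xymon-client'],
            notify  => Service['xymon-client'],
        }

        service { 'xymon-client':
            ensure => running,
            enable => true,
        }
    }

A node would then declare it along the lines of class { 'xymon::client': server => '192.168.1.1' }, using an IP address rather than a DNS name as the template's own comments recommend.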