path: root/bin/reproducible_maintainance.sh
blob: 1d4e21c265c7e8cc0ecad8e2bfb6fbf4cedccf4a
#!/bin/bash

# Copyright 2014-2015 Holger Levsen <holger@layer-acht.org>
# released under the GPLv2

DEBUG=false
. /srv/jenkins/bin/common-functions.sh
common_init "$@"

# common code defining db access
. /srv/jenkins/bin/reproducible_common.sh

DIRTY=false

# prepare backup
REP_RESULTS=/srv/reproducible-results
mkdir -p $REP_RESULTS/backup
cd $REP_RESULTS/backup

# keep 30 days and the 1st of the month
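# (DAY is the day-of-month of the date 30 days ago: backups made on the 1st are
#  kept as monthly snapshots, all others older than 30 days are deleted)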
DAY=$(date -d "30 day ago" '+%d')
DATE=$(date -d "30 day ago" '+%Y-%m-%d')
if [ "$DAY" != "01" ] && [ -f reproducible_$DATE.db.xz ] ; then
	rm -f reproducible_$DATE.db.xz
fi

# actually do the backup
DATE=$(date '+%Y-%m-%d')
if [ ! -f reproducible_$DATE.db.xz ] ; then
	cp -v $PACKAGES_DB .
	mv -v reproducible.db reproducible_$DATE.db
	xz reproducible_$DATE.db
fi

# provide copy for external backups
cp -v $PACKAGES_DB /var/lib/jenkins/userContent/

# delete old temp directories
OLDSTUFF=$(find $REP_RESULTS -maxdepth 1 -type d -name "tmp.*" -mtime +2 -exec ls -lad {} \;)
if [ ! -z "$OLDSTUFF" ] ; then
	echo
	echo "Warning: old temp directories found in $REP_RESULTS"
	find $REP_RESULTS -maxdepth 1 -type d -name "tmp.*" -mtime +2 -exec rm -rv {} \;
	echo "These old directories have been deleted."
	echo
	DIRTY=true
fi

# find old schroots
OLDSTUFF=$(find /schroots/ -maxdepth 1 -type d -name "reproducible-*-*" -mtime +2 -exec ls -lad {} \;)
if [ ! -z "$OLDSTUFF" ] ; then
	echo
	echo "Warning: old schroots found in /schroots, which have been deleted:"
	find /schroots/ -maxdepth 1 -type d -name "reproducible-*-*" -mtime +2 -exec sudo rm -rf {} \;
	echo "$OLDSTUFF"
	echo
	DIRTY=true
fi

# find and warn about pbuild leftovers
OLDSTUFF=$(find /var/cache/pbuilder/result/ -mtime +1 -exec ls -lad {} \;)
if [ ! -z "$OLDSTUFF" ] ; then
	# delete known files, see #777537
	cd /var/cache/pbuilder/result/
	echo "Attempting file detection..."
	for i in $(find . -maxdepth 1 -mtime +1 -type f -exec basename {} \;) ; do
		case $i in
			stderr|stdout)	rm -v $i
					;;
			seqan-*.bed)	rm -v $i	# leftovers reported in #766741
					;;
			bootlogo)	rm -v $i
					;;
			org.daisy.paper.CustomPaperCollection.obj)	rm -v $i
					;;
			debian-faq.pdf.gz|debian-faq.ps.gz|debian-faq.txt.gz)		rm -v $i
					;;
			*)		;;
		esac
	done
	cd -
	# report the rest
	OLDSTUFF=$(find /var/cache/pbuilder/result/ -mtime +1 -exec ls -lad {} \;)
	if [ ! -z "$OLDSTUFF" ] ; then
		echo "Warning: old files or directories found in /var/cache/pbuilder/result/"
		echo "$OLDSTUFF"
		echo "Please cleanup manually."
	fi
	echo
	DIRTY=true
fi

# find failed builds due to network problems and reschedule them
# only grep through the last 5h (300 minutes) of builds...
# (ignore "*None.rbuild.log" because these are builds which were just started)
# this job runs every 4h
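# (the egrep pattern below matches apt "Failed to fetch" errors, i.e. transient
#  network problems rather than real build failures)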
FAILED_BUILDS=$(find /var/lib/jenkins/userContent/rbuild -type f ! -name "*None.rbuild.log" ! -mmin +300 -exec egrep -l -e 'E: Failed to fetch.*(Connection failed|Size mismatch|Cannot initiate the connection to)' {} \; || true)
if [ ! -z "$FAILED_BUILDS" ] ; then
	echo
	echo "Warning: the following failed builds have been found"
	echo "$FAILED_BUILDS"
	echo
	echo "Rescheduling packages: "
	for SUITE in $(echo $FAILED_BUILDS | sed "s# #\n#g" | cut -d "/" -f7 | sort -u) ; do
		CANDIDATES=$(for PKG in $(echo $FAILED_BUILDS | sed "s# #\n#g" | grep "/$SUITE/" | cut -d "/" -f9 | cut -d "_" -f1) ; do echo -n "$PKG " ; done)
		check_candidates
		if [ $TOTAL -ne 0 ] ; then
			echo " - in $SUITE: $CANDIDATES"
			# '0' here means the artifacts will not be saved
			schedule_packages 0 $PACKAGE_IDS
		fi
	done
	DIRTY=true
fi

# find+terminate processes which should not be there
HAYSTACK=$(mktemp)
RESULT=$(mktemp)
PBUIDS="1234 1111 2222"
ps axo pid,user,size,pcpu,cmd > $HAYSTACK
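# for each build UID, pgrep -u <uid> -P 1 lists its processes that have been
# reparented to init (PID 1), i.e. ones presumably left behind by an earlier build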
for i in $PBUIDS ; do
	for PROCESS in $(pgrep -u $i -P 1 || true) ; do
		# faked-sysv comes and goes...
		grep -E "^[[:space:]]*$PROCESS " $HAYSTACK | grep -v faked-sysv >> $RESULT 2> /dev/null || true
	done
done
if [ -s $RESULT ] ; then
	echo
	echo "Warning: processes found which should not be there, maybe killing them now:"
	cat $RESULT
	echo
	for PROCESS in $(awk '{print $1}' $RESULT) ; do
		AGE=$(ps -p $PROCESS -o etimes= || echo 0)
		# a single build may take at most half a day (43200 seconds), so...
		if [ $AGE -gt 43200 ] ; then
			sudo kill -9 $PROCESS 2>&1
			echo "'kill -9 $PROCESS' done."
		else
			echo "Did not kill $PROCESS as it is only $AGE seconds old."
		fi
	done
	echo
	DIRTY=true
fi
rm $HAYSTACK $RESULT

# find packages whose build didn't end correctly
QUERY="
	SELECT s.id, s.name, p.date_scheduled, p.date_build_started
		FROM schedule AS p JOIN sources AS s ON p.package_id=s.id
		WHERE p.date_scheduled != ''
		AND p.date_build_started != ''
		AND p.date_build_started < datetime('now', '-36 hours')
		ORDER BY p.date_scheduled
	"
PACKAGES=$(mktemp)
sqlite3 -init $INIT ${PACKAGES_DB} "$QUERY" > $PACKAGES 2> /dev/null || echo "Warning: SQL query '$QUERY' failed." 
if grep -q '|' $PACKAGES ; then
	echo
	echo "Warning: packages found where the build was started more than 36h ago:"
	echo "pkg_id|name|date_scheduled|date_build_started"
	echo
	cat $PACKAGES
	echo
	for PKG in $(cat $PACKAGES | cut -d "|" -f1) ; do
		echo "sqlite3 ${PACKAGES_DB}  \"DELETE FROM schedule WHERE package_id = '$PKG';\""
		sqlite3 -init $INIT ${PACKAGES_DB} "DELETE FROM schedule WHERE package_id = '$PKG';"
	done
	echo "Packages have been removed from scheduling."
	echo
	DIRTY=true
fi
rm $PACKAGES

# find packages which have been removed from unstable
# commented out for now. This can't be done using the database anymore
QUERY="SELECT source_packages.name FROM source_packages
		WHERE source_packages.name NOT IN
		(SELECT sources.name FROM sources)
	LIMIT 25"
#PACKAGES=$(sqlite3 -init $INIT ${PACKAGES_DB} "$QUERY")
PACKAGES=''
if [ ! -z "$PACKAGES" ] ; then
	echo
	echo "Removing these removed packages from database:"
	echo $PACKAGES
	echo
	QUERY="DELETE FROM source_packages
			WHERE source_packages.name NOT IN
			(SELECT sources.name FROM sources)
		LIMIT 25"
	sqlite3 -init $INIT ${PACKAGES_DB} "$QUERY"
	cd /var/lib/jenkins/userContent
	for i in $PACKAGES ; do
		find rb-pkg/ rbuild/ notes/ dbd/ -name "${i}_*" -exec rm -v {} \;
	done
	cd -
fi

# delete jenkins html logs from reproducible_builder_* jobs as they are mostly redundant
# (they only provide the extended value of parsed console output, which we don't need here.)
OLDSTUFF=$(find /var/lib/jenkins/jobs/reproducible_builder_* -maxdepth 3 -mtime +0 -name log_content.html  -exec rm -v {} \; | wc -l)
if [ "$OLDSTUFF" != "0" ] ; then
	echo
	echo "Removed $OLDSTUFF jenkins html logs."
	echo
fi

# remove artifacts older than 3 days
ARTIFACTS=$(find /var/lib/jenkins/userContent/artifacts/* -maxdepth 0 -type d -mtime +3 -exec ls -lad {} \; || true)
if [ ! -z "$ARTIFACTS" ] ; then
	echo
	echo "Removed old artifacts:"
	find /var/lib/jenkins/userContent/artifacts/* -maxdepth 0 -type d -mtime +3 -exec rm -rv {} \;
	echo
fi

if ! $DIRTY ; then
	echo "Everything seems to be fine."
	echo
fi