language: c
addons:
  apt:
    packages:
      - protobuf-c-compiler
      - libprotobuf-c-dev
      - libprotobuf-c1
      - libsasl2-dev
      - libsasl2-2
      - check
      - curl
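# Rough map of the packages, inferred from how this build uses them: protobuf-c
# compiles the HDFS wire-protocol definitions, libsasl2 backs SASL
# authentication, check is the C unit-test framework behind the CK_* variables
# below, and curl fetches the Hadoop tarball in `install`.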
install:
  # Only install valgrind if we're going to use it
  - |
    if [ "$HDFS_TEST_DEBUGGER" != "" ] ; then
      sudo -E apt-get -yq --no-install-suggests --no-install-recommends $(travis_apt_get_options) install valgrind
    fi
  # Only download Hadoop if we're going to test against the Hadoop CLI MiniCluster
  - |
    if [ "$CK_RUN_SUITE" = "" ] ; then
      HADOOP_TGZ=$(curl https://dlcdn.apache.org/hadoop/common/stable/ | sed -nEe 's/.*href="(hadoop-([0-9]+\.){3}tar\.gz)".*/\1/p')
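      # The regex relies on the stable tarball being named hadoop-X.Y.Z.tar.gz:
      # ([0-9]+\.){3} consumes "X.Y.Z." (three digit groups, each with a
      # trailing dot), so it joins directly onto "tar\.gz".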
      if [ -z "$HADOOP_TGZ" ] ; then echo "Unable to fetch stable Hadoop name"; exit 1; fi
      HADOOP_VER="${HADOOP_TGZ%.tar.gz}"
      echo "Fetching latest stable Hadoop version $HADOOP_VER"
      curl -O "https://dlcdn.apache.org/hadoop/common/stable/$HADOOP_TGZ" \
        && tar xzf "$HADOOP_TGZ" \
        && sudo mv "$HADOOP_VER" /usr/local/hadoop
    fi
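# After install, the Hadoop distribution lives at /usr/local/hadoop, which is
# where the script stage below expects to find bin/mapred.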
env:
  global:
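    # CK_* is consumed by the check unit-test framework; the HDFS_TEST_*
    # values presumably point the project's tests at the minicluster started
    # in `script` (HDFS_TEST_NODE_PORT is reused as its -nnport below).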
    CK_VERBOSITY: verbose
    HDFS_TEST_NODE_ADDRESS: localhost
    HDFS_TEST_NODE_PORT: 9000
    HDFS_TEST_USER: travis
    HADOOP_ROOT_LOGGER: WARN,DRFA # decrease cluster's log level and don't print to console
# Travis doesn't allow dist to be a sequence, so in order to test on both
# Ubuntu 16.04 and 18.04 we need to explicitly describe the build matrix
jobs:
  include:
    - dist: xenial
      compiler: gcc
      arch: amd64
      env:
        # gcc on 16.04 doesn't support -Wnull-dereference, so clear WARNS_NEW
        WARNS_NEW:
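        # -fsanitize=address enables AddressSanitizer; -fno-omit-frame-pointer
        # keeps frame pointers so ASan reports have usable stack traces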
        CFLAGS: -fsanitize=address -fno-omit-frame-pointer
        LDFLAGS: -fsanitize=address
    - dist: xenial
      compiler: clang
      arch: amd64
      env:
        CFLAGS: -fsanitize=address -fno-omit-frame-pointer
        LDFLAGS: -fsanitize=address
    - dist: bionic
      compiler: gcc
      arch: amd64
      env:
        CFLAGS: -fsanitize=address -fno-omit-frame-pointer
        LDFLAGS: -fsanitize=address
    # Run valgrind on one of the test setups
    - dist: bionic
      compiler: gcc
      arch: amd64
      env:
        - HDFS_TEST_DEBUGGER="valgrind --show-leak-kinds=all --leak-check=full --error-exitcode=1"
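        # --error-exitcode=1 makes valgrind fail the build when it finds
        # errors, instead of merely reporting them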
    - dist: bionic
      compiler: clang
      arch: amd64
      env:
        CFLAGS: -fsanitize=address -fno-omit-frame-pointer
        LDFLAGS: -fsanitize=address
    # arm64 allows us to test the armv8 hardware crc32c implementation
    - dist: xenial
      compiler: gcc
      arch: arm64
      env:
        # gcc on 16.04 doesn't support -Wnull-dereference, so clear WARNS_NEW
        WARNS_NEW:
        # gcc has a false positive for __has_attribute(ifunc) on arm64 for
        # Ubuntu 16.04, so force use of the non-ifunc fallback code
        CFLAGS: -DNO_IFUNC -fsanitize=address -fno-omit-frame-pointer
        LDFLAGS: -fsanitize=address
        # don't spin up the cluster and only run the "unit" test suite on arm64
        CK_RUN_SUITE: unit
    - dist: xenial
      compiler: clang
      arch: arm64
      env:
        # ASan hangs on arm64 xenial with clang
        # don't spin up the cluster and only run the "unit" test suite on arm64
        CK_RUN_SUITE: unit
    - dist: bionic
      compiler: gcc
      arch: arm64
      env:
        # gcc has a false positive for __has_attribute(ifunc) on arm64 for
        # Ubuntu 18.04, so force use of the non-ifunc fallback code
        CFLAGS: -DNO_IFUNC -fsanitize=address -fno-omit-frame-pointer
        LDFLAGS: -fsanitize=address
        # don't spin up the cluster and only run the "unit" test suite on arm64
        CK_RUN_SUITE: unit
    - dist: bionic
      compiler: clang
      arch: arm64
      env:
        CFLAGS: -fsanitize=address -fno-omit-frame-pointer
        LDFLAGS: -fsanitize=address
        # don't spin up the cluster and only run the "unit" test suite on arm64
        CK_RUN_SUITE: unit
    # s390x allows us to test the big endian software crc32c implementation
    - dist: xenial
      compiler: gcc
      arch: s390x
      env:
        # ASan not supported on s390x xenial
        # gcc on 16.04 doesn't support -Wnull-dereference, so clear WARNS_NEW
        WARNS_NEW:
        # don't spin up the cluster and only run the "unit" test suite on s390x
        CK_RUN_SUITE: unit
    - dist: xenial
      compiler: clang
      arch: s390x
      env:
        # ASan not supported on s390x xenial
        # clang's -fstack-protector does not properly link on s390x xenial
        CFLAGS: -fno-stack-protector
        # don't spin up the cluster and only run the "unit" test suite on s390x
        CK_RUN_SUITE: unit
    - dist: bionic
      compiler: gcc
      arch: s390x
      env:
        CFLAGS: -fsanitize=address -fno-omit-frame-pointer
        LDFLAGS: -fsanitize=address
        # don't spin up the cluster and only run the "unit" test suite on s390x
        CK_RUN_SUITE: unit
    - dist: bionic
      compiler: clang
      arch: s390x
      env:
        CFLAGS: -fsanitize=address -fno-omit-frame-pointer
        LDFLAGS: -fsanitize=address
        # don't spin up the cluster and only run the "unit" test suite on s390x
        CK_RUN_SUITE: unit
script:
  - make -j$(nproc) all
  - |
    # Only spin up the minicluster if we're testing against it (and if we downloaded it)
    if [ "$CK_RUN_SUITE" = "" ] ; then
      # There's a bug where the junit jar needs to be explicitly added to the classpath for the minicluster to work
      HADOOP_JUNIT_JAR_PATH=$(find /usr/local/hadoop -name "junit*.jar")
      export HADOOP_CLASSPATH="${HADOOP_CLASSPATH}:${HADOOP_JUNIT_JAR_PATH}"
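      # Minicluster flags, as we understand the Hadoop CLI MiniCluster: -nomr
      # skips the MapReduce side, -format formats the namenode storage, -nnport
      # pins the namenode port (matching HDFS_TEST_NODE_PORT above), and
      # -datanodes sets how many datanodes to start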
      /usr/local/hadoop/bin/mapred minicluster -nomr -format -nnport $HDFS_TEST_NODE_PORT -datanodes 4 &
      CLUSTER_PID=$!
      # Wait until the cluster is up by polling the namenode port with netcat
      cnt=0
      while ! nc -vz localhost $HDFS_TEST_NODE_PORT ; do
        echo "$cnt"
        cnt=$((cnt + 1))
        if [ "$cnt" -gt "5" ] ; then exit 1; fi
        sleep 5;
      done
      # Wait a bit longer to ensure the rest of the cluster has finished setting up
      sleep 10
    fi
  - make test
  - if [ "$CK_RUN_SUITE" = "" ] ; then kill $CLUSTER_PID; fi