Use different users for HDFS- and MapReduce-related daemons.

This way MapReduce tasks cannot write directly to files owned by the hdfs user.
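
For illustration only (not part of this commit): with the new defaults, enabling all five daemons on a single node via /etc/rc.conf runs the HDFS daemons as the hdfs user and the MapReduce daemons as the mapred user, both in the hadoop group; the per-daemon *_user variables documented in the rc scripts below can still override this.

# /etc/rc.conf -- hypothetical single-node example
namenode_enable="YES"            # runs as hdfs by default
datanode_enable="YES"            # runs as hdfs by default
secondarynamenode_enable="YES"   # runs as hdfs by default
jobtracker_enable="YES"          # runs as mapred by default
tasktracker_enable="YES"         # runs as mapred by default
# Override if needed, e.g.:
# datanode_user="hdfs"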
Dmitry Sivachenko 2014-07-01 14:55:58 +00:00
parent 4f03e22963
commit a46b7d350b
Notes: svn2git 2021-03-31 03:12:20 +00:00
svn path=/head/; revision=360026
8 changed files with 26 additions and 22 deletions

UIDs

@@ -257,13 +257,14 @@ dnetfold:*:943:943::0:0:& user:/nonexistent:/usr/sbin/nologin
 bbs:*:944:944::0:0:& user:/nonexistent:/usr/sbin/nologin
 dbxml:*:945:945::0:0:& user:/nonexistent:/usr/sbin/nologin
 sybase:*:946:946::0:0:& user:/usr/local/sybase-ase:/usr/sbin/nologin
+mapred:*:947:955::0:0:Hadoop MapReduce user:/nonexistent:/usr/sbin/nologin
 dnrd:*:948:948::0:0:& user:/nonexistent:/usr/sbin/nologin
 zenoss:*:949:949::0:0:& user:/usr/local/zenoss:/bin/sh
 openerpd:*:951:951::0:0:Openerpd user:/nonexistent:/usr/sbin/nologin
 bitten-slave:*:952:952:daemon:0:0:Bitten slave user:/var/lib/bitten-slave:/usr/sbin/nologin
 _neubot:*:953:953::0:0:neubot daemon:/nonexistent:/usr/sbin/nologin
 oops:*:954:65534::0:0:oops user:/nonexistent:/usr/sbin/nologin
-hadoop:*:955:955::0:0:hadoop user:/nonexistent:/usr/sbin/nologin
+hdfs:*:955:955::0:0:Hadoop HDFS user:/nonexistent:/usr/sbin/nologin
 pandora:*:956:956::0:0:Pandora FMS user:/usr/local/etc/pandora/home:/usr/sbin/nologin
 razorback:*:957:957::0:0:Razorback user:/var/run/razorback:/usr/sbin/nologin
 gnunet:*:958:958::0:0:GNUnet daemon:/var/lib/gnunet:/usr/sbin/nologin


@@ -30,9 +30,10 @@ OPTIONS_DEFINE= DOCS
 HADOOP_LOGDIR= /var/log/${PORTNAME}
 HADOOP_RUNDIR= /var/run/${PORTNAME}
-HADOOP_USER= hadoop
+HDFS_USER= hdfs
+MAPRED_USER= mapred
 HADOOP_GROUP= hadoop
-USERS= ${HADOOP_USER}
+USERS= ${HDFS_USER} ${MAPRED_USER}
 GROUPS= ${HADOOP_GROUP}
 SUB_FILES= hadoop 000.java_home.env
@@ -41,9 +42,11 @@ USE_RC_SUBR= tasktracker jobtracker datanode namenode secondarynamenode
 PLIST_SUB= PORTVERSION="${PORTVERSION}" \
 HADOOP_LOGDIR="${HADOOP_LOGDIR}" \
 HADOOP_RUNDIR="${HADOOP_RUNDIR}" \
-HADOOP_USER="${HADOOP_USER}" \
+HDFS_USER="${HDFS_USER}" \
+MAPRED_USER="${MAPRED_USER}" \
 HADOOP_GROUP="${HADOOP_GROUP}"
-SUB_LIST= HADOOP_USER="${HADOOP_USER}" \
+SUB_LIST= HDFS_USER="${HDFS_USER}" \
+MAPRED_USER="${MAPRED_USER}" \
 HADOOP_GROUP="${HADOOP_GROUP}" \
 JAVA_HOME="${JAVA_HOME}"
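
How the new user names reach the rc scripts (a sketch, not part of the diff): the %%TOKEN%% placeholders in the USE_RC_SUBR and SUB_FILES templates are replaced with the SUB_LIST values at build time, so a template line in the datanode rc script ends up with the literal user name once installed:

# in the datanode rc script template
: ${datanode_user:=%%HDFS_USER%%}
# in the installed rc script, after SUB_LIST substitution
: ${datanode_user:=hdfs}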


@@ -8,9 +8,9 @@
 #
 # Add the following lines to /etc/rc.conf to enable this service:
 #
-# datanode_enable (bool): Set to NO by default.
+# datanode_enable (bool): Set to NO by default.
 # Set it to YES to enable datanode.
-# datanode_user (str): Set to %%HADOOP_USER%% by default.
+# datanode_user (str): Set to %%HDFS_USER%% by default.
 # Set to a valid user to change default
 # datanode user.
 # datanode_java_home (str): Unset by default.
@@ -28,7 +28,7 @@ rcvar=datanode_enable
 load_rc_config "${name}"
 : ${datanode_enable:=NO}
-: ${datanode_user:=%%HADOOP_USER%%}
+: ${datanode_user:=%%HDFS_USER%%}
 : ${datanode_group:=%%HADOOP_GROUP%%}
 command="%%DATADIR%%/bin/hadoop-daemon.sh"


@@ -8,9 +8,9 @@
 #
 # Add the following lines to /etc/rc.conf to enable this service:
 #
-# jobtracker_enable (bool): Set to NO by default.
+# jobtracker_enable (bool): Set to NO by default.
 # Set it to YES to enable jobtracker.
-# jobtracker_user (str): Set to %%HADOOP_USER%% by default.
+# jobtracker_user (str): Set to %%MAPRED_USER%% by default.
 # Set to a valid user to change default
 # jobtracker user.
 # jobtracker_java_home (str): Unset by default.
@@ -28,7 +28,7 @@ rcvar=jobtracker_enable
 load_rc_config "${name}"
 : ${jobtracker_enable:=NO}
-: ${jobtracker_user:=%%HADOOP_USER%%}
+: ${jobtracker_user:=%%MAPRED_USER%%}
 : ${jobtracker_group:=%%HADOOP_GROUP%%}
 command="%%DATADIR%%/bin/hadoop-daemon.sh"


@@ -8,9 +8,9 @@
 #
 # Add the following lines to /etc/rc.conf to enable this service:
 #
-# namenode_enable (bool): Set to NO by default.
+# namenode_enable (bool): Set to NO by default.
 # Set it to YES to enable namenode.
-# namenode_user (str): Set to %%HADOOP_USER%% by default.
+# namenode_user (str): Set to %%HDFS_USER%% by default.
 # Set to a valid user to change default
 # namenode user.
 # namenode_java_home (str): Unset by default.
@@ -28,7 +28,7 @@ rcvar=namenode_enable
 load_rc_config "${name}"
 : ${namenode_enable:=NO}
-: ${namenode_user:=%%HADOOP_USER%%}
+: ${namenode_user:=%%HDFS_USER%%}
 : ${namenode_group:=%%HADOOP_GROUP%%}
 command="%%DATADIR%%/bin/hadoop-daemon.sh"


@@ -10,7 +10,7 @@
 #
 # secondarynamenode_enable (bool): Set to NO by default.
 # Set it to YES to enable secondarynamenode.
-# secondarynamenode_user (str): Set to %%HADOOP_USER%% by default.
+# secondarynamenode_user (str): Set to %%HDFS_USER%% by default.
 # Set to a valid user to change default
 # secondarynamenode user.
 # secondarynamenode_java_home (str): Unset by default.
@@ -28,7 +28,7 @@ rcvar=secondarynamenode_enable
 load_rc_config "${name}"
 : ${secondarynamenode_enable:=NO}
-: ${secondarynamenode_user:=%%HADOOP_USER%%}
+: ${secondarynamenode_user:=%%HDFS_USER%%}
 : ${secondarynamenode_group:=%%HADOOP_GROUP%%}
 command="%%DATADIR%%/bin/hadoop-daemon.sh"


@@ -10,7 +10,7 @@
 #
 # tasktracker_enable (bool): Set to NO by default.
 # Set it to YES to enable tasktracker.
-# tasktracker_user (str): Set to %%HADOOP_USER%% by default.
+# tasktracker_user (str): Set to %%MAPRED_USER%% by default.
 # Set to a valid user to change default
 # tasktracker user.
 # tasktracker_java_home (str): Unset by default.
@@ -28,7 +28,7 @@ rcvar=tasktracker_enable
 load_rc_config "${name}"
 : ${tasktracker_enable:=NO}
-: ${tasktracker_user:=%%HADOOP_USER%%}
+: ${tasktracker_user:=%%MAPRED_USER%%}
 : ${tasktracker_group:=%%HADOOP_GROUP%%}
 command="%%DATADIR%%/bin/hadoop-daemon.sh"


@@ -301,10 +301,10 @@ lib/libhdfs.so
 lib/libhdfs.la
 @exec mkdir -p %D/%%DATADIR%%/webapps/secondary/WEB-INF
 @exec mkdir -p %D/%%DATADIR%%/contrib/hdfsproxy/logs
-@exec install -d -o %%HADOOP_USER%% -g %%HADOOP_GROUP%% -m 0755 %%HADOOP_LOGDIR%%
-@exec install -d -o %%HADOOP_USER%% -g %%HADOOP_GROUP%% -m 0755 %%HADOOP_RUNDIR%%
-@unexec rmdir %%HADOOP_RUNDIR%% 2>/dev/null || true
-@unexec rmdir %%HADOOP_LOGDIR%% 2>/dev/null || true
+@exec install -d -o root -g %%HADOOP_GROUP%% -m 0775 %%HADOOP_LOGDIR%%
+@exec install -d -o root -g %%HADOOP_GROUP%% -m 0775 %%HADOOP_RUNDIR%%
+@unexec rm -d %%HADOOP_RUNDIR%% 2>/dev/null || true
+@unexec rm -d %%HADOOP_LOGDIR%% 2>/dev/null || true
 @dirrm %%EXAMPLESDIR%%/conf
 @dirrm %%EXAMPLESDIR%%
 @dirrm %%DATADIR%%/lib/native/Linux-i386-32
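
A note on the pkg-plist change above (illustration, not part of the commit): the log and run directories are now created root-owned, group %%HADOOP_GROUP%%, mode 0775, so both the hdfs and mapred users (whose primary group, GID 955, is presumably the hadoop group) can write pid files and logs without either of them owning the directories. Assuming PORTNAME is hadoop, the installed directories would look roughly like:

# expected ownership and mode (illustrative, not actual ls output)
drwxrwxr-x  root  hadoop  /var/log/hadoop
drwxrwxr-x  root  hadoop  /var/run/hadoop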