-rw-r--r--  Documentation/filesystems/Locking | 3
-rw-r--r--  Documentation/filesystems/proc.txt | 4
-rw-r--r--  Documentation/filesystems/vfs.txt | 4
-rw-r--r--  Documentation/networking/ip-sysctl.txt | 7
-rw-r--r--  android/configs/android-base.cfg | 2
-rw-r--r--  arch/arm/configs/flo_defconfig | 9
-rw-r--r--  arch/arm/include/asm/pmu.h | 7
-rw-r--r--  arch/arm/kernel/perf_event.c | 86
-rw-r--r--  arch/arm/kernel/ptrace.c | 7
-rw-r--r--  arch/arm/mach-msm/pmu.c | 9
-rw-r--r--  arch/um/os-Linux/start_up.c | 2
-rw-r--r--  build.config | 12
-rw-r--r--  drivers/gpu/msm/adreno_drawctxt.c | 9
-rw-r--r--  drivers/misc/Kconfig | 6
-rw-r--r--  drivers/misc/Makefile | 1
-rw-r--r--  drivers/misc/uid_cputime.c | 238
-rw-r--r--  drivers/mmc/core/core.c | 8
-rw-r--r--  drivers/mmc/core/host.c | 4
-rw-r--r--  drivers/staging/android/Kconfig | 14
-rw-r--r--  drivers/staging/android/Makefile | 2
-rw-r--r--  drivers/staging/android/alarm-dev.c | 373
-rw-r--r--  drivers/staging/android/android_alarm.h | 62
-rw-r--r--  drivers/staging/android/logger.c | 789
-rw-r--r--  drivers/staging/android/logger.h | 70
-rwxr-xr-x  drivers/staging/prima/CORE/HDD/src/wlan_hdd_assoc.c | 9
-rw-r--r--  drivers/staging/prima/CORE/HDD/src/wlan_hdd_early_suspend.c | 8
-rw-r--r--  drivers/staging/prima/CORE/MAC/inc/qwlan_version.h | 4
-rw-r--r--  drivers/usb/gadget/android.c | 62
-rw-r--r--  drivers/usb/gadget/f_audio_source.c | 92
-rw-r--r--  drivers/usb/gadget/f_midi.c | 42
-rw-r--r--  drivers/video/msm/vidc/1080p/ddl/vcd_ddl.h | 2
-rw-r--r--  drivers/video/msm/vidc/1080p/ddl/vcd_ddl_helper.c | 19
-rw-r--r--  drivers/video/msm/vidc/1080p/ddl/vcd_ddl_properties.c | 8
-rw-r--r--  fs/dcache.c | 6
-rw-r--r--  fs/file_table.c | 17
-rw-r--r--  fs/fs_struct.c | 32
-rw-r--r--  fs/fuse/file.c | 4
-rw-r--r--  fs/inode.c | 56
-rw-r--r--  fs/internal.h | 4
-rw-r--r--  fs/mount.h | 16
-rw-r--r--  fs/namei.c | 34
-rw-r--r--  fs/namespace.c | 559
-rw-r--r--  fs/ncpfs/file.c | 6
-rw-r--r--  fs/ntfs/file.c | 4
-rw-r--r--  fs/pipe.c | 7
-rw-r--r--  fs/pnode.c | 9
-rw-r--r--  fs/pnode.h | 1
-rw-r--r--  fs/proc/base.c | 43
-rw-r--r--  fs/proc/generic.c | 39
-rw-r--r--  fs/proc/inode.c | 6
-rw-r--r--  fs/proc/namespaces.c | 181
-rw-r--r--  fs/proc/task_mmu.c | 17
-rw-r--r--  fs/proc_namespace.c | 11
-rw-r--r--  fs/splice.c | 6
-rw-r--r--  fs/xfs/xfs_file.c | 7
-rw-r--r--  include/linux/fs.h | 19
-rw-r--r--  include/linux/hashtable.h | 192
-rw-r--r--  include/linux/ipc_namespace.h | 2
-rw-r--r--  include/linux/ipv6.h | 2
-rw-r--r--  include/linux/lglock.h | 179
-rw-r--r--  include/linux/lsm_audit.h | 7
-rw-r--r--  include/linux/mm.h | 5
-rw-r--r--  include/linux/mnt_namespace.h | 3
-rw-r--r--  include/linux/pid_namespace.h | 1
-rw-r--r--  include/linux/proc_fs.h | 25
-rw-r--r--  include/linux/security.h | 12
-rw-r--r--  include/linux/user_namespace.h | 1
-rw-r--r--  include/linux/utsname.h | 1
-rw-r--r--  include/net/net_namespace.h | 2
-rw-r--r--  init/version.c | 2
-rw-r--r--  ipc/msgutil.c | 2
-rw-r--r--  ipc/namespace.c | 16
-rw-r--r--  kernel/Makefile | 2
-rw-r--r--  kernel/audit_tree.c | 10
-rw-r--r--  kernel/exit.c | 4
-rw-r--r--  kernel/lglock.c | 89
-rw-r--r--  kernel/nsproxy.c | 2
-rw-r--r--  kernel/pid.c | 1
-rw-r--r--  kernel/pid_namespace.c | 5
-rw-r--r--  kernel/printk.c | 3
-rw-r--r--  kernel/sched/core.c | 24
-rw-r--r--  kernel/user.c | 2
-rw-r--r--  kernel/user_namespace.c | 8
-rw-r--r--  kernel/utsname.c | 17
-rw-r--r--  lib/memory_alloc.c | 16
-rw-r--r--  mm/filemap.c | 4
-rw-r--r--  mm/filemap_xip.c | 4
-rw-r--r--  mm/mlock.c | 4
-rw-r--r--  net/core/neighbour.c | 3
-rw-r--r--  net/core/net_namespace.c | 24
-rw-r--r--  net/ipv4/ping.c | 13
-rw-r--r--  net/ipv4/tcp.c | 2
-rw-r--r--  net/ipv6/addrconf.c | 19
-rw-r--r--  net/ipv6/ping.c | 5
-rw-r--r--  net/ipv6/route.c | 2
-rw-r--r--  security/capability.c | 4
-rw-r--r--  security/lsm_audit.c | 15
-rw-r--r--  security/security.c | 4
-rw-r--r--  security/selinux/avc.c | 429
-rw-r--r--  security/selinux/hooks.c | 60
-rw-r--r--  security/selinux/include/avc.h | 5
-rw-r--r--  security/selinux/include/security.h | 37
-rw-r--r--  security/selinux/nlmsgtab.c | 9
-rw-r--r--  security/selinux/ss/avtab.c | 94
-rw-r--r--  security/selinux/ss/avtab.h | 25
-rw-r--r--  security/selinux/ss/conditional.c | 32
-rw-r--r--  security/selinux/ss/conditional.h | 6
-rw-r--r--  security/selinux/ss/constraint.h | 1
-rw-r--r--  security/selinux/ss/context.h | 20
-rw-r--r--  security/selinux/ss/mls.c | 24
-rw-r--r--  security/selinux/ss/policydb.c | 145
-rw-r--r--  security/selinux/ss/policydb.h | 25
-rw-r--r--  security/selinux/ss/services.c | 245
-rw-r--r--  security/selinux/ss/services.h | 6
-rw-r--r--  security/smack/smack_lsm.c | 4
-rw-r--r--  security/tomoyo/common.h | 2
-rw-r--r--  security/tomoyo/mount.c | 5
-rw-r--r--  security/tomoyo/tomoyo.c | 4
118 files changed, 2822 insertions(+), 2152 deletions(-)
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index 4fca82e5276..d5a269a51a9 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -62,6 +62,7 @@ ata *);
int (*removexattr) (struct dentry *, const char *);
void (*truncate_range)(struct inode *, loff_t, loff_t);
int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, u64 len);
+ void (*update_time)(struct inode *, struct timespec *, int);
locking rules:
all may block
@@ -89,6 +90,8 @@ listxattr: no
removexattr: yes
truncate_range: yes
fiemap: no
+update_time: no
+
Additionally, ->rmdir(), ->unlink() and ->rename() have ->i_mutex on
victim.
cross-directory ->rename() has (per-superblock) ->s_vfs_rename_sem.
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 87fb3e9ccd7..8051b91296c 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -445,6 +445,10 @@ To clear the bits for the file mapped pages associated with the process
> echo 3 > /proc/PID/clear_refs
Any other value written to /proc/PID/clear_refs will have no effect.
+To reset the peak resident set size ("high water mark") to the process's
+current value:
+ > echo 5 > /proc/PID/clear_refs
+
The /proc/pid/pagemap gives the PFN, which can be used to find the pageflags
using /proc/kpageflags and number of times a page is mapped using
/proc/kpagecount. For detailed explanation, see Documentation/vm/pagemap.txt.
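
A minimal user-space sketch of the new clear_refs value documented above (not part of this patch): write "5" to /proc/PID/clear_refs and read the peak RSS back from the VmHWM field of /proc/PID/status.

/* Reset this process's peak RSS ("high water mark") and print the new VmHWM. */
#include <stdio.h>
#include <string.h>

static void reset_and_print_hwm(void)
{
	char line[256];
	FILE *f = fopen("/proc/self/clear_refs", "w");

	if (f) {
		fputs("5\n", f);	/* 5 = reset peak RSS to current RSS */
		fclose(f);
	}

	f = fopen("/proc/self/status", "r");
	if (!f)
		return;
	while (fgets(line, sizeof(line), f))
		if (strncmp(line, "VmHWM:", 6) == 0)
			fputs(line, stdout);
	fclose(f);
}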
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index 0d049202808..b2aa722e5ea 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -364,6 +364,7 @@ struct inode_operations {
ssize_t (*listxattr) (struct dentry *, char *, size_t);
int (*removexattr) (struct dentry *, const char *);
void (*truncate_range)(struct inode *, loff_t, loff_t);
+ void (*update_time)(struct inode *, struct timespec *, int);
};
Again, all methods are called without any locks being held, unless
@@ -475,6 +476,9 @@ otherwise noted.
truncate_range: a method provided by the underlying filesystem to truncate a
range of blocks , i.e. punch a hole somewhere in a file.
+ update_time: called by the VFS to update a specific time or the i_version of
+ an inode. If this is not defined the VFS will update the inode itself
+ and call mark_inode_dirty_sync.
The Address Space Object
========================
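
Assuming the prototype shown above, a hedged sketch of what a filesystem-provided ->update_time() might look like. It simply mirrors the documented VFS default (update the requested fields, then mark_inode_dirty_sync); the function name is illustrative, not part of this patch.

#include <linux/fs.h>

/* Illustrative only: update the times/version selected by 'flags' and
 * push the inode onto the dirty list, as the VFS would do by default. */
static void examplefs_update_time(struct inode *inode, struct timespec *time,
				  int flags)
{
	if (flags & S_ATIME)
		inode->i_atime = *time;
	if (flags & S_MTIME)
		inode->i_mtime = *time;
	if (flags & S_CTIME)
		inode->i_ctime = *time;
	if (flags & S_VERSION)
		inode_inc_iversion(inode);

	mark_inode_dirty_sync(inode);
}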
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 77f5f55ffcd..bf38b80b06d 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -1199,6 +1199,13 @@ router_solicitations - INTEGER
routers are present.
Default: 3
+use_oif_addrs_only - BOOLEAN
+ When enabled, the candidate source addresses for destinations
+ routed via this interface are restricted to the set of addresses
+ configured on this interface (vis. RFC 6724, section 4).
+
+ Default: false
+
use_tempaddr - INTEGER
Preference for Privacy Extensions (RFC3041).
<= 0 : disable Privacy Extensions
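
The use_oif_addrs_only knob described above is an ordinary per-interface IPv6 conf entry, so it can be toggled through procfs. A hedged user-space helper (the interface name passed in, e.g. "eth0", is only a placeholder):

#include <stdio.h>

/* Equivalent to: echo 1 > /proc/sys/net/ipv6/conf/<ifname>/use_oif_addrs_only */
static int set_use_oif_addrs_only(const char *ifname, int enable)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/proc/sys/net/ipv6/conf/%s/use_oif_addrs_only", ifname);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%d\n", enable ? 1 : 0);
	fclose(f);
	return 0;
}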
diff --git a/android/configs/android-base.cfg b/android/configs/android-base.cfg
index 225f3e28590..94d7aff12b7 100644
--- a/android/configs/android-base.cfg
+++ b/android/configs/android-base.cfg
@@ -4,8 +4,6 @@
# CONFIG_OABI_COMPAT is not set
CONFIG_ANDROID=y
CONFIG_ANDROID_BINDER_IPC=y
-CONFIG_ANDROID_INTF_ALARM_DEV=y
-CONFIG_ANDROID_LOGGER=y
CONFIG_ANDROID_LOW_MEMORY_KILLER=y
CONFIG_ASHMEM=y
CONFIG_BLK_DEV_DM=y
diff --git a/arch/arm/configs/flo_defconfig b/arch/arm/configs/flo_defconfig
index 8f76cd3a8c1..d5924bc9b02 100644
--- a/arch/arm/configs/flo_defconfig
+++ b/arch/arm/configs/flo_defconfig
@@ -92,6 +92,7 @@ CONFIG_SCHED_MC=y
CONFIG_PREEMPT=y
CONFIG_AEABI=y
CONFIG_HIGHMEM=y
+CONFIG_SECCOMP=y
CONFIG_CC_STACKPROTECTOR=y
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
@@ -130,7 +131,6 @@ CONFIG_IPV6_MIP6=y
CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_IPV6_SUBTREES=y
CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_LOG=y
CONFIG_NF_CONNTRACK=y
CONFIG_NF_CONNTRACK_SECMARK=y
CONFIG_NF_CONNTRACK_EVENTS=y
@@ -153,6 +153,7 @@ CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
CONFIG_NETFILTER_XT_TARGET_LOG=y
CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
CONFIG_NETFILTER_XT_TARGET_SECMARK=y
CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
@@ -244,6 +245,7 @@ CONFIG_UID_STAT=y
CONFIG_PMIC8XXX_VIBRATOR=y
CONFIG_QSEECOM=y
CONFIG_USB_HSIC_SMSC_HUB=y
+CONFIG_UID_CPUTIME=y
CONFIG_SLIMPORT_ANX7808=y
CONFIG_SCSI=y
CONFIG_SCSI_TGT=y
@@ -303,6 +305,8 @@ CONFIG_STM_LIS3DH=y
CONFIG_N_SMUX=y
CONFIG_N_SMUX_LOOPBACK=y
CONFIG_SMUX_CTL=y
+# CONFIG_DEVMEM is not set
+# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_MSM_HS=y
CONFIG_SERIAL_MSM_HSL=y
CONFIG_SERIAL_MSM_HSL_CONSOLE=y
@@ -535,6 +539,5 @@ CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
CONFIG_LSM_MMAP_MIN_ADDR=4096
CONFIG_SECURITY_SELINUX=y
-CONFIG_CRYPTO_TWOFISH=y
-CONFIG_SECCOMP=y
CONFIG_CRYPTO_AES_ARM=y
+CONFIG_CRYPTO_TWOFISH=y
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index 921ba8140d9..d0a37785ece 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -25,6 +25,12 @@ enum arm_pmu_type {
ARM_NUM_PMU_DEVICES,
};
+enum arm_pmu_state {
+ ARM_PMU_STATE_OFF = 0,
+ ARM_PMU_STATE_GOING_DOWN,
+ ARM_PMU_STATE_RUNNING,
+};
+
/*
* struct arm_pmu_platdata - ARM PMU platform data
*
@@ -115,6 +121,7 @@ struct arm_pmu {
cpumask_t active_irqs;
const char *name;
int num_events;
+ int pmu_state;
atomic_t active_events;
struct mutex reserve_mutex;
u64 max_period;
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index ff8b24c1a38..2bbaec621ad 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -413,6 +413,12 @@ armpmu_release_hardware(struct arm_pmu *armpmu)
int i, irq, irqs;
struct platform_device *pmu_device = armpmu->plat_device;
+ /*
+ * If a cpu comes online during this function, do not enable its irq.
+ * If a cpu goes offline, it should disable its irq.
+ */
+ armpmu->pmu_state = ARM_PMU_STATE_GOING_DOWN;
+
irqs = min(pmu_device->num_resources, num_possible_cpus());
for (i = 0; i < irqs; ++i) {
@@ -422,6 +428,8 @@ armpmu_release_hardware(struct arm_pmu *armpmu)
armpmu->free_pmu_irq(irq);
}
+ armpmu->pmu_state = ARM_PMU_STATE_OFF;
+
release_pmu(armpmu->type);
}
@@ -493,6 +501,7 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
cpumask_set_cpu(i, &armpmu->active_irqs);
}
+ armpmu->pmu_state = ARM_PMU_STATE_RUNNING;
return 0;
}
@@ -810,68 +819,41 @@ static int __cpuinit pmu_cpu_notify(struct notifier_block *b,
unsigned long action, void *hcpu)
{
int irq;
- struct pmu *pmu;
- int cpu = (int)hcpu;
+ unsigned long masked_action = action & ~CPU_TASKS_FROZEN;
+ int ret = NOTIFY_DONE;
- switch ((action & ~CPU_TASKS_FROZEN)) {
- case CPU_DOWN_PREPARE:
- if (cpu_pmu && cpu_pmu->save_pm_registers)
- smp_call_function_single(cpu,
- cpu_pmu->save_pm_registers,
- hcpu, 1);
- break;
- case CPU_STARTING:
- if (cpu_pmu && cpu_pmu->reset)
- cpu_pmu->reset(NULL);
- if (cpu_pmu && cpu_pmu->restore_pm_registers)
- smp_call_function_single(cpu,
- cpu_pmu->restore_pm_registers,
- hcpu, 1);
+ if ((masked_action != CPU_DOWN_PREPARE) && (masked_action != CPU_STARTING)) {
+ return NOTIFY_DONE;
}
-
- if (cpu_has_active_perf((int)hcpu)) {
- switch ((action & ~CPU_TASKS_FROZEN)) {
-
- case CPU_DOWN_PREPARE:
- armpmu_update_counters();
- /*
- * If this is on a multicore CPU, we need
- * to disarm the PMU IRQ before disappearing.
- */
- if (cpu_pmu &&
- cpu_pmu->plat_device->dev.platform_data) {
+ if (masked_action == CPU_STARTING) {
+ ret = NOTIFY_OK;
+ }
+ switch (masked_action) {
+ case CPU_DOWN_PREPARE:
+ if (cpu_pmu->pmu_state != ARM_PMU_STATE_OFF) {
+ /* Disarm the PMU IRQ before disappearing. */
+ if (cpu_pmu->plat_device) {
irq = platform_get_irq(cpu_pmu->plat_device, 0);
- smp_call_function_single((int)hcpu,
- disable_irq_callback, &irq, 1);
+ smp_call_function_single((int)hcpu, disable_irq_callback, &irq, 1);
}
- return NOTIFY_DONE;
+ }
+ break;
- case CPU_STARTING:
- /*
- * If this is on a multicore CPU, we need
- * to arm the PMU IRQ before appearing.
- */
- if (cpu_pmu &&
- cpu_pmu->plat_device->dev.platform_data) {
+ case CPU_STARTING:
+ /* Reset PMU to clear counters for ftrace buffer. */
+ if (cpu_pmu->reset) {
+ cpu_pmu->reset(NULL);
+ }
+ if (cpu_pmu->pmu_state == ARM_PMU_STATE_RUNNING) {
+ /* Arm the PMU IRQ before appearing. */
+ if (cpu_pmu->plat_device) {
irq = platform_get_irq(cpu_pmu->plat_device, 0);
enable_irq_callback(&irq);
}
-
- if (cpu_pmu) {
- __get_cpu_var(from_idle) = 1;
- pmu = &cpu_pmu->pmu;
- pmu->pmu_enable(pmu);
- return NOTIFY_OK;
- }
- default:
- return NOTIFY_DONE;
}
+ break;
}
-
- if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
- return NOTIFY_DONE;
-
- return NOTIFY_OK;
+ return ret;
}
static struct notifier_block __cpuinitdata pmu_cpu_notifier = {
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 6f5e63ed18b..032919ebfdd 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -932,13 +932,6 @@ asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
ip = regs->ARM_ip;
regs->ARM_ip = why;
- /*
- * IP is used to denote syscall entry/exit:
- * IP = 0 -> entry, =1 -> exit
- */
- ip = regs->ARM_ip;
- regs->ARM_ip = why;
-
/* the 0x80 provides a way for the tracing parent to distinguish
between a syscall stop and SIGTRAP delivery */
ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
diff --git a/arch/arm/mach-msm/pmu.c b/arch/arm/mach-msm/pmu.c
index fe45fa432b2..f3fe04acb37 100644
--- a/arch/arm/mach-msm/pmu.c
+++ b/arch/arm/mach-msm/pmu.c
@@ -10,6 +10,7 @@
* GNU General Public License for more details.
*/
+#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <asm/pmu.h>
@@ -57,12 +58,10 @@ multicore_free_irq(int irq)
int cpu;
struct irq_desc *desc = irq_to_desc(irq);
- if (irq >= 0) {
- for_each_cpu(cpu, desc->percpu_enabled) {
- if (!armpmu_cpu_up(cpu))
- smp_call_function_single(cpu,
+ if ((irq >= 0) && desc) {
+ for_each_cpu(cpu, desc->percpu_enabled)
+ smp_call_function_single(cpu,
disable_irq_callback, &irq, 1);
- }
free_percpu_irq(irq, &pmu_irq_cookie);
}
}
diff --git a/arch/um/os-Linux/start_up.c b/arch/um/os-Linux/start_up.c
index 425162e22af..2f53b892fd8 100644
--- a/arch/um/os-Linux/start_up.c
+++ b/arch/um/os-Linux/start_up.c
@@ -15,6 +15,8 @@
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/wait.h>
+#include <sys/time.h>
+#include <sys/resource.h>
#include <asm/unistd.h>
#include "init.h"
#include "os.h"
diff --git a/build.config b/build.config
new file mode 100644
index 00000000000..5f1c89da838
--- /dev/null
+++ b/build.config
@@ -0,0 +1,12 @@
+ARCH=arm
+BRANCH=android-msm-flo-3.4-wip
+CROSS_COMPILE=arm-eabi-
+DEFCONFIG=flo_defconfig
+EXTRA_CMDS=''
+KERNEL_DIR=private/msm-asus
+LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/arm/arm-eabi-4.8/bin
+FILES="
+arch/arm/boot/zImage
+vmlinux
+System.map
+"
diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c
index fa03a06cf6c..16448610afe 100644
--- a/drivers/gpu/msm/adreno_drawctxt.c
+++ b/drivers/gpu/msm/adreno_drawctxt.c
@@ -545,9 +545,14 @@ int adreno_drawctxt_detach(struct kgsl_context *context)
*/
BUG_ON(!mutex_is_locked(&device->mutex));
- /* Wait for the last global timestamp to pass before continuing */
+ /* Wait for the last global timestamp to pass before continuing.
 * The maximum wait time is 30s: some large IBs can take longer
 * than 10s, and if a hang happens then the time for the context's
 * commands to retire will be greater than 10s. 30s should be sufficient
 * time to wait for the commands even if a hang happens.
+ */
ret = adreno_drawctxt_wait_global(adreno_dev, context,
- drawctxt->internal_timestamp, 10 * 1000);
+ drawctxt->internal_timestamp, 30 * 1000);
/*
* If the wait for global fails then nothing after this point is likely
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 6b01bfee672..e155019a26b 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -666,6 +666,12 @@ config BU52031NVX_POUCHDETECT
help
depends on BU52031NVX
+config UID_CPUTIME
+ tristate "Per-UID cpu time statistics"
+ depends on PROFILING
+ help
+ Per-UID CPU time statistics exported to /proc/uid_cputime
+
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
source "drivers/misc/cb710/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index a2294fc9328..439e43d4f7f 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -72,3 +72,4 @@ obj-$(CONFIG_QFP_FUSE) += qfp_fuse.o
obj-y += tspdrv/
obj-$(CONFIG_BU52031NVX) += pm8xxx-cradle.o
obj-$(CONFIG_SLIMPORT_ANX7808) += slimport_anx7808/
+obj-$(CONFIG_UID_CPUTIME) += uid_cputime.o
diff --git a/drivers/misc/uid_cputime.c b/drivers/misc/uid_cputime.c
new file mode 100644
index 00000000000..1bcb6d11fb1
--- /dev/null
+++ b/drivers/misc/uid_cputime.c
@@ -0,0 +1,238 @@
+/* drivers/misc/uid_cputime.c
+ *
+ * Copyright (C) 2014 - 2015 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/atomic.h>
+#include <linux/err.h>
+#include <linux/hashtable.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/proc_fs.h>
+#include <linux/profile.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#define UID_HASH_BITS 10
+DECLARE_HASHTABLE(hash_table, UID_HASH_BITS);
+
+static DEFINE_MUTEX(uid_lock);
+static struct proc_dir_entry *parent;
+
+struct uid_entry {
+ uid_t uid;
+ cputime_t utime;
+ cputime_t stime;
+ cputime_t active_utime;
+ cputime_t active_stime;
+ struct hlist_node hash;
+};
+
+static struct uid_entry *find_uid_entry(uid_t uid)
+{
+ struct uid_entry *uid_entry;
+ struct hlist_node *node;
+
+ hash_for_each_possible(hash_table, uid_entry, node, hash, uid) {
+ if (uid_entry->uid == uid)
+ return uid_entry;
+ }
+ return NULL;
+}
+
+static struct uid_entry *find_or_register_uid(uid_t uid)
+{
+ struct uid_entry *uid_entry;
+
+ uid_entry = find_uid_entry(uid);
+ if (uid_entry)
+ return uid_entry;
+
+ uid_entry = kzalloc(sizeof(struct uid_entry), GFP_ATOMIC);
+ if (!uid_entry)
+ return NULL;
+
+ uid_entry->uid = uid;
+
+ hash_add(hash_table, &uid_entry->hash, uid);
+
+ return uid_entry;
+}
+
+static int uid_stat_show(struct seq_file *m, void *v)
+{
+ struct uid_entry *uid_entry;
+ struct task_struct *task;
+ struct hlist_node *node;
+ cputime_t utime;
+ cputime_t stime;
+ unsigned long bkt;
+
+ mutex_lock(&uid_lock);
+
+ hash_for_each(hash_table, bkt, node, uid_entry, hash) {
+ uid_entry->active_stime = 0;
+ uid_entry->active_utime = 0;
+ }
+
+ read_lock(&tasklist_lock);
+ for_each_process(task) {
+ uid_entry = find_or_register_uid(task_uid(task));
+ if (!uid_entry) {
+ read_unlock(&tasklist_lock);
+ mutex_unlock(&uid_lock);
+ pr_err("%s: failed to find the uid_entry for uid %d\n",
+ __func__, task_uid(task));
+ return -ENOMEM;
+ }
+ task_times(task, &utime, &stime);
+ uid_entry->active_utime += utime;
+ uid_entry->active_stime += stime;
+ }
+ read_unlock(&tasklist_lock);
+
+ hash_for_each(hash_table, bkt, node, uid_entry, hash) {
+ cputime_t total_utime = uid_entry->utime +
+ uid_entry->active_utime;
+ cputime_t total_stime = uid_entry->stime +
+ uid_entry->active_stime;
+ seq_printf(m, "%d: %u %u\n", uid_entry->uid,
+ cputime_to_usecs(total_utime),
+ cputime_to_usecs(total_stime));
+ }
+
+ mutex_unlock(&uid_lock);
+ return 0;
+}
+
+static int uid_stat_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, uid_stat_show, PDE(inode)->data);
+}
+
+static const struct file_operations uid_stat_fops = {
+ .open = uid_stat_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int uid_remove_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, NULL, NULL);
+}
+
+static ssize_t uid_remove_write(struct file *file,
+ const char __user *buffer, size_t count, loff_t *ppos)
+{
+ struct uid_entry *uid_entry;
+ struct hlist_node *node, *tmp;
+ char uids[128];
+ char *start_uid, *end_uid = NULL;
+ long int uid_start = 0, uid_end = 0;
+
+ if (count >= sizeof(uids))
+ count = sizeof(uids) - 1;
+
+ if (copy_from_user(uids, buffer, count))
+ return -EFAULT;
+
+ uids[count] = '\0';
+ end_uid = uids;
+ start_uid = strsep(&end_uid, "-");
+
+ if (!start_uid || !end_uid)
+ return -EINVAL;
+
+ if (kstrtol(start_uid, 10, &uid_start) != 0 ||
+ kstrtol(end_uid, 10, &uid_end) != 0) {
+ return -EINVAL;
+ }
+
+ mutex_lock(&uid_lock);
+
+ for (; uid_start <= uid_end; uid_start++) {
+ hash_for_each_possible_safe(hash_table, uid_entry, node, tmp,
+ hash, uid_start) {
+ hash_del(&uid_entry->hash);
+ kfree(uid_entry);
+ }
+ }
+
+ mutex_unlock(&uid_lock);
+ return count;
+}
+
+static const struct file_operations uid_remove_fops = {
+ .open = uid_remove_open,
+ .release = single_release,
+ .write = uid_remove_write,
+};
+
+static int process_notifier(struct notifier_block *self,
+ unsigned long cmd, void *v)
+{
+ struct task_struct *task = v;
+ struct uid_entry *uid_entry;
+ cputime_t utime, stime;
+ uid_t uid;
+
+ if (!task)
+ return NOTIFY_OK;
+
+ mutex_lock(&uid_lock);
+ uid = task_uid(task);
+ uid_entry = find_or_register_uid(uid);
+ if (!uid_entry) {
+ pr_err("%s: failed to find uid %d\n", __func__, uid);
+ goto exit;
+ }
+
+ task_times(task, &utime, &stime);
+ uid_entry->utime += utime;
+ uid_entry->stime += stime;
+
+exit:
+ mutex_unlock(&uid_lock);
+ return NOTIFY_OK;
+}
+
+static struct notifier_block process_notifier_block = {
+ .notifier_call = process_notifier,
+};
+
+static int __init proc_uid_cputime_init(void)
+{
+ hash_init(hash_table);
+
+ parent = proc_mkdir("uid_cputime", NULL);
+ if (!parent) {
+ pr_err("%s: failed to create proc entry\n", __func__);
+ return -ENOMEM;
+ }
+
+ proc_create_data("remove_uid_range", S_IWUGO, parent, &uid_remove_fops,
+ NULL);
+
+ proc_create_data("show_uid_stat", S_IRUGO, parent, &uid_stat_fops,
+ NULL);
+
+ profile_event_register(PROFILE_TASK_EXIT, &process_notifier_block);
+
+ return 0;
+}
+
+early_initcall(proc_uid_cputime_init);
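
For reference, a hedged user-space sketch of the interface this driver creates: read /proc/uid_cputime/show_uid_stat (one "uid: user_time_us system_time_us" line per UID) and write a "start-end" range to /proc/uid_cputime/remove_uid_range to drop entries. The 10000-10010 range below is only an example.

#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/uid_cputime/show_uid_stat", "r");

	if (f) {
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);	/* "uid: utime_us stime_us" */
		fclose(f);
	}

	f = fopen("/proc/uid_cputime/remove_uid_range", "w");
	if (f) {
		fputs("10000-10010", f);	/* parsed by uid_remove_write() */
		fclose(f);
	}
	return 0;
}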
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index b19a8897153..7734cec59b1 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -50,14 +50,6 @@
extern void kernel_restart(char *cmd);
-#define CREATE_TRACE_POINTS
-#include <trace/events/mmc.h>
-
-/* If the device is not responding */
-#define MMC_CORE_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */
-
-static void mmc_clk_scaling(struct mmc_host *host, bool from_wq);
-
/*
* Background operations can take a long time, depending on the housekeeping
* operations the card has to perform.
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 9d5ff61c84d..2d1fdae357d 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -28,9 +28,7 @@
#include "core.h"
#include "host.h"
-#ifdef CONFIG_KEYBOARD_GPIO
#include <linux/gpio_keys.h>
-#endif
#define cls_dev_to_mmc_host(d) container_of(d, struct mmc_host, class_dev)
@@ -343,10 +341,8 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
#ifdef CONFIG_PM
host->pm_notify.notifier_call = mmc_pm_notify;
#endif
-#ifdef CONFIG_KEYBOARD_GPIO
host->force_poweroff_notifier.notifier_call = force_poweroff_notify;
register_resetkey_notifier(&host->force_poweroff_notifier);
-#endif
/*
* By default, hosts do not support SGIO or large requests.
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
index 43d17c27695..3092425ed49 100644
--- a/drivers/staging/android/Kconfig
+++ b/drivers/staging/android/Kconfig
@@ -21,9 +21,8 @@ config ASHMEM
POSIX SHM but with different behavior and sporting a simpler
file-based API.
-config ANDROID_LOGGER
- tristate "Android log driver"
- default n
+ It is, in theory, a good memory allocator for low-memory devices,
+ because it can discard shared memory units when under memory pressure.
config ANDROID_PERSISTENT_RAM
bool
@@ -78,15 +77,6 @@ config ANDROID_LOW_MEMORY_KILLER_AUTODETECT_OOM_ADJ_VALUES
source "drivers/staging/android/switch/Kconfig"
-config ANDROID_INTF_ALARM_DEV
- bool "Android alarm driver"
- depends on RTC_CLASS
- default n
- help
- Provides non-wakeup and rtc backed wakeup alarms based on rtc or
- elapsed realtime, and a non-wakeup alarm on the monotonic clock.
- Also exports the alarm interface to user-space.
-
endif # if ANDROID
endmenu
diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile
index 8769e325d49..803ef56db1a 100644
--- a/drivers/staging/android/Makefile
+++ b/drivers/staging/android/Makefile
@@ -2,14 +2,12 @@ ccflags-y += -I$(src) # needed for trace events
obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o
obj-$(CONFIG_ASHMEM) += ashmem.o
-obj-$(CONFIG_ANDROID_LOGGER) += logger.o
obj-$(CONFIG_ANDROID_PERSISTENT_RAM) += persistent_ram.o
obj-$(CONFIG_ANDROID_RAM_CONSOLE) += ram_console.o
obj-$(CONFIG_ANDROID_TIMED_OUTPUT) += timed_output.o
obj-$(CONFIG_ANDROID_TIMED_GPIO) += timed_gpio.o
obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER) += lowmemorykiller.o
obj-$(CONFIG_ANDROID_SWITCH) += switch/
-obj-$(CONFIG_ANDROID_INTF_ALARM_DEV) += alarm-dev.o
obj-$(CONFIG_PERSISTENT_TRACER) += trace_persistent.o
CFLAGS_REMOVE_trace_persistent.o = -pg
diff --git a/drivers/staging/android/alarm-dev.c b/drivers/staging/android/alarm-dev.c
deleted file mode 100644
index e001fe586a8..00000000000
--- a/drivers/staging/android/alarm-dev.c
+++ /dev/null
@@ -1,373 +0,0 @@
-/* drivers/rtc/alarm-dev.c
- *
- * Copyright (C) 2007-2009 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/time.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/miscdevice.h>
-#include <linux/fs.h>
-#include <linux/platform_device.h>
-#include <linux/sched.h>
-#include <linux/spinlock.h>
-#include <linux/uaccess.h>
-#include <linux/alarmtimer.h>
-#include <linux/wakelock.h>
-#include "android_alarm.h"
-
-#define ANDROID_ALARM_PRINT_INFO (1U << 0)
-#define ANDROID_ALARM_PRINT_IO (1U << 1)
-#define ANDROID_ALARM_PRINT_INT (1U << 2)
-
-static int debug_mask = ANDROID_ALARM_PRINT_INFO;
-module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP);
-
-#define pr_alarm(debug_level_mask, args...) \
- do { \
- if (debug_mask & ANDROID_ALARM_PRINT_##debug_level_mask) { \
- pr_info(args); \
- } \
- } while (0)
-
-#define ANDROID_ALARM_WAKEUP_MASK ( \
- ANDROID_ALARM_RTC_WAKEUP_MASK | \
- ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK)
-
-/* support old usespace code */
-#define ANDROID_ALARM_SET_OLD _IOW('a', 2, time_t) /* set alarm */
-#define ANDROID_ALARM_SET_AND_WAIT_OLD _IOW('a', 3, time_t)
-
-static int alarm_opened;
-static DEFINE_SPINLOCK(alarm_slock);
-static struct wake_lock alarm_wake_lock;
-static DECLARE_WAIT_QUEUE_HEAD(alarm_wait_queue);
-static uint32_t alarm_pending;
-static uint32_t alarm_enabled;
-static uint32_t wait_pending;
-
-struct devalarm {
- union {
- struct hrtimer hrt;
- struct alarm alrm;
- } u;
- enum android_alarm_type type;
-};
-
-static struct devalarm alarms[ANDROID_ALARM_TYPE_COUNT];
-
-
-static int is_wakeup(enum android_alarm_type type)
-{
- if (type == ANDROID_ALARM_RTC_WAKEUP ||
- type == ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP)
- return 1;
- return 0;
-}
-
-
-static void devalarm_start(struct devalarm *alrm, ktime_t exp)
-{
- if (is_wakeup(alrm->type))
- alarm_start(&alrm->u.alrm, exp);
- else
- hrtimer_start(&alrm->u.hrt, exp, HRTIMER_MODE_ABS);
-}
-
-
-static int devalarm_try_to_cancel(struct devalarm *alrm)
-{
- int ret;
- if (is_wakeup(alrm->type))
- ret = alarm_try_to_cancel(&alrm->u.alrm);
- else
- ret = hrtimer_try_to_cancel(&alrm->u.hrt);
- return ret;
-}
-
-static void devalarm_cancel(struct devalarm *alrm)
-{
- if (is_wakeup(alrm->type))
- alarm_cancel(&alrm->u.alrm);
- else
- hrtimer_cancel(&alrm->u.hrt);
-}
-
-
-static long alarm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- int rv = 0;
- unsigned long flags;
- struct timespec new_alarm_time;
- struct timespec new_rtc_time;
- struct timespec tmp_time;
- struct rtc_time new_rtc_tm;
- struct rtc_device *rtc_dev;
- enum android_alarm_type alarm_type = ANDROID_ALARM_IOCTL_TO_TYPE(cmd);
- uint32_t alarm_type_mask = 1U << alarm_type;
-
- if (alarm_type >= ANDROID_ALARM_TYPE_COUNT)
- return -EINVAL;
-
- if (ANDROID_ALARM_BASE_CMD(cmd) != ANDROID_ALARM_GET_TIME(0)) {
- if ((file->f_flags & O_ACCMODE) == O_RDONLY)
- return -EPERM;
- if (file->private_data == NULL &&
- cmd != ANDROID_ALARM_SET_RTC) {
- spin_lock_irqsave(&alarm_slock, flags);
- if (alarm_opened) {
- spin_unlock_irqrestore(&alarm_slock, flags);
- return -EBUSY;
- }
- alarm_opened = 1;
- file->private_data = (void *)1;
- spin_unlock_irqrestore(&alarm_slock, flags);
- }
- }
-
- switch (ANDROID_ALARM_BASE_CMD(cmd)) {
- case ANDROID_ALARM_CLEAR(0):
- spin_lock_irqsave(&alarm_slock, flags);
- pr_alarm(IO, "alarm %d clear\n", alarm_type);
- devalarm_try_to_cancel(&alarms[alarm_type]);
- if (alarm_pending) {
- alarm_pending &= ~alarm_type_mask;
- if (!alarm_pending && !wait_pending)
- wake_unlock(&alarm_wake_lock);
- }
- alarm_enabled &= ~alarm_type_mask;
- spin_unlock_irqrestore(&alarm_slock, flags);
- break;
-
- case ANDROID_ALARM_SET_OLD:
- case ANDROID_ALARM_SET_AND_WAIT_OLD:
- if (get_user(new_alarm_time.tv_sec, (int __user *)arg)) {
- rv = -EFAULT;
- goto err1;
- }
- new_alarm_time.tv_nsec = 0;
- goto from_old_alarm_set;
-
- case ANDROID_ALARM_SET_AND_WAIT(0):
- case ANDROID_ALARM_SET(0):
- if (copy_from_user(&new_alarm_time, (void __user *)arg,
- sizeof(new_alarm_time))) {
- rv = -EFAULT;
- goto err1;
- }
-from_old_alarm_set:
- spin_lock_irqsave(&alarm_slock, flags);
- pr_alarm(IO, "alarm %d set %ld.%09ld\n", alarm_type,
- new_alarm_time.tv_sec, new_alarm_time.tv_nsec);
- alarm_enabled |= alarm_type_mask;
- devalarm_start(&alarms[alarm_type],
- timespec_to_ktime(new_alarm_time));
- spin_unlock_irqrestore(&alarm_slock, flags);
- if (ANDROID_ALARM_BASE_CMD(cmd) != ANDROID_ALARM_SET_AND_WAIT(0)
- && cmd != ANDROID_ALARM_SET_AND_WAIT_OLD)
- break;
- /* fall though */
- case ANDROID_ALARM_WAIT:
- spin_lock_irqsave(&alarm_slock, flags);
- pr_alarm(IO, "alarm wait\n");
- if (!alarm_pending && wait_pending) {
- wake_unlock(&alarm_wake_lock);
- wait_pending = 0;
- }
- spin_unlock_irqrestore(&alarm_slock, flags);
- rv = wait_event_interruptible(alarm_wait_queue, alarm_pending);
- if (rv)
- goto err1;
- spin_lock_irqsave(&alarm_slock, flags);
- rv = alarm_pending;
- wait_pending = 1;
- alarm_pending = 0;
- spin_unlock_irqrestore(&alarm_slock, flags);
- break;
- case ANDROID_ALARM_SET_RTC:
- if (copy_from_user(&new_rtc_time, (void __user *)arg,
- sizeof(new_rtc_time))) {
- rv = -EFAULT;
- goto err1;
- }
- rtc_time_to_tm(new_rtc_time.tv_sec, &new_rtc_tm);
- rtc_dev = alarmtimer_get_rtcdev();
- rv = do_settimeofday(&new_rtc_time);
- if (rv < 0)
- goto err1;
- if (rtc_dev)
- rv = rtc_set_time(rtc_dev, &new_rtc_tm);
- spin_lock_irqsave(&alarm_slock, flags);
- alarm_pending |= ANDROID_ALARM_TIME_CHANGE_MASK;
- wake_up(&alarm_wait_queue);
- spin_unlock_irqrestore(&alarm_slock, flags);
- if (rv < 0)
- goto err1;
- break;
- case ANDROID_ALARM_GET_TIME(0):
- switch (alarm_type) {
- case ANDROID_ALARM_RTC_WAKEUP:
- case ANDROID_ALARM_RTC:
- getnstimeofday(&tmp_time);
- break;
- case ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP:
- case ANDROID_ALARM_ELAPSED_REALTIME:
- get_monotonic_boottime(&tmp_time);
- break;
- case ANDROID_ALARM_TYPE_COUNT:
- case ANDROID_ALARM_SYSTEMTIME:
- ktime_get_ts(&tmp_time);
- break;
- }
- if (copy_to_user((void __user *)arg, &tmp_time,
- sizeof(tmp_time))) {
- rv = -EFAULT;
- goto err1;
- }
- break;
-
- default:
- rv = -EINVAL;
- goto err1;
- }
-err1:
- return rv;
-}
-
-static int alarm_open(struct inode *inode, struct file *file)
-{
- file->private_data = NULL;
- return 0;
-}
-
-static int alarm_release(struct inode *inode, struct file *file)
-{
- int i;
- unsigned long flags;
-
- spin_lock_irqsave(&alarm_slock, flags);
- if (file->private_data != 0) {
- for (i = 0; i < ANDROID_ALARM_TYPE_COUNT; i++) {
- uint32_t alarm_type_mask = 1U << i;
- if (alarm_enabled & alarm_type_mask) {
- pr_alarm(INFO, "alarm_release: clear alarm, "
- "pending %d\n",
- !!(alarm_pending & alarm_type_mask));
- alarm_enabled &= ~alarm_type_mask;
- }
- spin_unlock_irqrestore(&alarm_slock, flags);
- devalarm_cancel(&alarms[i]);
- spin_lock_irqsave(&alarm_slock, flags);
- }
- if (alarm_pending | wait_pending) {
- if (alarm_pending)
- pr_alarm(INFO, "alarm_release: clear "
- "pending alarms %x\n", alarm_pending);
- wake_unlock(&alarm_wake_lock);
- wait_pending = 0;
- alarm_pending = 0;
- }
- alarm_opened = 0;
- }
- spin_unlock_irqrestore(&alarm_slock, flags);
- return 0;
-}
-
-static void devalarm_triggered(struct devalarm *alarm)
-{
- unsigned long flags;
- uint32_t alarm_type_mask = 1U << alarm->type;
-
- pr_alarm(INT, "devalarm_triggered type %d\n", alarm->type);
- spin_lock_irqsave(&alarm_slock, flags);
- if (alarm_enabled & alarm_type_mask) {
- wake_lock_timeout(&alarm_wake_lock, 5 * HZ);
- alarm_enabled &= ~alarm_type_mask;
- alarm_pending |= alarm_type_mask;
- wake_up(&alarm_wait_queue);
- }
- spin_unlock_irqrestore(&alarm_slock, flags);
-}
-
-
-static enum hrtimer_restart devalarm_hrthandler(struct hrtimer *hrt)
-{
- struct devalarm *devalrm = container_of(hrt, struct devalarm, u.hrt);
-
- devalarm_triggered(devalrm);
- return HRTIMER_NORESTART;
-}
-
-static enum alarmtimer_restart devalarm_alarmhandler(struct alarm *alrm,
- ktime_t now)
-{
- struct devalarm *devalrm = container_of(alrm, struct devalarm, u.alrm);
-
- devalarm_triggered(devalrm);
- return ALARMTIMER_NORESTART;
-}
-
-
-static const struct file_operations alarm_fops = {
- .owner = THIS_MODULE,
- .unlocked_ioctl = alarm_ioctl,
- .open = alarm_open,
- .release = alarm_release,
-};
-
-static struct miscdevice alarm_device = {
- .minor = MISC_DYNAMIC_MINOR,
- .name = "alarm",
- .fops = &alarm_fops,
-};
-
-static int __init alarm_dev_init(void)
-{
- int err;
- int i;
-
- err = misc_register(&alarm_device);
- if (err)
- return err;
-
- alarm_init(&alarms[ANDROID_ALARM_RTC_WAKEUP].u.alrm,
- ALARM_REALTIME, devalarm_alarmhandler);
- hrtimer_init(&alarms[ANDROID_ALARM_RTC].u.hrt,
- CLOCK_REALTIME, HRTIMER_MODE_ABS);
- alarm_init(&alarms[ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP].u.alrm,
- ALARM_BOOTTIME, devalarm_alarmhandler);
- hrtimer_init(&alarms[ANDROID_ALARM_ELAPSED_REALTIME].u.hrt,
- CLOCK_BOOTTIME, HRTIMER_MODE_ABS);
- hrtimer_init(&alarms[ANDROID_ALARM_SYSTEMTIME].u.hrt,
- CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
-
- for (i = 0; i < ANDROID_ALARM_TYPE_COUNT; i++) {
- alarms[i].type = i;
- if (!is_wakeup(i))
- alarms[i].u.hrt.function = devalarm_hrthandler;
- }
-
- wake_lock_init(&alarm_wake_lock, WAKE_LOCK_SUSPEND, "alarm");
-
- return 0;
-}
-
-static void __exit alarm_dev_exit(void)
-{
- misc_deregister(&alarm_device);
- wake_lock_destroy(&alarm_wake_lock);
-}
-
-module_init(alarm_dev_init);
-module_exit(alarm_dev_exit);
-
diff --git a/drivers/staging/android/android_alarm.h b/drivers/staging/android/android_alarm.h
deleted file mode 100644
index d0cafd63719..00000000000
--- a/drivers/staging/android/android_alarm.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/* include/linux/android_alarm.h
- *
- * Copyright (C) 2006-2007 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _LINUX_ANDROID_ALARM_H
-#define _LINUX_ANDROID_ALARM_H
-
-#include <linux/ioctl.h>
-#include <linux/time.h>
-
-enum android_alarm_type {
- /* return code bit numbers or set alarm arg */
- ANDROID_ALARM_RTC_WAKEUP,
- ANDROID_ALARM_RTC,
- ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
- ANDROID_ALARM_ELAPSED_REALTIME,
- ANDROID_ALARM_SYSTEMTIME,
-
- ANDROID_ALARM_TYPE_COUNT,
-
- /* return code bit numbers */
- /* ANDROID_ALARM_TIME_CHANGE = 16 */
-};
-
-enum android_alarm_return_flags {
- ANDROID_ALARM_RTC_WAKEUP_MASK = 1U << ANDROID_ALARM_RTC_WAKEUP,
- ANDROID_ALARM_RTC_MASK = 1U << ANDROID_ALARM_RTC,
- ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK =
- 1U << ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
- ANDROID_ALARM_ELAPSED_REALTIME_MASK =
- 1U << ANDROID_ALARM_ELAPSED_REALTIME,
- ANDROID_ALARM_SYSTEMTIME_MASK = 1U << ANDROID_ALARM_SYSTEMTIME,
- ANDROID_ALARM_TIME_CHANGE_MASK = 1U << 16
-};
-
-/* Disable alarm */
-#define ANDROID_ALARM_CLEAR(type) _IO('a', 0 | ((type) << 4))
-
-/* Ack last alarm and wait for next */
-#define ANDROID_ALARM_WAIT _IO('a', 1)
-
-#define ALARM_IOW(c, type, size) _IOW('a', (c) | ((type) << 4), size)
-/* Set alarm */
-#define ANDROID_ALARM_SET(type) ALARM_IOW(2, type, struct timespec)
-#define ANDROID_ALARM_SET_AND_WAIT(type) ALARM_IOW(3, type, struct timespec)
-#define ANDROID_ALARM_GET_TIME(type) ALARM_IOW(4, type, struct timespec)
-#define ANDROID_ALARM_SET_RTC _IOW('a', 5, struct timespec)
-#define ANDROID_ALARM_BASE_CMD(cmd) (cmd & ~(_IOC(0, 0, 0xf0, 0)))
-#define ANDROID_ALARM_IOCTL_TO_TYPE(cmd) (_IOC_NR(cmd) >> 4)
-
-#endif
diff --git a/drivers/staging/android/logger.c b/drivers/staging/android/logger.c
deleted file mode 100644
index eb3d4ca5fef..00000000000
--- a/drivers/staging/android/logger.c
+++ /dev/null
@@ -1,789 +0,0 @@
-/*
- * drivers/misc/logger.c
- *
- * A Logging Subsystem
- *
- * Copyright (C) 2007-2008 Google, Inc.
- *
- * Robert Love <rlove@google.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/sched.h>
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/miscdevice.h>
-#include <linux/uaccess.h>
-#include <linux/poll.h>
-#include <linux/slab.h>
-#include <linux/time.h>
-#include "logger.h"
-
-#include <asm/ioctls.h>
-
-/*
- * struct logger_log - represents a specific log, such as 'main' or 'radio'
- *
- * This structure lives from module insertion until module removal, so it does
- * not need additional reference counting. The structure is protected by the
- * mutex 'mutex'.
- */
-struct logger_log {
- unsigned char *buffer;/* the ring buffer itself */
- struct miscdevice misc; /* misc device representing the log */
- wait_queue_head_t wq; /* wait queue for readers */
- struct list_head readers; /* this log's readers */
- struct mutex mutex; /* mutex protecting buffer */
- size_t w_off; /* current write head offset */
- size_t head; /* new readers start here */
- size_t size; /* size of the log */
-};
-
-/*
- * struct logger_reader - a logging device open for reading
- *
- * This object lives from open to release, so we don't need additional
- * reference counting. The structure is protected by log->mutex.
- */
-struct logger_reader {
- struct logger_log *log; /* associated log */
- struct list_head list; /* entry in logger_log's list */
- size_t r_off; /* current read head offset */
- bool r_all; /* reader can read all entries */
- int r_ver; /* reader ABI version */
-};
-
-/* logger_offset - returns index 'n' into the log via (optimized) modulus */
-size_t logger_offset(struct logger_log *log, size_t n)
-{
- return n & (log->size-1);
-}
-
-
-/*
- * file_get_log - Given a file structure, return the associated log
- *
- * This isn't aesthetic. We have several goals:
- *
- * 1) Need to quickly obtain the associated log during an I/O operation
- * 2) Readers need to maintain state (logger_reader)
- * 3) Writers need to be very fast (open() should be a near no-op)
- *
- * In the reader case, we can trivially go file->logger_reader->logger_log.
- * For a writer, we don't want to maintain a logger_reader, so we just go
- * file->logger_log. Thus what file->private_data points at depends on whether
- * or not the file was opened for reading. This function hides that dirtiness.
- */
-static inline struct logger_log *file_get_log(struct file *file)
-{
- if (file->f_mode & FMODE_READ) {
- struct logger_reader *reader = file->private_data;
- return reader->log;
- } else
- return file->private_data;
-}
-
-/*
- * get_entry_header - returns a pointer to the logger_entry header within
- * 'log' starting at offset 'off'. A temporary logger_entry 'scratch' must
- * be provided. Typically the return value will be a pointer within
- * 'logger->buf'. However, a pointer to 'scratch' may be returned if
- * the log entry spans the end and beginning of the circular buffer.
- */
-static struct logger_entry *get_entry_header(struct logger_log *log,
- size_t off, struct logger_entry *scratch)
-{
- size_t len = min(sizeof(struct logger_entry), log->size - off);
- if (len != sizeof(struct logger_entry)) {
- memcpy(((void *) scratch), log->buffer + off, len);
- memcpy(((void *) scratch) + len, log->buffer,
- sizeof(struct logger_entry) - len);
- return scratch;
- }
-
- return (struct logger_entry *) (log->buffer + off);
-}
-
-/*
- * get_entry_msg_len - Grabs the length of the message of the entry
- * starting from from 'off'.
- *
- * An entry length is 2 bytes (16 bits) in host endian order.
- * In the log, the length does not include the size of the log entry structure.
- * This function returns the size including the log entry structure.
- *
- * Caller needs to hold log->mutex.
- */
-static __u32 get_entry_msg_len(struct logger_log *log, size_t off)
-{
- struct logger_entry scratch;
- struct logger_entry *entry;
-
- entry = get_entry_header(log, off, &scratch);
- return entry->len;
-}
-
-static size_t get_user_hdr_len(int ver)
-{
- if (ver < 2)
- return sizeof(struct user_logger_entry_compat);
- else
- return sizeof(struct logger_entry);
-}
-
-static ssize_t copy_header_to_user(int ver, struct logger_entry *entry,
- char __user *buf)
-{
- void *hdr;
- size_t hdr_len;
- struct user_logger_entry_compat v1;
-
- if (ver < 2) {
- v1.len = entry->len;
- v1.__pad = 0;
- v1.pid = entry->pid;
- v1.tid = entry->tid;
- v1.sec = entry->sec;
- v1.nsec = entry->nsec;
- hdr = &v1;
- hdr_len = sizeof(struct user_logger_entry_compat);
- } else {
- hdr = entry;
- hdr_len = sizeof(struct logger_entry);
- }
-
- return copy_to_user(buf, hdr, hdr_len);
-}
-
-/*
- * do_read_log_to_user - reads exactly 'count' bytes from 'log' into the
- * user-space buffer 'buf'. Returns 'count' on success.
- *
- * Caller must hold log->mutex.
- */
-static ssize_t do_read_log_to_user(struct logger_log *log,
- struct logger_reader *reader,
- char __user *buf,
- size_t count)
-{
- struct logger_entry scratch;
- struct logger_entry *entry;
- size_t len;
- size_t msg_start;
-
- /*
- * First, copy the header to userspace, using the version of
- * the header requested
- */
- entry = get_entry_header(log, reader->r_off, &scratch);
- if (copy_header_to_user(reader->r_ver, entry, buf))
- return -EFAULT;
-
- count -= get_user_hdr_len(reader->r_ver);
- buf += get_user_hdr_len(reader->r_ver);
- msg_start = logger_offset(log,
- reader->r_off + sizeof(struct logger_entry));
-
- /*
- * We read from the msg in two disjoint operations. First, we read from
- * the current msg head offset up to 'count' bytes or to the end of
- * the log, whichever comes first.
- */
- len = min(count, log->size - msg_start);
- if (copy_to_user(buf, log->buffer + msg_start, len))
- return -EFAULT;
-
- /*
- * Second, we read any remaining bytes, starting back at the head of
- * the log.
- */
- if (count != len)
- if (copy_to_user(buf + len, log->buffer, count - len))
- return -EFAULT;
-
- reader->r_off = logger_offset(log, reader->r_off +
- sizeof(struct logger_entry) + count);
-
- return count + get_user_hdr_len(reader->r_ver);
-}
-
-/*
- * get_next_entry_by_uid - Starting at 'off', returns an offset into
- * 'log->buffer' which contains the first entry readable by 'euid'
- */
-static size_t get_next_entry_by_uid(struct logger_log *log,
- size_t off, uid_t euid)
-{
- while (off != log->w_off) {
- struct logger_entry *entry;
- struct logger_entry scratch;
- size_t next_len;
-
- entry = get_entry_header(log, off, &scratch);
-
- if (entry->euid == euid)
- return off;
-
- next_len = sizeof(struct logger_entry) + entry->len;
- off = logger_offset(log, off + next_len);
- }
-
- return off;
-}
-
-/*
- * logger_read - our log's read() method
- *
- * Behavior:
- *
- * - O_NONBLOCK works
- * - If there are no log entries to read, blocks until log is written to
- * - Atomically reads exactly one log entry
- *
- * Will set errno to EINVAL if read
- * buffer is insufficient to hold next entry.
- */
-static ssize_t logger_read(struct file *file, char __user *buf,
- size_t count, loff_t *pos)
-{
- struct logger_reader *reader = file->private_data;
- struct logger_log *log = reader->log;
- ssize_t ret;
- DEFINE_WAIT(wait);
-
-start:
- while (1) {
- mutex_lock(&log->mutex);
-
- prepare_to_wait(&log->wq, &wait, TASK_INTERRUPTIBLE);
-
- ret = (log->w_off == reader->r_off);
- mutex_unlock(&log->mutex);
- if (!ret)
- break;
-
- if (file->f_flags & O_NONBLOCK) {
- ret = -EAGAIN;
- break;
- }
-
- if (signal_pending(current)) {
- ret = -EINTR;
- break;
- }
-
- schedule();
- }
-
- finish_wait(&log->wq, &wait);
- if (ret)
- return ret;
-
- mutex_lock(&log->mutex);
-
- if (!reader->r_all)
- reader->r_off = get_next_entry_by_uid(log,
- reader->r_off, current_euid());
-
- /* is there still something to read or did we race? */
- if (unlikely(log->w_off == reader->r_off)) {
- mutex_unlock(&log->mutex);
- goto start;
- }
-
- /* get the size of the next entry */
- ret = get_user_hdr_len(reader->r_ver) +
- get_entry_msg_len(log, reader->r_off);
- if (count < ret) {
- ret = -EINVAL;
- goto out;
- }
-
- /* get exactly one entry from the log */
- ret = do_read_log_to_user(log, reader, buf, ret);
-
-out:
- mutex_unlock(&log->mutex);
-
- return ret;
-}
-
-/*
- * get_next_entry - return the offset of the first valid entry at least 'len'
- * bytes after 'off'.
- *
- * Caller must hold log->mutex.
- */
-static size_t get_next_entry(struct logger_log *log, size_t off, size_t len)
-{
- size_t count = 0;
-
- do {
- size_t nr = sizeof(struct logger_entry) +
- get_entry_msg_len(log, off);
- off = logger_offset(log, off + nr);
- count += nr;
- } while (count < len);
-
- return off;
-}
-
-/*
- * is_between - is a < c < b, accounting for wrapping of a, b, and c
- * positions in the buffer
- *
- * That is, if a<b, check for c between a and b
- * and if a>b, check for c outside (not between) a and b
- *
- * |------- a xxxxxxxx b --------|
- * c^
- *
- * |xxxxx b --------- a xxxxxxxxx|
- * c^
- * or c^
- */
-static inline int is_between(size_t a, size_t b, size_t c)
-{
- if (a < b) {
- /* is c between a and b? */
- if (a < c && c <= b)
- return 1;
- } else {
- /* is c outside of b through a? */
- if (c <= b || a < c)
- return 1;
- }
-
- return 0;
-}
-
-/*
- * fix_up_readers - walk the list of all readers and "fix up" any who were
- * lapped by the writer; also do the same for the default "start head".
- * We do this by "pulling forward" the readers and start head to the first
- * entry after the new write head.
- *
- * The caller needs to hold log->mutex.
- */
-static void fix_up_readers(struct logger_log *log, size_t len)
-{
- size_t old = log->w_off;
- size_t new = logger_offset(log, old + len);
- struct logger_reader *reader;
-
- if (is_between(old, new, log->head))
- log->head = get_next_entry(log, log->head, len);
-
- list_for_each_entry(reader, &log->readers, list)
- if (is_between(old, new, reader->r_off))
- reader->r_off = get_next_entry(log, reader->r_off, len);
-}
-
-/*
- * do_write_log - writes 'len' bytes from 'buf' to 'log'
- *
- * The caller needs to hold log->mutex.
- */
-static void do_write_log(struct logger_log *log, const void *buf, size_t count)
-{
- size_t len;
-
- len = min(count, log->size - log->w_off);
- memcpy(log->buffer + log->w_off, buf, len);
-
- if (count != len)
- memcpy(log->buffer, buf + len, count - len);
-
- log->w_off = logger_offset(log, log->w_off + count);
-
-}
-
-/*
- * do_write_log_user - writes 'len' bytes from the user-space buffer 'buf' to
- * the log 'log'
- *
- * The caller needs to hold log->mutex.
- *
- * Returns 'count' on success, negative error code on failure.
- */
-static ssize_t do_write_log_from_user(struct logger_log *log,
- const void __user *buf, size_t count)
-{
- size_t len;
-
- len = min(count, log->size - log->w_off);
- if (len && copy_from_user(log->buffer + log->w_off, buf, len))
- return -EFAULT;
-
- if (count != len)
- if (copy_from_user(log->buffer, buf + len, count - len))
- /*
- * Note that by not updating w_off, this abandons the
- * portion of the new entry that *was* successfully
- * copied, just above. This is intentional to avoid
- * message corruption from missing fragments.
- */
- return -EFAULT;
-
- log->w_off = logger_offset(log, log->w_off + count);
-
- return count;
-}
-
-/*
- * logger_aio_write - our write method, implementing support for write(),
- * writev(), and aio_write(). Writes are our fast path, and we try to optimize
- * them above all else.
- */
-ssize_t logger_aio_write(struct kiocb *iocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t ppos)
-{
- struct logger_log *log = file_get_log(iocb->ki_filp);
- size_t orig = log->w_off;
- struct logger_entry header;
- struct timespec now;
- ssize_t ret = 0;
-
- now = current_kernel_time();
-
- header.pid = current->tgid;
- header.tid = current->pid;
- header.sec = now.tv_sec;
- header.nsec = now.tv_nsec;
- header.euid = current_euid();
- header.len = min_t(size_t, iocb->ki_left, LOGGER_ENTRY_MAX_PAYLOAD);
- header.hdr_size = sizeof(struct logger_entry);
-
- /* null writes succeed, return zero */
- if (unlikely(!header.len))
- return 0;
-
- mutex_lock(&log->mutex);
-
- /*
- * Fix up any readers, pulling them forward to the first readable
- * entry after (what will be) the new write offset. We do this now
- * because if we partially fail, we can end up with clobbered log
- * entries that encroach on readable buffer.
- */
- fix_up_readers(log, sizeof(struct logger_entry) + header.len);
-
- do_write_log(log, &header, sizeof(struct logger_entry));
-
- while (nr_segs-- > 0) {
- size_t len;
- ssize_t nr;
-
- /* figure out how much of this vector we can keep */
- len = min_t(size_t, iov->iov_len, header.len - ret);
-
- /* write out this segment's payload */
- nr = do_write_log_from_user(log, iov->iov_base, len);
- if (unlikely(nr < 0)) {
- log->w_off = orig;
- mutex_unlock(&log->mutex);
- return nr;
- }
-
- iov++;
- ret += nr;
- }
-
- mutex_unlock(&log->mutex);
-
- /* wake up any blocked readers */
- wake_up_interruptible(&log->wq);
-
- return ret;
-}
-
-static struct logger_log *get_log_from_minor(int);
-
-/*
- * logger_open - the log's open() file operation
- *
- * Note how near a no-op this is in the write-only case. Keep it that way!
- */
-static int logger_open(struct inode *inode, struct file *file)
-{
- struct logger_log *log;
- int ret;
-
- ret = nonseekable_open(inode, file);
- if (ret)
- return ret;
-
- log = get_log_from_minor(MINOR(inode->i_rdev));
- if (!log)
- return -ENODEV;
-
- if (file->f_mode & FMODE_READ) {
- struct logger_reader *reader;
-
- reader = kmalloc(sizeof(struct logger_reader), GFP_KERNEL);
- if (!reader)
- return -ENOMEM;
-
- reader->log = log;
- reader->r_ver = 1;
- reader->r_all = in_egroup_p(inode->i_gid) ||
- capable(CAP_SYSLOG);
-
- INIT_LIST_HEAD(&reader->list);
-
- mutex_lock(&log->mutex);
- reader->r_off = log->head;
- list_add_tail(&reader->list, &log->readers);
- mutex_unlock(&log->mutex);
-
- file->private_data = reader;
- } else
- file->private_data = log;
-
- return 0;
-}
-
-/*
- * logger_release - the log's release file operation
- *
- * Note this is a total no-op in the write-only case. Keep it that way!
- */
-static int logger_release(struct inode *ignored, struct file *file)
-{
- if (file->f_mode & FMODE_READ) {
- struct logger_reader *reader = file->private_data;
- struct logger_log *log = reader->log;
-
- mutex_lock(&log->mutex);
- list_del(&reader->list);
- mutex_unlock(&log->mutex);
-
- kfree(reader);
- }
-
- return 0;
-}
-
-/*
- * logger_poll - the log's poll file operation, for poll/select/epoll
- *
- * Note we always return POLLOUT, because you can always write() to the log.
- * Note also that, strictly speaking, a return value of POLLIN does not
- * guarantee that the log is readable without blocking, as there is a small
- * chance that the writer can lap the reader in the interim between poll()
- * returning and the read() request.
- */
-static unsigned int logger_poll(struct file *file, poll_table *wait)
-{
- struct logger_reader *reader;
- struct logger_log *log;
- unsigned int ret = POLLOUT | POLLWRNORM;
-
- if (!(file->f_mode & FMODE_READ))
- return ret;
-
- reader = file->private_data;
- log = reader->log;
-
- poll_wait(file, &log->wq, wait);
-
- mutex_lock(&log->mutex);
- if (!reader->r_all)
- reader->r_off = get_next_entry_by_uid(log,
- reader->r_off, current_euid());
-
- if (log->w_off != reader->r_off)
- ret |= POLLIN | POLLRDNORM;
- mutex_unlock(&log->mutex);
-
- return ret;
-}
-
-static long logger_set_version(struct logger_reader *reader, void __user *arg)
-{
- int version;
- if (copy_from_user(&version, arg, sizeof(int)))
- return -EFAULT;
-
- if ((version < 1) || (version > 2))
- return -EINVAL;
-
- reader->r_ver = version;
- return 0;
-}
-
-static long logger_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- struct logger_log *log = file_get_log(file);
- struct logger_reader *reader;
- long ret = -EINVAL;
- void __user *argp = (void __user *) arg;
-
- mutex_lock(&log->mutex);
-
- switch (cmd) {
- case LOGGER_GET_LOG_BUF_SIZE:
- ret = log->size;
- break;
- case LOGGER_GET_LOG_LEN:
- if (!(file->f_mode & FMODE_READ)) {
- ret = -EBADF;
- break;
- }
- reader = file->private_data;
- if (log->w_off >= reader->r_off)
- ret = log->w_off - reader->r_off;
- else
- ret = (log->size - reader->r_off) + log->w_off;
- break;
- case LOGGER_GET_NEXT_ENTRY_LEN:
- if (!(file->f_mode & FMODE_READ)) {
- ret = -EBADF;
- break;
- }
- reader = file->private_data;
-
- if (!reader->r_all)
- reader->r_off = get_next_entry_by_uid(log,
- reader->r_off, current_euid());
-
- if (log->w_off != reader->r_off)
- ret = get_user_hdr_len(reader->r_ver) +
- get_entry_msg_len(log, reader->r_off);
- else
- ret = 0;
- break;
- case LOGGER_FLUSH_LOG:
- if (!(file->f_mode & FMODE_WRITE)) {
- ret = -EBADF;
- break;
- }
- list_for_each_entry(reader, &log->readers, list)
- reader->r_off = log->w_off;
- log->head = log->w_off;
- ret = 0;
- break;
- case LOGGER_GET_VERSION:
- if (!(file->f_mode & FMODE_READ)) {
- ret = -EBADF;
- break;
- }
- reader = file->private_data;
- ret = reader->r_ver;
- break;
- case LOGGER_SET_VERSION:
- if (!(file->f_mode & FMODE_READ)) {
- ret = -EBADF;
- break;
- }
- reader = file->private_data;
- ret = logger_set_version(reader, argp);
- break;
- }
-
- mutex_unlock(&log->mutex);
-
- return ret;
-}
-
-static const struct file_operations logger_fops = {
- .owner = THIS_MODULE,
- .read = logger_read,
- .aio_write = logger_aio_write,
- .poll = logger_poll,
- .unlocked_ioctl = logger_ioctl,
- .compat_ioctl = logger_ioctl,
- .open = logger_open,
- .release = logger_release,
-};
-
-/*
- * Defines a log structure with name 'NAME' and a size of 'SIZE' bytes, which
- * must be a power of two, and greater than
- * (LOGGER_ENTRY_MAX_PAYLOAD + sizeof(struct logger_entry)).
- */
-#define DEFINE_LOGGER_DEVICE(VAR, NAME, SIZE) \
-static unsigned char _buf_ ## VAR[SIZE]; \
-static struct logger_log VAR = { \
- .buffer = _buf_ ## VAR, \
- .misc = { \
- .minor = MISC_DYNAMIC_MINOR, \
- .name = NAME, \
- .fops = &logger_fops, \
- .parent = NULL, \
- }, \
- .wq = __WAIT_QUEUE_HEAD_INITIALIZER(VAR .wq), \
- .readers = LIST_HEAD_INIT(VAR .readers), \
- .mutex = __MUTEX_INITIALIZER(VAR .mutex), \
- .w_off = 0, \
- .head = 0, \
- .size = SIZE, \
-};
-
-DEFINE_LOGGER_DEVICE(log_main, LOGGER_LOG_MAIN, 256*1024)
-DEFINE_LOGGER_DEVICE(log_events, LOGGER_LOG_EVENTS, 256*1024)
-DEFINE_LOGGER_DEVICE(log_radio, LOGGER_LOG_RADIO, 256*1024)
-DEFINE_LOGGER_DEVICE(log_system, LOGGER_LOG_SYSTEM, 256*1024)
-
-static struct logger_log *get_log_from_minor(int minor)
-{
- if (log_main.misc.minor == minor)
- return &log_main;
- if (log_events.misc.minor == minor)
- return &log_events;
- if (log_radio.misc.minor == minor)
- return &log_radio;
- if (log_system.misc.minor == minor)
- return &log_system;
- return NULL;
-}
-
-static int __init init_log(struct logger_log *log)
-{
- int ret;
-
- ret = misc_register(&log->misc);
- if (unlikely(ret)) {
- printk(KERN_ERR "logger: failed to register misc "
- "device for log '%s'!\n", log->misc.name);
- return ret;
- }
-
- printk(KERN_INFO "logger: created %luK log '%s'\n",
- (unsigned long) log->size >> 10, log->misc.name);
-
- return 0;
-}
-
-static int __init logger_init(void)
-{
- int ret;
-
- ret = init_log(&log_main);
- if (unlikely(ret))
- goto out;
-
- ret = init_log(&log_events);
- if (unlikely(ret))
- goto out;
-
- ret = init_log(&log_radio);
- if (unlikely(ret))
- goto out;
-
- ret = init_log(&log_system);
- if (unlikely(ret))
- goto out;
-
-out:
- return ret;
-}
-device_initcall(logger_init);
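
The LOGGER_GET_LOG_LEN branch in the removed ioctl handler above computes how many bytes sit between a reader's offset and the writer's offset in the circular log buffer, wrapping when the writer has passed the end. A minimal userspace sketch of the same arithmetic (buffer size and offsets are invented test values, not taken from the driver):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Bytes of pending log data between a reader offset and the writer offset
 * in a ring buffer of 'size' bytes; mirrors the LOGGER_GET_LOG_LEN math. */
static size_t log_used_len(size_t size, size_t w_off, size_t r_off)
{
	if (w_off >= r_off)
		return w_off - r_off;
	return (size - r_off) + w_off;	/* writer has wrapped past the end */
}

int main(void)
{
	/* hypothetical 256 KiB buffer, matching the sizes registered above */
	const size_t size = 256 * 1024;

	assert(log_used_len(size, 100, 100) == 0);             /* caught up */
	assert(log_used_len(size, 5000, 100) == 4900);         /* no wrap */
	assert(log_used_len(size, 100, 5000) == size - 4900);  /* wrapped */
	printf("used-length arithmetic checks out\n");
	return 0;
}
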
diff --git a/drivers/staging/android/logger.h b/drivers/staging/android/logger.h
deleted file mode 100644
index 3f612a3b101..00000000000
--- a/drivers/staging/android/logger.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/* include/linux/logger.h
- *
- * Copyright (C) 2007-2008 Google, Inc.
- * Author: Robert Love <rlove@android.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _LINUX_LOGGER_H
-#define _LINUX_LOGGER_H
-
-#include <linux/types.h>
-#include <linux/ioctl.h>
-
-/*
- * The userspace structure for version 1 of the logger_entry ABI.
- * This structure is returned to userspace unless the caller requests
- * an upgrade to a newer ABI version.
- */
-struct user_logger_entry_compat {
- __u16 len; /* length of the payload */
- __u16 __pad; /* no matter what, we get 2 bytes of padding */
- __s32 pid; /* generating process's pid */
- __s32 tid; /* generating process's tid */
- __s32 sec; /* seconds since Epoch */
- __s32 nsec; /* nanoseconds */
- char msg[0]; /* the entry's payload */
-};
-
-/*
- * The structure for version 2 of the logger_entry ABI.
- * This structure is returned to userspace if ioctl(LOGGER_SET_VERSION)
- * is called with version >= 2
- */
-struct logger_entry {
- __u16 len; /* length of the payload */
- __u16 hdr_size; /* sizeof(struct logger_entry_v2) */
- __s32 pid; /* generating process's pid */
- __s32 tid; /* generating process's tid */
- __s32 sec; /* seconds since Epoch */
- __s32 nsec; /* nanoseconds */
- uid_t euid; /* effective UID of logger */
- char msg[0]; /* the entry's payload */
-};
-
-#define LOGGER_LOG_RADIO "log_radio" /* radio-related messages */
-#define LOGGER_LOG_EVENTS "log_events" /* system/hardware events */
-#define LOGGER_LOG_SYSTEM "log_system" /* system/framework messages */
-#define LOGGER_LOG_MAIN "log_main" /* everything else */
-
-#define LOGGER_ENTRY_MAX_PAYLOAD 4076
-
-#define __LOGGERIO 0xAE
-
-#define LOGGER_GET_LOG_BUF_SIZE _IO(__LOGGERIO, 1) /* size of log */
-#define LOGGER_GET_LOG_LEN _IO(__LOGGERIO, 2) /* used log len */
-#define LOGGER_GET_NEXT_ENTRY_LEN _IO(__LOGGERIO, 3) /* next entry len */
-#define LOGGER_FLUSH_LOG _IO(__LOGGERIO, 4) /* flush log */
-#define LOGGER_GET_VERSION _IO(__LOGGERIO, 5) /* abi version */
-#define LOGGER_SET_VERSION _IO(__LOGGERIO, 6) /* abi version */
-
-#endif /* _LINUX_LOGGER_H */
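
For reference, a consumer of the removed ABI would open the log device, query the buffer size and ABI version through the ioctls declared above, and then read() whole entries. A hedged userspace sketch follows; the device path (/dev/log_main) and the error handling are illustrative assumptions, not part of the header.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>

#define __LOGGERIO 0xAE
#define LOGGER_GET_LOG_BUF_SIZE _IO(__LOGGERIO, 1)
#define LOGGER_GET_VERSION      _IO(__LOGGERIO, 5)

int main(void)
{
	/* assumed device node for the "log_main" misc device */
	int fd = open("/dev/log_main", O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	int size = ioctl(fd, LOGGER_GET_LOG_BUF_SIZE);
	int ver  = ioctl(fd, LOGGER_GET_VERSION);
	printf("buffer size: %d bytes, ABI version: %d\n", size, ver);

	close(fd);
	return 0;
}
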
diff --git a/drivers/staging/prima/CORE/HDD/src/wlan_hdd_assoc.c b/drivers/staging/prima/CORE/HDD/src/wlan_hdd_assoc.c
index e235595251e..0fa1bbf7216 100755
--- a/drivers/staging/prima/CORE/HDD/src/wlan_hdd_assoc.c
+++ b/drivers/staging/prima/CORE/HDD/src/wlan_hdd_assoc.c
@@ -2299,9 +2299,12 @@ eHalStatus hdd_smeRoamCallback( void *pContext, tCsrRoamInfo *pRoamInfo, tANI_U3
if (pHddCtx->hdd_mcastbcast_filter_set == TRUE)
{
hdd_conf_mcastbcast_filter(pHddCtx, FALSE);
- pHddCtx->configuredMcastBcastFilter =
- pHddCtx->sus_res_mcastbcast_filter;
- pHddCtx->sus_res_mcastbcast_filter_valid = VOS_FALSE;
+
+ if (VOS_TRUE == pHddCtx->sus_res_mcastbcast_filter_valid) {
+ pHddCtx->configuredMcastBcastFilter =
+ pHddCtx->sus_res_mcastbcast_filter;
+ pHddCtx->sus_res_mcastbcast_filter_valid = VOS_FALSE;
+ }
hddLog(VOS_TRACE_LEVEL_INFO,
"offload: disassociation happening, restoring configuredMcastBcastFilter");
diff --git a/drivers/staging/prima/CORE/HDD/src/wlan_hdd_early_suspend.c b/drivers/staging/prima/CORE/HDD/src/wlan_hdd_early_suspend.c
index 6af5370921b..56b4ed4242d 100644
--- a/drivers/staging/prima/CORE/HDD/src/wlan_hdd_early_suspend.c
+++ b/drivers/staging/prima/CORE/HDD/src/wlan_hdd_early_suspend.c
@@ -1153,9 +1153,11 @@ static void hdd_conf_resume_ind(hdd_adapter_t *pAdapter)
pHddCtx->hdd_mcastbcast_filter_set = FALSE;
- pHddCtx->configuredMcastBcastFilter =
- pHddCtx->sus_res_mcastbcast_filter;
- pHddCtx->sus_res_mcastbcast_filter_valid = VOS_FALSE;
+ if (VOS_TRUE == pHddCtx->sus_res_mcastbcast_filter_valid) {
+ pHddCtx->configuredMcastBcastFilter =
+ pHddCtx->sus_res_mcastbcast_filter;
+ pHddCtx->sus_res_mcastbcast_filter_valid = VOS_FALSE;
+ }
hddLog(VOS_TRACE_LEVEL_INFO,
"offload: in hdd_conf_resume_ind, restoring configuredMcastBcastFilter");
diff --git a/drivers/staging/prima/CORE/MAC/inc/qwlan_version.h b/drivers/staging/prima/CORE/MAC/inc/qwlan_version.h
index 2c8fdff9d36..548d9e215c1 100644
--- a/drivers/staging/prima/CORE/MAC/inc/qwlan_version.h
+++ b/drivers/staging/prima/CORE/MAC/inc/qwlan_version.h
@@ -60,9 +60,9 @@ BRIEF DESCRIPTION:
#define QWLAN_VERSION_MINOR 2
#define QWLAN_VERSION_PATCH 3
#define QWLAN_VERSION_EXTRA ""
-#define QWLAN_VERSION_BUILD 22
+#define QWLAN_VERSION_BUILD 23
-#define QWLAN_VERSIONSTR "3.2.3.22"
+#define QWLAN_VERSIONSTR "3.2.3.23"
#endif /* QWLAN_VERSION_H */
diff --git a/drivers/usb/gadget/android.c b/drivers/usb/gadget/android.c
index 6b0c4c091fa..4ad4e709c2a 100644
--- a/drivers/usb/gadget/android.c
+++ b/drivers/usb/gadget/android.c
@@ -51,6 +51,7 @@
#include "f_rmnet_smd_sdio.c"
#include "f_rmnet.c"
#include "f_audio_source.c"
+#include "f_midi.c"
#include "f_mass_storage.c"
#include "u_serial.c"
#include "u_sdio.c"
@@ -97,6 +98,12 @@ static const char longname[] = "Gadget Android";
#define ANDROID_DEVICE_NODE_NAME_LENGTH 11
+/* f_midi configuration */
+#define MIDI_INPUT_PORTS 1
+#define MIDI_OUTPUT_PORTS 1
+#define MIDI_BUFFER_SIZE 256
+#define MIDI_QUEUE_LENGTH 32
+
struct android_usb_function {
char *name;
void *config;
@@ -1809,6 +1816,60 @@ static struct android_usb_function uasp_function = {
.bind_config = uasp_function_bind_config,
};
+static int midi_function_init(struct android_usb_function *f,
+ struct usb_composite_dev *cdev)
+{
+ struct midi_alsa_config *config;
+
+ config = kzalloc(sizeof(struct midi_alsa_config), GFP_KERNEL);
+ f->config = config;
+ if (!config)
+ return -ENOMEM;
+ config->card = -1;
+ config->device = -1;
+ return 0;
+}
+
+static void midi_function_cleanup(struct android_usb_function *f)
+{
+ kfree(f->config);
+}
+
+static int midi_function_bind_config(struct android_usb_function *f,
+ struct usb_configuration *c)
+{
+ struct midi_alsa_config *config = f->config;
+
+ return f_midi_bind_config(c, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
+ MIDI_INPUT_PORTS, MIDI_OUTPUT_PORTS, MIDI_BUFFER_SIZE,
+ MIDI_QUEUE_LENGTH, config);
+}
+
+static ssize_t midi_alsa_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct android_usb_function *f = dev_get_drvdata(dev);
+ struct midi_alsa_config *config = f->config;
+
+ /* print ALSA card and device numbers */
+ return sprintf(buf, "%d %d\n", config->card, config->device);
+}
+
+static DEVICE_ATTR(alsa, S_IRUGO, midi_alsa_show, NULL);
+
+static struct device_attribute *midi_function_attributes[] = {
+ &dev_attr_alsa,
+ NULL
+};
+
+static struct android_usb_function midi_function = {
+ .name = "midi",
+ .init = midi_function_init,
+ .cleanup = midi_function_cleanup,
+ .bind_config = midi_function_bind_config,
+ .attributes = midi_function_attributes,
+};
+
static struct android_usb_function *supported_functions[] = {
&mbim_function,
&ecm_qc_function,
@@ -1833,6 +1894,7 @@ static struct android_usb_function *supported_functions[] = {
&mass_storage_function,
&accessory_function,
&audio_source_function,
+ &midi_function,
&uasp_function,
NULL
};
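
The new midi function exposes a read-only "alsa" attribute whose contents are the ALSA card and device numbers printed as "%d %d" (with -1 -1 meaning not yet bound). A userspace consumer could parse it as below; the sysfs path is an assumption for illustration, since the actual location depends on how the android_usb device is named on a given build.

#include <stdio.h>

int main(void)
{
	/* assumed location of the attribute created by DEVICE_ATTR(alsa, ...) */
	const char *path = "/sys/class/android_usb/android0/f_midi/alsa";
	FILE *f = fopen(path, "r");
	int card = -1, device = -1;

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%d %d", &card, &device) != 2) {
		fclose(f);
		fprintf(stderr, "unexpected attribute format\n");
		return 1;
	}
	fclose(f);
	printf("f_midi ALSA card %d, device %d\n", card, device);
	return 0;
}
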
diff --git a/drivers/usb/gadget/f_audio_source.c b/drivers/usb/gadget/f_audio_source.c
index 1c029dc148e..cb12f1deedc 100644
--- a/drivers/usb/gadget/f_audio_source.c
+++ b/drivers/usb/gadget/f_audio_source.c
@@ -34,7 +34,7 @@
#define AUDIO_NUM_INTERFACES 2
/* B.3.1 Standard AC Interface Descriptor */
-static struct usb_interface_descriptor ac_interface_desc = {
+static struct usb_interface_descriptor audio_ac_interface_desc = {
.bLength = USB_DT_INTERFACE_SIZE,
.bDescriptorType = USB_DT_INTERFACE,
.bNumEndpoints = 0,
@@ -50,7 +50,7 @@ DECLARE_UAC_AC_HEADER_DESCRIPTOR(2);
+ UAC_DT_INPUT_TERMINAL_SIZE + UAC_DT_OUTPUT_TERMINAL_SIZE \
+ UAC_DT_FEATURE_UNIT_SIZE(0))
/* B.3.2 Class-Specific AC Interface Descriptor */
-static struct uac1_ac_header_descriptor_2 ac_header_desc = {
+static struct uac1_ac_header_descriptor_2 audio_ac_header_desc = {
.bLength = UAC_DT_AC_HEADER_LENGTH,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_HEADER,
@@ -64,7 +64,7 @@ static struct uac1_ac_header_descriptor_2 ac_header_desc = {
};
#define INPUT_TERMINAL_ID 1
-static struct uac_input_terminal_descriptor input_terminal_desc = {
+static struct uac_input_terminal_descriptor audio_input_terminal_desc = {
.bLength = UAC_DT_INPUT_TERMINAL_SIZE,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_INPUT_TERMINAL,
@@ -77,7 +77,7 @@ static struct uac_input_terminal_descriptor input_terminal_desc = {
DECLARE_UAC_FEATURE_UNIT_DESCRIPTOR(0);
#define FEATURE_UNIT_ID 2
-static struct uac_feature_unit_descriptor_0 feature_unit_desc = {
+static struct uac_feature_unit_descriptor_0 audio_feature_unit_desc = {
.bLength = UAC_DT_FEATURE_UNIT_SIZE(0),
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_FEATURE_UNIT,
@@ -87,7 +87,7 @@ static struct uac_feature_unit_descriptor_0 feature_unit_desc = {
};
#define OUTPUT_TERMINAL_ID 3
-static struct uac1_output_terminal_descriptor output_terminal_desc = {
+static struct uac1_output_terminal_descriptor audio_output_terminal_desc = {
.bLength = UAC_DT_OUTPUT_TERMINAL_SIZE,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_OUTPUT_TERMINAL,
@@ -98,7 +98,7 @@ static struct uac1_output_terminal_descriptor output_terminal_desc = {
};
/* B.4.1 Standard AS Interface Descriptor */
-static struct usb_interface_descriptor as_interface_alt_0_desc = {
+static struct usb_interface_descriptor audio_as_interface_alt_0_desc = {
.bLength = USB_DT_INTERFACE_SIZE,
.bDescriptorType = USB_DT_INTERFACE,
.bAlternateSetting = 0,
@@ -107,7 +107,7 @@ static struct usb_interface_descriptor as_interface_alt_0_desc = {
.bInterfaceSubClass = USB_SUBCLASS_AUDIOSTREAMING,
};
-static struct usb_interface_descriptor as_interface_alt_1_desc = {
+static struct usb_interface_descriptor audio_as_interface_alt_1_desc = {
.bLength = USB_DT_INTERFACE_SIZE,
.bDescriptorType = USB_DT_INTERFACE,
.bAlternateSetting = 1,
@@ -117,7 +117,7 @@ static struct usb_interface_descriptor as_interface_alt_1_desc = {
};
/* B.4.2 Class-Specific AS Interface Descriptor */
-static struct uac1_as_header_descriptor as_header_desc = {
+static struct uac1_as_header_descriptor audio_as_header_desc = {
.bLength = UAC_DT_AS_HEADER_SIZE,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_AS_GENERAL,
@@ -128,7 +128,7 @@ static struct uac1_as_header_descriptor as_header_desc = {
DECLARE_UAC_FORMAT_TYPE_I_DISCRETE_DESC(1);
-static struct uac_format_type_i_discrete_descriptor_1 as_type_i_desc = {
+static struct uac_format_type_i_discrete_descriptor_1 audio_as_type_i_desc = {
.bLength = UAC_FORMAT_TYPE_I_DISCRETE_DESC_SIZE(1),
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_FORMAT_TYPE,
@@ -139,7 +139,7 @@ static struct uac_format_type_i_discrete_descriptor_1 as_type_i_desc = {
};
/* Standard ISO IN Endpoint Descriptor for highspeed */
-static struct usb_endpoint_descriptor hs_as_in_ep_desc = {
+static struct usb_endpoint_descriptor audio_hs_as_in_ep_desc = {
.bLength = USB_DT_ENDPOINT_AUDIO_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
@@ -150,7 +150,7 @@ static struct usb_endpoint_descriptor hs_as_in_ep_desc = {
};
/* Standard ISO IN Endpoint Descriptor for highspeed */
-static struct usb_endpoint_descriptor fs_as_in_ep_desc = {
+static struct usb_endpoint_descriptor audio_fs_as_in_ep_desc = {
.bLength = USB_DT_ENDPOINT_AUDIO_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
@@ -161,7 +161,7 @@ static struct usb_endpoint_descriptor fs_as_in_ep_desc = {
};
/* Class-specific AS ISO OUT Endpoint Descriptor */
-static struct uac_iso_endpoint_descriptor as_iso_in_desc = {
+static struct uac_iso_endpoint_descriptor audio_as_iso_in_desc = {
.bLength = UAC_ISO_ENDPOINT_DESC_SIZE,
.bDescriptorType = USB_DT_CS_ENDPOINT,
.bDescriptorSubtype = UAC_EP_GENERAL,
@@ -171,40 +171,40 @@ static struct uac_iso_endpoint_descriptor as_iso_in_desc = {
};
static struct usb_descriptor_header *hs_audio_desc[] = {
- (struct usb_descriptor_header *)&ac_interface_desc,
- (struct usb_descriptor_header *)&ac_header_desc,
+ (struct usb_descriptor_header *)&audio_ac_interface_desc,
+ (struct usb_descriptor_header *)&audio_ac_header_desc,
- (struct usb_descriptor_header *)&input_terminal_desc,
- (struct usb_descriptor_header *)&output_terminal_desc,
- (struct usb_descriptor_header *)&feature_unit_desc,
+ (struct usb_descriptor_header *)&audio_input_terminal_desc,
+ (struct usb_descriptor_header *)&audio_output_terminal_desc,
+ (struct usb_descriptor_header *)&audio_feature_unit_desc,
- (struct usb_descriptor_header *)&as_interface_alt_0_desc,
- (struct usb_descriptor_header *)&as_interface_alt_1_desc,
- (struct usb_descriptor_header *)&as_header_desc,
+ (struct usb_descriptor_header *)&audio_as_interface_alt_0_desc,
+ (struct usb_descriptor_header *)&audio_as_interface_alt_1_desc,
+ (struct usb_descriptor_header *)&audio_as_header_desc,
- (struct usb_descriptor_header *)&as_type_i_desc,
+ (struct usb_descriptor_header *)&audio_as_type_i_desc,
- (struct usb_descriptor_header *)&hs_as_in_ep_desc,
- (struct usb_descriptor_header *)&as_iso_in_desc,
+ (struct usb_descriptor_header *)&audio_hs_as_in_ep_desc,
+ (struct usb_descriptor_header *)&audio_as_iso_in_desc,
NULL,
};
static struct usb_descriptor_header *fs_audio_desc[] = {
- (struct usb_descriptor_header *)&ac_interface_desc,
- (struct usb_descriptor_header *)&ac_header_desc,
+ (struct usb_descriptor_header *)&audio_ac_interface_desc,
+ (struct usb_descriptor_header *)&audio_ac_header_desc,
- (struct usb_descriptor_header *)&input_terminal_desc,
- (struct usb_descriptor_header *)&output_terminal_desc,
- (struct usb_descriptor_header *)&feature_unit_desc,
+ (struct usb_descriptor_header *)&audio_input_terminal_desc,
+ (struct usb_descriptor_header *)&audio_output_terminal_desc,
+ (struct usb_descriptor_header *)&audio_feature_unit_desc,
- (struct usb_descriptor_header *)&as_interface_alt_0_desc,
- (struct usb_descriptor_header *)&as_interface_alt_1_desc,
- (struct usb_descriptor_header *)&as_header_desc,
+ (struct usb_descriptor_header *)&audio_as_interface_alt_0_desc,
+ (struct usb_descriptor_header *)&audio_as_interface_alt_1_desc,
+ (struct usb_descriptor_header *)&audio_as_header_desc,
- (struct usb_descriptor_header *)&as_type_i_desc,
+ (struct usb_descriptor_header *)&audio_as_type_i_desc,
- (struct usb_descriptor_header *)&fs_as_in_ep_desc,
- (struct usb_descriptor_header *)&as_iso_in_desc,
+ (struct usb_descriptor_header *)&audio_fs_as_in_ep_desc,
+ (struct usb_descriptor_header *)&audio_as_iso_in_desc,
NULL,
};
@@ -560,12 +560,12 @@ static void audio_build_desc(struct audio_dev *audio)
int rate;
/* Set channel numbers */
- input_terminal_desc.bNrChannels = 2;
- as_type_i_desc.bNrChannels = 2;
+ audio_input_terminal_desc.bNrChannels = 2;
+ audio_as_type_i_desc.bNrChannels = 2;
/* Set sample rates */
rate = SAMPLE_RATE;
- sam_freq = as_type_i_desc.tSamFreq[0];
+ sam_freq = audio_as_type_i_desc.tSamFreq[0];
memcpy(sam_freq, &rate, 3);
}
@@ -586,26 +586,32 @@ audio_bind(struct usb_configuration *c, struct usb_function *f)
status = usb_interface_id(c, f);
if (status < 0)
goto fail;
- ac_interface_desc.bInterfaceNumber = status;
+ audio_ac_interface_desc.bInterfaceNumber = status;
+
+ /* AUDIO_AC_INTERFACE */
+ audio_ac_header_desc.baInterfaceNr[0] = status;
status = usb_interface_id(c, f);
if (status < 0)
goto fail;
- as_interface_alt_0_desc.bInterfaceNumber = status;
- as_interface_alt_1_desc.bInterfaceNumber = status;
+ audio_as_interface_alt_0_desc.bInterfaceNumber = status;
+ audio_as_interface_alt_1_desc.bInterfaceNumber = status;
+
+ /* AUDIO_AS_INTERFACE */
+ audio_ac_header_desc.baInterfaceNr[1] = status;
status = -ENODEV;
/* allocate our endpoint */
- ep = usb_ep_autoconfig(cdev->gadget, &fs_as_in_ep_desc);
+ ep = usb_ep_autoconfig(cdev->gadget, &audio_fs_as_in_ep_desc);
if (!ep)
goto fail;
audio->in_ep = ep;
ep->driver_data = audio; /* claim */
if (gadget_is_dualspeed(c->cdev->gadget))
- hs_as_in_ep_desc.bEndpointAddress =
- fs_as_in_ep_desc.bEndpointAddress;
+ audio_hs_as_in_ep_desc.bEndpointAddress =
+ audio_fs_as_in_ep_desc.bEndpointAddress;
for (i = 0, status = 0; i < IN_EP_REQ_COUNT && status == 0; i++) {
req = audio_request_new(ep, IN_EP_MAX_PACKET_SIZE);
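
android.c builds its function drivers by #including the .c files directly (note the new #include "f_midi.c" above), so f_audio_source.c and f_midi.c land in one translation unit; both previously defined static descriptors named ac_interface_desc and ac_header_desc, which is most likely why the audio descriptors gain an audio_ prefix here. A stripped-down sketch with an invented struct, showing how the prefix keeps the combined translation unit well-formed:

#include <stdio.h>

struct interface_desc { int interface_number; };

/* --- what an included "audio.c" would contribute --- */
static struct interface_desc audio_ac_interface_desc = { .interface_number = 0 };

/* --- what an included "midi.c" would contribute --- */
/* without the audio_ prefix above, this definition would trigger
 * "redefinition of 'ac_interface_desc'" in the combined translation unit */
static struct interface_desc ac_interface_desc = { .interface_number = 1 };

int main(void)
{
	printf("audio interface %d, midi interface %d\n",
	       audio_ac_interface_desc.interface_number,
	       ac_interface_desc.interface_number);
	return 0;
}
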
diff --git a/drivers/usb/gadget/f_midi.c b/drivers/usb/gadget/f_midi.c
index 2f7e8f2930c..f7097800bc3 100644
--- a/drivers/usb/gadget/f_midi.c
+++ b/drivers/usb/gadget/f_midi.c
@@ -66,6 +66,11 @@ struct gmidi_in_port {
uint8_t data[2];
};
+struct midi_alsa_config {
+ int card;
+ int device;
+};
+
struct f_midi {
struct usb_function func;
struct usb_gadget *gadget;
@@ -98,7 +103,7 @@ DECLARE_USB_MIDI_OUT_JACK_DESCRIPTOR(1);
DECLARE_USB_MS_ENDPOINT_DESCRIPTOR(16);
/* B.3.1 Standard AC Interface Descriptor */
-static struct usb_interface_descriptor ac_interface_desc __initdata = {
+static struct usb_interface_descriptor ac_interface_desc /* __initdata */ = {
.bLength = USB_DT_INTERFACE_SIZE,
.bDescriptorType = USB_DT_INTERFACE,
/* .bInterfaceNumber = DYNAMIC */
@@ -109,7 +114,7 @@ static struct usb_interface_descriptor ac_interface_desc __initdata = {
};
/* B.3.2 Class-Specific AC Interface Descriptor */
-static struct uac1_ac_header_descriptor_1 ac_header_desc __initdata = {
+static struct uac1_ac_header_descriptor_1 ac_header_desc /* __initdata */ = {
.bLength = UAC_DT_AC_HEADER_SIZE(1),
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = USB_MS_HEADER,
@@ -120,7 +125,7 @@ static struct uac1_ac_header_descriptor_1 ac_header_desc __initdata = {
};
/* B.4.1 Standard MS Interface Descriptor */
-static struct usb_interface_descriptor ms_interface_desc __initdata = {
+static struct usb_interface_descriptor ms_interface_desc /* __initdata */ = {
.bLength = USB_DT_INTERFACE_SIZE,
.bDescriptorType = USB_DT_INTERFACE,
/* .bInterfaceNumber = DYNAMIC */
@@ -131,7 +136,7 @@ static struct usb_interface_descriptor ms_interface_desc __initdata = {
};
/* B.4.2 Class-Specific MS Interface Descriptor */
-static struct usb_ms_header_descriptor ms_header_desc __initdata = {
+static struct usb_ms_header_descriptor ms_header_desc /* __initdata */ = {
.bLength = USB_DT_MS_HEADER_SIZE,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = USB_MS_HEADER,
@@ -410,7 +415,7 @@ static void f_midi_unbind(struct usb_configuration *c, struct usb_function *f)
card = midi->card;
midi->card = NULL;
if (card)
- snd_card_free(card);
+ snd_card_free_when_closed(card);
kfree(midi->id);
midi->id = NULL;
@@ -734,7 +739,7 @@ fail:
/* MIDI function driver setup/binding */
-static int __init
+static int /* __init */
f_midi_bind(struct usb_configuration *c, struct usb_function *f)
{
struct usb_descriptor_header **midi_function;
@@ -918,16 +923,22 @@ fail:
*
* Returns zero on success, else negative errno.
*/
-int __init f_midi_bind_config(struct usb_configuration *c,
+int /* __init */ f_midi_bind_config(struct usb_configuration *c,
int index, char *id,
unsigned int in_ports,
unsigned int out_ports,
unsigned int buflen,
- unsigned int qlen)
+ unsigned int qlen,
+ struct midi_alsa_config* config)
{
struct f_midi *midi;
int status, i;
+ if (config) {
+ config->card = -1;
+ config->device = -1;
+ }
+
/* sanity check */
if (in_ports > MAX_PORTS || out_ports > MAX_PORTS)
return -EINVAL;
@@ -956,6 +967,10 @@ int __init f_midi_bind_config(struct usb_configuration *c,
tasklet_init(&midi->tasklet, f_midi_in_tasklet, (unsigned long) midi);
/* set up ALSA midi devices */
+ midi->id = kstrdup(id, GFP_KERNEL);
+ midi->index = index;
+ midi->buflen = buflen;
+ midi->qlen = qlen;
midi->in_ports = in_ports;
midi->out_ports = out_ports;
status = f_midi_register_card(midi);
@@ -969,15 +984,16 @@ int __init f_midi_bind_config(struct usb_configuration *c,
midi->func.set_alt = f_midi_set_alt;
midi->func.disable = f_midi_disable;
- midi->id = kstrdup(id, GFP_KERNEL);
- midi->index = index;
- midi->buflen = buflen;
- midi->qlen = qlen;
-
status = usb_add_function(c, &midi->func);
if (status)
goto setup_fail;
+
+ if (config) {
+ config->card = midi->rmidi->card->number;
+ config->device = midi->rmidi->device;
+ }
+
return 0;
setup_fail:
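
The reworked f_midi_bind_config() treats the new midi_alsa_config argument as an optional out-parameter: it is reset to -1/-1 up front so every error path leaves a well-defined "not bound" value, and it is filled in only after usb_add_function() succeeds. The same discipline reduced to a generic, standalone C sketch with invented names:

#include <stdio.h>

struct alsa_config { int card; int device; };

/* hypothetical bind routine following the same out-parameter discipline */
static int bind_midi(struct alsa_config *config, int should_fail)
{
	if (config) {			/* pessimistic defaults first */
		config->card = -1;
		config->device = -1;
	}

	if (should_fail)		/* any failure leaves -1/-1 behind */
		return -1;

	if (config) {			/* publish real numbers only on success */
		config->card = 0;
		config->device = 3;
	}
	return 0;
}

int main(void)
{
	struct alsa_config cfg;

	bind_midi(&cfg, 1);
	printf("failed bind: %d %d\n", cfg.card, cfg.device);	/* -1 -1 */
	bind_midi(&cfg, 0);
	printf("good bind:   %d %d\n", cfg.card, cfg.device);	/* 0 3 */
	return 0;
}
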
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl.h b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl.h
index 30e3c52c4c9..6487ff783be 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl.h
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl.h
@@ -395,6 +395,8 @@ struct ddl_decoder_data {
struct ddl_mp2_datadumpenabletype mp2_datadump_enable;
u32 mp2_datadump_status;
u32 extn_user_data_enable;
+ u32 adaptive_width;
+ u32 adaptive_height;
};
union ddl_codec_data{
struct ddl_codec_data_hdr hdr;
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_helper.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_helper.c
index f5fb24fdba4..d192a9bc922 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_helper.c
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_helper.c
@@ -1035,6 +1035,8 @@ u32 ddl_check_reconfig(struct ddl_client_context *ddl)
if (decoder->cont_mode) {
if ((decoder->actual_output_buf_req.sz <=
decoder->client_output_buf_req.sz) &&
+ decoder->frame_size.width <= decoder->adaptive_width &&
+ decoder->frame_size.height <= decoder->adaptive_height &&
(decoder->actual_output_buf_req.actual_count <=
decoder->client_output_buf_req.actual_count)) {
need_reconfig = false;
@@ -1091,8 +1093,23 @@ u32 ddl_check_reconfig(struct ddl_client_context *ddl)
void ddl_handle_reconfig(u32 res_change, struct ddl_client_context *ddl)
{
struct ddl_decoder_data *decoder = &ddl->codec_data.decoder;
+ struct vidc_1080p_dec_disp_info *dec_disp_info =
+ &(decoder->dec_disp_info);
+
+ u32 width = 0;
+ u32 height = 0;
+ u32 adaptive_width = 0;
+ u32 adaptive_height = 0;
+
+ width = DDL_ALIGN(dec_disp_info->img_size_x, DDL_TILE_ALIGN_WIDTH);
+ height = DDL_ALIGN(dec_disp_info->img_size_y, DDL_TILE_ALIGN_HEIGHT);
+
+ adaptive_width = DDL_ALIGN(decoder->adaptive_width, DDL_TILE_ALIGN_WIDTH);
+ adaptive_height = DDL_ALIGN(decoder->adaptive_height, DDL_TILE_ALIGN_HEIGHT);
+
if ((decoder->cont_mode) &&
- (res_change == DDL_RESL_CHANGE_DECREASED)) {
+ (res_change == DDL_RESL_CHANGE_DECREASED) &&
+ width <= adaptive_width && height <= adaptive_height) {
DDL_MSG_LOW("%s Resolution decreased, continue decoding\n",
__func__);
vidc_sm_get_min_yc_dpb_sizes(
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_properties.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_properties.c
index ab681d1d59e..ca17660e583 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_properties.c
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_properties.c
@@ -322,7 +322,11 @@ static u32 ddl_set_dec_property(struct ddl_client_context *ddl,
ddl_set_default_decoder_buffer_req(decoder,
true);
}
- DDL_MSG_HIGH("set VCD_I_FRAME_SIZE width = %d"
+ if (decoder->cont_mode) {
+ decoder->adaptive_width = decoder->client_frame_size.width;
+ decoder->adaptive_height = decoder->client_frame_size.height;
+ }
+ DDL_MSG_LOW("set VCD_I_FRAME_SIZE width = %d"
" height = %d\n",
frame_size->width, frame_size->height);
vcd_status = VCD_S_SUCCESS;
@@ -2020,6 +2024,8 @@ void ddl_set_default_dec_property(struct ddl_client_context *ddl)
decoder->output_order = VCD_DEC_ORDER_DISPLAY;
decoder->field_needed_for_prev_ip = 0;
decoder->cont_mode = 0;
+ decoder->adaptive_width = 0;
+ decoder->adaptive_height = 0;
decoder->reconfig_detected = false;
decoder->dmx_disable = false;
ddl_set_default_metadata_flag(ddl);
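
The decoder changes above record the client's negotiated frame size as adaptive_width/adaptive_height and then avoid a full port reconfig when a new resolution, after tile alignment, still fits inside that envelope. A sketch of the comparison using a hypothetical align_up() helper; the real DDL_ALIGN macro and DDL_TILE_ALIGN_* constants live in the driver headers and are not reproduced here, and the 128x32 tile geometry below is an assumed value for the demo.

#include <stdbool.h>
#include <stdio.h>

/* hypothetical power-of-two round-up; stands in for DDL_ALIGN() */
static unsigned int align_up(unsigned int v, unsigned int a)
{
	return (v + a - 1) & ~(a - 1);
}

static bool fits_adaptive(unsigned int w, unsigned int h,
			  unsigned int adaptive_w, unsigned int adaptive_h,
			  unsigned int tile_w, unsigned int tile_h)
{
	/* compare tile-aligned dimensions, as ddl_handle_reconfig() does */
	return align_up(w, tile_w) <= align_up(adaptive_w, tile_w) &&
	       align_up(h, tile_h) <= align_up(adaptive_h, tile_h);
}

int main(void)
{
	printf("720p into 1080p envelope: %d\n",
	       fits_adaptive(1280, 720, 1920, 1080, 128, 32));	/* 1 */
	printf("4K into 1080p envelope:   %d\n",
	       fits_adaptive(3840, 2160, 1920, 1080, 128, 32));	/* 0 */
	return 0;
}
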
diff --git a/fs/dcache.c b/fs/dcache.c
index 2e4a6b9174f..90d396937db 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -2511,7 +2511,7 @@ static int prepend_path(const struct path *path,
bool slash = false;
int error = 0;
- br_read_lock(vfsmount_lock);
+ br_read_lock(&vfsmount_lock);
while (dentry != root->dentry || vfsmnt != root->mnt) {
struct dentry * parent;
@@ -2542,7 +2542,7 @@ static int prepend_path(const struct path *path,
error = prepend(buffer, buflen, "/", 1);
out:
- br_read_unlock(vfsmount_lock);
+ br_read_unlock(&vfsmount_lock);
return error;
global_root:
@@ -2558,7 +2558,7 @@ global_root:
if (!slash)
error = prepend(buffer, buflen, "/", 1);
if (!error)
- error = real_mount(vfsmnt)->mnt_ns ? 1 : 2;
+ error = is_mounted(vfsmnt) ? 1 : 2;
goto out;
}
diff --git a/fs/file_table.c b/fs/file_table.c
index 70f2a0fd6ae..a305d9e2d1b 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -34,7 +34,6 @@ struct files_stat_struct files_stat = {
.max_files = NR_FILE
};
-DECLARE_LGLOCK(files_lglock);
DEFINE_LGLOCK(files_lglock);
/* SLAB cache for file structures */
@@ -421,9 +420,9 @@ static inline void __file_sb_list_add(struct file *file, struct super_block *sb)
*/
void file_sb_list_add(struct file *file, struct super_block *sb)
{
- lg_local_lock(files_lglock);
+ lg_local_lock(&files_lglock);
__file_sb_list_add(file, sb);
- lg_local_unlock(files_lglock);
+ lg_local_unlock(&files_lglock);
}
/**
@@ -436,9 +435,9 @@ void file_sb_list_add(struct file *file, struct super_block *sb)
void file_sb_list_del(struct file *file)
{
if (!list_empty(&file->f_u.fu_list)) {
- lg_local_lock_cpu(files_lglock, file_list_cpu(file));
+ lg_local_lock_cpu(&files_lglock, file_list_cpu(file));
list_del_init(&file->f_u.fu_list);
- lg_local_unlock_cpu(files_lglock, file_list_cpu(file));
+ lg_local_unlock_cpu(&files_lglock, file_list_cpu(file));
}
}
@@ -485,7 +484,7 @@ void mark_files_ro(struct super_block *sb)
struct file *f;
retry:
- lg_global_lock(files_lglock);
+ lg_global_lock(&files_lglock);
do_file_list_for_each_entry(sb, f) {
struct vfsmount *mnt;
if (!S_ISREG(f->f_path.dentry->d_inode->i_mode))
@@ -502,12 +501,12 @@ retry:
file_release_write(f);
mnt = mntget(f->f_path.mnt);
/* This can sleep, so we can't hold the spinlock. */
- lg_global_unlock(files_lglock);
+ lg_global_unlock(&files_lglock);
mnt_drop_write(mnt);
mntput(mnt);
goto retry;
} while_file_list_for_each_entry;
- lg_global_unlock(files_lglock);
+ lg_global_unlock(&files_lglock);
}
void __init files_init(unsigned long mempages)
@@ -525,6 +524,6 @@ void __init files_init(unsigned long mempages)
n = (mempages * (PAGE_SIZE / 1024)) / 10;
files_stat.max_files = max_t(unsigned long, n, NR_FILE);
files_defer_init();
- lg_lock_init(files_lglock);
+ lg_lock_init(&files_lglock, "files_lglock");
percpu_counter_init(&nr_files, 0);
}
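
The files_lglock conversion above moves from the old name-pasting lglock macros to an explicit struct passed by address: lg_lock_init(&files_lglock, "files_lglock"), lg_local_lock(&files_lglock) on the fast path, lg_global_lock(&files_lglock) in slow paths such as mark_files_ro(). The underlying idea is an array of per-CPU locks: take only your own slot normally, take them all when a global view is needed. A userspace analogue with one pthread mutex per slot (slot count and the demo_* names are invented for the sketch, not kernel API):

#include <pthread.h>
#include <stdio.h>

#define NSLOTS 4	/* stands in for the number of CPUs */

struct demo_lglock {
	pthread_mutex_t slot[NSLOTS];
};

static void demo_lg_init(struct demo_lglock *lg)
{
	for (int i = 0; i < NSLOTS; i++)
		pthread_mutex_init(&lg->slot[i], NULL);
}

/* fast path: serialize only against other users of the same slot */
static void demo_local_lock(struct demo_lglock *lg, int slot)
{
	pthread_mutex_lock(&lg->slot[slot]);
}

static void demo_local_unlock(struct demo_lglock *lg, int slot)
{
	pthread_mutex_unlock(&lg->slot[slot]);
}

/* slow path: take every slot to exclude all local holders at once */
static void demo_global_lock(struct demo_lglock *lg)
{
	for (int i = 0; i < NSLOTS; i++)
		pthread_mutex_lock(&lg->slot[i]);
}

static void demo_global_unlock(struct demo_lglock *lg)
{
	for (int i = NSLOTS - 1; i >= 0; i--)
		pthread_mutex_unlock(&lg->slot[i]);
}

int main(void)
{
	struct demo_lglock files;

	demo_lg_init(&files);
	demo_local_lock(&files, 0);	/* e.g. adding a file to a per-CPU list */
	demo_local_unlock(&files, 0);
	demo_global_lock(&files);	/* e.g. walking every per-CPU list */
	demo_global_unlock(&files);
	printf("lglock demo done\n");
	return 0;
}
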
diff --git a/fs/fs_struct.c b/fs/fs_struct.c
index e159e682ad4..5df4775fea0 100644
--- a/fs/fs_struct.c
+++ b/fs/fs_struct.c
@@ -6,18 +6,6 @@
#include <linux/fs_struct.h>
#include "internal.h"
-static inline void path_get_longterm(struct path *path)
-{
- path_get(path);
- mnt_make_longterm(path->mnt);
-}
-
-static inline void path_put_longterm(struct path *path)
-{
- mnt_make_shortterm(path->mnt);
- path_put(path);
-}
-
/*
* Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
* It can block.
@@ -26,7 +14,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
{
struct path old_root;
- path_get_longterm(path);
+ path_get(path);
spin_lock(&fs->lock);
write_seqcount_begin(&fs->seq);
old_root = fs->root;
@@ -34,7 +22,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
write_seqcount_end(&fs->seq);
spin_unlock(&fs->lock);
if (old_root.dentry)
- path_put_longterm(&old_root);
+ path_put(&old_root);
}
/*
@@ -45,7 +33,7 @@ void set_fs_pwd(struct fs_struct *fs, struct path *path)
{
struct path old_pwd;
- path_get_longterm(path);
+ path_get(path);
spin_lock(&fs->lock);
write_seqcount_begin(&fs->seq);
old_pwd = fs->pwd;
@@ -54,7 +42,7 @@ void set_fs_pwd(struct fs_struct *fs, struct path *path)
spin_unlock(&fs->lock);
if (old_pwd.dentry)
- path_put_longterm(&old_pwd);
+ path_put(&old_pwd);
}
static inline int replace_path(struct path *p, const struct path *old, const struct path *new)
@@ -84,7 +72,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
write_seqcount_end(&fs->seq);
while (hits--) {
count++;
- path_get_longterm(new_root);
+ path_get(new_root);
}
spin_unlock(&fs->lock);
}
@@ -92,13 +80,13 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
while (count--)
- path_put_longterm(old_root);
+ path_put(old_root);
}
void free_fs_struct(struct fs_struct *fs)
{
- path_put_longterm(&fs->root);
- path_put_longterm(&fs->pwd);
+ path_put(&fs->root);
+ path_put(&fs->pwd);
kmem_cache_free(fs_cachep, fs);
}
@@ -132,9 +120,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
spin_lock(&old->lock);
fs->root = old->root;
- path_get_longterm(&fs->root);
+ path_get(&fs->root);
fs->pwd = old->pwd;
- path_get_longterm(&fs->pwd);
+ path_get(&fs->pwd);
spin_unlock(&old->lock);
}
return fs;
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 837a6e388a4..0035a059965 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1000,7 +1000,9 @@ static ssize_t fuse_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
if (err)
goto out;
- file_update_time(file);
+ err = file_update_time(file);
+ if (err)
+ goto out;
if (file->f_flags & O_DIRECT) {
written = generic_file_direct_write(iocb, iov, &nr_segs,
diff --git a/fs/inode.c b/fs/inode.c
index 9f4f5fecc09..06d8bd40b29 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -1480,6 +1480,27 @@ static int relatime_need_update(struct vfsmount *mnt, struct inode *inode,
return 0;
}
+/*
+ * This does the actual work of updating an inodes time or version. Must have
+ * had called mnt_want_write() before calling this.
+ */
+static int update_time(struct inode *inode, struct timespec *time, int flags)
+{
+ if (inode->i_op->update_time)
+ return inode->i_op->update_time(inode, time, flags);
+
+ if (flags & S_ATIME)
+ inode->i_atime = *time;
+ if (flags & S_VERSION)
+ inode_inc_iversion(inode);
+ if (flags & S_CTIME)
+ inode->i_ctime = *time;
+ if (flags & S_MTIME)
+ inode->i_mtime = *time;
+ mark_inode_dirty_sync(inode);
+ return 0;
+}
+
/**
* touch_atime - update the access time
* @mnt: mount the inode is accessed on
@@ -1518,8 +1539,14 @@ void touch_atime(struct path *path)
if (mnt_want_write(mnt))
return;
- inode->i_atime = now;
- mark_inode_dirty_sync(inode);
+ /*
+ * File systems can error out when updating inodes if they need to
+ * allocate new space to modify an inode (such is the case for
+ * Btrfs), but since we touch atime while walking down the path we
+ * really don't care if we failed to update the atime of the file,
+ * so just ignore the return value.
+ */
+ update_time(inode, &now, S_ATIME);
mnt_drop_write(mnt);
}
EXPORT_SYMBOL(touch_atime);
@@ -1533,18 +1560,20 @@ EXPORT_SYMBOL(touch_atime);
* usage in the file write path of filesystems, and filesystems may
* choose to explicitly ignore update via this function with the
* S_NOCMTIME inode flag, e.g. for network filesystem where these
- * timestamps are handled by the server.
+ * timestamps are handled by the server. This can return an error for
+ * file systems who need to allocate space in order to update an inode.
*/
-void file_update_time(struct file *file)
+int file_update_time(struct file *file)
{
struct inode *inode = file->f_path.dentry->d_inode;
struct timespec now;
- enum { S_MTIME = 1, S_CTIME = 2, S_VERSION = 4 } sync_it = 0;
+ int sync_it = 0;
+ int ret;
/* First try to exhaust all avenues to not sync */
if (IS_NOCMTIME(inode))
- return;
+ return 0;
now = current_fs_time(inode->i_sb);
if (!timespec_equal(&inode->i_mtime, &now))
@@ -1557,21 +1586,16 @@ void file_update_time(struct file *file)
sync_it |= S_VERSION;
if (!sync_it)
- return;
+ return 0;
/* Finally allowed to write? Takes lock. */
if (mnt_want_write_file(file))
- return;
+ return 0;
- /* Only change inode inside the lock region */
- if (sync_it & S_VERSION)
- inode_inc_iversion(inode);
- if (sync_it & S_CTIME)
- inode->i_ctime = now;
- if (sync_it & S_MTIME)
- inode->i_mtime = now;
- mark_inode_dirty_sync(inode);
+ ret = update_time(inode, &now, sync_it);
mnt_drop_write_file(file);
+
+ return ret;
}
EXPORT_SYMBOL(file_update_time);
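
file_update_time() now builds a bitmask of which fields actually changed, bails out early when the mask is empty, and hands the mask plus the new time to the shared update_time() helper, propagating any error back to callers such as the fuse_file_aio_write() hunk above. The control flow reduced to a standalone sketch; the UPD_* flag values and demo_* names are invented for the example (the kernel uses S_MTIME, S_CTIME, S_VERSION).

#include <stdio.h>
#include <time.h>

#define UPD_MTIME   0x1
#define UPD_CTIME   0x2
#define UPD_VERSION 0x4

struct demo_inode {
	time_t mtime, ctime;
	unsigned long version;
};

/* mirrors update_time(): apply exactly the requested fields */
static int demo_update_time(struct demo_inode *inode, time_t now, int flags)
{
	if (flags & UPD_MTIME)
		inode->mtime = now;
	if (flags & UPD_CTIME)
		inode->ctime = now;
	if (flags & UPD_VERSION)
		inode->version++;
	return 0;	/* a real filesystem hook could fail here */
}

static int demo_file_update_time(struct demo_inode *inode)
{
	time_t now = time(NULL);
	int sync_it = 0;

	if (inode->mtime != now)
		sync_it |= UPD_MTIME;
	if (inode->ctime != now)
		sync_it |= UPD_CTIME;

	if (!sync_it)
		return 0;	/* nothing to write back */

	return demo_update_time(inode, now, sync_it);
}

int main(void)
{
	struct demo_inode inode = { 0, 0, 0 };
	int err = demo_file_update_time(&inode);

	printf("update returned %d, mtime=%ld\n", err, (long)inode.mtime);
	return 0;
}
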
diff --git a/fs/internal.h b/fs/internal.h
index 9962c59ba28..c0f75b80425 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -50,13 +50,11 @@ extern int copy_mount_string(const void __user *, char **);
extern struct vfsmount *lookup_mnt(struct path *);
extern int finish_automount(struct vfsmount *, struct path *);
-extern void mnt_make_longterm(struct vfsmount *);
-extern void mnt_make_shortterm(struct vfsmount *);
extern int sb_prepare_remount_readonly(struct super_block *);
extern void __init mnt_init(void);
-DECLARE_BRLOCK(vfsmount_lock);
+extern struct lglock vfsmount_lock;
/*
diff --git a/fs/mount.h b/fs/mount.h
index 4ef36d93e5a..cd500798040 100644
--- a/fs/mount.h
+++ b/fs/mount.h
@@ -4,8 +4,11 @@
struct mnt_namespace {
atomic_t count;
+ unsigned int proc_inum;
struct mount * root;
struct list_head list;
+ struct user_namespace *user_ns;
+ u64 seq; /* Sequence number to prevent loops */
wait_queue_head_t poll;
int event;
};
@@ -22,7 +25,6 @@ struct mount {
struct vfsmount mnt;
#ifdef CONFIG_SMP
struct mnt_pcp __percpu *mnt_pcp;
- atomic_t mnt_longterm; /* how many of the refs are longterm */
#else
int mnt_count;
int mnt_writers;
@@ -49,6 +51,8 @@ struct mount {
int mnt_ghosts;
};
+#define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */
+
static inline struct mount *real_mount(struct vfsmount *mnt)
{
return container_of(mnt, struct mount, mnt);
@@ -59,6 +63,12 @@ static inline int mnt_has_parent(struct mount *mnt)
return mnt != mnt->mnt_parent;
}
+static inline int is_mounted(struct vfsmount *mnt)
+{
+ /* neither detached nor internal? */
+ return !IS_ERR_OR_NULL(real_mount(mnt));
+}
+
extern struct mount *__lookup_mnt(struct vfsmount *, struct dentry *, int);
static inline void get_mnt_ns(struct mnt_namespace *ns)
@@ -67,10 +77,12 @@ static inline void get_mnt_ns(struct mnt_namespace *ns)
}
struct proc_mounts {
- struct seq_file m; /* must be the first element */
+ struct seq_file m;
struct mnt_namespace *ns;
struct path root;
int (*show)(struct seq_file *, struct vfsmount *);
};
+#define proc_mounts(p) (container_of((p), struct proc_mounts, m))
+
extern const struct seq_operations mounts_op;
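
The new proc_mounts() helper is container_of() packaged as a macro: given the address of the embedded seq_file, it recovers the enclosing struct proc_mounts, replacing the "must be the first element" assumption that the old comment documented. The same trick in a standalone C example; the struct names are invented for the demo.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct seq_demo { int count; };

struct proc_mounts_demo {
	struct seq_demo m;	/* embedded member, like struct seq_file m */
	const char *ns_name;
};

#define proc_mounts_demo(p) container_of((p), struct proc_mounts_demo, m)

int main(void)
{
	struct proc_mounts_demo pm = { .m = { .count = 7 }, .ns_name = "init" };
	struct seq_demo *seq = &pm.m;		/* what the iterator callbacks see */
	struct proc_mounts_demo *back = proc_mounts_demo(seq);

	printf("ns=%s count=%d\n", back->ns_name, back->m.count);
	return 0;
}
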
diff --git a/fs/namei.c b/fs/namei.c
index c42791914f8..1832e328287 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -462,7 +462,7 @@ static int unlazy_walk(struct nameidata *nd, struct dentry *dentry)
mntget(nd->path.mnt);
rcu_read_unlock();
- br_read_unlock(vfsmount_lock);
+ br_read_unlock(&vfsmount_lock);
nd->flags &= ~LOOKUP_RCU;
return 0;
@@ -520,14 +520,14 @@ static int complete_walk(struct nameidata *nd)
if (unlikely(!__d_rcu_to_refcount(dentry, nd->seq))) {
spin_unlock(&dentry->d_lock);
rcu_read_unlock();
- br_read_unlock(vfsmount_lock);
+ br_read_unlock(&vfsmount_lock);
return -ECHILD;
}
BUG_ON(nd->inode != dentry->d_inode);
spin_unlock(&dentry->d_lock);
mntget(nd->path.mnt);
rcu_read_unlock();
- br_read_unlock(vfsmount_lock);
+ br_read_unlock(&vfsmount_lock);
}
if (likely(!(nd->flags & LOOKUP_JUMPED)))
@@ -688,21 +688,31 @@ static int follow_up_rcu(struct path *path)
return 1;
}
+/*
+ * follow_up - Find the mountpoint of path's vfsmount
+ *
+ * Given a path, find the mountpoint of its source file system.
+ * Replace @path with the path of the mountpoint in the parent mount.
+ * Up is towards /.
+ *
+ * Return 1 if we went up a level and 0 if we were already at the
+ * root.
+ */
int follow_up(struct path *path)
{
struct mount *mnt = real_mount(path->mnt);
struct mount *parent;
struct dentry *mountpoint;
- br_read_lock(vfsmount_lock);
+ br_read_lock(&vfsmount_lock);
parent = mnt->mnt_parent;
if (&parent->mnt == path->mnt) {
- br_read_unlock(vfsmount_lock);
+ br_read_unlock(&vfsmount_lock);
return 0;
}
mntget(&parent->mnt);
mountpoint = dget(mnt->mnt_mountpoint);
- br_read_unlock(vfsmount_lock);
+ br_read_unlock(&vfsmount_lock);
dput(path->dentry);
path->dentry = mountpoint;
mntput(path->mnt);
@@ -960,7 +970,7 @@ failed:
if (!(nd->flags & LOOKUP_ROOT))
nd->root.mnt = NULL;
rcu_read_unlock();
- br_read_unlock(vfsmount_lock);
+ br_read_unlock(&vfsmount_lock);
return -ECHILD;
}
@@ -1265,7 +1275,7 @@ static void terminate_walk(struct nameidata *nd)
if (!(nd->flags & LOOKUP_ROOT))
nd->root.mnt = NULL;
rcu_read_unlock();
- br_read_unlock(vfsmount_lock);
+ br_read_unlock(&vfsmount_lock);
}
}
@@ -1618,7 +1628,7 @@ static int path_init(int dfd, const char *name, unsigned int flags,
nd->path = nd->root;
nd->inode = inode;
if (flags & LOOKUP_RCU) {
- br_read_lock(vfsmount_lock);
+ br_read_lock(&vfsmount_lock);
rcu_read_lock();
nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
} else {
@@ -1631,7 +1641,7 @@ static int path_init(int dfd, const char *name, unsigned int flags,
if (*name=='/') {
if (flags & LOOKUP_RCU) {
- br_read_lock(vfsmount_lock);
+ br_read_lock(&vfsmount_lock);
rcu_read_lock();
set_root_rcu(nd);
} else {
@@ -1644,7 +1654,7 @@ static int path_init(int dfd, const char *name, unsigned int flags,
struct fs_struct *fs = current->fs;
unsigned seq;
- br_read_lock(vfsmount_lock);
+ br_read_lock(&vfsmount_lock);
rcu_read_lock();
do {
@@ -1680,7 +1690,7 @@ static int path_init(int dfd, const char *name, unsigned int flags,
if (fput_needed)
*fp = file;
nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
- br_read_lock(vfsmount_lock);
+ br_read_lock(&vfsmount_lock);
rcu_read_lock();
} else {
path_get(&file->f_path);
diff --git a/fs/namespace.c b/fs/namespace.c
index 1200e29497b..08ebb1d63b2 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -12,6 +12,7 @@
#include <linux/export.h>
#include <linux/capability.h>
#include <linux/mnt_namespace.h>
+#include <linux/user_namespace.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/idr.h>
@@ -20,6 +21,7 @@
#include <linux/fs_struct.h> /* get_fs_root et.al. */
#include <linux/fsnotify.h> /* fsnotify_vfsmount_delete */
#include <linux/uaccess.h>
+#include <linux/proc_fs.h>
#include "pnode.h"
#include "internal.h"
@@ -397,7 +399,7 @@ static int mnt_make_readonly(struct mount *mnt)
{
int ret = 0;
- br_write_lock(vfsmount_lock);
+ br_write_lock(&vfsmount_lock);
mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
/*
* After storing MNT_WRITE_HOLD, we'll read the counters. This store
@@ -431,15 +433,15 @@ static int mnt_make_readonly(struct mount *mnt)
*/
smp_wmb();
mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
- br_write_unlock(vfsmount_lock);
+ br_write_unlock(&vfsmount_lock);
return ret;
}
static void __mnt_unmake_readonly(struct mount *mnt)
{
- br_write_lock(vfsmount_lock);
+ br_write_lock(&vfsmount_lock);
mnt->mnt.mnt_flags &= ~MNT_READONLY;
- br_write_unlock(vfsmount_lock);
+ br_write_unlock(&vfsmount_lock);
}
int sb_prepare_remount_readonly(struct super_block *sb)
@@ -451,7 +453,7 @@ int sb_prepare_remount_readonly(struct super_block *sb)
if (atomic_long_read(&sb->s_remove_count))
return -EBUSY;
- br_write_lock(vfsmount_lock);
+ br_write_lock(&vfsmount_lock);
list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
if (!(mnt->mnt.mnt_flags & MNT_READONLY)) {
mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
@@ -473,7 +475,7 @@ int sb_prepare_remount_readonly(struct super_block *sb)
if (mnt->mnt.mnt_flags & MNT_WRITE_HOLD)
mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
}
- br_write_unlock(vfsmount_lock);
+ br_write_unlock(&vfsmount_lock);
return err;
}
@@ -515,21 +517,33 @@ struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry,
}
/*
- * lookup_mnt increments the ref count before returning
- * the vfsmount struct.
+ * lookup_mnt - Return the first child mount mounted at path
+ *
+ * "First" means first mounted chronologically. If you create the
+ * following mounts:
+ *
+ * mount /dev/sda1 /mnt
+ * mount /dev/sda2 /mnt
+ * mount /dev/sda3 /mnt
+ *
+ * Then lookup_mnt() on the base /mnt dentry in the root mount will
+ * return successively the root dentry and vfsmount of /dev/sda1, then
+ * /dev/sda2, then /dev/sda3, then NULL.
+ *
+ * lookup_mnt takes a reference to the found vfsmount.
*/
struct vfsmount *lookup_mnt(struct path *path)
{
struct mount *child_mnt;
- br_read_lock(vfsmount_lock);
+ br_read_lock(&vfsmount_lock);
child_mnt = __lookup_mnt(path->mnt, path->dentry, 1);
if (child_mnt) {
mnt_add_count(child_mnt, 1);
- br_read_unlock(vfsmount_lock);
+ br_read_unlock(&vfsmount_lock);
return &child_mnt->mnt;
} else {
- br_read_unlock(vfsmount_lock);
+ br_read_unlock(&vfsmount_lock);
return NULL;
}
}
@@ -621,21 +635,6 @@ static void attach_mnt(struct mount *mnt, struct path *path)
list_add_tail(&mnt->mnt_child, &real_mount(path->mnt)->mnt_mounts);
}
-static inline void __mnt_make_longterm(struct mount *mnt)
-{
-#ifdef CONFIG_SMP
- atomic_inc(&mnt->mnt_longterm);
-#endif
-}
-
-/* needs vfsmount lock for write */
-static inline void __mnt_make_shortterm(struct mount *mnt)
-{
-#ifdef CONFIG_SMP
- atomic_dec(&mnt->mnt_longterm);
-#endif
-}
-
/*
* vfsmount lock must be held for write
*/
@@ -649,10 +648,8 @@ static void commit_tree(struct mount *mnt)
BUG_ON(parent == mnt);
list_add_tail(&head, &mnt->mnt_list);
- list_for_each_entry(m, &head, mnt_list) {
+ list_for_each_entry(m, &head, mnt_list)
m->mnt_ns = n;
- __mnt_make_longterm(m);
- }
list_splice(&head, n->list.prev);
@@ -714,9 +711,9 @@ vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void
mnt->mnt.mnt_sb = root->d_sb;
mnt->mnt_mountpoint = mnt->mnt.mnt_root;
mnt->mnt_parent = mnt;
- br_write_lock(vfsmount_lock);
+ br_write_lock(&vfsmount_lock);
list_add_tail(&mnt->mnt_instance, &root->d_sb->s_mounts);
- br_write_unlock(vfsmount_lock);
+ br_write_unlock(&vfsmount_lock);
return &mnt->mnt;
}
EXPORT_SYMBOL_GPL(vfs_kern_mount);
@@ -725,56 +722,61 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
int flag)
{
struct super_block *sb = old->mnt.mnt_sb;
- struct mount *mnt = alloc_vfsmnt(old->mnt_devname);
+ struct mount *mnt;
+ int err;
- if (mnt) {
- if (flag & (CL_SLAVE | CL_PRIVATE))
- mnt->mnt_group_id = 0; /* not a peer of original */
- else
- mnt->mnt_group_id = old->mnt_group_id;
-
- if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) {
- int err = mnt_alloc_group_id(mnt);
- if (err)
- goto out_free;
- }
+ mnt = alloc_vfsmnt(old->mnt_devname);
+ if (!mnt)
+ return ERR_PTR(-ENOMEM);
- mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~MNT_WRITE_HOLD;
- atomic_inc(&sb->s_active);
- mnt->mnt.mnt_sb = sb;
- mnt->mnt.mnt_root = dget(root);
- mnt->mnt_mountpoint = mnt->mnt.mnt_root;
- mnt->mnt_parent = mnt;
- br_write_lock(vfsmount_lock);
- list_add_tail(&mnt->mnt_instance, &sb->s_mounts);
- br_write_unlock(vfsmount_lock);
-
- if (flag & CL_SLAVE) {
- list_add(&mnt->mnt_slave, &old->mnt_slave_list);
- mnt->mnt_master = old;
- CLEAR_MNT_SHARED(mnt);
- } else if (!(flag & CL_PRIVATE)) {
- if ((flag & CL_MAKE_SHARED) || IS_MNT_SHARED(old))
- list_add(&mnt->mnt_share, &old->mnt_share);
- if (IS_MNT_SLAVE(old))
- list_add(&mnt->mnt_slave, &old->mnt_slave);
- mnt->mnt_master = old->mnt_master;
- }
- if (flag & CL_MAKE_SHARED)
- set_mnt_shared(mnt);
-
- /* stick the duplicate mount on the same expiry list
- * as the original if that was on one */
- if (flag & CL_EXPIRE) {
- if (!list_empty(&old->mnt_expire))
- list_add(&mnt->mnt_expire, &old->mnt_expire);
- }
+ if (flag & (CL_SLAVE | CL_PRIVATE | CL_SHARED_TO_SLAVE))
+ mnt->mnt_group_id = 0; /* not a peer of original */
+ else
+ mnt->mnt_group_id = old->mnt_group_id;
+
+ if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) {
+ err = mnt_alloc_group_id(mnt);
+ if (err)
+ goto out_free;
+ }
+
+ mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~MNT_WRITE_HOLD;
+ atomic_inc(&sb->s_active);
+ mnt->mnt.mnt_sb = sb;
+ mnt->mnt.mnt_root = dget(root);
+ mnt->mnt_mountpoint = mnt->mnt.mnt_root;
+ mnt->mnt_parent = mnt;
+ br_write_lock(&vfsmount_lock);
+ list_add_tail(&mnt->mnt_instance, &sb->s_mounts);
+ br_write_unlock(&vfsmount_lock);
+
+ if ((flag & CL_SLAVE) ||
+ ((flag & CL_SHARED_TO_SLAVE) && IS_MNT_SHARED(old))) {
+ list_add(&mnt->mnt_slave, &old->mnt_slave_list);
+ mnt->mnt_master = old;
+ CLEAR_MNT_SHARED(mnt);
+ } else if (!(flag & CL_PRIVATE)) {
+ if ((flag & CL_MAKE_SHARED) || IS_MNT_SHARED(old))
+ list_add(&mnt->mnt_share, &old->mnt_share);
+ if (IS_MNT_SLAVE(old))
+ list_add(&mnt->mnt_slave, &old->mnt_slave);
+ mnt->mnt_master = old->mnt_master;
+ }
+ if (flag & CL_MAKE_SHARED)
+ set_mnt_shared(mnt);
+
+ /* stick the duplicate mount on the same expiry list
+ * as the original if that was on one */
+ if (flag & CL_EXPIRE) {
+ if (!list_empty(&old->mnt_expire))
+ list_add(&mnt->mnt_expire, &old->mnt_expire);
}
+
return mnt;
out_free:
free_vfsmnt(mnt);
- return NULL;
+ return ERR_PTR(err);
}
static inline void mntfree(struct mount *mnt)
@@ -803,35 +805,37 @@ static void mntput_no_expire(struct mount *mnt)
{
put_again:
#ifdef CONFIG_SMP
- br_read_lock(vfsmount_lock);
- if (likely(atomic_read(&mnt->mnt_longterm))) {
+ br_read_lock(&vfsmount_lock);
+ if (likely(mnt->mnt_ns)) {
+ /* shouldn't be the last one */
mnt_add_count(mnt, -1);
- br_read_unlock(vfsmount_lock);
+ br_read_unlock(&vfsmount_lock);
return;
}
- br_read_unlock(vfsmount_lock);
+ br_read_unlock(&vfsmount_lock);
- br_write_lock(vfsmount_lock);
+ br_write_lock(&vfsmount_lock);
mnt_add_count(mnt, -1);
if (mnt_get_count(mnt)) {
- br_write_unlock(vfsmount_lock);
+ br_write_unlock(&vfsmount_lock);
return;
}
#else
mnt_add_count(mnt, -1);
if (likely(mnt_get_count(mnt)))
return;
- br_write_lock(vfsmount_lock);
+ br_write_lock(&vfsmount_lock);
#endif
if (unlikely(mnt->mnt_pinned)) {
mnt_add_count(mnt, mnt->mnt_pinned + 1);
mnt->mnt_pinned = 0;
- br_write_unlock(vfsmount_lock);
+ br_write_unlock(&vfsmount_lock);
acct_auto_close_mnt(&mnt->mnt);
goto put_again;
}
+
list_del(&mnt->mnt_instance);
- br_write_unlock(vfsmount_lock);
+ br_write_unlock(&vfsmount_lock);
mntfree(mnt);
}
@@ -857,21 +861,21 @@ EXPORT_SYMBOL(mntget);
void mnt_pin(struct vfsmount *mnt)
{
- br_write_lock(vfsmount_lock);
+ br_write_lock(&vfsmount_lock);
real_mount(mnt)->mnt_pinned++;
- br_write_unlock(vfsmount_lock);
+ br_write_unlock(&vfsmount_lock);
}
EXPORT_SYMBOL(mnt_pin);
void mnt_unpin(struct vfsmount *m)
{
struct mount *mnt = real_mount(m);
- br_write_lock(vfsmount_lock);
+ br_write_lock(&vfsmount_lock);
if (mnt->mnt_pinned) {
mnt_add_count(mnt, 1);
mnt->mnt_pinned--;
}
- br_write_unlock(vfsmount_lock);
+ br_write_unlock(&vfsmount_lock);
}
EXPORT_SYMBOL(mnt_unpin);
@@ -938,7 +942,7 @@ EXPORT_SYMBOL(replace_mount_options);
/* iterator; we want it to have access to namespace_sem, thus here... */
static void *m_start(struct seq_file *m, loff_t *pos)
{
- struct proc_mounts *p = container_of(m, struct proc_mounts, m);
+ struct proc_mounts *p = proc_mounts(m);
down_read(&namespace_sem);
return seq_list_start(&p->ns->list, *pos);
@@ -946,7 +950,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)
static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
- struct proc_mounts *p = container_of(m, struct proc_mounts, m);
+ struct proc_mounts *p = proc_mounts(m);
return seq_list_next(v, &p->ns->list, pos);
}
@@ -958,7 +962,7 @@ static void m_stop(struct seq_file *m, void *v)
static int m_show(struct seq_file *m, void *v)
{
- struct proc_mounts *p = container_of(m, struct proc_mounts, m);
+ struct proc_mounts *p = proc_mounts(m);
struct mount *r = list_entry(v, struct mount, mnt_list);
return p->show(m, &r->mnt);
}
@@ -988,12 +992,12 @@ int may_umount_tree(struct vfsmount *m)
BUG_ON(!m);
/* write lock needed for mnt_get_count */
- br_write_lock(vfsmount_lock);
+ br_write_lock(&vfsmount_lock);
for (p = mnt; p; p = next_mnt(p, mnt)) {
actual_refs += mnt_get_count(p);
minimum_refs += 2;
}
- br_write_unlock(vfsmount_lock);
+ br_write_unlock(&vfsmount_lock);
if (actual_refs > minimum_refs)
return 0;
@@ -1020,10 +1024,10 @@ int may_umount(struct vfsmount *mnt)
{
int ret = 1;
down_read(&namespace_sem);
- br_write_lock(vfsmount_lock);
+ br_write_lock(&vfsmount_lock);
if (propagate_mount_busy(real_mount(mnt), 2))
ret = 0;
- br_write_unlock(vfsmount_lock);
+ br_write_unlock(&vfsmount_lock);
up_read(&namespace_sem);
return ret;
}
@@ -1040,13 +1044,13 @@ void release_mounts(struct list_head *head)
struct dentry *dentry;
struct mount *m;
- br_write_lock(vfsmount_lock);
+ br_write_lock(&vfsmount_lock);
dentry = mnt->mnt_mountpoint;
m = mnt->mnt_parent;
mnt->mnt_mountpoint = mnt->mnt.mnt_root;
mnt->mnt_parent = mnt;
m->mnt_ghosts--;
- br_write_unlock(vfsmount_lock);
+ br_write_unlock(&vfsmount_lock);
dput(dentry);
mntput(&m->mnt);
}
@@ -1074,7 +1078,6 @@ void umount_tree(struct mount *mnt, int propagate, struct list_head *kill)
list_del_init(&p->mnt_list);
__touch_mnt_namespace(p->mnt_ns);
p->mnt_ns = NULL;
- __mnt_make_shortterm(p);
list_del_init(&p->mnt_child);
if (mnt_has_parent(p)) {
p->mnt_parent->mnt_ghosts++;
@@ -1112,12 +1115,12 @@ static int do_umount(struct mount *mnt, int flags)
* probably don't strictly need the lock here if we examined
* all race cases, but it's a slowpath.
*/
- br_write_lock(vfsmount_lock);
+ br_write_lock(&vfsmount_lock);
if (mnt_get_count(mnt) != 2) {
- br_write_unlock(vfsmount_lock);
+ br_write_unlock(&vfsmount_lock);
return -EBUSY;
}
- br_write_unlock(vfsmount_lock);
+ br_write_unlock(&vfsmount_lock);
if (!xchg(&mnt->mnt_expiry_mark, 1))
return -EAGAIN;
@@ -1159,7 +1162,7 @@ static int do_umount(struct mount *mnt, int flags)
}
down_write(&namespace_sem);
- br_write_lock(vfsmount_lock);
+ br_write_lock(&vfsmount_lock);
event++;
if (!(flags & MNT_DETACH))
@@ -1171,7 +1174,7 @@ static int do_umount(struct mount *mnt, int flags)
umount_tree(mnt, 1, &umount_list);
retval = 0;
}
- br_write_unlock(vfsmount_lock);
+ br_write_unlock(&vfsmount_lock);
up_write(&namespace_sem);
release_mounts(&umount_list);
return retval;
@@ -1209,7 +1212,7 @@ SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
goto dput_and_out;
retval = -EPERM;
- if (!capable(CAP_SYS_ADMIN))
+ if (!ns_capable(mnt->mnt_ns->user_ns, CAP_SYS_ADMIN))
goto dput_and_out;
retval = do_umount(mnt, flags);
@@ -1235,7 +1238,7 @@ SYSCALL_DEFINE1(oldumount, char __user *, name)
static int mount_is_safe(struct path *path)
{
- if (capable(CAP_SYS_ADMIN))
+ if (ns_capable(real_mount(path->mnt)->mnt_ns->user_ns, CAP_SYS_ADMIN))
return 0;
return -EPERM;
#ifdef notyet
@@ -1251,6 +1254,26 @@ static int mount_is_safe(struct path *path)
#endif
}
+static bool mnt_ns_loop(struct path *path)
+{
+ /* Could bind mounting the mount namespace inode cause a
+ * mount namespace loop?
+ */
+ struct inode *inode = path->dentry->d_inode;
+ struct proc_inode *ei;
+ struct mnt_namespace *mnt_ns;
+
+ if (!proc_ns_inode(inode))
+ return false;
+
+ ei = PROC_I(inode);
+ if (ei->ns_ops != &mntns_operations)
+ return false;
+
+ mnt_ns = ei->ns;
+ return current->nsproxy->mnt_ns->seq >= mnt_ns->seq;
+}
+
struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
int flag)
{
@@ -1258,11 +1281,12 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
struct path path;
if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(mnt))
- return NULL;
+ return ERR_PTR(-EINVAL);
res = q = clone_mnt(mnt, dentry, flag);
- if (!q)
- goto Enomem;
+ if (IS_ERR(q))
+ return q;
+
q->mnt_mountpoint = mnt->mnt_mountpoint;
p = mnt;
@@ -1284,26 +1308,28 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
path.mnt = &q->mnt;
path.dentry = p->mnt_mountpoint;
q = clone_mnt(p, p->mnt.mnt_root, flag);
- if (!q)
- goto Enomem;
- br_write_lock(vfsmount_lock);
+ if (IS_ERR(q))
+ goto out;
+ br_write_lock(&vfsmount_lock);
list_add_tail(&q->mnt_list, &res->mnt_list);
attach_mnt(q, &path);
- br_write_unlock(vfsmount_lock);
+ br_write_unlock(&vfsmount_lock);
}
}
return res;
-Enomem:
+out:
if (res) {
LIST_HEAD(umount_list);
- br_write_lock(vfsmount_lock);
+ br_write_lock(&vfsmount_lock);
umount_tree(res, 0, &umount_list);
- br_write_unlock(vfsmount_lock);
+ br_write_unlock(&vfsmount_lock);
release_mounts(&umount_list);
}
- return NULL;
+ return q;
}
+/* Caller should check returned pointer for errors */
+
struct vfsmount *collect_mounts(struct path *path)
{
struct mount *tree;
@@ -1311,16 +1337,18 @@ struct vfsmount *collect_mounts(struct path *path)
tree = copy_tree(real_mount(path->mnt), path->dentry,
CL_COPY_ALL | CL_PRIVATE);
up_write(&namespace_sem);
- return tree ? &tree->mnt : NULL;
+ if (IS_ERR(tree))
+ return NULL;
+ return &tree->mnt;
}
void drop_collected_mounts(struct vfsmount *mnt)
{
LIST_HEAD(umount_list);
down_write(&namespace_sem);
- br_write_lock(vfsmount_lock);
+ br_write_lock(&vfsmount_lock);
umount_tree(real_mount(mnt), 0, &umount_list);
- br_write_unlock(vfsmount_lock);
+ br_write_unlock(&vfsmount_lock);
up_write(&namespace_sem);
release_mounts(&umount_list);
}
@@ -1448,7 +1476,7 @@ static int attach_recursive_mnt(struct mount *source_mnt,
if (err)
goto out_cleanup_ids;
- br_write_lock(vfsmount_lock);
+ br_write_lock(&vfsmount_lock);
if (IS_MNT_SHARED(dest_mnt)) {
for (p = source_mnt; p; p = next_mnt(p, source_mnt))
@@ -1467,7 +1495,7 @@ static int attach_recursive_mnt(struct mount *source_mnt,
list_del_init(&child->mnt_hash);
commit_tree(child);
}
- br_write_unlock(vfsmount_lock);
+ br_write_unlock(&vfsmount_lock);
return 0;
@@ -1548,7 +1576,7 @@ static int do_change_type(struct path *path, int flag)
int type;
int err = 0;
- if (!capable(CAP_SYS_ADMIN))
+ if (!ns_capable(mnt->mnt_ns->user_ns, CAP_SYS_ADMIN))
return -EPERM;
if (path->dentry != path->mnt->mnt_root)
@@ -1565,10 +1593,10 @@ static int do_change_type(struct path *path, int flag)
goto out_unlock;
}
- br_write_lock(vfsmount_lock);
+ br_write_lock(&vfsmount_lock);
for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
change_mnt_propagation(m, type);
- br_write_unlock(vfsmount_lock);
+ br_write_unlock(&vfsmount_lock);
out_unlock:
up_write(&namespace_sem);
@@ -1578,7 +1606,7 @@ static int do_change_type(struct path *path, int flag)
/*
* do loopback mount.
*/
-static int do_loopback(struct path *path, char *old_name,
+static int do_loopback(struct path *path, const char *old_name,
int recurse)
{
LIST_HEAD(umount_list);
@@ -1593,6 +1621,10 @@ static int do_loopback(struct path *path, char *old_name,
if (err)
return err;
+ err = -EINVAL;
+ if (mnt_ns_loop(&old_path))
+ goto out;
+
err = lock_mount(path);
if (err)
goto out;
@@ -1606,20 +1638,21 @@ static int do_loopback(struct path *path, char *old_name,
if (!check_mnt(real_mount(path->mnt)) || !check_mnt(old))
goto out2;
- err = -ENOMEM;
if (recurse)
mnt = copy_tree(old, old_path.dentry, 0);
else
mnt = clone_mnt(old, old_path.dentry, 0);
- if (!mnt)
- goto out2;
+ if (IS_ERR(mnt)) {
+ err = PTR_ERR(mnt);
+ goto out;
+ }
err = graft_tree(mnt, path);
if (err) {
- br_write_lock(vfsmount_lock);
+ br_write_lock(&vfsmount_lock);
umount_tree(mnt, 0, &umount_list);
- br_write_unlock(vfsmount_lock);
+ br_write_unlock(&vfsmount_lock);
}
out2:
unlock_mount(path);
@@ -1677,16 +1710,16 @@ static int do_remount(struct path *path, int flags, int mnt_flags,
else
err = do_remount_sb(sb, flags, data, 0);
if (!err) {
- br_write_lock(vfsmount_lock);
- mnt_flags |= mnt->mnt.mnt_flags & ~MNT_USER_SETTABLE_MASK;
+ br_write_lock(&vfsmount_lock);
+ mnt_flags |= mnt->mnt.mnt_flags & MNT_PROPAGATION_MASK;
mnt->mnt.mnt_flags = mnt_flags;
- br_write_unlock(vfsmount_lock);
+ br_write_unlock(&vfsmount_lock);
}
up_write(&sb->s_umount);
if (!err) {
- br_write_lock(vfsmount_lock);
+ br_write_lock(&vfsmount_lock);
touch_mnt_namespace(mnt->mnt_ns);
- br_write_unlock(vfsmount_lock);
+ br_write_unlock(&vfsmount_lock);
}
return err;
}
@@ -1701,13 +1734,13 @@ static inline int tree_contains_unbindable(struct mount *mnt)
return 0;
}
-static int do_move_mount(struct path *path, char *old_name)
+static int do_move_mount(struct path *path, const char *old_name)
{
struct path old_path, parent_path;
struct mount *p;
struct mount *old;
int err = 0;
- if (!capable(CAP_SYS_ADMIN))
+ if (!ns_capable(real_mount(path->mnt)->mnt_ns->user_ns, CAP_SYS_ADMIN))
return -EPERM;
if (!old_name || !*old_name)
return -EINVAL;
@@ -1794,21 +1827,6 @@ static struct vfsmount *fs_set_subtype(struct vfsmount *mnt, const char *fstype)
return ERR_PTR(err);
}
-static struct vfsmount *
-do_kern_mount(const char *fstype, int flags, const char *name, void *data)
-{
- struct file_system_type *type = get_fs_type(fstype);
- struct vfsmount *mnt;
- if (!type)
- return ERR_PTR(-ENODEV);
- mnt = vfs_kern_mount(type, flags, name, data);
- if (!IS_ERR(mnt) && (type->fs_flags & FS_HAS_SUBTYPE) &&
- !mnt->mnt_sb->s_subtype)
- mnt = fs_set_subtype(mnt, fstype);
- put_filesystem(type);
- return mnt;
-}
-
/*
* add a mount into a namespace's mount tree
*/
@@ -1823,8 +1841,14 @@ static int do_add_mount(struct mount *newmnt, struct path *path, int mnt_flags)
return err;
err = -EINVAL;
- if (!(mnt_flags & MNT_SHRINKABLE) && !check_mnt(real_mount(path->mnt)))
- goto unlock;
+ if (unlikely(!check_mnt(real_mount(path->mnt)))) {
+ /* that's acceptable only for automounts done in private ns */
+ if (!(mnt_flags & MNT_SHRINKABLE))
+ goto unlock;
+ /* ... and for those we'd better have mountpoint still alive */
+ if (!real_mount(path->mnt)->mnt_ns)
+ goto unlock;
+ }
/* Refuse the same filesystem on the same mount point */
err = -EBUSY;
@@ -1848,20 +1872,46 @@ unlock:
* create a new mount for userspace and request it to be added into the
* namespace's tree
*/
-static int do_new_mount(struct path *path, char *type, int flags,
- int mnt_flags, char *name, void *data)
+static int do_new_mount(struct path *path, const char *fstype, int flags,
+ int mnt_flags, const char *name, void *data)
{
+ struct file_system_type *type;
+ struct user_namespace *user_ns;
struct vfsmount *mnt;
int err;
- if (!type)
+ if (!fstype)
return -EINVAL;
/* we need capabilities... */
- if (!capable(CAP_SYS_ADMIN))
+ user_ns = real_mount(path->mnt)->mnt_ns->user_ns;
+ if (!ns_capable(user_ns, CAP_SYS_ADMIN))
return -EPERM;
- mnt = do_kern_mount(type, flags, name, data);
+ type = get_fs_type(fstype);
+ if (!type)
+ return -ENODEV;
+
+ if (user_ns != &init_user_ns) {
+ if (!(type->fs_flags & FS_USERNS_MOUNT)) {
+ put_filesystem(type);
+ return -EPERM;
+ }
+ /* Only in special cases allow devices from mounts
+ * created outside the initial user namespace.
+ */
+ if (!(type->fs_flags & FS_USERNS_DEV_MOUNT)) {
+ flags |= MS_NODEV;
+ mnt_flags |= MNT_NODEV;
+ }
+ }
+
+ mnt = vfs_kern_mount(type, flags, name, data);
+ if (!IS_ERR(mnt) && (type->fs_flags & FS_HAS_SUBTYPE) &&
+ !mnt->mnt_sb->s_subtype)
+ mnt = fs_set_subtype(mnt, fstype);
+
+ put_filesystem(type);
if (IS_ERR(mnt))
return PTR_ERR(mnt);
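
Filesystems must opt in to being mounted by the root of a user namespace via the new FS_USERNS_MOUNT flag, and additionally set FS_USERNS_DEV_MOUNT to avoid the forced MNT_NODEV. A hypothetical opt-in (the filesystem name and mount helper below are made up) would be declared as:

/* Hypothetical filesystem that allows user-namespace mounts. */
static struct file_system_type examplefs_type = {
	.name		= "examplefs",
	.mount		= examplefs_mount,	/* assumed helper */
	.kill_sb	= kill_anon_super,
	/* Mountable by a userns root; device nodes stay disabled because
	 * FS_USERNS_DEV_MOUNT is left unset, so MNT_NODEV is forced. */
	.fs_flags	= FS_USERNS_MOUNT,
};
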
@@ -1893,9 +1943,9 @@ fail:
/* remove m from any expiration list it may be on */
if (!list_empty(&mnt->mnt_expire)) {
down_write(&namespace_sem);
- br_write_lock(vfsmount_lock);
+ br_write_lock(&vfsmount_lock);
list_del_init(&mnt->mnt_expire);
- br_write_unlock(vfsmount_lock);
+ br_write_unlock(&vfsmount_lock);
up_write(&namespace_sem);
}
mntput(m);
@@ -1911,11 +1961,11 @@ fail:
void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list)
{
down_write(&namespace_sem);
- br_write_lock(vfsmount_lock);
+ br_write_lock(&vfsmount_lock);
list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list);
- br_write_unlock(vfsmount_lock);
+ br_write_unlock(&vfsmount_lock);
up_write(&namespace_sem);
}
EXPORT_SYMBOL(mnt_set_expiry);
@@ -1935,7 +1985,7 @@ void mark_mounts_for_expiry(struct list_head *mounts)
return;
down_write(&namespace_sem);
- br_write_lock(vfsmount_lock);
+ br_write_lock(&vfsmount_lock);
/* extract from the expiration list every vfsmount that matches the
* following criteria:
@@ -1954,7 +2004,7 @@ void mark_mounts_for_expiry(struct list_head *mounts)
touch_mnt_namespace(mnt->mnt_ns);
umount_tree(mnt, 1, &umounts);
}
- br_write_unlock(vfsmount_lock);
+ br_write_unlock(&vfsmount_lock);
up_write(&namespace_sem);
release_mounts(&umounts);
@@ -2122,8 +2172,8 @@ int copy_mount_string(const void __user *data, char **where)
* Therefore, if this magic number is present, it carries no information
* and must be discarded.
*/
-long do_mount(char *dev_name, char *dir_name, char *type_page,
- unsigned long flags, void *data_page)
+long do_mount(const char *dev_name, const char *dir_name,
+ const char *type_page, unsigned long flags, void *data_page)
{
struct path path;
int retval = 0;
@@ -2192,67 +2242,78 @@ dput_out:
return retval;
}
-static struct mnt_namespace *alloc_mnt_ns(void)
+static void free_mnt_ns(struct mnt_namespace *ns)
+{
+ proc_free_inum(ns->proc_inum);
+ put_user_ns(ns->user_ns);
+ kfree(ns);
+}
+
+/*
+ * Assign a sequence number so we can detect when we attempt to bind
+ * mount a reference to an older mount namespace into the current
+ * mount namespace, preventing reference counting loops. A 64bit
+ * number incrementing at 10Ghz will take 12,427 years to wrap which
+ * is effectively never, so we can ignore the possibility.
+ */
+static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);
+
+static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
{
struct mnt_namespace *new_ns;
+ int ret;
new_ns = kmalloc(sizeof(struct mnt_namespace), GFP_KERNEL);
if (!new_ns)
return ERR_PTR(-ENOMEM);
+ ret = proc_alloc_inum(&new_ns->proc_inum);
+ if (ret) {
+ kfree(new_ns);
+ return ERR_PTR(ret);
+ }
+ new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
atomic_set(&new_ns->count, 1);
new_ns->root = NULL;
INIT_LIST_HEAD(&new_ns->list);
init_waitqueue_head(&new_ns->poll);
new_ns->event = 0;
+ new_ns->user_ns = get_user_ns(user_ns);
return new_ns;
}
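
The sequence number assigned in alloc_mnt_ns() above is what the mnt_ns_loop() check added to do_loopback() earlier in this diff consults; the helper itself falls outside the hunks shown. A minimal sketch of such a check, assuming the proc inode of a /proc/<pid>/ns/mnt file carries the namespace in PROC_I(inode)->ns, could look like:

/* Illustrative only -- the real helper lives elsewhere in fs/namespace.c. */
static bool mnt_ns_loop_sketch(struct path *path)
{
	struct inode *inode = path->dentry->d_inode;
	struct mnt_namespace *mnt_ns;

	/* Only files that represent mount namespaces are of interest. */
	if (!proc_ns_inode(inode))
		return false;
	if (PROC_I(inode)->ns_ops != &mntns_operations)
		return false;

	/* Refuse to bind a namespace that is not strictly older than ours;
	 * this is what prevents reference-counting loops. */
	mnt_ns = PROC_I(inode)->ns;
	return current->nsproxy->mnt_ns->seq >= mnt_ns->seq;
}
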
-void mnt_make_longterm(struct vfsmount *mnt)
-{
- __mnt_make_longterm(real_mount(mnt));
-}
-
-void mnt_make_shortterm(struct vfsmount *m)
-{
-#ifdef CONFIG_SMP
- struct mount *mnt = real_mount(m);
- if (atomic_add_unless(&mnt->mnt_longterm, -1, 1))
- return;
- br_write_lock(vfsmount_lock);
- atomic_dec(&mnt->mnt_longterm);
- br_write_unlock(vfsmount_lock);
-#endif
-}
-
/*
* Allocate a new namespace structure and populate it with contents
* copied from the namespace of the passed in task structure.
*/
static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
- struct fs_struct *fs)
+ struct user_namespace *user_ns, struct fs_struct *fs)
{
struct mnt_namespace *new_ns;
struct vfsmount *rootmnt = NULL, *pwdmnt = NULL;
struct mount *p, *q;
struct mount *old = mnt_ns->root;
struct mount *new;
+ int copy_flags;
- new_ns = alloc_mnt_ns();
+ new_ns = alloc_mnt_ns(user_ns);
if (IS_ERR(new_ns))
return new_ns;
down_write(&namespace_sem);
/* First pass: copy the tree topology */
- new = copy_tree(old, old->mnt.mnt_root, CL_COPY_ALL | CL_EXPIRE);
- if (!new) {
+ copy_flags = CL_COPY_ALL | CL_EXPIRE;
+ if (user_ns != mnt_ns->user_ns)
+ copy_flags |= CL_SHARED_TO_SLAVE;
+ new = copy_tree(old, old->mnt.mnt_root, copy_flags);
+ if (IS_ERR(new)) {
up_write(&namespace_sem);
- kfree(new_ns);
- return ERR_PTR(-ENOMEM);
+ free_mnt_ns(new_ns);
+ return ERR_CAST(new);
}
new_ns->root = new;
- br_write_lock(vfsmount_lock);
+ br_write_lock(&vfsmount_lock);
list_add_tail(&new_ns->list, &new->mnt_list);
- br_write_unlock(vfsmount_lock);
+ br_write_unlock(&vfsmount_lock);
/*
* Second pass: switch the tsk->fs->* elements and mark new vfsmounts
@@ -2263,18 +2324,13 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
q = new;
while (p) {
q->mnt_ns = new_ns;
- __mnt_make_longterm(q);
if (fs) {
if (&p->mnt == fs->root.mnt) {
fs->root.mnt = mntget(&q->mnt);
- __mnt_make_longterm(q);
- mnt_make_shortterm(&p->mnt);
rootmnt = &p->mnt;
}
if (&p->mnt == fs->pwd.mnt) {
fs->pwd.mnt = mntget(&q->mnt);
- __mnt_make_longterm(q);
- mnt_make_shortterm(&p->mnt);
pwdmnt = &p->mnt;
}
}
@@ -2292,7 +2348,7 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
}
struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
- struct fs_struct *new_fs)
+ struct user_namespace *user_ns, struct fs_struct *new_fs)
{
struct mnt_namespace *new_ns;
@@ -2302,7 +2358,7 @@ struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
if (!(flags & CLONE_NEWNS))
return ns;
- new_ns = dup_mnt_ns(ns, new_fs);
+ new_ns = dup_mnt_ns(ns, user_ns, new_fs);
put_mnt_ns(ns);
return new_ns;
@@ -2314,11 +2370,10 @@ struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
*/
static struct mnt_namespace *create_mnt_ns(struct vfsmount *m)
{
- struct mnt_namespace *new_ns = alloc_mnt_ns();
+ struct mnt_namespace *new_ns = alloc_mnt_ns(&init_user_ns);
if (!IS_ERR(new_ns)) {
struct mount *mnt = real_mount(m);
mnt->mnt_ns = new_ns;
- __mnt_make_longterm(mnt);
new_ns->root = mnt;
list_add(&new_ns->list, &mnt->mnt_list);
} else {
@@ -2416,9 +2471,9 @@ bool is_path_reachable(struct mount *mnt, struct dentry *dentry,
int path_is_under(struct path *path1, struct path *path2)
{
int res;
- br_read_lock(vfsmount_lock);
+ br_read_lock(&vfsmount_lock);
res = is_path_reachable(real_mount(path1->mnt), path1->dentry, path2);
- br_read_unlock(vfsmount_lock);
+ br_read_unlock(&vfsmount_lock);
return res;
}
EXPORT_SYMBOL(path_is_under);
@@ -2455,7 +2510,7 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
struct mount *new_mnt, *root_mnt;
int error;
- if (!capable(CAP_SYS_ADMIN))
+ if (!ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN))
return -EPERM;
error = user_path_dir(new_root, &new);
@@ -2505,10 +2560,7 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
/* make sure we can reach put_old from new_root */
if (!is_path_reachable(real_mount(old.mnt), old.dentry, &new))
goto out4;
- /* make certain new is below the root */
- if (!is_path_reachable(new_mnt, new.dentry, &root))
- goto out4;
- br_write_lock(vfsmount_lock);
+ br_write_lock(&vfsmount_lock);
detach_mnt(new_mnt, &parent_path);
detach_mnt(root_mnt, &root_parent);
/* mount old root on put_old */
@@ -2516,7 +2568,7 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
/* mount new_root on / */
attach_mnt(new_mnt, &root_parent);
touch_mnt_namespace(current->nsproxy->mnt_ns);
- br_write_unlock(vfsmount_lock);
+ br_write_unlock(&vfsmount_lock);
chroot_fs_refs(&root, &new);
error = 0;
out4:
@@ -2540,8 +2592,13 @@ static void __init init_mount_tree(void)
struct vfsmount *mnt;
struct mnt_namespace *ns;
struct path root;
+ struct file_system_type *type;
- mnt = do_kern_mount("rootfs", 0, "rootfs", NULL);
+ type = get_fs_type("rootfs");
+ if (!type)
+ panic("Can't find rootfs type");
+ mnt = vfs_kern_mount(type, 0, "rootfs", NULL);
+ put_filesystem(type);
if (IS_ERR(mnt))
panic("Can't create rootfs");
@@ -2579,7 +2636,7 @@ void __init mnt_init(void)
for (u = 0; u < HASH_SIZE; u++)
INIT_LIST_HEAD(&mount_hashtable[u]);
- br_lock_init(vfsmount_lock);
+ br_lock_init(&vfsmount_lock);
err = sysfs_init();
if (err)
@@ -2599,12 +2656,12 @@ void put_mnt_ns(struct mnt_namespace *ns)
if (!atomic_dec_and_test(&ns->count))
return;
down_write(&namespace_sem);
- br_write_lock(vfsmount_lock);
+ br_write_lock(&vfsmount_lock);
umount_tree(ns->root, 0, &umount_list);
- br_write_unlock(vfsmount_lock);
+ br_write_unlock(&vfsmount_lock);
up_write(&namespace_sem);
release_mounts(&umount_list);
- kfree(ns);
+ free_mnt_ns(ns);
}
struct vfsmount *kern_mount_data(struct file_system_type *type, void *data)
@@ -2616,7 +2673,7 @@ struct vfsmount *kern_mount_data(struct file_system_type *type, void *data)
* it is a longterm mount, don't release mnt until
* we unmount before file sys is unregistered
*/
- mnt_make_longterm(mnt);
+ real_mount(mnt)->mnt_ns = MNT_NS_INTERNAL;
}
return mnt;
}
@@ -2626,7 +2683,9 @@ void kern_unmount(struct vfsmount *mnt)
{
/* release long term mount so mount point can be released */
if (!IS_ERR_OR_NULL(mnt)) {
- mnt_make_shortterm(mnt);
+ br_write_lock(&vfsmount_lock);
+ real_mount(mnt)->mnt_ns = NULL;
+ br_write_unlock(&vfsmount_lock);
mntput(mnt);
}
}
@@ -2636,3 +2695,71 @@ bool our_mnt(struct vfsmount *mnt)
{
return check_mnt(real_mount(mnt));
}
+
+static void *mntns_get(struct task_struct *task)
+{
+ struct mnt_namespace *ns = NULL;
+ struct nsproxy *nsproxy;
+
+ rcu_read_lock();
+ nsproxy = task_nsproxy(task);
+ if (nsproxy) {
+ ns = nsproxy->mnt_ns;
+ get_mnt_ns(ns);
+ }
+ rcu_read_unlock();
+
+ return ns;
+}
+
+static void mntns_put(void *ns)
+{
+ put_mnt_ns(ns);
+}
+
+static int mntns_install(struct nsproxy *nsproxy, void *ns)
+{
+ struct fs_struct *fs = current->fs;
+ struct mnt_namespace *mnt_ns = ns;
+ struct path root;
+
+ if (!ns_capable(mnt_ns->user_ns, CAP_SYS_ADMIN) ||
+ !nsown_capable(CAP_SYS_CHROOT))
+ return -EINVAL;
+
+ if (fs->users != 1)
+ return -EINVAL;
+
+ get_mnt_ns(mnt_ns);
+ put_mnt_ns(nsproxy->mnt_ns);
+ nsproxy->mnt_ns = mnt_ns;
+
+ /* Find the root */
+ root.mnt = &mnt_ns->root->mnt;
+ root.dentry = mnt_ns->root->mnt.mnt_root;
+ path_get(&root);
+ while(d_mountpoint(root.dentry) && follow_down_one(&root))
+ ;
+
+ /* Update the pwd and root */
+ set_fs_pwd(fs, &root);
+ set_fs_root(fs, &root);
+
+ path_put(&root);
+ return 0;
+}
+
+static unsigned int mntns_inum(void *ns)
+{
+ struct mnt_namespace *mnt_ns = ns;
+ return mnt_ns->proc_inum;
+}
+
+const struct proc_ns_operations mntns_operations = {
+ .name = "mnt",
+ .type = CLONE_NEWNS,
+ .get = mntns_get,
+ .put = mntns_put,
+ .install = mntns_install,
+ .inum = mntns_inum,
+};
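
With the .install hook wired up, a mount namespace can be entered from userspace through setns(2) on the corresponding /proc file. A rough illustration (the PID is a placeholder; the caller needs CAP_SYS_ADMIN over the target namespace plus CAP_SYS_CHROOT, as checked in mntns_install() above):

#define _GNU_SOURCE
#include <fcntl.h>
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/proc/1234/ns/mnt", O_RDONLY);	/* 1234: example PID */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (setns(fd, CLONE_NEWNS) < 0) {
		perror("setns");
		return 1;
	}
	/* Our root and cwd now point into the target mount namespace. */
	close(fd);
	return 0;
}
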
diff --git a/fs/ncpfs/file.c b/fs/ncpfs/file.c
index 3ff5fcc1528..122e260247f 100644
--- a/fs/ncpfs/file.c
+++ b/fs/ncpfs/file.c
@@ -221,6 +221,10 @@ ncp_file_write(struct file *file, const char __user *buf, size_t count, loff_t *
already_written = 0;
+ errno = file_update_time(file);
+ if (errno)
+ goto outrel;
+
bouncebuffer = vmalloc(bufsize);
if (!bouncebuffer) {
errno = -EIO; /* -ENOMEM */
@@ -252,8 +256,6 @@ ncp_file_write(struct file *file, const char __user *buf, size_t count, loff_t *
}
vfree(bouncebuffer);
- file_update_time(file);
-
*ppos = pos;
if (pos > i_size_read(inode)) {
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index 8639169221c..7389d2d5e51 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -2096,7 +2096,9 @@ static ssize_t ntfs_file_aio_write_nolock(struct kiocb *iocb,
err = file_remove_suid(file);
if (err)
goto out;
- file_update_time(file);
+ err = file_update_time(file);
+ if (err)
+ goto out;
written = ntfs_file_buffered_write(iocb, iov, nr_segs, pos, ppos,
count);
out:
diff --git a/fs/pipe.c b/fs/pipe.c
index fec5e4ad071..1a6cf089397 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -654,8 +654,11 @@ out:
wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
}
- if (ret > 0)
- file_update_time(filp);
+ if (ret > 0) {
+ int err = file_update_time(filp);
+ if (err)
+ ret = err;
+ }
return ret;
}
diff --git a/fs/pnode.c b/fs/pnode.c
index ab5fa9e1a79..3e000a51ac0 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -237,8 +237,9 @@ int propagate_mnt(struct mount *dest_mnt, struct dentry *dest_dentry,
source = get_source(m, prev_dest_mnt, prev_src_mnt, &type);
- if (!(child = copy_tree(source, source->mnt.mnt_root, type))) {
- ret = -ENOMEM;
+ child = copy_tree(source, source->mnt.mnt_root, type);
+ if (IS_ERR(child)) {
+ ret = PTR_ERR(child);
list_splice(tree_list, tmp_list.prev);
goto out;
}
@@ -257,12 +258,12 @@ int propagate_mnt(struct mount *dest_mnt, struct dentry *dest_dentry,
prev_src_mnt = child;
}
out:
- br_write_lock(vfsmount_lock);
+ br_write_lock(&vfsmount_lock);
while (!list_empty(&tmp_list)) {
child = list_first_entry(&tmp_list, struct mount, mnt_hash);
umount_tree(child, 0, &umount_list);
}
- br_write_unlock(vfsmount_lock);
+ br_write_unlock(&vfsmount_lock);
release_mounts(&umount_list);
return ret;
}
diff --git a/fs/pnode.h b/fs/pnode.h
index 65c60979d54..19b853a3445 100644
--- a/fs/pnode.h
+++ b/fs/pnode.h
@@ -22,6 +22,7 @@
#define CL_COPY_ALL 0x04
#define CL_MAKE_SHARED 0x08
#define CL_PRIVATE 0x10
+#define CL_SHARED_TO_SLAVE 0x20
static inline void set_mnt_shared(struct mount *mnt)
{
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 2f198dad12c..a6c0c090283 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -137,12 +137,6 @@ struct pid_entry {
static int proc_fd_permission(struct inode *inode, int mask);
-/* ANDROID is for special files in /proc. */
-#define ANDROID(NAME, MODE, OTYPE) \
- NOD(NAME, (S_IFREG|(MODE)), \
- &proc_##OTYPE##_inode_operations, \
- &proc_##OTYPE##_operations, {})
-
/*
* Count the number of hardlinks for the pid_entry table, excluding the .
* and .. links.
@@ -975,35 +969,6 @@ out:
return err < 0 ? err : count;
}
-static int oom_adjust_permission(struct inode *inode, int mask)
-{
- uid_t uid;
- struct task_struct *p;
-
- p = get_proc_task(inode);
- if(p) {
- uid = task_uid(p);
- put_task_struct(p);
- }
-
- /*
- * System Server (uid == 1000) is granted access to oom_adj of all
- * android applications (uid > 10000) as and services (uid >= 1000)
- */
- if (p && (current_fsuid() == 1000) && (uid >= 1000)) {
- if (inode->i_mode >> 6 & mask) {
- return 0;
- }
- }
-
- /* Fall back to default. */
- return generic_permission(inode, mask);
-}
-
-static const struct inode_operations proc_oom_adjust_inode_operations = {
- .permission = oom_adjust_permission,
-};
-
static const struct file_operations proc_oom_adjust_operations = {
.read = oom_adjust_read,
.write = oom_adjust_write,
@@ -3043,8 +3008,8 @@ static const struct pid_entry tgid_base_stuff[] = {
REG("cgroup", S_IRUGO, proc_cgroup_operations),
#endif
INF("oom_score", S_IRUGO, proc_oom_score),
- ANDROID("oom_adj",S_IRUGO|S_IWUSR, oom_adjust),
- REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations),
+ REG("oom_adj", S_IRUSR, proc_oom_adjust_operations),
+ REG("oom_score_adj", S_IRUSR, proc_oom_score_adj_operations),
#ifdef CONFIG_AUDITSYSCALL
REG("loginuid", S_IWUSR|S_IRUGO, proc_loginuid_operations),
REG("sessionid", S_IRUGO, proc_sessionid_operations),
@@ -3401,8 +3366,8 @@ static const struct pid_entry tid_base_stuff[] = {
REG("cgroup", S_IRUGO, proc_cgroup_operations),
#endif
INF("oom_score", S_IRUGO, proc_oom_score),
- REG("oom_adj", S_IRUGO|S_IWUSR, proc_oom_adjust_operations),
- REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations),
+ REG("oom_adj", S_IRUSR, proc_oom_adjust_operations),
+ REG("oom_score_adj", S_IRUSR, proc_oom_score_adj_operations),
#ifdef CONFIG_AUDITSYSCALL
REG("loginuid", S_IWUSR|S_IRUGO, proc_loginuid_operations),
REG("sessionid", S_IRUGO, proc_sessionid_operations),
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 2edf34f2eb6..a1487e5ab08 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -350,37 +350,39 @@ static DEFINE_SPINLOCK(proc_inum_lock); /* protects the above */
* Return an inode number between PROC_DYNAMIC_FIRST and
* 0xffffffff, or zero on failure.
*/
-static unsigned int get_inode_number(void)
+int proc_alloc_inum(unsigned int *inum)
{
unsigned int i;
int error;
retry:
- if (ida_pre_get(&proc_inum_ida, GFP_KERNEL) == 0)
- return 0;
+ if (!ida_pre_get(&proc_inum_ida, GFP_KERNEL))
+ return -ENOMEM;
- spin_lock(&proc_inum_lock);
+ spin_lock_irq(&proc_inum_lock);
error = ida_get_new(&proc_inum_ida, &i);
- spin_unlock(&proc_inum_lock);
+ spin_unlock_irq(&proc_inum_lock);
if (error == -EAGAIN)
goto retry;
else if (error)
- return 0;
+ return error;
if (i > UINT_MAX - PROC_DYNAMIC_FIRST) {
- spin_lock(&proc_inum_lock);
+ spin_lock_irq(&proc_inum_lock);
ida_remove(&proc_inum_ida, i);
- spin_unlock(&proc_inum_lock);
- return 0;
+ spin_unlock_irq(&proc_inum_lock);
+ return -ENOSPC;
}
- return PROC_DYNAMIC_FIRST + i;
+ *inum = PROC_DYNAMIC_FIRST + i;
+ return 0;
}
-static void release_inode_number(unsigned int inum)
+void proc_free_inum(unsigned int inum)
{
- spin_lock(&proc_inum_lock);
+ unsigned long flags;
+ spin_lock_irqsave(&proc_inum_lock, flags);
ida_remove(&proc_inum_ida, inum - PROC_DYNAMIC_FIRST);
- spin_unlock(&proc_inum_lock);
+ spin_unlock_irqrestore(&proc_inum_lock, flags);
}
static void *proc_follow_link(struct dentry *dentry, struct nameidata *nd)
@@ -554,13 +556,12 @@ static const struct inode_operations proc_dir_inode_operations = {
static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp)
{
- unsigned int i;
struct proc_dir_entry *tmp;
+ int ret;
- i = get_inode_number();
- if (i == 0)
- return -EAGAIN;
- dp->low_ino = i;
+ ret = proc_alloc_inum(&dp->low_ino);
+ if (ret)
+ return ret;
if (S_ISDIR(dp->mode)) {
if (dp->proc_iops == NULL) {
@@ -765,7 +766,7 @@ EXPORT_SYMBOL(proc_create_data);
static void free_proc_entry(struct proc_dir_entry *de)
{
- release_inode_number(de->low_ino);
+ proc_free_inum(de->low_ino);
if (S_ISLNK(de->mode))
kfree(de->data);
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 205c9228083..b1f55aef44d 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -31,6 +31,7 @@ static void proc_evict_inode(struct inode *inode)
struct proc_dir_entry *de;
struct ctl_table_header *head;
const struct proc_ns_operations *ns_ops;
+ void *ns;
truncate_inode_pages(&inode->i_data, 0);
end_writeback(inode);
@@ -49,8 +50,9 @@ static void proc_evict_inode(struct inode *inode)
}
/* Release any associated namespace */
ns_ops = PROC_I(inode)->ns_ops;
- if (ns_ops && ns_ops->put)
- ns_ops->put(PROC_I(inode)->ns);
+ ns = PROC_I(inode)->ns;
+ if (ns_ops && ns)
+ ns_ops->put(ns);
}
static struct kmem_cache * proc_inode_cachep;
diff --git a/fs/proc/namespaces.c b/fs/proc/namespaces.c
index 0d9e23a39e4..08dfd6ad8f3 100644
--- a/fs/proc/namespaces.c
+++ b/fs/proc/namespaces.c
@@ -24,12 +24,165 @@ static const struct proc_ns_operations *ns_entries[] = {
#ifdef CONFIG_IPC_NS
&ipcns_operations,
#endif
+#ifdef CONFIG_PID_NS
+ &pidns_operations,
+#endif
+ &mntns_operations,
};
static const struct file_operations ns_file_operations = {
.llseek = no_llseek,
};
+static const struct inode_operations ns_inode_operations = {
+ .setattr = proc_setattr,
+};
+
+static int ns_delete_dentry(const struct dentry *dentry)
+{
+ /* Don't cache namespace inodes when not in use */
+ return 1;
+}
+
+static char *ns_dname(struct dentry *dentry, char *buffer, int buflen)
+{
+ struct inode *inode = dentry->d_inode;
+ const struct proc_ns_operations *ns_ops = PROC_I(inode)->ns_ops;
+
+ return dynamic_dname(dentry, buffer, buflen, "%s:[%lu]",
+ ns_ops->name, inode->i_ino);
+}
+
+const struct dentry_operations ns_dentry_operations =
+{
+ .d_delete = ns_delete_dentry,
+ .d_dname = ns_dname,
+};
+
+static struct dentry *proc_ns_get_dentry(struct super_block *sb,
+ struct task_struct *task, const struct proc_ns_operations *ns_ops)
+{
+ struct dentry *dentry, *result;
+ struct inode *inode;
+ struct proc_inode *ei;
+ struct qstr qname = { .name = "", };
+ void *ns;
+
+ ns = ns_ops->get(task);
+ if (!ns)
+ return ERR_PTR(-ENOENT);
+
+ dentry = d_alloc_pseudo(sb, &qname);
+ if (!dentry) {
+ ns_ops->put(ns);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ inode = iget_locked(sb, ns_ops->inum(ns));
+ if (!inode) {
+ dput(dentry);
+ ns_ops->put(ns);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ei = PROC_I(inode);
+ if (inode->i_state & I_NEW) {
+ inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
+ inode->i_op = &ns_inode_operations;
+ inode->i_mode = S_IFREG | S_IRUGO;
+ inode->i_fop = &ns_file_operations;
+ ei->ns_ops = ns_ops;
+ ei->ns = ns;
+ unlock_new_inode(inode);
+ } else {
+ ns_ops->put(ns);
+ }
+
+ d_set_d_op(dentry, &ns_dentry_operations);
+ result = d_instantiate_unique(dentry, inode);
+ if (result) {
+ dput(dentry);
+ dentry = result;
+ }
+
+ return dentry;
+}
+
+static void *proc_ns_follow_link(struct dentry *dentry, struct nameidata *nd)
+{
+ struct inode *inode = dentry->d_inode;
+ struct super_block *sb = inode->i_sb;
+ struct proc_inode *ei = PROC_I(inode);
+ struct task_struct *task;
+ struct dentry *ns_dentry;
+ void *error = ERR_PTR(-EACCES);
+
+ task = get_proc_task(inode);
+ if (!task)
+ goto out;
+
+ if (!ptrace_may_access(task, PTRACE_MODE_READ))
+ goto out_put_task;
+
+ ns_dentry = proc_ns_get_dentry(sb, task, ei->ns_ops);
+ if (IS_ERR(ns_dentry)) {
+ error = ERR_CAST(ns_dentry);
+ goto out_put_task;
+ }
+
+ dput(nd->path.dentry);
+ nd->path.dentry = ns_dentry;
+ error = NULL;
+
+out_put_task:
+ put_task_struct(task);
+out:
+ return error;
+}
+
+static int proc_ns_readlink(struct dentry *dentry, char __user *buffer, int buflen)
+{
+ struct inode *inode = dentry->d_inode;
+ struct proc_inode *ei = PROC_I(inode);
+ const struct proc_ns_operations *ns_ops = ei->ns_ops;
+ struct task_struct *task;
+ void *ns;
+ char name[50];
+ int len = -EACCES;
+
+ task = get_proc_task(inode);
+ if (!task)
+ goto out;
+
+ if (!ptrace_may_access(task, PTRACE_MODE_READ))
+ goto out_put_task;
+
+ len = -ENOENT;
+ ns = ns_ops->get(task);
+ if (!ns)
+ goto out_put_task;
+
+ snprintf(name, sizeof(name), "%s:[%u]", ns_ops->name, ns_ops->inum(ns));
+ len = strlen(name);
+
+ if (len > buflen)
+ len = buflen;
+ if (copy_to_user(buffer, name, len))
+ len = -EFAULT;
+
+ ns_ops->put(ns);
+out_put_task:
+ put_task_struct(task);
+out:
+ return len;
+}
+
+static const struct inode_operations proc_ns_link_inode_operations = {
+ .readlink = proc_ns_readlink,
+ .follow_link = proc_ns_follow_link,
+ .setattr = proc_setattr,
+};
+
static struct dentry *proc_ns_instantiate(struct inode *dir,
struct dentry *dentry, struct task_struct *task, const void *ptr)
{
@@ -37,21 +190,15 @@ static struct dentry *proc_ns_instantiate(struct inode *dir,
struct inode *inode;
struct proc_inode *ei;
struct dentry *error = ERR_PTR(-ENOENT);
- void *ns;
inode = proc_pid_make_inode(dir->i_sb, task);
if (!inode)
goto out;
- ns = ns_ops->get(task);
- if (!ns)
- goto out_iput;
-
ei = PROC_I(inode);
- inode->i_mode = S_IFREG|S_IRUSR;
- inode->i_fop = &ns_file_operations;
- ei->ns_ops = ns_ops;
- ei->ns = ns;
+ inode->i_mode = S_IFLNK|S_IRWXUGO;
+ inode->i_op = &proc_ns_link_inode_operations;
+ ei->ns_ops = ns_ops;
d_set_d_op(dentry, &pid_dentry_operations);
d_add(dentry, inode);
@@ -60,9 +207,6 @@ static struct dentry *proc_ns_instantiate(struct inode *dir,
error = NULL;
out:
return error;
-out_iput:
- iput(inode);
- goto out;
}
static int proc_ns_fill_cache(struct file *filp, void *dirent,
@@ -89,10 +233,6 @@ static int proc_ns_dir_readdir(struct file *filp, void *dirent,
if (!task)
goto out_no_task;
- ret = -EPERM;
- if (!ptrace_may_access(task, PTRACE_MODE_READ))
- goto out;
-
ret = 0;
i = filp->f_pos;
switch (i) {
@@ -152,10 +292,6 @@ static struct dentry *proc_ns_dir_lookup(struct inode *dir,
if (!task)
goto out_no_task;
- error = ERR_PTR(-EPERM);
- if (!ptrace_may_access(task, PTRACE_MODE_READ))
- goto out;
-
last = &ns_entries[ARRAY_SIZE(ns_entries)];
for (entry = ns_entries; entry < last; entry++) {
if (strlen((*entry)->name) != len)
@@ -163,7 +299,6 @@ static struct dentry *proc_ns_dir_lookup(struct inode *dir,
if (!memcmp(dentry->d_name.name, (*entry)->name, len))
break;
}
- error = ERR_PTR(-ENOENT);
if (entry == last)
goto out;
@@ -198,3 +333,7 @@ out_invalid:
return ERR_PTR(-EINVAL);
}
+bool proc_ns_inode(struct inode *inode)
+{
+ return inode->i_fop == &ns_file_operations;
+}
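
Because each /proc/<pid>/ns entry is now a magic symlink whose target embeds the per-namespace proc inode number, userspace can tell whether two tasks share a namespace without entering it. A small illustration:

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char target[64];
	ssize_t n = readlink("/proc/self/ns/mnt", target, sizeof(target) - 1);

	if (n < 0) {
		perror("readlink");
		return 1;
	}
	target[n] = '\0';
	/* Prints the name and inode number, e.g. "mnt:[<inum>]"; two tasks
	 * share a mount namespace exactly when these strings match. */
	printf("%s\n", target);
	return 0;
}
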
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index a789934737e..5794452610c 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -671,6 +671,7 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
#define CLEAR_REFS_ALL 1
#define CLEAR_REFS_ANON 2
#define CLEAR_REFS_MAPPED 3
+#define CLEAR_REFS_MM_HIWATER_RSS 5
static ssize_t clear_refs_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
@@ -690,7 +691,8 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
rv = kstrtoint(strstrip(buffer), 10, &type);
if (rv < 0)
return rv;
- if (type < CLEAR_REFS_ALL || type > CLEAR_REFS_MAPPED)
+ if ((type < CLEAR_REFS_ALL || type > CLEAR_REFS_MAPPED) &&
+ type != CLEAR_REFS_MM_HIWATER_RSS)
return -EINVAL;
task = get_proc_task(file->f_path.dentry->d_inode);
if (!task)
@@ -701,6 +703,18 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
.pmd_entry = clear_refs_pte_range,
.mm = mm,
};
+
+ if (type == CLEAR_REFS_MM_HIWATER_RSS) {
+ /*
+ * Writing 5 to /proc/pid/clear_refs resets the peak
+ * resident set size to this mm's current rss value.
+ */
+ down_write(&mm->mmap_sem);
+ reset_mm_hiwater_rss(mm);
+ up_write(&mm->mmap_sem);
+ goto out_mm;
+ }
+
down_read(&mm->mmap_sem);
for (vma = mm->mmap; vma; vma = vma->vm_next) {
clear_refs_walk.private = vma;
@@ -724,6 +738,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
}
flush_tlb_mm(mm);
up_read(&mm->mmap_sem);
+out_mm:
mmput(mm);
}
put_task_struct(task);
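
The new command can be driven directly from userspace; after the write, the VmHWM value reported in /proc/<pid>/status restarts from the task's current RSS instead of its historical peak. A minimal sketch:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/self/clear_refs", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* "5" corresponds to CLEAR_REFS_MM_HIWATER_RSS above. */
	fputs("5\n", f);
	fclose(f);
	return 0;
}
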
diff --git a/fs/proc_namespace.c b/fs/proc_namespace.c
index 12412852d88..5fe34c355e8 100644
--- a/fs/proc_namespace.c
+++ b/fs/proc_namespace.c
@@ -17,18 +17,18 @@
static unsigned mounts_poll(struct file *file, poll_table *wait)
{
- struct proc_mounts *p = file->private_data;
+ struct proc_mounts *p = proc_mounts(file->private_data);
struct mnt_namespace *ns = p->ns;
unsigned res = POLLIN | POLLRDNORM;
poll_wait(file, &p->ns->poll, wait);
- br_read_lock(vfsmount_lock);
+ br_read_lock(&vfsmount_lock);
if (p->m.poll_event != ns->event) {
p->m.poll_event = ns->event;
res |= POLLERR | POLLPRI;
}
- br_read_unlock(vfsmount_lock);
+ br_read_unlock(&vfsmount_lock);
return res;
}
@@ -121,7 +121,7 @@ out:
static int show_mountinfo(struct seq_file *m, struct vfsmount *mnt)
{
- struct proc_mounts *p = m->private;
+ struct proc_mounts *p = proc_mounts(m);
struct mount *r = real_mount(mnt);
struct super_block *sb = mnt->mnt_sb;
struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
@@ -268,7 +268,6 @@ static int mounts_open_common(struct inode *inode, struct file *file,
if (ret)
goto err_free;
- p->m.private = p;
p->ns = ns;
p->root = root;
p->m.poll_event = ns->event;
@@ -288,7 +287,7 @@ static int mounts_open_common(struct inode *inode, struct file *file,
static int mounts_release(struct inode *inode, struct file *file)
{
- struct proc_mounts *p = file->private_data;
+ struct proc_mounts *p = proc_mounts(file->private_data);
path_put(&p->root);
put_mnt_ns(p->ns);
return seq_release(inode, file);
diff --git a/fs/splice.c b/fs/splice.c
index f8476841eb0..47c4c1ad0c0 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -1003,8 +1003,10 @@ generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD);
ret = file_remove_suid(out);
if (!ret) {
- file_update_time(out);
- ret = splice_from_pipe_feed(pipe, &sd, pipe_to_file);
+ ret = file_update_time(out);
+ if (!ret)
+ ret = splice_from_pipe_feed(pipe, &sd,
+ pipe_to_file);
}
mutex_unlock(&inode->i_mutex);
} while (ret > 0);
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 54a67dd9ac0..a27969494b8 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -629,8 +629,11 @@ restart:
* lock above. Eventually we should look into a way to avoid
* the pointless lock roundtrip.
*/
- if (likely(!(file->f_mode & FMODE_NOCMTIME)))
- file_update_time(file);
+ if (likely(!(file->f_mode & FMODE_NOCMTIME))) {
+ error = file_update_time(file);
+ if (error)
+ return error;
+ }
/*
* If we're writing the file then make sure to clear the setuid and
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 938b3291d50..6c1474a35e6 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1665,6 +1665,7 @@ struct inode_operations {
void (*truncate_range)(struct inode *, loff_t, loff_t);
int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start,
u64 len);
+ int (*update_time)(struct inode *, struct timespec *, int);
} ____cacheline_aligned;
struct seq_file;
@@ -1823,6 +1824,13 @@ static inline void inode_inc_iversion(struct inode *inode)
spin_unlock(&inode->i_lock);
}
+enum file_time_flags {
+ S_ATIME = 1,
+ S_MTIME = 2,
+ S_CTIME = 4,
+ S_VERSION = 8,
+};
+
extern void touch_atime(struct path *);
static inline void file_accessed(struct file *file)
{
@@ -1836,6 +1844,13 @@ int sync_inode_metadata(struct inode *inode, int wait);
struct file_system_type {
const char *name;
int fs_flags;
+#define FS_REQUIRES_DEV 1
+#define FS_BINARY_MOUNTDATA 2
+#define FS_HAS_SUBTYPE 4
+#define FS_USERNS_MOUNT 8 /* Can be mounted by userns root */
+#define FS_USERNS_DEV_MOUNT 16 /* A userns mount does not imply MNT_NODEV */
+#define FS_REVAL_DOT 16384 /* Check the paths ".", ".." for staleness */
+#define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() during rename() internally. */
struct dentry *(*mount) (struct file_system_type *, int,
const char *, void *);
void (*kill_sb) (struct super_block *);
@@ -1895,7 +1910,7 @@ extern struct vfsmount *kern_mount_data(struct file_system_type *, void *data);
extern void kern_unmount(struct vfsmount *mnt);
extern int may_umount_tree(struct vfsmount *);
extern int may_umount(struct vfsmount *);
-extern long do_mount(char *, char *, char *, unsigned long, void *);
+extern long do_mount(const char *, const char *, const char *, unsigned long, void *);
extern struct vfsmount *collect_mounts(struct path *);
extern void drop_collected_mounts(struct vfsmount *);
extern int iterate_mounts(int (*)(struct vfsmount *, void *), void *,
@@ -2560,7 +2575,7 @@ extern int inode_change_ok(const struct inode *, struct iattr *);
extern int inode_newsize_ok(const struct inode *, loff_t offset);
extern void setattr_copy(struct inode *inode, const struct iattr *attr);
-extern void file_update_time(struct file *file);
+extern int file_update_time(struct file *file);
extern int generic_show_options(struct seq_file *m, struct dentry *root);
extern void save_mount_options(struct super_block *sb, char *options);
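
The switch from void to int here is what drives the caller updates earlier in this diff (ncpfs, ntfs, pipe, splice, xfs). A write path following the same convention would now look roughly like this (the function name is illustrative):

/* Sketch of the updated calling convention for file_update_time(). */
static int example_prepare_write(struct file *file)
{
	int err;

	err = file_remove_suid(file);
	if (err)
		return err;

	/* Updating timestamps can now fail, e.g. when the filesystem must
	 * start a transaction to dirty the inode; propagate the error. */
	err = file_update_time(file);
	if (err)
		return err;

	return 0;
}
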
diff --git a/include/linux/hashtable.h b/include/linux/hashtable.h
new file mode 100644
index 00000000000..227c62424f3
--- /dev/null
+++ b/include/linux/hashtable.h
@@ -0,0 +1,192 @@
+/*
+ * Statically sized hash table implementation
+ * (C) 2012 Sasha Levin <levinsasha928@gmail.com>
+ */
+
+#ifndef _LINUX_HASHTABLE_H
+#define _LINUX_HASHTABLE_H
+
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/hash.h>
+#include <linux/rculist.h>
+
+#define DEFINE_HASHTABLE(name, bits) \
+ struct hlist_head name[1 << (bits)] = \
+ { [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT }
+
+#define DECLARE_HASHTABLE(name, bits) \
+ struct hlist_head name[1 << (bits)]
+
+#define HASH_SIZE(name) (ARRAY_SIZE(name))
+#define HASH_BITS(name) ilog2(HASH_SIZE(name))
+
+/* Use hash_32 when possible to allow for fast 32bit hashing in 64bit kernels. */
+#define hash_min(val, bits) \
+ (sizeof(val) <= 4 ? hash_32(val, bits) : hash_long(val, bits))
+
+static inline void __hash_init(struct hlist_head *ht, unsigned int sz)
+{
+ unsigned int i;
+
+ for (i = 0; i < sz; i++)
+ INIT_HLIST_HEAD(&ht[i]);
+}
+
+/**
+ * hash_init - initialize a hash table
+ * @hashtable: hashtable to be initialized
+ *
+ * Calculates the size of the hashtable from the given parameter and
+ * initializes every bucket head to an empty hlist.
+ *
+ * This has to be a macro since HASH_BITS() will not work on pointers since
+ * it calculates the size during preprocessing.
+ */
+#define hash_init(hashtable) __hash_init(hashtable, HASH_SIZE(hashtable))
+
+/**
+ * hash_add - add an object to a hashtable
+ * @hashtable: hashtable to add to
+ * @node: the &struct hlist_node of the object to be added
+ * @key: the key of the object to be added
+ */
+#define hash_add(hashtable, node, key) \
+ hlist_add_head(node, &hashtable[hash_min(key, HASH_BITS(hashtable))])
+
+/**
+ * hash_add_rcu - add an object to a rcu enabled hashtable
+ * @hashtable: hashtable to add to
+ * @node: the &struct hlist_node of the object to be added
+ * @key: the key of the object to be added
+ */
+#define hash_add_rcu(hashtable, node, key) \
+ hlist_add_head_rcu(node, &hashtable[hash_min(key, HASH_BITS(hashtable))])
+
+/**
+ * hash_hashed - check whether an object is in any hashtable
+ * @node: the &struct hlist_node of the object to be checked
+ */
+static inline bool hash_hashed(struct hlist_node *node)
+{
+ return !hlist_unhashed(node);
+}
+
+static inline bool __hash_empty(struct hlist_head *ht, unsigned int sz)
+{
+ unsigned int i;
+
+ for (i = 0; i < sz; i++)
+ if (!hlist_empty(&ht[i]))
+ return false;
+
+ return true;
+}
+
+/**
+ * hash_empty - check whether a hashtable is empty
+ * @hashtable: hashtable to check
+ *
+ * This has to be a macro since HASH_BITS() will not work on pointers since
+ * it calculates the size during preprocessing.
+ */
+#define hash_empty(hashtable) __hash_empty(hashtable, HASH_SIZE(hashtable))
+
+/**
+ * hash_del - remove an object from a hashtable
+ * @node: &struct hlist_node of the object to remove
+ */
+static inline void hash_del(struct hlist_node *node)
+{
+ hlist_del_init(node);
+}
+
+/**
+ * hash_del_rcu - remove an object from a rcu enabled hashtable
+ * @node: &struct hlist_node of the object to remove
+ */
+static inline void hash_del_rcu(struct hlist_node *node)
+{
+ hlist_del_init_rcu(node);
+}
+
+/**
+ * hash_for_each - iterate over a hashtable
+ * @name: hashtable to iterate
+ * @bkt: integer to use as bucket loop cursor
+ * @node: the &struct list_head to use as a loop cursor for each entry
+ * @obj: the type * to use as a loop cursor for each entry
+ * @member: the name of the hlist_node within the struct
+ */
+#define hash_for_each(name, bkt, node, obj, member) \
+ for ((bkt) = 0, node = NULL; node == NULL && (bkt) < HASH_SIZE(name); (bkt)++)\
+ hlist_for_each_entry(obj, node, &name[bkt], member)
+
+/**
+ * hash_for_each_rcu - iterate over a rcu enabled hashtable
+ * @name: hashtable to iterate
+ * @bkt: integer to use as bucket loop cursor
+ * @node: the &struct list_head to use as a loop cursor for each entry
+ * @obj: the type * to use as a loop cursor for each entry
+ * @member: the name of the hlist_node within the struct
+ */
+#define hash_for_each_rcu(name, bkt, node, obj, member) \
+ for ((bkt) = 0, node = NULL; node == NULL && (bkt) < HASH_SIZE(name); (bkt)++)\
+ hlist_for_each_entry_rcu(obj, node, &name[bkt], member)
+
+/**
+ * hash_for_each_safe - iterate over a hashtable safe against removal of
+ * hash entry
+ * @name: hashtable to iterate
+ * @bkt: integer to use as bucket loop cursor
+ * @node: the &struct list_head to use as a loop cursor for each entry
+ * @tmp: a &struct used for temporary storage
+ * @obj: the type * to use as a loop cursor for each entry
+ * @member: the name of the hlist_node within the struct
+ */
+#define hash_for_each_safe(name, bkt, node, tmp, obj, member) \
+ for ((bkt) = 0, node = NULL; node == NULL && (bkt) < HASH_SIZE(name); (bkt)++)\
+ hlist_for_each_entry_safe(obj, node, tmp, &name[bkt], member)
+
+/**
+ * hash_for_each_possible - iterate over all possible objects hashing to the
+ * same bucket
+ * @name: hashtable to iterate
+ * @obj: the type * to use as a loop cursor for each entry
+ * @node: the &struct list_head to use as a loop cursor for each entry
+ * @member: the name of the hlist_node within the struct
+ * @key: the key of the objects to iterate over
+ */
+#define hash_for_each_possible(name, obj, node, member, key) \
+ hlist_for_each_entry(obj, node, &name[hash_min(key, HASH_BITS(name))], member)
+
+/**
+ * hash_for_each_possible_rcu - iterate over all possible objects hashing to the
+ * same bucket in an rcu enabled hashtable
+ * @name: hashtable to iterate
+ * @obj: the type * to use as a loop cursor for each entry
+ * @node: the &struct list_head to use as a loop cursor for each entry
+ * @member: the name of the hlist_node within the struct
+ * @key: the key of the objects to iterate over
+ */
+#define hash_for_each_possible_rcu(name, obj, node, member, key) \
+ hlist_for_each_entry_rcu(obj, node, &name[hash_min(key, HASH_BITS(name))], member)
+
+/**
+ * hash_for_each_possible_safe - iterate over all possible objects hashing to the
+ * same bucket safe against removals
+ * @name: hashtable to iterate
+ * @obj: the type * to use as a loop cursor for each entry
+ * @node: the &struct list_head to use as a loop cursor for each entry
+ * @tmp: a &struct used for temporary storage
+ * @member: the name of the hlist_node within the struct
+ * @key: the key of the objects to iterate over
+ */
+#define hash_for_each_possible_safe(name, obj, node, tmp, member, key) \
+ hlist_for_each_entry_safe(obj, node, tmp, \
+ &name[hash_min(key, HASH_BITS(name))], member)
+
+
+#endif
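
A quick usage illustration of the macros documented above; the structure and key are made up, and note that this version of the API threads an explicit struct hlist_node cursor through the lookup and iteration macros:

struct example_obj {
	int key;
	struct hlist_node node;
};

static DEFINE_HASHTABLE(example_table, 4);	/* 1 << 4 = 16 buckets */

static void example_insert(struct example_obj *obj)
{
	hash_add(example_table, &obj->node, obj->key);
}

static struct example_obj *example_lookup(int key)
{
	struct example_obj *obj;
	struct hlist_node *cursor;

	/* Walk only the bucket that 'key' hashes to. */
	hash_for_each_possible(example_table, obj, cursor, node, key)
		if (obj->key == key)
			return obj;
	return NULL;
}
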
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
index 8a297a5e794..0bd5a4e75ee 100644
--- a/include/linux/ipc_namespace.h
+++ b/include/linux/ipc_namespace.h
@@ -65,6 +65,8 @@ struct ipc_namespace {
/* user_ns which owns the ipc ns */
struct user_namespace *user_ns;
+
+ unsigned int proc_inum;
};
extern struct ipc_namespace init_ipc_ns;
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 9ea0dbb4035..e9c4e51ed11 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -174,6 +174,7 @@ struct ipv6_devconf {
__s32 disable_ipv6;
__s32 accept_dad;
__s32 force_tllao;
+ __s32 use_oif_addrs_only;
void *sysctl;
};
@@ -217,6 +218,7 @@ enum {
DEVCONF_FORCE_TLLAO,
DEVCONF_ACCEPT_RA_RT_TABLE,
DEVCONF_USE_OPTIMISTIC,
+ DEVCONF_USE_OIF_ADDRS_ONLY,
DEVCONF_MAX
};
diff --git a/include/linux/lglock.h b/include/linux/lglock.h
index 87f402ccec5..f01e5f6d1f0 100644
--- a/include/linux/lglock.h
+++ b/include/linux/lglock.h
@@ -23,28 +23,17 @@
#include <linux/lockdep.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
+#include <linux/notifier.h>
/* can make br locks by using local lock for read side, global lock for write */
-#define br_lock_init(name) name##_lock_init()
-#define br_read_lock(name) name##_local_lock()
-#define br_read_unlock(name) name##_local_unlock()
-#define br_write_lock(name) name##_global_lock_online()
-#define br_write_unlock(name) name##_global_unlock_online()
+#define br_lock_init(name) lg_lock_init(name, #name)
+#define br_read_lock(name) lg_local_lock(name)
+#define br_read_unlock(name) lg_local_unlock(name)
+#define br_write_lock(name) lg_global_lock(name)
+#define br_write_unlock(name) lg_global_unlock(name)
-#define DECLARE_BRLOCK(name) DECLARE_LGLOCK(name)
#define DEFINE_BRLOCK(name) DEFINE_LGLOCK(name)
-
-#define lg_lock_init(name) name##_lock_init()
-#define lg_local_lock(name) name##_local_lock()
-#define lg_local_unlock(name) name##_local_unlock()
-#define lg_local_lock_cpu(name, cpu) name##_local_lock_cpu(cpu)
-#define lg_local_unlock_cpu(name, cpu) name##_local_unlock_cpu(cpu)
-#define lg_global_lock(name) name##_global_lock()
-#define lg_global_unlock(name) name##_global_unlock()
-#define lg_global_lock_online(name) name##_global_lock_online()
-#define lg_global_unlock_online(name) name##_global_unlock_online()
-
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define LOCKDEP_INIT_MAP lockdep_init_map
@@ -59,142 +48,26 @@
#define DEFINE_LGLOCK_LOCKDEP(name)
#endif
-
-#define DECLARE_LGLOCK(name) \
- extern void name##_lock_init(void); \
- extern void name##_local_lock(void); \
- extern void name##_local_unlock(void); \
- extern void name##_local_lock_cpu(int cpu); \
- extern void name##_local_unlock_cpu(int cpu); \
- extern void name##_global_lock(void); \
- extern void name##_global_unlock(void); \
- extern void name##_global_lock_online(void); \
- extern void name##_global_unlock_online(void); \
+struct lglock {
+ arch_spinlock_t __percpu *lock;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lock_class_key lock_key;
+ struct lockdep_map lock_dep_map;
+#endif
+};
#define DEFINE_LGLOCK(name) \
- \
- DEFINE_SPINLOCK(name##_cpu_lock); \
- cpumask_t name##_cpus __read_mostly; \
- DEFINE_PER_CPU(arch_spinlock_t, name##_lock); \
- DEFINE_LGLOCK_LOCKDEP(name); \
- \
- static int \
- name##_lg_cpu_callback(struct notifier_block *nb, \
- unsigned long action, void *hcpu) \
- { \
- switch (action & ~CPU_TASKS_FROZEN) { \
- case CPU_UP_PREPARE: \
- spin_lock(&name##_cpu_lock); \
- cpu_set((unsigned long)hcpu, name##_cpus); \
- spin_unlock(&name##_cpu_lock); \
- break; \
- case CPU_UP_CANCELED: case CPU_DEAD: \
- spin_lock(&name##_cpu_lock); \
- cpu_clear((unsigned long)hcpu, name##_cpus); \
- spin_unlock(&name##_cpu_lock); \
- } \
- return NOTIFY_OK; \
- } \
- static struct notifier_block name##_lg_cpu_notifier = { \
- .notifier_call = name##_lg_cpu_callback, \
- }; \
- void name##_lock_init(void) { \
- int i; \
- LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \
- for_each_possible_cpu(i) { \
- arch_spinlock_t *lock; \
- lock = &per_cpu(name##_lock, i); \
- *lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; \
- } \
- register_hotcpu_notifier(&name##_lg_cpu_notifier); \
- get_online_cpus(); \
- for_each_online_cpu(i) \
- cpu_set(i, name##_cpus); \
- put_online_cpus(); \
- } \
- EXPORT_SYMBOL(name##_lock_init); \
- \
- void name##_local_lock(void) { \
- arch_spinlock_t *lock; \
- preempt_disable(); \
- rwlock_acquire_read(&name##_lock_dep_map, 0, 0, _THIS_IP_); \
- lock = &__get_cpu_var(name##_lock); \
- arch_spin_lock(lock); \
- } \
- EXPORT_SYMBOL(name##_local_lock); \
- \
- void name##_local_unlock(void) { \
- arch_spinlock_t *lock; \
- rwlock_release(&name##_lock_dep_map, 1, _THIS_IP_); \
- lock = &__get_cpu_var(name##_lock); \
- arch_spin_unlock(lock); \
- preempt_enable(); \
- } \
- EXPORT_SYMBOL(name##_local_unlock); \
- \
- void name##_local_lock_cpu(int cpu) { \
- arch_spinlock_t *lock; \
- preempt_disable(); \
- rwlock_acquire_read(&name##_lock_dep_map, 0, 0, _THIS_IP_); \
- lock = &per_cpu(name##_lock, cpu); \
- arch_spin_lock(lock); \
- } \
- EXPORT_SYMBOL(name##_local_lock_cpu); \
- \
- void name##_local_unlock_cpu(int cpu) { \
- arch_spinlock_t *lock; \
- rwlock_release(&name##_lock_dep_map, 1, _THIS_IP_); \
- lock = &per_cpu(name##_lock, cpu); \
- arch_spin_unlock(lock); \
- preempt_enable(); \
- } \
- EXPORT_SYMBOL(name##_local_unlock_cpu); \
- \
- void name##_global_lock_online(void) { \
- int i; \
- spin_lock(&name##_cpu_lock); \
- rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \
- for_each_cpu(i, &name##_cpus) { \
- arch_spinlock_t *lock; \
- lock = &per_cpu(name##_lock, i); \
- arch_spin_lock(lock); \
- } \
- } \
- EXPORT_SYMBOL(name##_global_lock_online); \
- \
- void name##_global_unlock_online(void) { \
- int i; \
- rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \
- for_each_cpu(i, &name##_cpus) { \
- arch_spinlock_t *lock; \
- lock = &per_cpu(name##_lock, i); \
- arch_spin_unlock(lock); \
- } \
- spin_unlock(&name##_cpu_lock); \
- } \
- EXPORT_SYMBOL(name##_global_unlock_online); \
- \
- void name##_global_lock(void) { \
- int i; \
- preempt_disable(); \
- rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \
- for_each_possible_cpu(i) { \
- arch_spinlock_t *lock; \
- lock = &per_cpu(name##_lock, i); \
- arch_spin_lock(lock); \
- } \
- } \
- EXPORT_SYMBOL(name##_global_lock); \
- \
- void name##_global_unlock(void) { \
- int i; \
- rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \
- for_each_possible_cpu(i) { \
- arch_spinlock_t *lock; \
- lock = &per_cpu(name##_lock, i); \
- arch_spin_unlock(lock); \
- } \
- preempt_enable(); \
- } \
- EXPORT_SYMBOL(name##_global_unlock);
+ DEFINE_LGLOCK_LOCKDEP(name); \
+ DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \
+ = __ARCH_SPIN_LOCK_UNLOCKED; \
+ struct lglock name = { .lock = &name ## _lock }
+
+void lg_lock_init(struct lglock *lg, char *name);
+void lg_local_lock(struct lglock *lg);
+void lg_local_unlock(struct lglock *lg);
+void lg_local_lock_cpu(struct lglock *lg, int cpu);
+void lg_local_unlock_cpu(struct lglock *lg, int cpu);
+void lg_global_lock(struct lglock *lg);
+void lg_global_unlock(struct lglock *lg);
+
#endif
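
The lock is now an ordinary object, which is why every br_read_lock/br_write_lock call in this diff gained an &vfsmount_lock argument. Using the new API for some other lock would look roughly like this (the lock name is illustrative; lg_lock_init() is only needed to set up lockdep, mirroring br_lock_init(&vfsmount_lock) in mnt_init()):

DEFINE_LGLOCK(example_lock);

/* Read side: cheap, takes only this CPU's spinlock. */
static void example_read_path(void)
{
	lg_local_lock(&example_lock);
	/* ... access data that writers exclude globally ... */
	lg_local_unlock(&example_lock);
}

/* Write side: expensive, takes every CPU's spinlock. */
static void example_write_path(void)
{
	lg_global_lock(&example_lock);
	/* ... modify data while all readers are excluded ... */
	lg_global_unlock(&example_lock);
}
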
diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h
index fad48aab893..9323a9861d2 100644
--- a/include/linux/lsm_audit.h
+++ b/include/linux/lsm_audit.h
@@ -40,6 +40,11 @@ struct lsm_network_audit {
} fam;
};
+struct lsm_ioctlop_audit {
+ struct path path;
+ u16 cmd;
+};
+
/* Auxiliary data to use in generating the audit record. */
struct common_audit_data {
char type;
@@ -53,6 +58,7 @@ struct common_audit_data {
#define LSM_AUDIT_DATA_KMOD 8
#define LSM_AUDIT_DATA_INODE 9
#define LSM_AUDIT_DATA_DENTRY 10
+#define LSM_AUDIT_DATA_IOCTL_OP 11
struct task_struct *tsk;
union {
struct path path;
@@ -69,6 +75,7 @@ struct common_audit_data {
} key_struct;
#endif
char *kmod_name;
+ struct lsm_ioctlop_audit *op;
} u;
/* this union contains LSM specific data */
union {
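
A hypothetical LSM hook filling in the new union member when auditing an ioctl; only the bookkeeping is shown, and the permission check itself is elided:

static int example_file_ioctl(struct file *file, unsigned int cmd)
{
	struct common_audit_data ad;
	struct lsm_ioctlop_audit ioctl_ad;

	ad.type = LSM_AUDIT_DATA_IOCTL_OP;
	ioctl_ad.path = file->f_path;
	ioctl_ad.cmd = (u16)cmd;	/* only the low 16 bits are recorded */
	ad.u.op = &ioctl_ad;

	/* ...the actual access check would pass &ad along for auditing... */
	return 0;
}
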
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 5daf404486e..d3c03cd3bb6 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1153,6 +1153,11 @@ static inline void update_hiwater_vm(struct mm_struct *mm)
mm->hiwater_vm = mm->total_vm;
}
+static inline void reset_mm_hiwater_rss(struct mm_struct *mm)
+{
+ mm->hiwater_rss = get_mm_rss(mm);
+}
+
static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
struct mm_struct *mm)
{
diff --git a/include/linux/mnt_namespace.h b/include/linux/mnt_namespace.h
index 5a8e3903d77..12b2ab51032 100644
--- a/include/linux/mnt_namespace.h
+++ b/include/linux/mnt_namespace.h
@@ -4,9 +4,10 @@
struct mnt_namespace;
struct fs_struct;
+struct user_namespace;
extern struct mnt_namespace *copy_mnt_ns(unsigned long, struct mnt_namespace *,
- struct fs_struct *);
+ struct user_namespace *, struct fs_struct *);
extern void put_mnt_ns(struct mnt_namespace *ns);
extern const struct file_operations proc_mounts_operations;
diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
index b067bd8c49d..375527bab2c 100644
--- a/include/linux/pid_namespace.h
+++ b/include/linux/pid_namespace.h
@@ -34,6 +34,7 @@ struct pid_namespace {
gid_t pid_gid;
int hide_pid;
int reboot; /* group exit code if this pidns was rebooted */
+ unsigned int proc_inum;
};
extern struct pid_namespace init_pid_ns;
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index 85c50730623..6c890170af8 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -28,7 +28,11 @@ struct mm_struct;
*/
enum {
- PROC_ROOT_INO = 1,
+ PROC_ROOT_INO = 1,
+ PROC_IPC_INIT_INO = 0xEFFFFFFFU,
+ PROC_UTS_INIT_INO = 0xEFFFFFFEU,
+ PROC_USER_INIT_INO = 0xEFFFFFFDU,
+ PROC_PID_INIT_INO = 0xEFFFFFFCU,
};
/*
@@ -174,7 +178,10 @@ extern struct proc_dir_entry *proc_net_mkdir(struct net *net, const char *name,
struct proc_dir_entry *parent);
extern struct file *proc_ns_fget(int fd);
+extern bool proc_ns_inode(struct inode *inode);
+extern int proc_alloc_inum(unsigned int *pino);
+extern void proc_free_inum(unsigned int inum);
#else
#define proc_net_fops_create(net, name, mode, fops) ({ (void)(mode), NULL; })
@@ -229,6 +236,19 @@ static inline struct file *proc_ns_fget(int fd)
return ERR_PTR(-EINVAL);
}
+static inline bool proc_ns_inode(struct inode *inode)
+{
+ return false;
+}
+
+static inline int proc_alloc_inum(unsigned int *inum)
+{
+ *inum = 1;
+ return 0;
+}
+static inline void proc_free_inum(unsigned int inum)
+{
+}
#endif /* CONFIG_PROC_FS */
#if !defined(CONFIG_PROC_KCORE)
@@ -247,10 +267,13 @@ struct proc_ns_operations {
void *(*get)(struct task_struct *task);
void (*put)(void *ns);
int (*install)(struct nsproxy *nsproxy, void *ns);
+ unsigned int (*inum)(void *ns);
};
extern const struct proc_ns_operations netns_operations;
extern const struct proc_ns_operations utsns_operations;
extern const struct proc_ns_operations ipcns_operations;
+extern const struct proc_ns_operations pidns_operations;
+extern const struct proc_ns_operations mntns_operations;
union proc_op {
int (*proc_get_link)(struct dentry *, struct path *);
diff --git a/include/linux/security.h b/include/linux/security.h
index 2a825304509..9eb6fd4cefc 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -1418,8 +1418,8 @@ struct security_operations {
int (*sb_kern_mount) (struct super_block *sb, int flags, void *data);
int (*sb_show_options) (struct seq_file *m, struct super_block *sb);
int (*sb_statfs) (struct dentry *dentry);
- int (*sb_mount) (char *dev_name, struct path *path,
- char *type, unsigned long flags, void *data);
+ int (*sb_mount) (const char *dev_name, struct path *path,
+ const char *type, unsigned long flags, void *data);
int (*sb_umount) (struct vfsmount *mnt, int flags);
int (*sb_pivotroot) (struct path *old_path,
struct path *new_path);
@@ -1705,8 +1705,8 @@ int security_sb_remount(struct super_block *sb, void *data);
int security_sb_kern_mount(struct super_block *sb, int flags, void *data);
int security_sb_show_options(struct seq_file *m, struct super_block *sb);
int security_sb_statfs(struct dentry *dentry);
-int security_sb_mount(char *dev_name, struct path *path,
- char *type, unsigned long flags, void *data);
+int security_sb_mount(const char *dev_name, struct path *path,
+ const char *type, unsigned long flags, void *data);
int security_sb_umount(struct vfsmount *mnt, int flags);
int security_sb_pivotroot(struct path *old_path, struct path *new_path);
int security_sb_set_mnt_opts(struct super_block *sb, struct security_mnt_opts *opts);
@@ -1995,8 +1995,8 @@ static inline int security_sb_statfs(struct dentry *dentry)
return 0;
}
-static inline int security_sb_mount(char *dev_name, struct path *path,
- char *type, unsigned long flags,
+static inline int security_sb_mount(const char *dev_name, struct path *path,
+ const char *type, unsigned long flags,
void *data)
{
return 0;
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index faf467944ba..5ecc9888c22 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -14,6 +14,7 @@ struct user_namespace {
struct hlist_head uidhash_table[UIDHASH_SZ];
struct user_struct *creator;
struct work_struct destroyer;
+ unsigned int proc_inum;
};
extern struct user_namespace init_user_ns;
diff --git a/include/linux/utsname.h b/include/linux/utsname.h
index c714ed75eae..ae739d72638 100644
--- a/include/linux/utsname.h
+++ b/include/linux/utsname.h
@@ -52,6 +52,7 @@ struct uts_namespace {
struct kref kref;
struct new_utsname name;
struct user_namespace *user_ns;
+ unsigned int proc_inum;
};
extern struct uts_namespace init_uts_ns;
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index ee547c14981..b1cd8b6916d 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -52,6 +52,8 @@ struct net {
struct list_head cleanup_list; /* namespaces on death row */
struct list_head exit_list; /* Use only net_mutex */
+ unsigned int proc_inum;
+
struct proc_dir_entry *proc_net;
struct proc_dir_entry *proc_net_stat;
diff --git a/init/version.c b/init/version.c
index 86fe0ccb997..58170f18912 100644
--- a/init/version.c
+++ b/init/version.c
@@ -12,6 +12,7 @@
#include <linux/utsname.h>
#include <generated/utsrelease.h>
#include <linux/version.h>
+#include <linux/proc_fs.h>
#ifndef CONFIG_KALLSYMS
#define version(a) Version_ ## a
@@ -34,6 +35,7 @@ struct uts_namespace init_uts_ns = {
.domainname = UTS_DOMAINNAME,
},
.user_ns = &init_user_ns,
+ .proc_inum = PROC_UTS_INIT_INO,
};
EXPORT_SYMBOL_GPL(init_uts_ns);
diff --git a/ipc/msgutil.c b/ipc/msgutil.c
index 26143d377c9..6471f1bdae9 100644
--- a/ipc/msgutil.c
+++ b/ipc/msgutil.c
@@ -16,6 +16,7 @@
#include <linux/msg.h>
#include <linux/ipc_namespace.h>
#include <linux/utsname.h>
+#include <linux/proc_fs.h>
#include <asm/uaccess.h>
#include "util.h"
@@ -30,6 +31,7 @@ DEFINE_SPINLOCK(mq_lock);
struct ipc_namespace init_ipc_ns = {
.count = ATOMIC_INIT(1),
.user_ns = &init_user_ns,
+ .proc_inum = PROC_IPC_INIT_INO,
};
atomic_t nr_ipc_ns = ATOMIC_INIT(1);
diff --git a/ipc/namespace.c b/ipc/namespace.c
index ce0a647869b..cd7f7330d68 100644
--- a/ipc/namespace.c
+++ b/ipc/namespace.c
@@ -26,9 +26,16 @@ static struct ipc_namespace *create_ipc_ns(struct task_struct *tsk,
if (ns == NULL)
return ERR_PTR(-ENOMEM);
+ err = proc_alloc_inum(&ns->proc_inum);
+ if (err) {
+ kfree(ns);
+ return ERR_PTR(err);
+ }
+
atomic_set(&ns->count, 1);
err = mq_init_ns(ns);
if (err) {
+ proc_free_inum(ns->proc_inum);
kfree(ns);
return ERR_PTR(err);
}
@@ -113,6 +120,7 @@ static void free_ipc_ns(struct ipc_namespace *ns)
*/
ipcns_notify(IPCNS_REMOVED);
put_user_ns(ns->user_ns);
+ proc_free_inum(ns->proc_inum);
kfree(ns);
}
@@ -170,10 +178,18 @@ static int ipcns_install(struct nsproxy *nsproxy, void *ns)
return 0;
}
+static unsigned int ipcns_inum(void *vp)
+{
+ struct ipc_namespace *ns = vp;
+
+ return ns->proc_inum;
+}
+
const struct proc_ns_operations ipcns_operations = {
.name = "ipc",
.type = CLONE_NEWIPC,
.get = ipcns_get,
.put = ipcns_put,
.install = ipcns_install,
+ .inum = ipcns_inum,
};
diff --git a/kernel/Makefile b/kernel/Makefile
index 5b148532e57..ace94dd4135 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -10,7 +10,7 @@ obj-y = fork.o exec_domain.o panic.o printk.o \
kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
notifier.o ksysfs.o cred.o \
- async.o range.o groups.o smpboot.o
+ async.o range.o groups.o lglock.o
ifdef CONFIG_FUNCTION_TRACER
# Do not trace debug files and internal ftrace files
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 5bf0790497e..3a5ca582ba1 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -595,7 +595,7 @@ void audit_trim_trees(void)
root_mnt = collect_mounts(&path);
path_put(&path);
- if (!root_mnt)
+ if (IS_ERR(root_mnt))
goto skip_it;
spin_lock(&hash_lock);
@@ -669,8 +669,8 @@ int audit_add_tree_rule(struct audit_krule *rule)
goto Err;
mnt = collect_mounts(&path);
path_put(&path);
- if (!mnt) {
- err = -ENOMEM;
+ if (IS_ERR(mnt)) {
+ err = PTR_ERR(mnt);
goto Err;
}
@@ -719,8 +719,8 @@ int audit_tag_tree(char *old, char *new)
return err;
tagged = collect_mounts(&path2);
path_put(&path2);
- if (!tagged)
- return -ENOMEM;
+ if (IS_ERR(tagged))
+ return PTR_ERR(tagged);
err = kern_path(old, 0, &path1);
if (err) {
diff --git a/kernel/exit.c b/kernel/exit.c
index f38ee31c007..c17566b48a9 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -640,7 +640,9 @@ static void exit_mm(struct task_struct * tsk)
{
struct mm_struct *mm = tsk->mm;
struct core_state *core_state;
+#ifndef CONFIG_UML
int mm_released;
+#endif
mm_release(tsk, mm);
if (!mm)
@@ -686,9 +688,11 @@ static void exit_mm(struct task_struct * tsk)
task_unlock(tsk);
mm_update_next_owner(mm);
+#ifndef CONFIG_UML
mm_released = mmput(mm);
if (mm_released)
set_tsk_thread_flag(tsk, TIF_MM_RELEASED);
+#endif
}
/*
diff --git a/kernel/lglock.c b/kernel/lglock.c
new file mode 100644
index 00000000000..6535a667a5a
--- /dev/null
+++ b/kernel/lglock.c
@@ -0,0 +1,89 @@
+/* See include/linux/lglock.h for description */
+#include <linux/module.h>
+#include <linux/lglock.h>
+#include <linux/cpu.h>
+#include <linux/string.h>
+
+/*
+ * Note that there is no uninit, so lglocks cannot be defined in
+ * modules (though it is fine to use them from modules).
+ * Uninit could be added; it would just undo lg_lock_init.
+ */
+
+void lg_lock_init(struct lglock *lg, char *name)
+{
+ LOCKDEP_INIT_MAP(&lg->lock_dep_map, name, &lg->lock_key, 0);
+}
+EXPORT_SYMBOL(lg_lock_init);
+
+void lg_local_lock(struct lglock *lg)
+{
+ arch_spinlock_t *lock;
+
+ preempt_disable();
+ rwlock_acquire_read(&lg->lock_dep_map, 0, 0, _RET_IP_);
+ lock = this_cpu_ptr(lg->lock);
+ arch_spin_lock(lock);
+}
+EXPORT_SYMBOL(lg_local_lock);
+
+void lg_local_unlock(struct lglock *lg)
+{
+ arch_spinlock_t *lock;
+
+ rwlock_release(&lg->lock_dep_map, 1, _RET_IP_);
+ lock = this_cpu_ptr(lg->lock);
+ arch_spin_unlock(lock);
+ preempt_enable();
+}
+EXPORT_SYMBOL(lg_local_unlock);
+
+void lg_local_lock_cpu(struct lglock *lg, int cpu)
+{
+ arch_spinlock_t *lock;
+
+ preempt_disable();
+ rwlock_acquire_read(&lg->lock_dep_map, 0, 0, _RET_IP_);
+ lock = per_cpu_ptr(lg->lock, cpu);
+ arch_spin_lock(lock);
+}
+EXPORT_SYMBOL(lg_local_lock_cpu);
+
+void lg_local_unlock_cpu(struct lglock *lg, int cpu)
+{
+ arch_spinlock_t *lock;
+
+ rwlock_release(&lg->lock_dep_map, 1, _RET_IP_);
+ lock = per_cpu_ptr(lg->lock, cpu);
+ arch_spin_unlock(lock);
+ preempt_enable();
+}
+EXPORT_SYMBOL(lg_local_unlock_cpu);
+
+void lg_global_lock(struct lglock *lg)
+{
+ int i;
+
+ preempt_disable();
+ rwlock_acquire(&lg->lock_dep_map, 0, 0, _RET_IP_);
+ for_each_possible_cpu(i) {
+ arch_spinlock_t *lock;
+ lock = per_cpu_ptr(lg->lock, i);
+ arch_spin_lock(lock);
+ }
+}
+EXPORT_SYMBOL(lg_global_lock);
+
+void lg_global_unlock(struct lglock *lg)
+{
+ int i;
+
+ rwlock_release(&lg->lock_dep_map, 1, _RET_IP_);
+ for_each_possible_cpu(i) {
+ arch_spinlock_t *lock;
+ lock = per_cpu_ptr(lg->lock, i);
+ arch_spin_unlock(lock);
+ }
+ preempt_enable();
+}
+EXPORT_SYMBOL(lg_global_unlock);
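
The lglock added above trades one global spinlock for one arch spinlock per CPU: lg_local_lock() takes only the current CPU's lock, while lg_global_lock() sweeps every per-CPU lock in turn. A rough userspace analogue, assuming pthread spinlocks and a fixed slot per thread in place of per-CPU data (illustrative only, not kernel code):

#include <pthread.h>
#include <stdio.h>

#define NSLOTS 4                        /* stand-in for the number of CPUs */

static pthread_spinlock_t slot_lock[NSLOTS];

static void lg_init(void)
{
	for (int i = 0; i < NSLOTS; i++)
		pthread_spin_init(&slot_lock[i], PTHREAD_PROCESS_PRIVATE);
}

/* fast path: take only this thread's slot, like lg_local_lock() */
static void local_lock(int slot)   { pthread_spin_lock(&slot_lock[slot]); }
static void local_unlock(int slot) { pthread_spin_unlock(&slot_lock[slot]); }

/* slow path: take every slot, like lg_global_lock() */
static void global_lock(void)
{
	for (int i = 0; i < NSLOTS; i++)
		pthread_spin_lock(&slot_lock[i]);
}

static void global_unlock(void)
{
	for (int i = 0; i < NSLOTS; i++)
		pthread_spin_unlock(&slot_lock[i]);
}

int main(void)
{
	lg_init();
	local_lock(0);                  /* cheap "CPU-local" critical section */
	local_unlock(0);
	global_lock();                  /* rare global sweep, O(NSLOTS) */
	global_unlock();
	puts("lglock analogue ok");
	return 0;
}

The trade-off mirrors the kernel case: the local path never contends across CPUs, while the global path pays a cost proportional to the CPU count and is reserved for rare operations.
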
diff --git a/kernel/nsproxy.c b/kernel/nsproxy.c
index b576f7f14bc..5b6ce19064a 100644
--- a/kernel/nsproxy.c
+++ b/kernel/nsproxy.c
@@ -66,7 +66,7 @@ static struct nsproxy *create_new_namespaces(unsigned long flags,
if (!new_nsp)
return ERR_PTR(-ENOMEM);
- new_nsp->mnt_ns = copy_mnt_ns(flags, tsk->nsproxy->mnt_ns, new_fs);
+ new_nsp->mnt_ns = copy_mnt_ns(flags, tsk->nsproxy->mnt_ns, task_cred_xxx(tsk, user_ns), new_fs);
if (IS_ERR(new_nsp->mnt_ns)) {
err = PTR_ERR(new_nsp->mnt_ns);
goto out_ns;
diff --git a/kernel/pid.c b/kernel/pid.c
index 9f08dfabaf1..c2010d14c8b 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -78,6 +78,7 @@ struct pid_namespace init_pid_ns = {
.last_pid = 0,
.level = 0,
.child_reaper = &init_task,
+ .proc_inum = PROC_PID_INIT_INO,
};
EXPORT_SYMBOL_GPL(init_pid_ns);
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index 57bc1fd35b3..4f15befb090 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -88,6 +88,10 @@ static struct pid_namespace *create_pid_namespace(struct pid_namespace *parent_p
if (ns->pid_cachep == NULL)
goto out_free_map;
+ err = proc_alloc_inum(&ns->proc_inum);
+ if (err)
+ goto out_free_map;
+
kref_init(&ns->kref);
ns->level = level;
ns->parent = get_pid_ns(parent_pid_ns);
@@ -118,6 +122,7 @@ static void destroy_pid_namespace(struct pid_namespace *ns)
{
int i;
+ proc_free_inum(ns->proc_inum);
for (i = 0; i < PIDMAP_ENTRIES; i++)
kfree(ns->pidmap[i].page);
kmem_cache_free(pid_ns_cachep, ns);
diff --git a/kernel/printk.c b/kernel/printk.c
index ecb4ce36390..fe326d1a9b2 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -44,7 +44,10 @@
#include <asm/uaccess.h>
+#ifdef CONFIG_MSM_RTB
#include <mach/msm_rtb.h>
+#endif
+
#define CREATE_TRACE_POINTS
#include <trace/events/printk.h>
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b685037b415..5422a2c91e5 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1798,25 +1798,11 @@ stat:
out:
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
- if (task_notify_on_migrate(p)) {
- struct migration_notify_data mnd;
-
- mnd.src_cpu = src_cpu;
- mnd.dest_cpu = cpu;
- mnd.load = pct_task_load(p);
-
- /*
- * Call the migration notifier with mnd for foreground task
- * migrations as well as for wakeups if their load is above
- * sysctl_sched_wakeup_load_threshold. This would prompt the
- * cpu-boost to boost the CPU frequency on wake up of a heavy
- * weight foreground task
- */
- if ((src_cpu != cpu) || (mnd.load >
- sysctl_sched_wakeup_load_threshold))
- atomic_notifier_call_chain(&migration_notifier_head,
- 0, (void *)&mnd);
- }
+#ifndef CONFIG_UML
+ if (src_cpu != cpu && task_notify_on_migrate(p))
+ atomic_notifier_call_chain(&migration_notifier_head,
+ cpu, (void *)src_cpu);
+#endif
return success;
}
diff --git a/kernel/user.c b/kernel/user.c
index 71dd2363ab0..9013a4fe0b1 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -16,6 +16,7 @@
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
+#include <linux/proc_fs.h>
/*
* userns count is 1 for root user, 1 for init_uts_ns,
@@ -26,6 +27,7 @@ struct user_namespace init_user_ns = {
.refcount = ATOMIC_INIT(3),
},
.creator = &root_user,
+ .proc_inum = PROC_USER_INIT_INO,
};
EXPORT_SYMBOL_GPL(init_user_ns);
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index 3b906e98b1d..c14b7b9fe41 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -27,11 +27,18 @@ int create_user_ns(struct cred *new)
struct user_namespace *ns;
struct user_struct *root_user;
int n;
+ int ret;
ns = kmem_cache_alloc(user_ns_cachep, GFP_KERNEL);
if (!ns)
return -ENOMEM;
+ ret = proc_alloc_inum(&ns->proc_inum);
+ if (ret) {
+ kmem_cache_free(user_ns_cachep, ns);
+ return ret;
+ }
+
kref_init(&ns->kref);
for (n = 0; n < UIDHASH_SZ; ++n)
@@ -73,6 +80,7 @@ static void free_user_ns_work(struct work_struct *work)
struct user_namespace *ns =
container_of(work, struct user_namespace, destroyer);
free_uid(ns->creator);
+ proc_free_inum(ns->proc_inum);
kmem_cache_free(user_ns_cachep, ns);
}
diff --git a/kernel/utsname.c b/kernel/utsname.c
index 405caf91aad..ce3d44b4187 100644
--- a/kernel/utsname.c
+++ b/kernel/utsname.c
@@ -36,11 +36,18 @@ static struct uts_namespace *clone_uts_ns(struct task_struct *tsk,
struct uts_namespace *old_ns)
{
struct uts_namespace *ns;
+ int err;
ns = create_uts_ns();
if (!ns)
return ERR_PTR(-ENOMEM);
+ err = proc_alloc_inum(&ns->proc_inum);
+ if (err) {
+ kfree(ns);
+ return ERR_PTR(err);
+ }
+
down_read(&uts_sem);
memcpy(&ns->name, &old_ns->name, sizeof(ns->name));
ns->user_ns = get_user_ns(task_cred_xxx(tsk, user)->user_ns);
@@ -78,6 +85,7 @@ void free_uts_ns(struct kref *kref)
ns = container_of(kref, struct uts_namespace, kref);
put_user_ns(ns->user_ns);
+ proc_free_inum(ns->proc_inum);
kfree(ns);
}
@@ -110,11 +118,18 @@ static int utsns_install(struct nsproxy *nsproxy, void *ns)
return 0;
}
+static unsigned int utsns_inum(void *vp)
+{
+ struct uts_namespace *ns = vp;
+
+ return ns->proc_inum;
+}
+
const struct proc_ns_operations utsns_operations = {
.name = "uts",
.type = CLONE_NEWUTS,
.get = utsns_get,
.put = utsns_put,
.install = utsns_install,
+ .inum = utsns_inum,
};
-
diff --git a/lib/memory_alloc.c b/lib/memory_alloc.c
index 03f19448984..e15836fb8e4 100644
--- a/lib/memory_alloc.c
+++ b/lib/memory_alloc.c
@@ -186,10 +186,14 @@ static void *__alloc(struct mem_pool *mpool, unsigned long size,
if (!node)
goto out;
+#ifndef CONFIG_UML
if (cached)
vaddr = ioremap_cached(paddr, aligned_size);
else
vaddr = ioremap(paddr, aligned_size);
+#else
+ vaddr = NULL; /* hack to allow this to compile on ARCH=um */
+#endif
if (!vaddr)
goto out_kfree;
@@ -211,8 +215,10 @@ static void *__alloc(struct mem_pool *mpool, unsigned long size,
return vaddr;
out_kfree:
+#ifndef CONFIG_UML
if (vaddr)
iounmap(vaddr);
+#endif
kfree(node);
out:
gen_pool_free(mpool->gpool, paddr, aligned_size);
@@ -226,14 +232,10 @@ static void __free(void *vaddr, bool unmap)
if (!node)
return;
+#ifndef CONFIG_UML
if (unmap)
- /*
- * We need the double cast because otherwise gcc complains about
- * cast to pointer of different size. This is technically a down
- * cast but if unmap is being called, this had better be an
- * actual 32-bit pointer anyway.
- */
- iounmap((void *)(unsigned long)node->vaddr);
+ iounmap(node->vaddr);
+#endif
gen_pool_free(node->mpool->gpool, node->paddr, node->len);
node->mpool->free += node->len;
diff --git a/mm/filemap.c b/mm/filemap.c
index b15b75636ce..6ca89ec353a 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2547,7 +2547,9 @@ ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
if (err)
goto out;
- file_update_time(file);
+ err = file_update_time(file);
+ if (err)
+ goto out;
/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
if (unlikely(file->f_flags & O_DIRECT)) {
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index a4eb3113222..213ca1f5340 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -426,7 +426,9 @@ xip_file_write(struct file *filp, const char __user *buf, size_t len,
if (ret)
goto out_backing;
- file_update_time(filp);
+ ret = file_update_time(filp);
+ if (ret)
+ goto out_backing;
ret = __xip_file_write (filp, buf, count, pos, ppos);
diff --git a/mm/mlock.c b/mm/mlock.c
index 029f234eb6d..f38698cd7c9 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -23,10 +23,10 @@
int can_do_mlock(void)
{
- if (capable(CAP_IPC_LOCK))
- return 1;
if (rlimit(RLIMIT_MEMLOCK) != 0)
return 1;
+ if (capable(CAP_IPC_LOCK))
+ return 1;
return 0;
}
EXPORT_SYMBOL(can_do_mlock);
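
With the checks reordered as above, an unprivileged caller that stays within RLIMIT_MEMLOCK succeeds without can_do_mlock() ever consulting capable(CAP_IPC_LOCK), so the common path avoids a needless capability check and any audit noise it produces. A minimal example of that path, assuming the usual non-zero default memlock rlimit:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	char *buf = malloc(page);

	if (!buf)
		return 1;
	memset(buf, 0, page);
	/* Within RLIMIT_MEMLOCK this succeeds for an unprivileged process
	 * without CAP_IPC_LOCK being needed. */
	if (mlock(buf, page)) {
		perror("mlock");        /* e.g. RLIMIT_MEMLOCK set to 0 */
		return 1;
	}
	munlock(buf, page);
	free(buf);
	puts("locked and unlocked one page");
	return 0;
}
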
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 0a68045782d..603b4036ff0 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -935,6 +935,7 @@ static void neigh_timer_handler(unsigned long arg)
neigh->nud_state = NUD_PROBE;
neigh->updated = jiffies;
atomic_set(&neigh->probes, 0);
+ notify = 1;
next = now + neigh->parms->retrans_time;
}
} else {
@@ -1162,6 +1163,8 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
if (new != old) {
neigh_del_timer(neigh);
+ if (new & NUD_PROBE)
+ atomic_set(&neigh->probes, 0);
if (new & NUD_IN_TIMER)
neigh_add_timer(neigh, (jiffies +
((new & NUD_REACHABLE) ?
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 31a5ae51a45..335ab8875bf 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -376,6 +376,21 @@ struct net *get_net_ns_by_pid(pid_t pid)
}
EXPORT_SYMBOL_GPL(get_net_ns_by_pid);
+static __net_init int net_ns_net_init(struct net *net)
+{
+ return proc_alloc_inum(&net->proc_inum);
+}
+
+static __net_exit void net_ns_net_exit(struct net *net)
+{
+ proc_free_inum(net->proc_inum);
+}
+
+static struct pernet_operations __net_initdata net_ns_ops = {
+ .init = net_ns_net_init,
+ .exit = net_ns_net_exit,
+};
+
static int __init net_ns_init(void)
{
struct net_generic *ng;
@@ -407,6 +422,8 @@ static int __init net_ns_init(void)
mutex_unlock(&net_mutex);
+ register_pernet_subsys(&net_ns_ops);
+
return 0;
}
@@ -630,11 +647,18 @@ static int netns_install(struct nsproxy *nsproxy, void *ns)
return 0;
}
+static unsigned int netns_inum(void *ns)
+{
+ struct net *net = ns;
+ return net->proc_inum;
+}
+
const struct proc_ns_operations netns_operations = {
.name = "net",
.type = CLONE_NEWNET,
.get = netns_get,
.put = netns_put,
.install = netns_install,
+ .inum = netns_inum,
};
#endif
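
The proc_inum fields threaded through the ipc, uts, user, pid and net namespaces above give every /proc/<pid>/ns/<name> file a stable per-namespace inode number, so userspace can tell whether two tasks share a namespace by comparing st_ino. A minimal sketch, assuming a kernel with these patches and permission to inspect the other task:

#include <stdio.h>
#include <sys/stat.h>

/* Returns 1 if the two tasks share the namespace, 0 if not, -1 on error
 * (e.g. the kernel does not expose the file or access is denied). */
static int same_ns(const char *path_a, const char *path_b)
{
	struct stat a, b;

	if (stat(path_a, &a) || stat(path_b, &b))
		return -1;
	return a.st_ino == b.st_ino;
}

int main(void)
{
	int r = same_ns("/proc/self/ns/uts", "/proc/1/ns/uts");

	if (r < 0)
		perror("stat");
	else
		printf("same uts namespace as init: %s\n", r ? "yes" : "no");
	return 0;
}
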
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index e2a832defa5..145dffd8f1f 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -255,6 +255,9 @@ int ping_init_sock(struct sock *sk)
int i, j, count;
int ret = 0;
+ if (sk->sk_family == AF_INET6)
+ inet6_sk(sk)->ipv6only = 1;
+
inet_get_ping_group_range_net(net, range, range+1);
if (range[0] <= group && group <= range[1])
return 0;
@@ -302,6 +305,11 @@ int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
if (addr_len < sizeof(*addr))
return -EINVAL;
+ if (addr->sin_family != AF_INET &&
+ !(addr->sin_family == AF_UNSPEC &&
+ addr->sin_addr.s_addr == htonl(INADDR_ANY)))
+ return -EAFNOSUPPORT;
+
pr_debug("ping_check_bind_addr(sk=%p,addr=%pI4,port=%d)\n",
sk, &addr->sin_addr.s_addr, ntohs(addr->sin_port));
@@ -326,6 +334,9 @@ int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
if (addr_len < sizeof(*addr))
return -EINVAL;
+ if (addr->sin6_family != AF_INET6)
+ return -EAFNOSUPPORT;
+
pr_debug("ping_check_bind_addr(sk=%p,addr=%pI6c,port=%d)\n",
sk, addr->sin6_addr.s6_addr, ntohs(addr->sin6_port));
@@ -712,7 +723,7 @@ int ping_v4_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
if (msg->msg_namelen < sizeof(*usin))
return -EINVAL;
if (usin->sin_family != AF_INET)
- return -EINVAL;
+ return -EAFNOSUPPORT;
daddr = usin->sin_addr.s_addr;
/* no remote port */
} else {
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index b2458f26d55..ab84cd9c412 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2531,7 +2531,7 @@ void tcp_get_info(const struct sock *sk, struct tcp_info *info)
if (sk->sk_socket) {
struct file *filep = sk->sk_socket->file;
if (filep)
- info->tcpi_count = atomic_read(&filep->f_count);
+ info->tcpi_count = file_count(filep);
}
}
EXPORT_SYMBOL_GPL(tcp_get_info);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index d775657db60..2ffd65ee463 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -198,6 +198,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
.accept_source_route = 0, /* we do not accept RH0 by default. */
.disable_ipv6 = 0,
.accept_dad = 1,
+ .use_oif_addrs_only = 0,
};
static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
@@ -233,6 +234,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
.accept_source_route = 0, /* we do not accept RH0 by default. */
.disable_ipv6 = 0,
.accept_dad = 1,
+ .use_oif_addrs_only = 0,
};
/* IPv6 Wildcard Address and Loopback Address defined by RFC2553 */
@@ -1166,9 +1168,15 @@ int ipv6_dev_get_saddr(struct net *net, struct net_device *dst_dev,
* include addresses assigned to interfaces
* belonging to the same site as the outgoing
* interface.)
+ * - "It is RECOMMENDED that the candidate source addresses
+ * be the set of unicast addresses assigned to the
+ * interface that will be used to send to the destination
+ * (the 'outgoing' interface)." (RFC 6724)
*/
+ idev = dst_dev ? __in6_dev_get(dst_dev) : NULL;
if (((dst_type & IPV6_ADDR_MULTICAST) ||
- dst.scope <= IPV6_ADDR_SCOPE_LINKLOCAL) &&
+ dst.scope <= IPV6_ADDR_SCOPE_LINKLOCAL ||
+ (idev && idev->cnf.use_oif_addrs_only)) &&
dst.ifindex && dev->ifindex != dst.ifindex)
continue;
@@ -3994,6 +4002,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
array[DEVCONF_DISABLE_IPV6] = cnf->disable_ipv6;
array[DEVCONF_ACCEPT_DAD] = cnf->accept_dad;
array[DEVCONF_FORCE_TLLAO] = cnf->force_tllao;
+ array[DEVCONF_USE_OIF_ADDRS_ONLY] = cnf->use_oif_addrs_only;
}
static inline size_t inet6_ifla6_size(void)
@@ -4680,6 +4689,14 @@ static struct addrconf_sysctl_table
.proc_handler = proc_dointvec
},
{
+ .procname = "use_oif_addrs_only",
+ .data = &ipv6_devconf.use_oif_addrs_only,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+
+ },
+ {
/* sentinel */
}
},
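
use_oif_addrs_only restricts IPv6 source-address selection to addresses assigned to the outgoing interface, following the RFC 6724 recommendation quoted in the hunk above. It is exposed as a per-device sysctl; a minimal sketch of enabling it, assuming the usual /proc/sys/net/ipv6/conf/<dev>/ layout (the choice of "all" here is illustrative):

#include <stdio.h>

int main(void)
{
	/* Requires root; adjust "all" to a specific device name to scope
	 * the change.  The file is absent on kernels without this patch. */
	const char *path = "/proc/sys/net/ipv6/conf/all/use_oif_addrs_only";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	fputs("1\n", f);
	fclose(f);
	return 0;
}
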
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index f1c42c8eee5..045eb06a43f 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -127,9 +127,10 @@ int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
if (msg->msg_name) {
struct sockaddr_in6 *u = (struct sockaddr_in6 *) msg->msg_name;
- if (msg->msg_namelen < sizeof(struct sockaddr_in6) ||
- u->sin6_family != AF_INET6) {
+ if (msg->msg_namelen < sizeof(*u))
return -EINVAL;
+ if (u->sin6_family != AF_INET6) {
+ return -EAFNOSUPPORT;
}
if (sk->sk_bound_dev_if &&
sk->sk_bound_dev_if != u->sin6_scope_id) {
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 3e6b53b981a..ab71b41e139 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -928,6 +928,8 @@ struct dst_entry * ip6_route_output(struct net *net, const struct sock *sk,
{
int flags = 0;
+ fl6->flowi6_iif = net->loopback_dev->ifindex;
+
if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr))
flags |= RT6_LOOKUP_F_IFACE;
diff --git a/security/capability.c b/security/capability.c
index fc991bcab24..d0a49eeca59 100644
--- a/security/capability.c
+++ b/security/capability.c
@@ -94,8 +94,8 @@ static int cap_sb_statfs(struct dentry *dentry)
return 0;
}
-static int cap_sb_mount(char *dev_name, struct path *path, char *type,
- unsigned long flags, void *data)
+static int cap_sb_mount(const char *dev_name, struct path *path,
+ const char *type, unsigned long flags, void *data)
{
return 0;
}
diff --git a/security/lsm_audit.c b/security/lsm_audit.c
index 90c129b0102..abacc49f111 100644
--- a/security/lsm_audit.c
+++ b/security/lsm_audit.c
@@ -242,6 +242,21 @@ static void dump_common_audit_data(struct audit_buffer *ab,
}
break;
}
+ case LSM_AUDIT_DATA_IOCTL_OP: {
+ struct inode *inode;
+
+ audit_log_d_path(ab, " path=", &a->u.op->path);
+
+ inode = a->u.op->path.dentry->d_inode;
+ if (inode) {
+ audit_log_format(ab, " dev=");
+ audit_log_untrustedstring(ab, inode->i_sb->s_id);
+ audit_log_format(ab, " ino=%lu", inode->i_ino);
+ }
+
+ audit_log_format(ab, " ioctlcmd=%hx", a->u.op->cmd);
+ break;
+ }
case LSM_AUDIT_DATA_DENTRY: {
struct inode *inode;
diff --git a/security/security.c b/security/security.c
index cecd55e581c..0e1916e91bd 100644
--- a/security/security.c
+++ b/security/security.c
@@ -280,8 +280,8 @@ int security_sb_statfs(struct dentry *dentry)
return security_ops->sb_statfs(dentry);
}
-int security_sb_mount(char *dev_name, struct path *path,
- char *type, unsigned long flags, void *data)
+int security_sb_mount(const char *dev_name, struct path *path,
+ const char *type, unsigned long flags, void *data)
{
return security_ops->sb_mount(dev_name, path, type, flags, data);
}
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index 698cb053d1e..59c831cd802 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -22,6 +22,7 @@
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/percpu.h>
+#include <linux/list.h>
#include <net/sock.h>
#include <linux/un.h>
#include <net/af_unix.h>
@@ -48,6 +49,7 @@ struct avc_entry {
u32 tsid;
u16 tclass;
struct av_decision avd;
+ struct avc_operation_node *ops_node;
};
struct avc_node {
@@ -56,6 +58,16 @@ struct avc_node {
struct rcu_head rhead;
};
+struct avc_operation_decision_node {
+ struct operation_decision od;
+ struct list_head od_list;
+};
+
+struct avc_operation_node {
+ struct operation ops;
+ struct list_head od_head; /* list of operation_decision_node */
+};
+
struct avc_cache {
struct hlist_head slots[AVC_CACHE_SLOTS]; /* head for avc_node->list */
spinlock_t slots_lock[AVC_CACHE_SLOTS]; /* lock for writes */
@@ -86,6 +98,9 @@ DEFINE_PER_CPU(struct avc_cache_stats, avc_cache_stats) = { 0 };
static struct avc_cache avc_cache;
static struct avc_callback_node *avc_callbacks;
static struct kmem_cache *avc_node_cachep;
+static struct kmem_cache *avc_operation_decision_node_cachep;
+static struct kmem_cache *avc_operation_node_cachep;
+static struct kmem_cache *avc_operation_perm_cachep;
static inline int avc_hash(u32 ssid, u32 tsid, u16 tclass)
{
@@ -177,6 +192,16 @@ void __init avc_init(void)
avc_node_cachep = kmem_cache_create("avc_node", sizeof(struct avc_node),
0, SLAB_PANIC, NULL);
+ avc_operation_node_cachep = kmem_cache_create("avc_operation_node",
+ sizeof(struct avc_operation_node),
+ 0, SLAB_PANIC, NULL);
+ avc_operation_decision_node_cachep = kmem_cache_create(
+ "avc_operation_decision_node",
+ sizeof(struct avc_operation_decision_node),
+ 0, SLAB_PANIC, NULL);
+ avc_operation_perm_cachep = kmem_cache_create("avc_operation_perm",
+ sizeof(struct operation_perm),
+ 0, SLAB_PANIC, NULL);
audit_log(current->audit_context, GFP_KERNEL, AUDIT_KERNEL, "AVC INITIALIZED\n");
}
@@ -213,9 +238,255 @@ int avc_get_hash_stats(char *page)
slots_used, AVC_CACHE_SLOTS, max_chain_len);
}
+/*
+ * Use a linked list for operation_decision lookup because the list is
+ * always small, i.e. fewer than five entries and typically just one.
+ */
+static struct operation_decision *avc_operation_lookup(u8 type,
+ struct avc_operation_node *ops_node)
+{
+ struct avc_operation_decision_node *od_node;
+ struct operation_decision *od = NULL;
+
+ list_for_each_entry(od_node, &ops_node->od_head, od_list) {
+ if (od_node->od.type != type)
+ continue;
+ od = &od_node->od;
+ break;
+ }
+ return od;
+}
+
+static inline unsigned int avc_operation_has_perm(struct operation_decision *od,
+ u16 cmd, u8 specified)
+{
+ unsigned int rc = 0;
+ u8 num = cmd & 0xff;
+
+ if ((specified == OPERATION_ALLOWED) &&
+ (od->specified & OPERATION_ALLOWED))
+ rc = security_operation_test(od->allowed->perms, num);
+ else if ((specified == OPERATION_AUDITALLOW) &&
+ (od->specified & OPERATION_AUDITALLOW))
+ rc = security_operation_test(od->auditallow->perms, num);
+ else if ((specified == OPERATION_DONTAUDIT) &&
+ (od->specified & OPERATION_DONTAUDIT))
+ rc = security_operation_test(od->dontaudit->perms, num);
+ return rc;
+}
+
+static void avc_operation_allow_perm(struct avc_operation_node *node, u16 cmd)
+{
+ struct operation_decision *od;
+ u8 type;
+ u8 num;
+
+ type = cmd >> 8;
+ num = cmd & 0xff;
+ security_operation_set(node->ops.type, type);
+ od = avc_operation_lookup(type, node);
+ if (od && od->allowed)
+ security_operation_set(od->allowed->perms, num);
+}
+
+static void avc_operation_decision_free(
+ struct avc_operation_decision_node *od_node)
+{
+ struct operation_decision *od;
+
+ od = &od_node->od;
+ if (od->allowed)
+ kmem_cache_free(avc_operation_perm_cachep, od->allowed);
+ if (od->auditallow)
+ kmem_cache_free(avc_operation_perm_cachep, od->auditallow);
+ if (od->dontaudit)
+ kmem_cache_free(avc_operation_perm_cachep, od->dontaudit);
+ kmem_cache_free(avc_operation_decision_node_cachep, od_node);
+}
+
+static void avc_operation_free(struct avc_operation_node *ops_node)
+{
+ struct avc_operation_decision_node *od_node, *tmp;
+
+ if (!ops_node)
+ return;
+
+ list_for_each_entry_safe(od_node, tmp, &ops_node->od_head, od_list) {
+ list_del(&od_node->od_list);
+ avc_operation_decision_free(od_node);
+ }
+ kmem_cache_free(avc_operation_node_cachep, ops_node);
+}
+
+static void avc_copy_operation_decision(struct operation_decision *dest,
+ struct operation_decision *src)
+{
+ dest->type = src->type;
+ dest->specified = src->specified;
+ if (dest->specified & OPERATION_ALLOWED)
+ memcpy(dest->allowed->perms, src->allowed->perms,
+ sizeof(src->allowed->perms));
+ if (dest->specified & OPERATION_AUDITALLOW)
+ memcpy(dest->auditallow->perms, src->auditallow->perms,
+ sizeof(src->auditallow->perms));
+ if (dest->specified & OPERATION_DONTAUDIT)
+ memcpy(dest->dontaudit->perms, src->dontaudit->perms,
+ sizeof(src->dontaudit->perms));
+}
+
+/*
+ * similar to avc_copy_operation_decision, but only copy decision
+ * information relevant to this command
+ */
+static inline void avc_quick_copy_operation_decision(u16 cmd,
+ struct operation_decision *dest,
+ struct operation_decision *src)
+{
+ /*
+ * compute the index of the u32 word, within the 256-bit (8 x u32)
+ * bitmap, that holds this command's permission bit
+ */
+ u8 i = (0xff & cmd) >> 5;
+
+ dest->specified = src->specified;
+ if (dest->specified & OPERATION_ALLOWED)
+ dest->allowed->perms[i] = src->allowed->perms[i];
+ if (dest->specified & OPERATION_AUDITALLOW)
+ dest->auditallow->perms[i] = src->auditallow->perms[i];
+ if (dest->specified & OPERATION_DONTAUDIT)
+ dest->dontaudit->perms[i] = src->dontaudit->perms[i];
+}
+
+static struct avc_operation_decision_node
+ *avc_operation_decision_alloc(u8 specified)
+{
+ struct avc_operation_decision_node *node;
+ struct operation_decision *od;
+
+ node = kmem_cache_zalloc(avc_operation_decision_node_cachep,
+ GFP_ATOMIC | __GFP_NOMEMALLOC);
+ if (!node)
+ return NULL;
+
+ od = &node->od;
+ if (specified & OPERATION_ALLOWED) {
+ od->allowed = kmem_cache_zalloc(avc_operation_perm_cachep,
+ GFP_ATOMIC | __GFP_NOMEMALLOC);
+ if (!od->allowed)
+ goto error;
+ }
+ if (specified & OPERATION_AUDITALLOW) {
+ od->auditallow = kmem_cache_zalloc(avc_operation_perm_cachep,
+ GFP_ATOMIC | __GFP_NOMEMALLOC);
+ if (!od->auditallow)
+ goto error;
+ }
+ if (specified & OPERATION_DONTAUDIT) {
+ od->dontaudit = kmem_cache_zalloc(avc_operation_perm_cachep,
+ GFP_ATOMIC | __GFP_NOMEMALLOC);
+ if (!od->dontaudit)
+ goto error;
+ }
+ return node;
+error:
+ avc_operation_decision_free(node);
+ return NULL;
+}
+
+static int avc_add_operation(struct avc_node *node,
+ struct operation_decision *od)
+{
+ struct avc_operation_decision_node *dest_od;
+
+ node->ae.ops_node->ops.len++;
+ dest_od = avc_operation_decision_alloc(od->specified);
+ if (!dest_od)
+ return -ENOMEM;
+ avc_copy_operation_decision(&dest_od->od, od);
+ list_add(&dest_od->od_list, &node->ae.ops_node->od_head);
+ return 0;
+}
+
+static struct avc_operation_node *avc_operation_alloc(void)
+{
+ struct avc_operation_node *ops;
+
+ ops = kmem_cache_zalloc(avc_operation_node_cachep,
+ GFP_ATOMIC|__GFP_NOMEMALLOC);
+ if (!ops)
+ return ops;
+ INIT_LIST_HEAD(&ops->od_head);
+ return ops;
+}
+
+static int avc_operation_populate(struct avc_node *node,
+ struct avc_operation_node *src)
+{
+ struct avc_operation_node *dest;
+ struct avc_operation_decision_node *dest_od;
+ struct avc_operation_decision_node *src_od;
+
+ if (src->ops.len == 0)
+ return 0;
+ dest = avc_operation_alloc();
+ if (!dest)
+ return -ENOMEM;
+
+ memcpy(dest->ops.type, &src->ops.type, sizeof(dest->ops.type));
+ dest->ops.len = src->ops.len;
+
+ /* for each source od allocate a destination od and copy */
+ list_for_each_entry(src_od, &src->od_head, od_list) {
+ dest_od = avc_operation_decision_alloc(src_od->od.specified);
+ if (!dest_od)
+ goto error;
+ avc_copy_operation_decision(&dest_od->od, &src_od->od);
+ list_add(&dest_od->od_list, &dest->od_head);
+ }
+ node->ae.ops_node = dest;
+ return 0;
+error:
+ avc_operation_free(dest);
+ return -ENOMEM;
+
+}
+
+static inline u32 avc_operation_audit_required(u32 requested,
+ struct av_decision *avd,
+ struct operation_decision *od,
+ u16 cmd,
+ int result,
+ u32 *deniedp)
+{
+ u32 denied, audited;
+
+ denied = requested & ~avd->allowed;
+ if (unlikely(denied)) {
+ audited = denied & avd->auditdeny;
+ if (audited && od) {
+ if (avc_operation_has_perm(od, cmd,
+ OPERATION_DONTAUDIT))
+ audited &= ~requested;
+ }
+ } else if (result) {
+ audited = denied = requested;
+ } else {
+ audited = requested & avd->auditallow;
+ if (audited && od) {
+ if (!avc_operation_has_perm(od, cmd,
+ OPERATION_AUDITALLOW))
+ audited &= ~requested;
+ }
+ }
+
+ *deniedp = denied;
+ return audited;
+}
+
static void avc_node_free(struct rcu_head *rhead)
{
struct avc_node *node = container_of(rhead, struct avc_node, rhead);
+ avc_operation_free(node->ae.ops_node);
kmem_cache_free(avc_node_cachep, node);
avc_cache_stats_incr(frees);
}
@@ -229,6 +500,7 @@ static void avc_node_delete(struct avc_node *node)
static void avc_node_kill(struct avc_node *node)
{
+ avc_operation_free(node->ae.ops_node);
kmem_cache_free(avc_node_cachep, node);
avc_cache_stats_incr(frees);
atomic_dec(&avc_cache.active_nodes);
@@ -377,6 +649,7 @@ static int avc_latest_notif_update(int seqno, int is_insert)
* @tsid: target security identifier
* @tclass: target security class
* @avd: resulting av decision
+ * @ops: resulting operation decisions
*
* Insert an AVC entry for the SID pair
* (@ssid, @tsid) and class @tclass.
@@ -388,7 +661,9 @@ static int avc_latest_notif_update(int seqno, int is_insert)
* the access vectors into a cache entry, returns
* avc_node inserted. Otherwise, this function returns NULL.
*/
-static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass, struct av_decision *avd)
+static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass,
+ struct av_decision *avd,
+ struct avc_operation_node *ops_node)
{
struct avc_node *pos, *node = NULL;
int hvalue;
@@ -402,10 +677,15 @@ static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass, struct av_dec
struct hlist_head *head;
struct hlist_node *next;
spinlock_t *lock;
+ int rc = 0;
hvalue = avc_hash(ssid, tsid, tclass);
avc_node_populate(node, ssid, tsid, tclass, avd);
-
+ rc = avc_operation_populate(node, ops_node);
+ if (rc) {
+ kmem_cache_free(avc_node_cachep, node);
+ return NULL;
+ }
head = &avc_cache.slots[hvalue];
lock = &avc_cache.slots_lock[hvalue];
@@ -501,6 +781,21 @@ static noinline int slow_avc_audit(u32 ssid, u32 tsid, u16 tclass,
return 0;
}
+static inline int avc_operation_audit(u32 ssid, u32 tsid, u16 tclass,
+ u32 requested, struct av_decision *avd,
+ struct operation_decision *od,
+ u16 cmd, int result,
+ struct common_audit_data *ad)
+{
+ u32 audited, denied;
+ audited = avc_operation_audit_required(
+ requested, avd, od, cmd, result, &denied);
+ if (likely(!audited))
+ return 0;
+ return slow_avc_audit(ssid, tsid, tclass, requested,
+ audited, denied, result, ad, 0);
+}
+
/**
* avc_audit - Audit the granting or denial of permissions.
* @ssid: source security identifier
@@ -614,14 +909,17 @@ static inline int avc_sidcmp(u32 x, u32 y)
* @perms : Permission mask bits
* @ssid,@tsid,@tclass : identifier of an AVC entry
* @seqno : sequence number when decision was made
+ * @od: operation_decision to be added to the node
*
* if a valid AVC entry doesn't exist,this function returns -ENOENT.
* if kmalloc() called internal returns NULL, this function returns -ENOMEM.
* otherwise, this function updates the AVC entry. The original AVC-entry object
* will release later by RCU.
*/
-static int avc_update_node(u32 event, u32 perms, u32 ssid, u32 tsid, u16 tclass,
- u32 seqno)
+static int avc_update_node(u32 event, u32 perms, u16 cmd, u32 ssid, u32 tsid,
+ u16 tclass, u32 seqno,
+ struct operation_decision *od,
+ u32 flags)
{
int hvalue, rc = 0;
unsigned long flag;
@@ -666,9 +964,19 @@ static int avc_update_node(u32 event, u32 perms, u32 ssid, u32 tsid, u16 tclass,
avc_node_populate(node, ssid, tsid, tclass, &orig->ae.avd);
+ if (orig->ae.ops_node) {
+ rc = avc_operation_populate(node, orig->ae.ops_node);
+ if (rc) {
+ kmem_cache_free(avc_node_cachep, node);
+ goto out_unlock;
+ }
+ }
+
switch (event) {
case AVC_CALLBACK_GRANT:
node->ae.avd.allowed |= perms;
+ if (node->ae.ops_node && (flags & AVC_OPERATION_CMD))
+ avc_operation_allow_perm(node->ae.ops_node, cmd);
break;
case AVC_CALLBACK_TRY_REVOKE:
case AVC_CALLBACK_REVOKE:
@@ -686,6 +994,9 @@ static int avc_update_node(u32 event, u32 perms, u32 ssid, u32 tsid, u16 tclass,
case AVC_CALLBACK_AUDITDENY_DISABLE:
node->ae.avd.auditdeny &= ~perms;
break;
+ case AVC_CALLBACK_ADD_OPERATION:
+ avc_add_operation(node, od);
+ break;
}
avc_node_replace(node, orig);
out_unlock:
@@ -759,18 +1070,20 @@ int avc_ss_reset(u32 seqno)
* results in a bigger stack frame.
*/
static noinline struct avc_node *avc_compute_av(u32 ssid, u32 tsid,
- u16 tclass, struct av_decision *avd)
+ u16 tclass, struct av_decision *avd,
+ struct avc_operation_node *ops_node)
{
rcu_read_unlock();
- security_compute_av(ssid, tsid, tclass, avd);
+ INIT_LIST_HEAD(&ops_node->od_head);
+ security_compute_av(ssid, tsid, tclass, avd, &ops_node->ops);
rcu_read_lock();
- return avc_insert(ssid, tsid, tclass, avd);
+ return avc_insert(ssid, tsid, tclass, avd, ops_node);
}
static noinline int avc_denied(u32 ssid, u32 tsid,
- u16 tclass, u32 requested,
- unsigned flags,
- struct av_decision *avd)
+ u16 tclass, u32 requested,
+ u16 cmd, unsigned flags,
+ struct av_decision *avd)
{
if (flags & AVC_STRICT)
return -EACCES;
@@ -778,11 +1091,92 @@ static noinline int avc_denied(u32 ssid, u32 tsid,
if (selinux_enforcing && !(avd->flags & AVD_FLAGS_PERMISSIVE))
return -EACCES;
- avc_update_node(AVC_CALLBACK_GRANT, requested, ssid,
- tsid, tclass, avd->seqno);
+ avc_update_node(AVC_CALLBACK_GRANT, requested, cmd, ssid,
+ tsid, tclass, avd->seqno, NULL, flags);
return 0;
}
+/*
+ * An ioctl command encodes four fields: direction, size, type, and
+ * number. The avc operation logic filters on two of them:
+ *
+ * type: also called the code, typically unique to each driver
+ * number: also called the function
+ *
+ * For example, type 0x89 covers socket ioctls, and number 0x27 is the
+ * get-hardware-address function.
+ */
+int avc_has_operation(u32 ssid, u32 tsid, u16 tclass, u32 requested,
+ u16 cmd, struct common_audit_data *ad)
+{
+ struct avc_node *node;
+ struct av_decision avd;
+ u32 denied;
+ struct operation_decision *od = NULL;
+ struct operation_decision od_local;
+ struct operation_perm allowed;
+ struct operation_perm auditallow;
+ struct operation_perm dontaudit;
+ struct avc_operation_node local_ops_node;
+ struct avc_operation_node *ops_node;
+ u8 type = cmd >> 8;
+ int rc = 0, rc2;
+
+ ops_node = &local_ops_node;
+ BUG_ON(!requested);
+
+ rcu_read_lock();
+
+ node = avc_lookup(ssid, tsid, tclass);
+ if (unlikely(!node)) {
+ node = avc_compute_av(ssid, tsid, tclass, &avd, ops_node);
+ } else {
+ memcpy(&avd, &node->ae.avd, sizeof(avd));
+ ops_node = node->ae.ops_node;
+ }
+ /* if operations are not defined, only consider av_decision */
+ if (!ops_node || !ops_node->ops.len)
+ goto decision;
+
+ od_local.allowed = &allowed;
+ od_local.auditallow = &auditallow;
+ od_local.dontaudit = &dontaudit;
+
+ /* lookup operation decision */
+ od = avc_operation_lookup(type, ops_node);
+ if (unlikely(!od)) {
+ /* Compute operation decision if type is flagged */
+ if (!security_operation_test(ops_node->ops.type, type)) {
+ avd.allowed &= ~requested;
+ goto decision;
+ }
+ rcu_read_unlock();
+ security_compute_operation(ssid, tsid, tclass, type, &od_local);
+ rcu_read_lock();
+ avc_update_node(AVC_CALLBACK_ADD_OPERATION, requested, cmd,
+ ssid, tsid, tclass, avd.seqno, &od_local, 0);
+ } else {
+ avc_quick_copy_operation_decision(cmd, &od_local, od);
+ }
+ od = &od_local;
+
+ if (!avc_operation_has_perm(od, cmd, OPERATION_ALLOWED))
+ avd.allowed &= ~requested;
+
+decision:
+ denied = requested & ~(avd.allowed);
+ if (unlikely(denied))
+ rc = avc_denied(ssid, tsid, tclass, requested, cmd,
+ AVC_OPERATION_CMD, &avd);
+
+ rcu_read_unlock();
+
+ rc2 = avc_operation_audit(ssid, tsid, tclass, requested,
+ &avd, od, cmd, rc, ad);
+ if (rc2)
+ return rc2;
+ return rc;
+}
/**
* avc_has_perm_noaudit - Check permissions but perform no auditing.
@@ -810,6 +1204,7 @@ inline int avc_has_perm_noaudit(u32 ssid, u32 tsid,
struct av_decision *avd)
{
struct avc_node *node;
+ struct avc_operation_node ops_node;
int rc = 0;
u32 denied;
@@ -818,16 +1213,14 @@ inline int avc_has_perm_noaudit(u32 ssid, u32 tsid,
rcu_read_lock();
node = avc_lookup(ssid, tsid, tclass);
- if (unlikely(!node)) {
- node = avc_compute_av(ssid, tsid, tclass, avd);
- } else {
+ if (unlikely(!node))
+ node = avc_compute_av(ssid, tsid, tclass, avd, &ops_node);
+ else
memcpy(avd, &node->ae.avd, sizeof(*avd));
- avd = &node->ae.avd;
- }
denied = requested & ~(avd->allowed);
if (unlikely(denied))
- rc = avc_denied(ssid, tsid, tclass, requested, flags, avd);
+ rc = avc_denied(ssid, tsid, tclass, requested, 0, flags, avd);
rcu_read_unlock();
return rc;
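
avc_quick_copy_operation_decision() above copies only the single u32 word of each 256-bit permission bitmap that can hold the bit for the command being checked. A standalone sketch of that index arithmetic for the example command from the comment (type 0x89, number 0x27, i.e. SIOCGIFHWADDR); the program is illustrative, not part of the patch:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t cmd  = 0x8927;         /* SIOCGIFHWADDR: type 0x89, number 0x27 */
	uint8_t type  = cmd >> 8;       /* selects the operation_decision node   */
	uint8_t num   = cmd & 0xff;     /* selects a bit in a 256-bit bitmap     */
	uint8_t word  = num >> 5;       /* which of the 8 u32 words holds it     */
	uint8_t bit   = num & 0x1f;     /* which bit inside that word            */

	/* prints: type=0x89 num=0x27 -> perms[1] bit 7 */
	printf("type=0x%02x num=0x%02x -> perms[%u] bit %u\n",
	       type, num, word, bit);
	return 0;
}
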
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index c523f5bf289..676c12288c6 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -417,15 +417,11 @@ static int sb_finish_set_opts(struct super_block *sb)
sbsec->behavior > ARRAY_SIZE(labeling_behaviors))
sbsec->flags &= ~SE_SBLABELSUPP;
- /* Special handling for sysfs. Is genfs but also has setxattr handler*/
- if (strncmp(sb->s_type->name, "sysfs", sizeof("sysfs")) == 0)
- sbsec->flags |= SE_SBLABELSUPP;
-
- /*
- * Special handling for rootfs. Is genfs but supports
- * setting SELinux context on in-core inodes.
- */
- if (strncmp(sb->s_type->name, "rootfs", sizeof("rootfs")) == 0)
+ /* Special handling. Is genfs but also has in-core setxattr handler*/
+ if (!strcmp(sb->s_type->name, "sysfs") ||
+ !strcmp(sb->s_type->name, "pstore") ||
+ !strcmp(sb->s_type->name, "debugfs") ||
+ !strcmp(sb->s_type->name, "rootfs"))
sbsec->flags |= SE_SBLABELSUPP;
/* Initialize the root inode. */
@@ -2631,9 +2627,9 @@ static int selinux_sb_statfs(struct dentry *dentry)
return superblock_has_perm(cred, dentry->d_sb, FILESYSTEM__GETATTR, &ad);
}
-static int selinux_mount(char *dev_name,
+static int selinux_mount(const char *dev_name,
struct path *path,
- char *type,
+ const char *type,
unsigned long flags,
void *data)
{
@@ -3095,6 +3091,46 @@ static void selinux_file_free_security(struct file *file)
file_free_security(file);
}
+/*
+ * Check whether a task has both the ioctl permission and the
+ * specific cmd operation on an inode.
+ */
+int ioctl_has_perm(const struct cred *cred, struct file *file,
+ u32 requested, u16 cmd)
+{
+ struct common_audit_data ad;
+ struct file_security_struct *fsec = file->f_security;
+ struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode_security_struct *isec = inode->i_security;
+ struct lsm_ioctlop_audit ioctl;
+ u32 ssid = cred_sid(cred);
+ struct selinux_audit_data sad = {0,};
+ int rc;
+
+ COMMON_AUDIT_DATA_INIT(&ad, IOCTL_OP);
+ ad.u.op = &ioctl;
+ ad.u.op->cmd = cmd;
+ ad.u.op->path = file->f_path;
+ ad.selinux_audit_data = &sad;
+
+ if (ssid != fsec->sid) {
+ rc = avc_has_perm(ssid, fsec->sid,
+ SECCLASS_FD,
+ FD__USE,
+ &ad);
+ if (rc)
+ goto out;
+ }
+
+ if (unlikely(IS_PRIVATE(inode)))
+ return 0;
+
+ rc = avc_has_operation(ssid, isec->sid, isec->sclass,
+ requested, cmd, &ad);
+out:
+ return rc;
+}
+
static int selinux_file_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
@@ -3137,7 +3173,7 @@ static int selinux_file_ioctl(struct file *file, unsigned int cmd,
* to the file's ioctl() function.
*/
default:
- error = file_has_perm(cred, file, FILE__IOCTL);
+ error = ioctl_has_perm(cred, file, FILE__IOCTL, (u16) cmd);
}
return error;
}
diff --git a/security/selinux/include/avc.h b/security/selinux/include/avc.h
index 1931370233d..63c3105d55f 100644
--- a/security/selinux/include/avc.h
+++ b/security/selinux/include/avc.h
@@ -84,11 +84,15 @@ int avc_audit(u32 ssid, u32 tsid,
struct common_audit_data *a, unsigned flags);
#define AVC_STRICT 1 /* Ignore permissive mode. */
+#define AVC_OPERATION_CMD 2 /* ignore command when updating operations */
int avc_has_perm_noaudit(u32 ssid, u32 tsid,
u16 tclass, u32 requested,
unsigned flags,
struct av_decision *avd);
+int avc_has_operation(u32 ssid, u32 tsid, u16 tclass, u32 requested,
+ u16 cmd, struct common_audit_data *ad);
+
int avc_has_perm_flags(u32 ssid, u32 tsid,
u16 tclass, u32 requested,
struct common_audit_data *auditdata,
@@ -111,6 +115,7 @@ u32 avc_policy_seqno(void);
#define AVC_CALLBACK_AUDITALLOW_DISABLE 32
#define AVC_CALLBACK_AUDITDENY_ENABLE 64
#define AVC_CALLBACK_AUDITDENY_DISABLE 128
+#define AVC_CALLBACK_ADD_OPERATION 256
int avc_add_callback(int (*callback)(u32 event, u32 ssid, u32 tsid,
u16 tclass, u32 perms,
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
index d871e8ad210..ff1a188053b 100644
--- a/security/selinux/include/security.h
+++ b/security/selinux/include/security.h
@@ -31,13 +31,17 @@
#define POLICYDB_VERSION_BOUNDARY 24
#define POLICYDB_VERSION_FILENAME_TRANS 25
#define POLICYDB_VERSION_ROLETRANS 26
+#define POLICYDB_VERSION_NEW_OBJECT_DEFAULTS 27
+#define POLICYDB_VERSION_DEFAULT_TYPE 28
+#define POLICYDB_VERSION_CONSTRAINT_NAMES 29
+#define POLICYDB_VERSION_IOCTL_OPERATIONS 30
/* Range of policy versions we understand*/
#define POLICYDB_VERSION_MIN POLICYDB_VERSION_BASE
#ifdef CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX
#define POLICYDB_VERSION_MAX CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX_VALUE
#else
-#define POLICYDB_VERSION_MAX POLICYDB_VERSION_ROLETRANS
+#define POLICYDB_VERSION_MAX POLICYDB_VERSION_IOCTL_OPERATIONS
#endif
/* Mask for just the mount related flags */
@@ -100,11 +104,40 @@ struct av_decision {
u32 flags;
};
+#define security_operation_set(perms, x) (perms[x >> 5] |= 1 << (x & 0x1f))
+#define security_operation_test(perms, x) (1 & (perms[x >> 5] >> (x & 0x1f)))
+
+struct operation_perm {
+ u32 perms[8];
+};
+
+struct operation_decision {
+ u8 type;
+ u8 specified;
+ struct operation_perm *allowed;
+ struct operation_perm *auditallow;
+ struct operation_perm *dontaudit;
+};
+
+#define OPERATION_ALLOWED 1
+#define OPERATION_AUDITALLOW 2
+#define OPERATION_DONTAUDIT 4
+#define OPERATION_ALL (OPERATION_ALLOWED | OPERATION_AUDITALLOW |\
+ OPERATION_DONTAUDIT)
+struct operation {
+ u16 len; /* length of operation decision chain */
+ u32 type[8]; /* 256 types */
+};
+
/* definitions of av_decision.flags */
#define AVD_FLAGS_PERMISSIVE 0x0001
void security_compute_av(u32 ssid, u32 tsid,
- u16 tclass, struct av_decision *avd);
+ u16 tclass, struct av_decision *avd,
+ struct operation *ops);
+
+void security_compute_operation(u32 ssid, u32 tsid, u16 tclass,
+ u8 type, struct operation_decision *od);
void security_compute_av_user(u32 ssid, u32 tsid,
u16 tclass, struct av_decision *avd);
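
struct operation and struct operation_perm above are plain 256-bit bitmaps: type[8] marks which ioctl types carry per-number decisions at all, and perms[8] marks individual command numbers within one type, via the security_operation_set/test macros. A self-contained userspace mirror of those macros, as a sketch of how the two levels combine (the names below are local to the example):

#include <stdio.h>
#include <stdint.h>

/* mirrors security_operation_set() / security_operation_test() */
#define op_set(perms, x)  ((perms)[(x) >> 5] |= 1u << ((x) & 0x1f))
#define op_test(perms, x) (1u & ((perms)[(x) >> 5] >> ((x) & 0x1f)))

int main(void)
{
	uint32_t type[8]  = {0};        /* struct operation .type: 256 ioctl types */
	uint32_t perms[8] = {0};        /* struct operation_perm: 256 numbers      */

	op_set(type, 0x89);             /* per-number rules exist for type 0x89    */
	op_set(perms, 0x27);            /* ...and number 0x27 is allowed within it */

	printf("type 0x89 flagged: %u\n", op_test(type, 0x89));   /* 1 */
	printf("num 0x27 allowed:  %u\n", op_test(perms, 0x27));  /* 1 */
	printf("num 0x28 allowed:  %u\n", op_test(perms, 0x28));  /* 0 */
	return 0;
}
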
diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c
index 0920ea3bf59..feb6262c60a 100644
--- a/security/selinux/nlmsgtab.c
+++ b/security/selinux/nlmsgtab.c
@@ -18,6 +18,7 @@
#include <linux/inet_diag.h>
#include <linux/xfrm.h>
#include <linux/audit.h>
+#include <linux/sock_diag.h>
#include "flask.h"
#include "av_permissions.h"
@@ -80,6 +81,7 @@ static struct nlmsg_perm nlmsg_tcpdiag_perms[] =
{
{ TCPDIAG_GETSOCK, NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
{ DCCPDIAG_GETSOCK, NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
+ { SOCK_DIAG_BY_FAMILY, NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
};
static struct nlmsg_perm nlmsg_xfrm_perms[] =
@@ -100,6 +102,13 @@ static struct nlmsg_perm nlmsg_xfrm_perms[] =
{ XFRM_MSG_FLUSHPOLICY, NETLINK_XFRM_SOCKET__NLMSG_WRITE },
{ XFRM_MSG_NEWAE, NETLINK_XFRM_SOCKET__NLMSG_WRITE },
{ XFRM_MSG_GETAE, NETLINK_XFRM_SOCKET__NLMSG_READ },
+ { XFRM_MSG_REPORT, NETLINK_XFRM_SOCKET__NLMSG_READ },
+ { XFRM_MSG_MIGRATE, NETLINK_XFRM_SOCKET__NLMSG_WRITE },
+ { XFRM_MSG_NEWSADINFO, NETLINK_XFRM_SOCKET__NLMSG_READ },
+ { XFRM_MSG_GETSADINFO, NETLINK_XFRM_SOCKET__NLMSG_READ },
+ { XFRM_MSG_NEWSPDINFO, NETLINK_XFRM_SOCKET__NLMSG_WRITE },
+ { XFRM_MSG_GETSPDINFO, NETLINK_XFRM_SOCKET__NLMSG_READ },
+ { XFRM_MSG_MAPPING, NETLINK_XFRM_SOCKET__NLMSG_READ },
};
static struct nlmsg_perm nlmsg_audit_perms[] =
diff --git a/security/selinux/ss/avtab.c b/security/selinux/ss/avtab.c
index a3dd9faa19c..dd7466cb202 100644
--- a/security/selinux/ss/avtab.c
+++ b/security/selinux/ss/avtab.c
@@ -24,6 +24,7 @@
#include "policydb.h"
static struct kmem_cache *avtab_node_cachep;
+static struct kmem_cache *avtab_operation_cachep;
static inline int avtab_hash(struct avtab_key *keyp, u16 mask)
{
@@ -37,11 +38,24 @@ avtab_insert_node(struct avtab *h, int hvalue,
struct avtab_key *key, struct avtab_datum *datum)
{
struct avtab_node *newnode;
+ struct avtab_operation *ops;
newnode = kmem_cache_zalloc(avtab_node_cachep, GFP_KERNEL);
if (newnode == NULL)
return NULL;
newnode->key = *key;
- newnode->datum = *datum;
+
+ if (key->specified & AVTAB_OP) {
+ ops = kmem_cache_zalloc(avtab_operation_cachep, GFP_KERNEL);
+ if (ops == NULL) {
+ kmem_cache_free(avtab_node_cachep, newnode);
+ return NULL;
+ }
+ *ops = *(datum->u.ops);
+ newnode->datum.u.ops = ops;
+ } else {
+ newnode->datum.u.data = datum->u.data;
+ }
+
if (prev) {
newnode->next = prev->next;
prev->next = newnode;
@@ -70,8 +84,11 @@ static int avtab_insert(struct avtab *h, struct avtab_key *key, struct avtab_dat
if (key->source_type == cur->key.source_type &&
key->target_type == cur->key.target_type &&
key->target_class == cur->key.target_class &&
- (specified & cur->key.specified))
+ (specified & cur->key.specified)) {
+ if (specified & AVTAB_OPNUM)
+ break;
return -EEXIST;
+ }
if (key->source_type < cur->key.source_type)
break;
if (key->source_type == cur->key.source_type &&
@@ -232,6 +249,9 @@ void avtab_destroy(struct avtab *h)
while (cur) {
temp = cur;
cur = cur->next;
+ if (temp->key.specified & AVTAB_OP)
+ kmem_cache_free(avtab_operation_cachep,
+ temp->datum.u.ops);
kmem_cache_free(avtab_node_cachep, temp);
}
h->htable[i] = NULL;
@@ -320,7 +340,13 @@ static uint16_t spec_order[] = {
AVTAB_AUDITALLOW,
AVTAB_TRANSITION,
AVTAB_CHANGE,
- AVTAB_MEMBER
+ AVTAB_MEMBER,
+ AVTAB_OPNUM_ALLOWED,
+ AVTAB_OPNUM_AUDITALLOW,
+ AVTAB_OPNUM_DONTAUDIT,
+ AVTAB_OPTYPE_ALLOWED,
+ AVTAB_OPTYPE_AUDITALLOW,
+ AVTAB_OPTYPE_DONTAUDIT
};
int avtab_read_item(struct avtab *a, void *fp, struct policydb *pol,
@@ -330,10 +356,11 @@ int avtab_read_item(struct avtab *a, void *fp, struct policydb *pol,
{
__le16 buf16[4];
u16 enabled;
- __le32 buf32[7];
u32 items, items2, val, vers = pol->policyvers;
struct avtab_key key;
struct avtab_datum datum;
+ struct avtab_operation ops;
+ __le32 buf32[ARRAY_SIZE(ops.op.perms)];
int i, rc;
unsigned set;
@@ -390,11 +417,15 @@ int avtab_read_item(struct avtab *a, void *fp, struct policydb *pol,
printk(KERN_ERR "SELinux: avtab: entry has both access vectors and types\n");
return -EINVAL;
}
+ if (val & AVTAB_OP) {
+ printk(KERN_ERR "SELinux: avtab: entry has operations\n");
+ return -EINVAL;
+ }
for (i = 0; i < ARRAY_SIZE(spec_order); i++) {
if (val & spec_order[i]) {
key.specified = spec_order[i] | enabled;
- datum.data = le32_to_cpu(buf32[items++]);
+ datum.u.data = le32_to_cpu(buf32[items++]);
rc = insertf(a, &key, &datum, p);
if (rc)
return rc;
@@ -413,7 +444,6 @@ int avtab_read_item(struct avtab *a, void *fp, struct policydb *pol,
printk(KERN_ERR "SELinux: avtab: truncated entry\n");
return rc;
}
-
items = 0;
key.source_type = le16_to_cpu(buf16[items++]);
key.target_type = le16_to_cpu(buf16[items++]);
@@ -437,14 +467,32 @@ int avtab_read_item(struct avtab *a, void *fp, struct policydb *pol,
return -EINVAL;
}
- rc = next_entry(buf32, fp, sizeof(u32));
- if (rc) {
- printk(KERN_ERR "SELinux: avtab: truncated entry\n");
- return rc;
+ if ((vers < POLICYDB_VERSION_IOCTL_OPERATIONS)
+ || !(key.specified & AVTAB_OP)) {
+ rc = next_entry(buf32, fp, sizeof(u32));
+ if (rc) {
+ printk(KERN_ERR "SELinux: avtab: truncated entry\n");
+ return rc;
+ }
+ datum.u.data = le32_to_cpu(*buf32);
+ } else {
+ memset(&ops, 0, sizeof(struct avtab_operation));
+ rc = next_entry(&ops.type, fp, sizeof(u8));
+ if (rc) {
+ printk(KERN_ERR "SELinux: avtab: truncated entry\n");
+ return rc;
+ }
+ rc = next_entry(buf32, fp, sizeof(u32)*ARRAY_SIZE(ops.op.perms));
+ if (rc) {
+ printk(KERN_ERR "SELinux: avtab: truncated entry\n");
+ return rc;
+ }
+ for (i = 0; i < ARRAY_SIZE(ops.op.perms); i++)
+ ops.op.perms[i] = le32_to_cpu(buf32[i]);
+ datum.u.ops = &ops;
}
- datum.data = le32_to_cpu(*buf32);
if ((key.specified & AVTAB_TYPE) &&
- !policydb_type_isvalid(pol, datum.data)) {
+ !policydb_type_isvalid(pol, datum.u.data)) {
printk(KERN_ERR "SELinux: avtab: invalid type\n");
return -EINVAL;
}
@@ -504,8 +552,9 @@ bad:
int avtab_write_item(struct policydb *p, struct avtab_node *cur, void *fp)
{
__le16 buf16[4];
- __le32 buf32[1];
+ __le32 buf32[ARRAY_SIZE(cur->datum.u.ops->op.perms)];
int rc;
+ unsigned int i;
buf16[0] = cpu_to_le16(cur->key.source_type);
buf16[1] = cpu_to_le16(cur->key.target_type);
@@ -514,8 +563,19 @@ int avtab_write_item(struct policydb *p, struct avtab_node *cur, void *fp)
rc = put_entry(buf16, sizeof(u16), 4, fp);
if (rc)
return rc;
- buf32[0] = cpu_to_le32(cur->datum.data);
- rc = put_entry(buf32, sizeof(u32), 1, fp);
+
+ if (cur->key.specified & AVTAB_OP) {
+ rc = put_entry(&cur->datum.u.ops->type, sizeof(u8), 1, fp);
+ if (rc)
+ return rc;
+ for (i = 0; i < ARRAY_SIZE(cur->datum.u.ops->op.perms); i++)
+ buf32[i] = cpu_to_le32(cur->datum.u.ops->op.perms[i]);
+ rc = put_entry(buf32, sizeof(u32),
+ ARRAY_SIZE(cur->datum.u.ops->op.perms), fp);
+ } else {
+ buf32[0] = cpu_to_le32(cur->datum.u.data);
+ rc = put_entry(buf32, sizeof(u32), 1, fp);
+ }
if (rc)
return rc;
return 0;
@@ -548,9 +608,13 @@ void avtab_cache_init(void)
avtab_node_cachep = kmem_cache_create("avtab_node",
sizeof(struct avtab_node),
0, SLAB_PANIC, NULL);
+ avtab_operation_cachep = kmem_cache_create("avtab_operation",
+ sizeof(struct avtab_operation),
+ 0, SLAB_PANIC, NULL);
}
void avtab_cache_destroy(void)
{
kmem_cache_destroy(avtab_node_cachep);
+ kmem_cache_destroy(avtab_operation_cachep);
}
diff --git a/security/selinux/ss/avtab.h b/security/selinux/ss/avtab.h
index 63ce2f9e441..97acd6fa705 100644
--- a/security/selinux/ss/avtab.h
+++ b/security/selinux/ss/avtab.h
@@ -23,6 +23,8 @@
#ifndef _SS_AVTAB_H_
#define _SS_AVTAB_H_
+#include "security.h"
+
struct avtab_key {
u16 source_type; /* source type */
u16 target_type; /* target type */
@@ -35,13 +37,34 @@ struct avtab_key {
#define AVTAB_MEMBER 0x0020
#define AVTAB_CHANGE 0x0040
#define AVTAB_TYPE (AVTAB_TRANSITION | AVTAB_MEMBER | AVTAB_CHANGE)
+#define AVTAB_OPNUM_ALLOWED 0x0100
+#define AVTAB_OPNUM_AUDITALLOW 0x0200
+#define AVTAB_OPNUM_DONTAUDIT 0x0400
+#define AVTAB_OPNUM (AVTAB_OPNUM_ALLOWED | \
+ AVTAB_OPNUM_AUDITALLOW | \
+ AVTAB_OPNUM_DONTAUDIT)
+#define AVTAB_OPTYPE_ALLOWED 0x1000
+#define AVTAB_OPTYPE_AUDITALLOW 0x2000
+#define AVTAB_OPTYPE_DONTAUDIT 0x4000
+#define AVTAB_OPTYPE (AVTAB_OPTYPE_ALLOWED | \
+ AVTAB_OPTYPE_AUDITALLOW | \
+ AVTAB_OPTYPE_DONTAUDIT)
+#define AVTAB_OP (AVTAB_OPNUM | AVTAB_OPTYPE)
#define AVTAB_ENABLED_OLD 0x80000000 /* reserved for used in cond_avtab */
#define AVTAB_ENABLED 0x8000 /* reserved for used in cond_avtab */
u16 specified; /* what field is specified */
};
+struct avtab_operation {
+ u8 type;
+ struct operation_perm op;
+};
+
struct avtab_datum {
- u32 data; /* access vector or type value */
+ union {
+ u32 data; /* access vector or type value */
+ struct avtab_operation *ops; /* ioctl operations */
+ } u;
};
struct avtab_node {
diff --git a/security/selinux/ss/conditional.c b/security/selinux/ss/conditional.c
index 377d148e715..16651c7a154 100644
--- a/security/selinux/ss/conditional.c
+++ b/security/selinux/ss/conditional.c
@@ -15,6 +15,7 @@
#include "security.h"
#include "conditional.h"
+#include "services.h"
/*
* cond_evaluate_expr evaluates a conditional expr
@@ -617,21 +618,39 @@ int cond_write_list(struct policydb *p, struct cond_node *list, void *fp)
return 0;
}
+
+void cond_compute_operation(struct avtab *ctab, struct avtab_key *key,
+ struct operation_decision *od)
+{
+ struct avtab_node *node;
+
+ if (!ctab || !key || !od)
+ return;
+
+ for (node = avtab_search_node(ctab, key); node;
+ node = avtab_search_node_next(node, key->specified)) {
+ if (node->key.specified & AVTAB_ENABLED)
+ services_compute_operation_num(od, node);
+ }
+ return;
+
+}
/* Determine whether additional permissions are granted by the conditional
* av table, and if so, add them to the result
*/
-void cond_compute_av(struct avtab *ctab, struct avtab_key *key, struct av_decision *avd)
+void cond_compute_av(struct avtab *ctab, struct avtab_key *key,
+ struct av_decision *avd, struct operation *ops)
{
struct avtab_node *node;
- if (!ctab || !key || !avd)
+ if (!ctab || !key || !avd || !ops)
return;
for (node = avtab_search_node(ctab, key); node;
node = avtab_search_node_next(node, key->specified)) {
if ((u16)(AVTAB_ALLOWED|AVTAB_ENABLED) ==
(node->key.specified & (AVTAB_ALLOWED|AVTAB_ENABLED)))
- avd->allowed |= node->datum.data;
+ avd->allowed |= node->datum.u.data;
if ((u16)(AVTAB_AUDITDENY|AVTAB_ENABLED) ==
(node->key.specified & (AVTAB_AUDITDENY|AVTAB_ENABLED)))
/* Since a '0' in an auditdeny mask represents a
@@ -639,10 +658,13 @@ void cond_compute_av(struct avtab *ctab, struct avtab_key *key, struct av_decisi
* the '&' operand to ensure that all '0's in the mask
* are retained (much unlike the allow and auditallow cases).
*/
- avd->auditdeny &= node->datum.data;
+ avd->auditdeny &= node->datum.u.data;
if ((u16)(AVTAB_AUDITALLOW|AVTAB_ENABLED) ==
(node->key.specified & (AVTAB_AUDITALLOW|AVTAB_ENABLED)))
- avd->auditallow |= node->datum.data;
+ avd->auditallow |= node->datum.u.data;
+ if ((node->key.specified & AVTAB_ENABLED) &&
+ (node->key.specified & AVTAB_OP))
+ services_compute_operation_type(ops, node);
}
return;
}
diff --git a/security/selinux/ss/conditional.h b/security/selinux/ss/conditional.h
index 4d1f8746650..80ee2bb20ee 100644
--- a/security/selinux/ss/conditional.h
+++ b/security/selinux/ss/conditional.h
@@ -73,8 +73,10 @@ int cond_read_list(struct policydb *p, void *fp);
int cond_write_bool(void *key, void *datum, void *ptr);
int cond_write_list(struct policydb *p, struct cond_node *list, void *fp);
-void cond_compute_av(struct avtab *ctab, struct avtab_key *key, struct av_decision *avd);
-
+void cond_compute_av(struct avtab *ctab, struct avtab_key *key,
+ struct av_decision *avd, struct operation *ops);
+void cond_compute_operation(struct avtab *ctab, struct avtab_key *key,
+ struct operation_decision *od);
int evaluate_cond_node(struct policydb *p, struct cond_node *node);
#endif /* _CONDITIONAL_H_ */
diff --git a/security/selinux/ss/constraint.h b/security/selinux/ss/constraint.h
index 149dda731fd..96fd947c494 100644
--- a/security/selinux/ss/constraint.h
+++ b/security/selinux/ss/constraint.h
@@ -48,6 +48,7 @@ struct constraint_expr {
u32 op; /* operator */
struct ebitmap names; /* names */
+ struct type_set *type_names;
struct constraint_expr *next; /* next expression */
};
diff --git a/security/selinux/ss/context.h b/security/selinux/ss/context.h
index 45e8fb0515f..212e3479a0d 100644
--- a/security/selinux/ss/context.h
+++ b/security/selinux/ss/context.h
@@ -74,6 +74,26 @@ out:
return rc;
}
+/*
+ * Sets both levels in the MLS range of 'dst' to the high level of 'src'.
+ */
+static inline int mls_context_cpy_high(struct context *dst, struct context *src)
+{
+ int rc;
+
+ dst->range.level[0].sens = src->range.level[1].sens;
+ rc = ebitmap_cpy(&dst->range.level[0].cat, &src->range.level[1].cat);
+ if (rc)
+ goto out;
+
+ dst->range.level[1].sens = src->range.level[1].sens;
+ rc = ebitmap_cpy(&dst->range.level[1].cat, &src->range.level[1].cat);
+ if (rc)
+ ebitmap_destroy(&dst->range.level[0].cat);
+out:
+ return rc;
+}
+
static inline int mls_context_cmp(struct context *c1, struct context *c2)
{
return ((c1->range.level[0].sens == c2->range.level[0].sens) &&
diff --git a/security/selinux/ss/mls.c b/security/selinux/ss/mls.c
index fbf9c5816c7..40de8d3f208 100644
--- a/security/selinux/ss/mls.c
+++ b/security/selinux/ss/mls.c
@@ -517,6 +517,8 @@ int mls_compute_sid(struct context *scontext,
{
struct range_trans rtr;
struct mls_range *r;
+ struct class_datum *cladatum;
+ int default_range = 0;
if (!policydb.mls_enabled)
return 0;
@@ -530,6 +532,28 @@ int mls_compute_sid(struct context *scontext,
r = hashtab_search(policydb.range_tr, &rtr);
if (r)
return mls_range_set(newcontext, r);
+
+ if (tclass && tclass <= policydb.p_classes.nprim) {
+ cladatum = policydb.class_val_to_struct[tclass - 1];
+ if (cladatum)
+ default_range = cladatum->default_range;
+ }
+
+ switch (default_range) {
+ case DEFAULT_SOURCE_LOW:
+ return mls_context_cpy_low(newcontext, scontext);
+ case DEFAULT_SOURCE_HIGH:
+ return mls_context_cpy_high(newcontext, scontext);
+ case DEFAULT_SOURCE_LOW_HIGH:
+ return mls_context_cpy(newcontext, scontext);
+ case DEFAULT_TARGET_LOW:
+ return mls_context_cpy_low(newcontext, tcontext);
+ case DEFAULT_TARGET_HIGH:
+ return mls_context_cpy_high(newcontext, tcontext);
+ case DEFAULT_TARGET_LOW_HIGH:
+ return mls_context_cpy(newcontext, tcontext);
+ }
+
/* Fallthrough */
case AVTAB_CHANGE:
if ((tclass == policydb.process_class) || (sock == true))
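
When no range_transition entry matches, mls_compute_sid() now consults the class's default_range and picks which context (source or target) and which end of its range (low, high, or both) seeds the new context; an unset value falls through to the existing per-class behavior. A compact sketch of that dispatch, using the same six DEFAULT_* values that policydb.h defines further down (the helper below is a stand-in, not the kernel code):

    #include <stdio.h>

    /* Values mirror the DEFAULT_*_{LOW,HIGH,LOW_HIGH} defines added to policydb.h. */
    enum {
        DEFAULT_SOURCE_LOW = 1, DEFAULT_SOURCE_HIGH, DEFAULT_SOURCE_LOW_HIGH,
        DEFAULT_TARGET_LOW,     DEFAULT_TARGET_HIGH, DEFAULT_TARGET_LOW_HIGH,
    };

    /* Which context and which end of its range seeds the new context. */
    static const char *pick_range(int default_range)
    {
        switch (default_range) {
        case DEFAULT_SOURCE_LOW:      return "source low, used for both ends";
        case DEFAULT_SOURCE_HIGH:     return "source high, used for both ends";
        case DEFAULT_SOURCE_LOW_HIGH: return "source low-high";
        case DEFAULT_TARGET_LOW:      return "target low, used for both ends";
        case DEFAULT_TARGET_HIGH:     return "target high, used for both ends";
        case DEFAULT_TARGET_LOW_HIGH: return "target low-high";
        default:                      return "unset: fall through to class rules";
        }
    }

    int main(void)
    {
        printf("%s\n", pick_range(DEFAULT_TARGET_LOW));
        printf("%s\n", pick_range(0));
        return 0;
    }
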
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index a7f61d52f05..6962d15f549 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -133,6 +133,26 @@ static struct policydb_compat_info policydb_compat[] = {
.sym_num = SYM_NUM,
.ocon_num = OCON_NUM,
},
+ {
+ .version = POLICYDB_VERSION_NEW_OBJECT_DEFAULTS,
+ .sym_num = SYM_NUM,
+ .ocon_num = OCON_NUM,
+ },
+ {
+ .version = POLICYDB_VERSION_DEFAULT_TYPE,
+ .sym_num = SYM_NUM,
+ .ocon_num = OCON_NUM,
+ },
+ {
+ .version = POLICYDB_VERSION_CONSTRAINT_NAMES,
+ .sym_num = SYM_NUM,
+ .ocon_num = OCON_NUM,
+ },
+ {
+ .version = POLICYDB_VERSION_IOCTL_OPERATIONS,
+ .sym_num = SYM_NUM,
+ .ocon_num = OCON_NUM,
+ },
};
static struct policydb_compat_info *policydb_lookup_compat(int version)
@@ -603,6 +623,19 @@ static int common_destroy(void *key, void *datum, void *p)
return 0;
}
+static void constraint_expr_destroy(struct constraint_expr *expr)
+{
+ if (expr) {
+ ebitmap_destroy(&expr->names);
+ if (expr->type_names) {
+ ebitmap_destroy(&expr->type_names->types);
+ ebitmap_destroy(&expr->type_names->negset);
+ kfree(expr->type_names);
+ }
+ kfree(expr);
+ }
+}
+
static int cls_destroy(void *key, void *datum, void *p)
{
struct class_datum *cladatum;
@@ -618,10 +651,9 @@ static int cls_destroy(void *key, void *datum, void *p)
while (constraint) {
e = constraint->expr;
while (e) {
- ebitmap_destroy(&e->names);
etmp = e;
e = e->next;
- kfree(etmp);
+ constraint_expr_destroy(etmp);
}
ctemp = constraint;
constraint = constraint->next;
@@ -632,16 +664,14 @@ static int cls_destroy(void *key, void *datum, void *p)
while (constraint) {
e = constraint->expr;
while (e) {
- ebitmap_destroy(&e->names);
etmp = e;
e = e->next;
- kfree(etmp);
+ constraint_expr_destroy(etmp);
}
ctemp = constraint;
constraint = constraint->next;
kfree(ctemp);
}
-
kfree(cladatum->comkey);
}
kfree(datum);
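
Moving the per-expression cleanup into constraint_expr_destroy() keeps the two loops in cls_destroy() identical while giving a single place to free the optional type_names allocation introduced for constraint names. A minimal userspace sketch of the same walk-and-free pattern over a singly linked expression list (simplified stand-in types, allocation error checks omitted):

    #include <stdlib.h>

    struct type_set { int *types; };                 /* stand-in for two ebitmaps */
    struct constraint_expr {
        struct type_set *type_names;                 /* optional, may be NULL */
        struct constraint_expr *next;
    };

    static void constraint_expr_destroy(struct constraint_expr *expr)
    {
        if (!expr)
            return;
        if (expr->type_names) {
            free(expr->type_names->types);
            free(expr->type_names);
        }
        free(expr);
    }

    static void expr_list_destroy(struct constraint_expr *e)
    {
        while (e) {
            struct constraint_expr *etmp = e;        /* grab node before advancing */
            e = e->next;
            constraint_expr_destroy(etmp);
        }
    }

    int main(void)
    {
        struct constraint_expr *a = calloc(1, sizeof(*a));
        struct constraint_expr *b = calloc(1, sizeof(*b));

        a->next = b;
        b->type_names = calloc(1, sizeof(*b->type_names));
        expr_list_destroy(a);
        return 0;
    }
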
@@ -1146,8 +1176,34 @@ bad:
return rc;
}
-static int read_cons_helper(struct constraint_node **nodep, int ncons,
- int allowxtarget, void *fp)
+static void type_set_init(struct type_set *t)
+{
+ ebitmap_init(&t->types);
+ ebitmap_init(&t->negset);
+}
+
+static int type_set_read(struct type_set *t, void *fp)
+{
+ __le32 buf[1];
+ int rc;
+
+ if (ebitmap_read(&t->types, fp))
+ return -EINVAL;
+ if (ebitmap_read(&t->negset, fp))
+ return -EINVAL;
+
+ rc = next_entry(buf, fp, sizeof(u32));
+ if (rc < 0)
+ return -EINVAL;
+ t->flags = le32_to_cpu(buf[0]);
+
+ return 0;
+}
+
+
+static int read_cons_helper(struct policydb *p,
+ struct constraint_node **nodep,
+ int ncons, int allowxtarget, void *fp)
{
struct constraint_node *c, *lc;
struct constraint_expr *e, *le;
@@ -1215,6 +1271,18 @@ static int read_cons_helper(struct constraint_node **nodep, int ncons,
rc = ebitmap_read(&e->names, fp);
if (rc)
return rc;
+ if (p->policyvers >=
+ POLICYDB_VERSION_CONSTRAINT_NAMES) {
+ e->type_names = kzalloc(sizeof
+ (*e->type_names),
+ GFP_KERNEL);
+ if (!e->type_names)
+ return -ENOMEM;
+ type_set_init(e->type_names);
+ rc = type_set_read(e->type_names, fp);
+ if (rc)
+ return rc;
+ }
break;
default:
return -EINVAL;
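
type_set_read() fixes the on-disk layout of the new constraint-names payload: the types ebitmap, then the negset ebitmap, then one little-endian 32-bit flags word. A rough userspace sketch of pulling that trailing flags word out of a byte buffer, assuming the two ebitmaps have already been consumed (the buffer layout and helper are illustrative, not the real policy readers):

    #include <stdint.h>
    #include <stdio.h>

    /* Read one little-endian u32 from a byte stream, advancing the cursor. */
    static int read_le32(const uint8_t **p, const uint8_t *end, uint32_t *out)
    {
        const uint8_t *b = *p;

        if (end - b < 4)
            return -1;                       /* truncated input */
        *out = (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
               ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
        *p = b + 4;
        return 0;
    }

    int main(void)
    {
        /* flags = 0x00000002, with the two ebitmaps already consumed */
        const uint8_t blob[] = { 0x02, 0x00, 0x00, 0x00 };
        const uint8_t *p = blob;
        uint32_t flags;

        if (read_le32(&p, blob + sizeof(blob), &flags))
            return 1;
        printf("type_set flags = %#x\n", flags);
        return 0;
    }
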
@@ -1291,7 +1359,7 @@ static int class_read(struct policydb *p, struct hashtab *h, void *fp)
goto bad;
}
- rc = read_cons_helper(&cladatum->constraints, ncons, 0, fp);
+ rc = read_cons_helper(p, &cladatum->constraints, ncons, 0, fp);
if (rc)
goto bad;
@@ -1301,9 +1369,27 @@ static int class_read(struct policydb *p, struct hashtab *h, void *fp)
if (rc)
goto bad;
ncons = le32_to_cpu(buf[0]);
- rc = read_cons_helper(&cladatum->validatetrans, ncons, 1, fp);
+ rc = read_cons_helper(p, &cladatum->validatetrans,
+ ncons, 1, fp);
+ if (rc)
+ goto bad;
+ }
+
+ if (p->policyvers >= POLICYDB_VERSION_NEW_OBJECT_DEFAULTS) {
+ rc = next_entry(buf, fp, sizeof(u32) * 3);
+ if (rc)
+ goto bad;
+
+ cladatum->default_user = le32_to_cpu(buf[0]);
+ cladatum->default_role = le32_to_cpu(buf[1]);
+ cladatum->default_range = le32_to_cpu(buf[2]);
+ }
+
+ if (p->policyvers >= POLICYDB_VERSION_DEFAULT_TYPE) {
+ rc = next_entry(buf, fp, sizeof(u32) * 1);
if (rc)
goto bad;
+ cladatum->default_type = le32_to_cpu(buf[0]);
}
rc = hashtab_insert(h, key, cladatum);
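
class_read() now has two version-gated tails: POLICYDB_VERSION_NEW_OBJECT_DEFAULTS adds three le32 words (default_user, default_role, default_range) and POLICYDB_VERSION_DEFAULT_TYPE adds one more for default_type. Older policies simply omit the fields, so the reader gates on the declared policy version rather than probing the stream. A small sketch of that version-gated parse (the version numbers below are placeholders chosen only to show the ordering of the gates):

    #include <stdint.h>
    #include <stdio.h>

    /* Placeholder version numbers, only to show that one gate follows the other. */
    #define VER_NEW_OBJECT_DEFAULTS  27
    #define VER_DEFAULT_TYPE         28

    struct class_defaults { uint32_t user, role, range, type; };

    static uint32_t get_le32(const uint8_t *b)
    {
        return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
               ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
    }

    static const uint8_t *read_class_defaults(const uint8_t *p, int policyvers,
                                              struct class_defaults *d)
    {
        if (policyvers >= VER_NEW_OBJECT_DEFAULTS) {
            d->user  = get_le32(p);  p += 4;
            d->role  = get_le32(p);  p += 4;
            d->range = get_le32(p);  p += 4;
        }
        if (policyvers >= VER_DEFAULT_TYPE) {
            d->type  = get_le32(p);  p += 4;
        }
        return p;                    /* cursor positioned after the optional fields */
    }

    int main(void)
    {
        const uint8_t blob[16] = { 1,0,0,0,  2,0,0,0,  4,0,0,0,  1,0,0,0 };
        struct class_defaults d = { 0, 0, 0, 0 };

        read_class_defaults(blob, VER_DEFAULT_TYPE, &d);
        printf("user=%u role=%u range=%u type=%u\n", d.user, d.role, d.range, d.type);
        return 0;
    }
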
@@ -2723,6 +2809,24 @@ static int common_write(void *vkey, void *datum, void *ptr)
return 0;
}
+static int type_set_write(struct type_set *t, void *fp)
+{
+ int rc;
+ __le32 buf[1];
+
+ if (ebitmap_write(&t->types, fp))
+ return -EINVAL;
+ if (ebitmap_write(&t->negset, fp))
+ return -EINVAL;
+
+ buf[0] = cpu_to_le32(t->flags);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return -EINVAL;
+
+ return 0;
+}
+
static int write_cons_helper(struct policydb *p, struct constraint_node *node,
void *fp)
{
@@ -2754,6 +2858,12 @@ static int write_cons_helper(struct policydb *p, struct constraint_node *node,
rc = ebitmap_write(&e->names, fp);
if (rc)
return rc;
+ if (p->policyvers >=
+ POLICYDB_VERSION_CONSTRAINT_NAMES) {
+ rc = type_set_write(e->type_names, fp);
+ if (rc)
+ return rc;
+ }
break;
default:
break;
@@ -2832,6 +2942,23 @@ static int class_write(void *vkey, void *datum, void *ptr)
if (rc)
return rc;
+ if (p->policyvers >= POLICYDB_VERSION_NEW_OBJECT_DEFAULTS) {
+ buf[0] = cpu_to_le32(cladatum->default_user);
+ buf[1] = cpu_to_le32(cladatum->default_role);
+ buf[2] = cpu_to_le32(cladatum->default_range);
+
+ rc = put_entry(buf, sizeof(uint32_t), 3, fp);
+ if (rc)
+ return rc;
+ }
+
+ if (p->policyvers >= POLICYDB_VERSION_DEFAULT_TYPE) {
+ buf[0] = cpu_to_le32(cladatum->default_type);
+ rc = put_entry(buf, sizeof(uint32_t), 1, fp);
+ if (rc)
+ return rc;
+ }
+
return 0;
}
diff --git a/security/selinux/ss/policydb.h b/security/selinux/ss/policydb.h
index b846c038718..725d5945a97 100644
--- a/security/selinux/ss/policydb.h
+++ b/security/selinux/ss/policydb.h
@@ -60,6 +60,20 @@ struct class_datum {
struct symtab permissions; /* class-specific permission symbol table */
struct constraint_node *constraints; /* constraints on class permissions */
struct constraint_node *validatetrans; /* special transition rules */
+/* Options for how a new object's user, role, and type should be decided */
+#define DEFAULT_SOURCE 1
+#define DEFAULT_TARGET 2
+ char default_user;
+ char default_role;
+ char default_type;
+/* Options for how a new object's range should be decided */
+#define DEFAULT_SOURCE_LOW 1
+#define DEFAULT_SOURCE_HIGH 2
+#define DEFAULT_SOURCE_LOW_HIGH 3
+#define DEFAULT_TARGET_LOW 4
+#define DEFAULT_TARGET_HIGH 5
+#define DEFAULT_TARGET_LOW_HIGH 6
+ char default_range;
};
/* Role attributes */
@@ -140,6 +154,17 @@ struct cond_bool_datum {
struct cond_node;
/*
+ * A type set preserves the data needed to determine constraint info from
+ * the policy source. It is not used by the kernel policy itself, but allows
+ * utilities such as audit2allow to determine constraint denials.
+ */
+struct type_set {
+ struct ebitmap types;
+ struct ebitmap negset;
+ u32 flags;
+};
+
+/*
* The configuration data includes security contexts for
* initial SIDs, unlabeled file systems, TCP and UDP port numbers,
* network interfaces, and nodes. This structure stores the
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index 72b20b1089d..9ddca68b69b 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -92,9 +92,10 @@ static int context_struct_to_string(struct context *context, char **scontext,
u32 *scontext_len);
static void context_struct_compute_av(struct context *scontext,
- struct context *tcontext,
- u16 tclass,
- struct av_decision *avd);
+ struct context *tcontext,
+ u16 tclass,
+ struct av_decision *avd,
+ struct operation *ops);
struct selinux_mapping {
u16 value; /* policy value */
@@ -564,7 +565,8 @@ static void type_attribute_bounds_av(struct context *scontext,
context_struct_compute_av(&lo_scontext,
tcontext,
tclass,
- &lo_avd);
+ &lo_avd,
+ NULL);
if ((lo_avd.allowed & avd->allowed) == avd->allowed)
return; /* no masked permission */
masked = ~lo_avd.allowed & avd->allowed;
@@ -579,7 +581,8 @@ static void type_attribute_bounds_av(struct context *scontext,
context_struct_compute_av(scontext,
&lo_tcontext,
tclass,
- &lo_avd);
+ &lo_avd,
+ NULL);
if ((lo_avd.allowed & avd->allowed) == avd->allowed)
return; /* no masked permission */
masked = ~lo_avd.allowed & avd->allowed;
@@ -595,7 +598,8 @@ static void type_attribute_bounds_av(struct context *scontext,
context_struct_compute_av(&lo_scontext,
&lo_tcontext,
tclass,
- &lo_avd);
+ &lo_avd,
+ NULL);
if ((lo_avd.allowed & avd->allowed) == avd->allowed)
return; /* no masked permission */
masked = ~lo_avd.allowed & avd->allowed;
@@ -611,14 +615,39 @@ static void type_attribute_bounds_av(struct context *scontext,
}
}
+/* flag ioctl types that have operation permissions */
+void services_compute_operation_type(
+ struct operation *ops,
+ struct avtab_node *node)
+{
+ u8 type;
+ unsigned int i;
+
+ if (node->key.specified & AVTAB_OPTYPE) {
+ /* if allowing one or more complete types */
+ for (i = 0; i < ARRAY_SIZE(ops->type); i++)
+ ops->type[i] |= node->datum.u.ops->op.perms[i];
+ } else {
+ /* if allowing operations within a type */
+ type = node->datum.u.ops->type;
+ security_operation_set(ops->type, type);
+ }
+
+ /* If no ioctl commands are allowed, ignore auditallow and auditdeny */
+ if (node->key.specified & AVTAB_OPTYPE_ALLOWED ||
+ node->key.specified & AVTAB_OPNUM_ALLOWED)
+ ops->len = 1;
+}
+
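
services_compute_operation_type() only flags which ioctl types have operation rules at all: ops->type is a per-type bitmap (presumably one bit for each of the 256 possible 8-bit type values; its declaration lives in security.h, outside this diff), AVTAB_OPTYPE rules OR in whole per-type bitmaps, AVTAB_OPNUM rules set just the single type they refine, and ops->len is set once any allow rule is seen. A minimal userspace sketch of such a 256-bit set; the helpers below imitate, but are not, the kernel's security_operation_set/test macros:

    #include <stdint.h>
    #include <stdio.h>

    #define OP_WORDS 8                         /* 8 * 32 bits = one bit per u8 type */

    static void op_set(uint32_t map[OP_WORDS], uint8_t type)
    {
        map[type >> 5] |= 1u << (type & 0x1f); /* word index, then bit within it */
    }

    static int op_test(const uint32_t map[OP_WORDS], uint8_t type)
    {
        return (map[type >> 5] >> (type & 0x1f)) & 1u;
    }

    int main(void)
    {
        uint32_t types[OP_WORDS] = { 0 };

        /* e.g. flag ioctl type 0x89 (the SIOCxxx range) as having rules */
        op_set(types, 0x89);

        printf("0x89 flagged: %d, 0x12 flagged: %d\n",
               op_test(types, 0x89), op_test(types, 0x12));
        return 0;
    }
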
/*
- * Compute access vectors based on a context structure pair for
- * the permissions in a particular class.
+ * Compute access vectors and operations ranges based on a context
+ * structure pair for the permissions in a particular class.
*/
static void context_struct_compute_av(struct context *scontext,
- struct context *tcontext,
- u16 tclass,
- struct av_decision *avd)
+ struct context *tcontext,
+ u16 tclass,
+ struct av_decision *avd,
+ struct operation *ops)
{
struct constraint_node *constraint;
struct role_allow *ra;
@@ -632,6 +661,10 @@ static void context_struct_compute_av(struct context *scontext,
avd->allowed = 0;
avd->auditallow = 0;
avd->auditdeny = 0xffffffff;
+ if (ops) {
+ memset(&ops->type, 0, sizeof(ops->type));
+ ops->len = 0;
+ }
if (unlikely(!tclass || tclass > policydb.p_classes.nprim)) {
if (printk_ratelimit())
@@ -646,7 +679,7 @@ static void context_struct_compute_av(struct context *scontext,
* this permission check, then use it.
*/
avkey.target_class = tclass;
- avkey.specified = AVTAB_AV;
+ avkey.specified = AVTAB_AV | AVTAB_OP;
sattr = flex_array_get(policydb.type_attr_map_array, scontext->type - 1);
BUG_ON(!sattr);
tattr = flex_array_get(policydb.type_attr_map_array, tcontext->type - 1);
@@ -659,15 +692,17 @@ static void context_struct_compute_av(struct context *scontext,
node;
node = avtab_search_node_next(node, avkey.specified)) {
if (node->key.specified == AVTAB_ALLOWED)
- avd->allowed |= node->datum.data;
+ avd->allowed |= node->datum.u.data;
else if (node->key.specified == AVTAB_AUDITALLOW)
- avd->auditallow |= node->datum.data;
+ avd->auditallow |= node->datum.u.data;
else if (node->key.specified == AVTAB_AUDITDENY)
- avd->auditdeny &= node->datum.data;
+ avd->auditdeny &= node->datum.u.data;
+ else if (ops && (node->key.specified & AVTAB_OP))
+ services_compute_operation_type(ops, node);
}
/* Check conditional av table for additional permissions */
- cond_compute_av(&policydb.te_cond_avtab, &avkey, avd);
+ cond_compute_av(&policydb.te_cond_avtab, &avkey, avd, ops);
}
}
@@ -898,13 +933,139 @@ static void avd_init(struct av_decision *avd)
avd->flags = 0;
}
+void services_compute_operation_num(struct operation_decision *od,
+ struct avtab_node *node)
+{
+ unsigned int i;
+
+ if (node->key.specified & AVTAB_OPNUM) {
+ if (od->type != node->datum.u.ops->type)
+ return;
+ } else {
+ if (!security_operation_test(node->datum.u.ops->op.perms,
+ od->type))
+ return;
+ }
+
+ if (node->key.specified == AVTAB_OPTYPE_ALLOWED) {
+ od->specified |= OPERATION_ALLOWED;
+ memset(od->allowed->perms, 0xff,
+ sizeof(od->allowed->perms));
+ } else if (node->key.specified == AVTAB_OPTYPE_AUDITALLOW) {
+ od->specified |= OPERATION_AUDITALLOW;
+ memset(od->auditallow->perms, 0xff,
+ sizeof(od->auditallow->perms));
+ } else if (node->key.specified == AVTAB_OPTYPE_DONTAUDIT) {
+ od->specified |= OPERATION_DONTAUDIT;
+ memset(od->dontaudit->perms, 0xff,
+ sizeof(od->dontaudit->perms));
+ } else if (node->key.specified == AVTAB_OPNUM_ALLOWED) {
+ od->specified |= OPERATION_ALLOWED;
+ for (i = 0; i < ARRAY_SIZE(od->allowed->perms); i++)
+ od->allowed->perms[i] |=
+ node->datum.u.ops->op.perms[i];
+ } else if (node->key.specified == AVTAB_OPNUM_AUDITALLOW) {
+ od->specified |= OPERATION_AUDITALLOW;
+ for (i = 0; i < ARRAY_SIZE(od->auditallow->perms); i++)
+ od->auditallow->perms[i] |=
+ node->datum.u.ops->op.perms[i];
+ } else if (node->key.specified == AVTAB_OPNUM_DONTAUDIT) {
+ od->specified |= OPERATION_DONTAUDIT;
+ for (i = 0; i < ARRAY_SIZE(od->dontaudit->perms); i++)
+ od->dontaudit->perms[i] |=
+ node->datum.u.ops->op.perms[i];
+ } else {
+ BUG();
+ }
+}
+
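
services_compute_operation_num() then refines the decision for the single type recorded in od->type, skipping rules that do not cover it: a matching whole-type (OPTYPE) rule sets the corresponding perms bitmap to all-ones, a per-number (OPNUM) rule ORs in only the command bits it names, and od->specified records which of the allowed/auditallow/dontaudit vectors were touched. A small sketch of that merge for the allowed vector only (simplified stand-in structures):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define PERM_WORDS 8                       /* 256 command numbers per type */
    #define OP_ALLOWED 0x1                     /* stand-in for OPERATION_ALLOWED */

    struct op_decision {
        uint32_t specified;
        uint32_t allowed[PERM_WORDS];
    };

    /* A whole-type allow rule: every command number in the type is allowed. */
    static void merge_whole_type(struct op_decision *od)
    {
        od->specified |= OP_ALLOWED;
        memset(od->allowed, 0xff, sizeof(od->allowed));
    }

    /* A per-number allow rule: OR in just the listed command numbers. */
    static void merge_numbers(struct op_decision *od,
                              const uint32_t rule[PERM_WORDS])
    {
        unsigned int i;

        od->specified |= OP_ALLOWED;
        for (i = 0; i < PERM_WORDS; i++)
            od->allowed[i] |= rule[i];
    }

    int main(void)
    {
        struct op_decision od = { 0, { 0 } };
        uint32_t rule[PERM_WORDS] = { 0 };

        rule[0] = 0x0000000c;                  /* allow command numbers 2 and 3 */
        merge_numbers(&od, rule);
        printf("allowed[0]=%#x specified=%#x\n", od.allowed[0], od.specified);

        merge_whole_type(&od);
        printf("allowed[0]=%#x after whole-type allow\n", od.allowed[0]);
        return 0;
    }
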
+void security_compute_operation(u32 ssid,
+ u32 tsid,
+ u16 orig_tclass,
+ u8 type,
+ struct operation_decision *od)
+{
+ u16 tclass;
+ struct context *scontext, *tcontext;
+ struct avtab_key avkey;
+ struct avtab_node *node;
+ struct ebitmap *sattr, *tattr;
+ struct ebitmap_node *snode, *tnode;
+ unsigned int i, j;
+
+ od->type = type;
+ od->specified = 0;
+ memset(od->allowed->perms, 0, sizeof(od->allowed->perms));
+ memset(od->auditallow->perms, 0, sizeof(od->auditallow->perms));
+ memset(od->dontaudit->perms, 0, sizeof(od->dontaudit->perms));
+
+ read_lock(&policy_rwlock);
+ if (!ss_initialized)
+ goto allow;
+
+ scontext = sidtab_search(&sidtab, ssid);
+ if (!scontext) {
+ printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
+ __func__, ssid);
+ goto out;
+ }
+
+ tcontext = sidtab_search(&sidtab, tsid);
+ if (!tcontext) {
+ printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
+ __func__, tsid);
+ goto out;
+ }
+
+ tclass = unmap_class(orig_tclass);
+ if (unlikely(orig_tclass && !tclass)) {
+ if (policydb.allow_unknown)
+ goto allow;
+ goto out;
+ }
+
+
+ if (unlikely(!tclass || tclass > policydb.p_classes.nprim)) {
+ if (printk_ratelimit())
+ printk(KERN_WARNING "SELinux: Invalid class %hu\n", tclass);
+ goto out;
+ }
+ avkey.target_class = tclass;
+ avkey.specified = AVTAB_OP;
+ sattr = flex_array_get(policydb.type_attr_map_array,
+ scontext->type - 1);
+ BUG_ON(!sattr);
+ tattr = flex_array_get(policydb.type_attr_map_array,
+ tcontext->type - 1);
+ BUG_ON(!tattr);
+ ebitmap_for_each_positive_bit(sattr, snode, i) {
+ ebitmap_for_each_positive_bit(tattr, tnode, j) {
+ avkey.source_type = i + 1;
+ avkey.target_type = j + 1;
+ for (node = avtab_search_node(&policydb.te_avtab, &avkey);
+ node;
+ node = avtab_search_node_next(node, avkey.specified))
+ services_compute_operation_num(od, node);
+
+ cond_compute_operation(&policydb.te_cond_avtab,
+ &avkey, od);
+ }
+ }
+out:
+ read_unlock(&policy_rwlock);
+ return;
+allow:
+ memset(od->allowed->perms, 0xff, sizeof(od->allowed->perms));
+ goto out;
+}
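
security_compute_operation() resolves both SIDs to contexts, unmaps the class, and then walks the cross product of the source and target type attributes, collecting AVTAB_OP entries from the regular and conditional tables for every (source attribute, target attribute) pair; an unknown class is either allowed wholesale or denied depending on the policy's allow_unknown setting. A schematic userspace sketch of the nested iteration over two small bit sets (the bit-set representation and the lookup callback are placeholders, not the kernel's ebitmap or avtab):

    #include <stdint.h>
    #include <stdio.h>

    /* Placeholder "attribute sets": bit i set means attribute i applies. */
    static void for_each_pair(uint32_t sattrs, uint32_t tattrs,
                              void (*fn)(unsigned int s, unsigned int t))
    {
        unsigned int i, j;

        for (i = 0; i < 32; i++) {
            if (!(sattrs & (1u << i)))
                continue;
            for (j = 0; j < 32; j++) {
                if (!(tattrs & (1u << j)))
                    continue;
                fn(i + 1, j + 1);          /* attribute values are 1-based */
            }
        }
    }

    static void lookup(unsigned int source_type, unsigned int target_type)
    {
        /* In the kernel this is the avtab plus conditional avtab search. */
        printf("lookup source=%u target=%u\n", source_type, target_type);
    }

    int main(void)
    {
        /* source expands to attributes {1,3}, target to attribute {2} */
        for_each_pair(0x5, 0x2, lookup);
        return 0;
    }
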
/**
* security_compute_av - Compute access vector decisions.
* @ssid: source security identifier
* @tsid: target security identifier
* @tclass: target security class
* @avd: access vector decisions
+ * @ops: operation decisions
*
* Compute a set of access vector decisions based on the
* SID pair (@ssid, @tsid) for the permissions in @tclass.
@@ -912,13 +1073,15 @@ static void avd_init(struct av_decision *avd)
void security_compute_av(u32 ssid,
u32 tsid,
u16 orig_tclass,
- struct av_decision *avd)
+ struct av_decision *avd,
+ struct operation *ops)
{
u16 tclass;
struct context *scontext = NULL, *tcontext = NULL;
read_lock(&policy_rwlock);
avd_init(avd);
+ ops->len = 0;
if (!ss_initialized)
goto allow;
@@ -946,7 +1109,7 @@ void security_compute_av(u32 ssid,
goto allow;
goto out;
}
- context_struct_compute_av(scontext, tcontext, tclass, avd);
+ context_struct_compute_av(scontext, tcontext, tclass, avd, ops);
map_decision(orig_tclass, avd, policydb.allow_unknown);
out:
read_unlock(&policy_rwlock);
@@ -992,7 +1155,7 @@ void security_compute_av_user(u32 ssid,
goto out;
}
- context_struct_compute_av(scontext, tcontext, tclass, avd);
+ context_struct_compute_av(scontext, tcontext, tclass, avd, NULL);
out:
read_unlock(&policy_rwlock);
return;
@@ -1393,6 +1556,7 @@ static int security_compute_sid(u32 ssid,
u32 *out_sid,
bool kern)
{
+ struct class_datum *cladatum = NULL;
struct context *scontext = NULL, *tcontext = NULL, newcontext;
struct role_trans *roletr = NULL;
struct avtab_key avkey;
@@ -1441,12 +1605,20 @@ static int security_compute_sid(u32 ssid,
goto out_unlock;
}
+ if (tclass && tclass <= policydb.p_classes.nprim)
+ cladatum = policydb.class_val_to_struct[tclass - 1];
+
/* Set the user identity. */
switch (specified) {
case AVTAB_TRANSITION:
case AVTAB_CHANGE:
- /* Use the process user identity. */
- newcontext.user = scontext->user;
+ if (cladatum && cladatum->default_user == DEFAULT_TARGET) {
+ newcontext.user = tcontext->user;
+ } else {
+ /* notice this gets both DEFAULT_SOURCE and unset */
+ /* Use the process user identity. */
+ newcontext.user = scontext->user;
+ }
break;
case AVTAB_MEMBER:
/* Use the related object owner. */
@@ -1454,16 +1626,31 @@ static int security_compute_sid(u32 ssid,
break;
}
- /* Set the role and type to default values. */
- if ((tclass == policydb.process_class) || (sock == true)) {
- /* Use the current role and type of process. */
+ /* Set the role to default values. */
+ if (cladatum && cladatum->default_role == DEFAULT_SOURCE) {
newcontext.role = scontext->role;
- newcontext.type = scontext->type;
+ } else if (cladatum && cladatum->default_role == DEFAULT_TARGET) {
+ newcontext.role = tcontext->role;
} else {
- /* Use the well-defined object role. */
- newcontext.role = OBJECT_R_VAL;
- /* Use the type of the related object. */
+ if ((tclass == policydb.process_class) || (sock == true))
+ newcontext.role = scontext->role;
+ else
+ newcontext.role = OBJECT_R_VAL;
+ }
+
+ /* Set the type to default values. */
+ if (cladatum && cladatum->default_type == DEFAULT_SOURCE) {
+ newcontext.type = scontext->type;
+ } else if (cladatum && cladatum->default_type == DEFAULT_TARGET) {
newcontext.type = tcontext->type;
+ } else {
+ if ((tclass == policydb.process_class) || (sock == true)) {
+ /* Use the type of process. */
+ newcontext.type = scontext->type;
+ } else {
+ /* Use the type of the related object. */
+ newcontext.type = tcontext->type;
+ }
}
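
For user, role and type the per-class defaults follow the same pattern: DEFAULT_TARGET steers the field to the target context, DEFAULT_SOURCE to the source, and an unset value falls back to the historical rules (the source user; the source role and type for process-like classes and sockets, object_r and the target's type otherwise). A tiny sketch of that selection order for the type field (names below are illustrative, not the kernel's):

    #include <stdio.h>

    #define DEFAULT_SOURCE 1                  /* values mirror policydb.h */
    #define DEFAULT_TARGET 2

    /* Pick the type for a new object: an explicit per-class default wins,
     * otherwise fall back to the historical rule (process-like classes
     * inherit the source type, other objects take the target's type).
     */
    static const char *pick_type(int default_type, int is_process_like,
                                 const char *source_type, const char *target_type)
    {
        if (default_type == DEFAULT_SOURCE)
            return source_type;
        if (default_type == DEFAULT_TARGET)
            return target_type;
        return is_process_like ? source_type : target_type;
    }

    int main(void)
    {
        printf("%s\n", pick_type(0, 0, "init_t", "device_t"));              /* device_t */
        printf("%s\n", pick_type(DEFAULT_SOURCE, 0, "init_t", "device_t")); /* init_t */
        return 0;
    }
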
/* Look for a type transition/member/change rule. */
@@ -1486,7 +1673,7 @@ static int security_compute_sid(u32 ssid,
if (avdatum) {
/* Use the type from the type transition/member/change rule. */
- newcontext.type = avdatum->data;
+ newcontext.type = avdatum->u.data;
}
/* if we have a objname this is a file trans check so check those rules */
diff --git a/security/selinux/ss/services.h b/security/selinux/ss/services.h
index e8d907e903c..569757484d0 100644
--- a/security/selinux/ss/services.h
+++ b/security/selinux/ss/services.h
@@ -11,5 +11,11 @@
extern struct policydb policydb;
+void services_compute_operation_type(struct operation *ops,
+ struct avtab_node *node);
+
+void services_compute_operation_num(struct operation_decision *od,
+ struct avtab_node *node);
+
#endif /* _SS_SERVICES_H_ */
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 45c32f07416..d1e6879b962 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -406,8 +406,8 @@ static int smack_sb_statfs(struct dentry *dentry)
* Returns 0 if current can write the floor of the filesystem
* being mounted on, an error code otherwise.
*/
-static int smack_sb_mount(char *dev_name, struct path *path,
- char *type, unsigned long flags, void *data)
+static int smack_sb_mount(const char *dev_name, struct path *path,
+ const char *type, unsigned long flags, void *data)
{
struct superblock_smack *sbp = path->dentry->d_sb->s_security;
struct smk_audit_info ad;
diff --git a/security/tomoyo/common.h b/security/tomoyo/common.h
index 30fd9836970..997aa5bfaf6 100644
--- a/security/tomoyo/common.h
+++ b/security/tomoyo/common.h
@@ -971,7 +971,7 @@ int tomoyo_init_request_info(struct tomoyo_request_info *r,
const u8 index);
int tomoyo_mkdev_perm(const u8 operation, struct path *path,
const unsigned int mode, unsigned int dev);
-int tomoyo_mount_permission(char *dev_name, struct path *path,
+int tomoyo_mount_permission(const char *dev_name, struct path *path,
const char *type, unsigned long flags,
void *data_page);
int tomoyo_open_control(const u8 type, struct file *file);
diff --git a/security/tomoyo/mount.c b/security/tomoyo/mount.c
index fe00cdfd026..390c646013c 100644
--- a/security/tomoyo/mount.c
+++ b/security/tomoyo/mount.c
@@ -71,7 +71,8 @@ static bool tomoyo_check_mount_acl(struct tomoyo_request_info *r,
*
* Caller holds tomoyo_read_lock().
*/
-static int tomoyo_mount_acl(struct tomoyo_request_info *r, char *dev_name,
+static int tomoyo_mount_acl(struct tomoyo_request_info *r,
+ const char *dev_name,
struct path *dir, const char *type,
unsigned long flags)
{
@@ -183,7 +184,7 @@ static int tomoyo_mount_acl(struct tomoyo_request_info *r, char *dev_name,
*
* Returns 0 on success, negative value otherwise.
*/
-int tomoyo_mount_permission(char *dev_name, struct path *path,
+int tomoyo_mount_permission(const char *dev_name, struct path *path,
const char *type, unsigned long flags,
void *data_page)
{
diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
index 620d37c159a..43d3f97af46 100644
--- a/security/tomoyo/tomoyo.c
+++ b/security/tomoyo/tomoyo.c
@@ -406,8 +406,8 @@ static int tomoyo_path_chroot(struct path *path)
*
* Returns 0 on success, negative value otherwise.
*/
-static int tomoyo_sb_mount(char *dev_name, struct path *path,
- char *type, unsigned long flags, void *data)
+static int tomoyo_sb_mount(const char *dev_name, struct path *path,
+ const char *type, unsigned long flags, void *data)
{
return tomoyo_mount_permission(dev_name, path, type, flags, data);
}