| author | Andrew Lehmer <alehmer@google.com> | 2018-10-31 14:01:43 -0700 |
|---|---|---|
| committer | Andrew Lehmer <alehmer@google.com> | 2018-10-31 14:01:43 -0700 |
| commit | fe0b5874cc6e4f65dc93bc09d404dcdf1a966e6d | |
| tree | c603384d14bbbf6ef3c2d0d3f7093a92644d5a23 | |
| parent | ae2f29938c9e438d1dacd8b4e5e19696a9fc76d0 | |
| parent | d3d78ed4070192e3eb00d2fb4438ec4cb6dec2fb | |
Merge branch 'android-msm-marlin-3.18-pi-security-next' into android-msm-marlin-3.18-pi
January 2019.1
Bug: 118159841
Change-Id: I467a8bc59214f19dcf23e5521b64ea97af5aa4d0
Signed-off-by: Andrew Lehmer <alehmer@google.com>
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | fs/ext4/balloc.c | 14 |
| -rw-r--r-- | fs/ext4/ext4.h | 5 |
| -rw-r--r-- | fs/ext4/ext4_extents.h | 1 |
| -rw-r--r-- | fs/ext4/extents.c | 6 |
| -rw-r--r-- | fs/ext4/ialloc.c | 57 |
| -rw-r--r-- | fs/ext4/inode.c | 3 |
| -rw-r--r-- | fs/ext4/mballoc.c | 6 |
| -rw-r--r-- | fs/ext4/super.c | 16 |
| -rw-r--r-- | fs/ext4/xattr.c | 5 |
| -rw-r--r-- | fs/f2fs/inline.c | 10 |
| -rw-r--r-- | fs/inode.c | 6 |
| -rw-r--r-- | include/linux/mm_types.h | 2 |
| -rw-r--r-- | include/linux/sched.h | 2 |
| -rw-r--r-- | include/linux/vmacache.h | 5 |
| -rw-r--r-- | mm/debug.c | 4 |
| -rw-r--r-- | mm/vmacache.c | 36 |
16 files changed, 79 insertions, 99 deletions
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index cb3860817fe..177c9b788b7 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -243,8 +243,6 @@ static int ext4_init_block_bitmap(struct super_block *sb,
 	 */
 	ext4_mark_bitmap_end(num_clusters_in_group(sb, block_group),
 			     sb->s_blocksize * 8, bh->b_data);
-	ext4_block_bitmap_csum_set(sb, block_group, gdp, bh);
-	ext4_group_desc_csum_set(sb, block_group, gdp);
 	return 0;
 }
 
@@ -440,12 +438,22 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
 		goto verify;
 	}
 	ext4_lock_group(sb, block_group);
-	if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
+	if (ext4_has_group_desc_csum(sb) &&
+	    (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
 		int err;
 
+		if (block_group == 0) {
+			ext4_unlock_group(sb, block_group);
+			unlock_buffer(bh);
+			ext4_error(sb, "Block bitmap for bg 0 marked "
+				   "uninitialized");
+			put_bh(bh);
+			return NULL;
+		}
 		err = ext4_init_block_bitmap(sb, bh, block_group, desc);
 		set_bitmap_uptodate(bh);
 		set_buffer_uptodate(bh);
+		set_buffer_verified(bh);
 		ext4_unlock_group(sb, block_group);
 		unlock_buffer(bh);
 		if (err)
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 89bd9704222..7f908b4fce3 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -1418,11 +1418,6 @@ static inline struct timespec ext4_current_time(struct inode *inode)
 static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
 {
 	return ino == EXT4_ROOT_INO ||
-		ino == EXT4_USR_QUOTA_INO ||
-		ino == EXT4_GRP_QUOTA_INO ||
-		ino == EXT4_BOOT_LOADER_INO ||
-		ino == EXT4_JOURNAL_INO ||
-		ino == EXT4_RESIZE_INO ||
 		(ino >= EXT4_FIRST_INO(sb) &&
 		 ino <= le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count));
 }
diff --git a/fs/ext4/ext4_extents.h b/fs/ext4/ext4_extents.h
index 3c938154709..2d8e7379351 100644
--- a/fs/ext4/ext4_extents.h
+++ b/fs/ext4/ext4_extents.h
@@ -103,6 +103,7 @@ struct ext4_extent_header {
 };
 
 #define EXT4_EXT_MAGIC		cpu_to_le16(0xf30a)
+#define EXT4_MAX_EXTENT_DEPTH 5
 
 #define EXT4_EXTENT_TAIL_OFFSET(hdr) \
 	(sizeof(struct ext4_extent_header) + \
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index e5dae370d69..767c855fa6a 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -870,6 +870,12 @@ ext4_find_extent(struct inode *inode, ext4_lblk_t block,
 
 	eh = ext_inode_hdr(inode);
 	depth = ext_depth(inode);
+	if (depth < 0 || depth > EXT4_MAX_EXTENT_DEPTH) {
+		EXT4_ERROR_INODE(inode, "inode has invalid extent depth: %d",
+				 depth);
+		ret = -EIO;
+		goto err;
+	}
 
 	if (path) {
 		ext4_ext_drop_refs(path);
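The extents.c hunk above is a textbook hardening pattern: a size field read from untrusted on-disk metadata is bounded against a compile-time maximum before it drives any traversal or allocation. A minimal userspace sketch of the same idea (hypothetical struct and limit, not ext4 code):

```c
#include <stdio.h>

#define MAX_TREE_DEPTH 5   /* plays the role of EXT4_MAX_EXTENT_DEPTH */

struct disk_header {
	int depth;         /* read from untrusted on-disk metadata */
};

static int walk_tree(const struct disk_header *hdr)
{
	/* validate before use, mirroring the -EIO bail-out above */
	if (hdr->depth < 0 || hdr->depth > MAX_TREE_DEPTH) {
		fprintf(stderr, "invalid tree depth: %d\n", hdr->depth);
		return -1;
	}
	/* safe: depth now bounds loop iterations and path sizing */
	for (int level = hdr->depth; level > 0; level--)
		printf("descending at level %d\n", level);
	return 0;
}

int main(void)
{
	struct disk_header good = { 2 }, bad = { 1 << 20 };

	walk_tree(&good);
	return walk_tree(&bad) == 0 ? 0 : 1;
}
```

Without the bound, the on-disk field would directly control loop counts and path-array sizing on the kernel side, which is exactly what a crafted filesystem image can exploit.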
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index e7f6b35fe62..cc6b6f4478d 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -64,45 +64,6 @@ void ext4_mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
 		memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
 }
 
-/* Initializes an uninitialized inode bitmap */
-static unsigned ext4_init_inode_bitmap(struct super_block *sb,
-				       struct buffer_head *bh,
-				       ext4_group_t block_group,
-				       struct ext4_group_desc *gdp)
-{
-	struct ext4_group_info *grp;
-	struct ext4_sb_info *sbi = EXT4_SB(sb);
-	J_ASSERT_BH(bh, buffer_locked(bh));
-
-	/* If checksum is bad mark all blocks and inodes use to prevent
-	 * allocation, essentially implementing a per-group read-only flag. */
-	if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
-		ext4_error(sb, "Checksum bad for group %u", block_group);
-		grp = ext4_get_group_info(sb, block_group);
-		if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
-			percpu_counter_sub(&sbi->s_freeclusters_counter,
-					   grp->bb_free);
-		set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
-		if (!EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
-			int count;
-			count = ext4_free_inodes_count(sb, gdp);
-			percpu_counter_sub(&sbi->s_freeinodes_counter,
-					   count);
-		}
-		set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
-		return 0;
-	}
-
-	memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
-	ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
-			bh->b_data);
-	ext4_inode_bitmap_csum_set(sb, block_group, gdp, bh,
-				   EXT4_INODES_PER_GROUP(sb) / 8);
-	ext4_group_desc_csum_set(sb, block_group, gdp);
-
-	return EXT4_INODES_PER_GROUP(sb);
-}
-
 void ext4_end_bitmap_read(struct buffer_head *bh, int uptodate)
 {
 	if (uptodate) {
@@ -150,8 +111,19 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
 	}
 
 	ext4_lock_group(sb, block_group);
-	if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
-		ext4_init_inode_bitmap(sb, bh, block_group, desc);
+	if (ext4_has_group_desc_csum(sb) &&
+	    (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT))) {
+		if (block_group == 0) {
+			ext4_unlock_group(sb, block_group);
+			unlock_buffer(bh);
+			ext4_error(sb, "Inode bitmap for bg 0 marked "
+				   "uninitialized");
+			put_bh(bh);
+			return NULL;
+		}
+		memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
+		ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb),
+				     sb->s_blocksize * 8, bh->b_data);
 		set_bitmap_uptodate(bh);
 		set_buffer_uptodate(bh);
 		set_buffer_verified(bh);
@@ -918,7 +890,8 @@ got:
 
 	/* recheck and clear flag under lock if we still need to */
 	ext4_lock_group(sb, group);
-	if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
+	if (ext4_has_group_desc_csum(sb) &&
+	    (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
 		gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
 		ext4_free_group_clusters_set(sb, gdp,
 			ext4_free_clusters_after_init(sb, group, gdp));
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index acaeca693b1..7fccc33d5b0 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -3877,7 +3877,8 @@ static int __ext4_get_inode_loc(struct inode *inode,
 	int			inodes_per_block, inode_offset;
 
 	iloc->bh = NULL;
-	if (!ext4_valid_inum(sb, inode->i_ino))
+	if (inode->i_ino < EXT4_ROOT_INO ||
+	    inode->i_ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))
 		return -EIO;
 
 	iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb);
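Taken together, the ext4_valid_inum() change earlier and the __ext4_get_inode_loc() hunk just above split inode-number validation into two tiers: directory entries may no longer name the reserved inodes (quota, journal, resize, and friends), while internal callers that must load those inodes still get a plain bounds check. A rough userspace sketch of that split, with illustrative constants rather than the real ext4 API:

```c
#include <stdio.h>

#define ROOT_INO  2UL          /* analogue of EXT4_ROOT_INO */
#define FIRST_INO 11UL         /* typical EXT4_FIRST_INO(sb) value */

static unsigned long inodes_count = 65536;   /* s_inodes_count stand-in */

/* May a directory entry point at this inode? (ext4_valid_inum analogue) */
static int valid_inum(unsigned long ino)
{
	return ino == ROOT_INO ||
	       (ino >= FIRST_INO && ino <= inodes_count);
}

/* May the inode be located on disk at all? (__ext4_get_inode_loc check) */
static int locatable_inum(unsigned long ino)
{
	return ino >= ROOT_INO && ino <= inodes_count;
}

int main(void)
{
	unsigned long probe[] = { 0, 2, 8, 11, 70000 };

	for (unsigned i = 0; i < sizeof(probe) / sizeof(probe[0]); i++)
		printf("ino %5lu: valid=%d locatable=%d\n", probe[i],
		       valid_inum(probe[i]), locatable_inum(probe[i]));
	return 0;
}
```

Inode 8 (the journal on a typical layout) illustrates the gap the patch creates on purpose: the kernel can still read it internally, but a corrupted directory entry pointing at it is now rejected.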
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 2fcb8ba4b06..8749dc88f13 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -2415,7 +2415,8 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
 	 * initialize bb_free to be able to skip
 	 * empty groups without initialization
 	 */
-	if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
+	if (ext4_has_group_desc_csum(sb) &&
+	    (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
 		meta_group_info[i]->bb_free =
 			ext4_free_clusters_after_init(sb, group, desc);
 	} else {
@@ -2943,7 +2944,8 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
 #endif
 	ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
 		      ac->ac_b_ex.fe_len);
-	if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
+	if (ext4_has_group_desc_csum(sb) &&
+	    (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
 		gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
 		ext4_free_group_clusters_set(sb, gdp,
 					     ext4_free_clusters_after_init(sb,
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 81fb7a36c15..c282bfbe489 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -3124,13 +3124,22 @@ static ext4_group_t ext4_has_uninit_itable(struct super_block *sb)
 	ext4_group_t group, ngroups = EXT4_SB(sb)->s_groups_count;
 	struct ext4_group_desc *gdp = NULL;
 
+	if (!ext4_has_group_desc_csum(sb))
+		return ngroups;
+
 	for (group = 0; group < ngroups; group++) {
 		gdp = ext4_get_group_desc(sb, group, NULL);
 		if (!gdp)
 			continue;
 
-		if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
+		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))
+			continue;
+		if (group != 0)
 			break;
+		ext4_error(sb, "Inode table for bg 0 marked as "
+			   "needing zeroing");
+		if (sb->s_flags & MS_RDONLY)
+			return ngroups;
 	}
 
 	return group;
@@ -3783,6 +3792,11 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 	} else {
 		sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
 		sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
+		if (sbi->s_first_ino < EXT4_GOOD_OLD_FIRST_INO) {
+			ext4_msg(sb, KERN_ERR, "invalid first ino: %u",
+				 sbi->s_first_ino);
+			goto failed_mount;
+		}
 		if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) ||
 		    (!is_power_of_2(sbi->s_inode_size)) ||
 		    (sbi->s_inode_size > blocksize)) {
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 070abc1f286..04a24a73cbf 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -1390,6 +1390,11 @@ retry:
 		/* Find the entry best suited to be pushed into EA block */
 		entry = NULL;
 		for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
+			/* never move system.data out of the inode */
+			if ((last->e_name_len == 4) &&
+			    (last->e_name_index == EXT4_XATTR_INDEX_SYSTEM) &&
+			    !memcmp(last->e_name, "data", 4))
+				continue;
 			total_size =
 			    EXT4_XATTR_SIZE(le32_to_cpu(last->e_value_size)) +
 			    EXT4_XATTR_LEN(last->e_name_len);
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index 88036fd7579..9bd6e2e227a 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -100,6 +100,16 @@ static int __f2fs_convert_inline_data(struct inode *inode, struct page *page)
 	if (err)
 		goto out;
 
+	if (unlikely(dn->data_blkaddr != NEW_ADDR)) {
+		f2fs_put_dnode(dn);
+		set_sbi_flag(fio.sbi, SBI_NEED_FSCK);
+		f2fs_msg(fio.sbi->sb, KERN_WARNING,
+			"%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, "
+			"run fsck to fix.",
+			__func__, dn->inode->i_ino, dn->data_blkaddr);
+		return -EINVAL;
+	}
+
 	f2fs_wait_on_page_writeback(page, DATA);
 	zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);
diff --git a/fs/inode.c b/fs/inode.c
index da55347c44a..cbb47b97d56 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -1821,8 +1821,14 @@ void inode_init_owner(struct inode *inode, const struct inode *dir,
 	inode->i_uid = current_fsuid();
 	if (dir && dir->i_mode & S_ISGID) {
 		inode->i_gid = dir->i_gid;
+
+		/* Directories are special, and always inherit S_ISGID */
 		if (S_ISDIR(mode))
 			mode |= S_ISGID;
+		else if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP) &&
+			 !in_group_p(inode->i_gid) &&
+			 !capable_wrt_inode_uidgid(dir, CAP_FSETID))
+			mode &= ~S_ISGID;
 	} else
 		inode->i_gid = current_fsgid();
 	inode->i_mode = mode;
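The inode_init_owner() hunk closes the setgid-file privilege bug tracked upstream as CVE-2018-13405: inside a setgid directory, a newly created group-executable file used to inherit S_ISGID even when its creator was not a member of the owning group. A hedged userspace sketch of the resulting rule; in_owning_group and has_fsetid are stand-ins for the kernel's in_group_p() and capable_wrt_inode_uidgid() checks:

```c
#include <stdio.h>
#include <sys/stat.h>

static int in_owning_group = 0;   /* pretend creator is outside dir's group */
static int has_fsetid = 0;        /* pretend no CAP_FSETID */

static mode_t inherit_mode(mode_t mode, int dir_is_setgid)
{
	if (!dir_is_setgid)
		return mode;
	if (S_ISDIR(mode))
		mode |= S_ISGID;   /* directories always inherit S_ISGID */
	else if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP) &&
		 !in_owning_group && !has_fsetid)
		mode &= ~S_ISGID;  /* strip would-be setgid executable */
	return mode;
}

int main(void)
{
	/* directory keeps setgid: prints 2755 */
	printf("dir:  %04o\n",
	       (unsigned)(inherit_mode(S_IFDIR | 0755, 1) & 07777));
	/* group-executable file loses it: prints 0710, not 2710 */
	printf("file: %04o\n",
	       (unsigned)(inherit_mode(S_IFREG | S_ISGID | 0710, 1) & 07777));
	return 0;
}
```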
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index cc4d6214c29..23f1abe85f6 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -361,7 +361,7 @@ struct kioctx_table;
 struct mm_struct {
 	struct vm_area_struct *mmap;		/* list of VMAs */
 	struct rb_root mm_rb;
-	u32 vmacache_seqnum;                   /* per-thread vmacache */
+	u64 vmacache_seqnum;                   /* per-thread vmacache */
 #ifdef CONFIG_MMU
 	unsigned long (*get_unmapped_area) (struct file *filp,
 				unsigned long addr, unsigned long len,
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 2a79f97d46c..7cbed81c2e1 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1453,7 +1453,7 @@ struct task_struct {
 	unsigned brk_randomized:1;
 #endif
 	/* per-thread vma caching */
-	u32 vmacache_seqnum;
+	u64 vmacache_seqnum;
 	struct vm_area_struct *vmacache[VMACACHE_SIZE];
 #if defined(SPLIT_RSS_COUNTING)
 	struct task_rss_stat rss_stat;
diff --git a/include/linux/vmacache.h b/include/linux/vmacache.h
index c3fa0fd4394..4f58ff2dacd 100644
--- a/include/linux/vmacache.h
+++ b/include/linux/vmacache.h
@@ -15,7 +15,6 @@ static inline void vmacache_flush(struct task_struct *tsk)
 	memset(tsk->vmacache, 0, sizeof(tsk->vmacache));
 }
 
-extern void vmacache_flush_all(struct mm_struct *mm);
 extern void vmacache_update(unsigned long addr, struct vm_area_struct *newvma);
 extern struct vm_area_struct *vmacache_find(struct mm_struct *mm,
 						    unsigned long addr);
@@ -29,10 +28,6 @@ extern struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
 static inline void vmacache_invalidate(struct mm_struct *mm)
 {
 	mm->vmacache_seqnum++;
-
-	/* deal with overflows */
-	if (unlikely(mm->vmacache_seqnum == 0))
-		vmacache_flush_all(mm);
 }
 
 #endif /* __LINUX_VMACACHE_H */
diff --git a/mm/debug.c b/mm/debug.c
index a5cb434a6ea..0d70a457a72 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -169,7 +169,7 @@ EXPORT_SYMBOL(dump_vma);
 
 void dump_mm(const struct mm_struct *mm)
 {
-	pr_emerg("mm %p mmap %p seqnum %d task_size %lu\n"
+	pr_emerg("mm %p mmap %p seqnum %llu task_size %lu\n"
 #ifdef CONFIG_MMU
 		"get_unmapped_area %p\n"
 #endif
@@ -199,7 +199,7 @@ void dump_mm(const struct mm_struct *mm)
 #endif
 		"%s",	/* This is here to hold the comma */
 
-		mm, mm->mmap, mm->vmacache_seqnum, mm->task_size,
+		mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size,
 #ifdef CONFIG_MMU
 		mm->get_unmapped_area,
 #endif
diff --git a/mm/vmacache.c b/mm/vmacache.c
index 9f25af825de..e6e6e92d0d7 100644
--- a/mm/vmacache.c
+++ b/mm/vmacache.c
@@ -6,42 +6,6 @@
 #include <linux/vmacache.h>
 
 /*
- * Flush vma caches for threads that share a given mm.
- *
- * The operation is safe because the caller holds the mmap_sem
- * exclusively and other threads accessing the vma cache will
- * have mmap_sem held at least for read, so no extra locking
- * is required to maintain the vma cache.
- */
-void vmacache_flush_all(struct mm_struct *mm)
-{
-	struct task_struct *g, *p;
-
-	/*
-	 * Single threaded tasks need not iterate the entire
-	 * list of process. We can avoid the flushing as well
-	 * since the mm's seqnum was increased and don't have
-	 * to worry about other threads' seqnum. Current's
-	 * flush will occur upon the next lookup.
-	 */
-	if (atomic_read(&mm->mm_users) == 1)
-		return;
-
-	rcu_read_lock();
-	for_each_process_thread(g, p) {
-		/*
-		 * Only flush the vmacache pointers as the
-		 * mm seqnum is already set and curr's will
-		 * be set upon invalidation when the next
-		 * lookup is done.
-		 */
-		if (mm == p->mm)
-			vmacache_flush(p);
-	}
-	rcu_read_unlock();
-}
-
-/*
  * This task may be accessing a foreign mm via (for example)
  * get_user_pages()->find_vma(). The vmacache is task-local and this
  * task's vmacache pertains to a different mm (ie, its own). There is
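The final four files are one logical change, the vmacache sequence-number overflow fix tracked upstream as CVE-2018-17182: once vmacache_seqnum is a u64, the counter can no longer wrap within the lifetime of a system (2^64 increments at one per nanosecond is on the order of 584 years), so the overflow path and vmacache_flush_all() can simply be deleted. A hypothetical userspace sketch of the sequence-counter invalidation scheme, with stand-in types for mm_struct and the per-task cache:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CACHE_SIZE 4

struct owner {                 /* stand-in for mm_struct */
	uint64_t seqnum;
};

struct cache {                 /* stand-in for a task's vmacache */
	uint64_t seqnum;       /* snapshot of owner->seqnum */
	int slot[CACHE_SIZE];
};

/* A hit is only trusted while the snapshot matches the owner's counter. */
static int cache_lookup(struct cache *c, const struct owner *o, int key)
{
	if (c->seqnum != o->seqnum) {
		/* stale: lazily reset, as vmacache_find() does */
		memset(c->slot, 0, sizeof(c->slot));
		c->seqnum = o->seqnum;
		return -1;
	}
	return c->slot[key % CACHE_SIZE];
}

static void cache_store(struct cache *c, const struct owner *o,
			int key, int val)
{
	c->seqnum = o->seqnum;
	c->slot[key % CACHE_SIZE] = val;
}

/* O(1) invalidation for every cache snapshotting this owner. With a
 * 64-bit counter there is no wrap to handle, hence no flush-all path. */
static void owner_invalidate(struct owner *o)
{
	o->seqnum++;
}

int main(void)
{
	struct owner mm = { 0 };
	struct cache tc = { 0 };

	cache_store(&tc, &mm, 1, 42);
	printf("hit:  %d\n", cache_lookup(&tc, &mm, 1));   /* 42 */
	owner_invalidate(&mm);
	printf("miss: %d\n", cache_lookup(&tc, &mm, 1));   /* -1 */
	return 0;
}
```

The removed u32 scheme had to flush every thread's cache whenever the counter wrapped back to a previously snapshotted value, and its mm_users == 1 fast path skipped that flush in cases where stale entries could still be reached, which is what made the bug exploitable as a use-after-free.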
