| field | value | date |
|---|---|---|
| author | spkal01 <kalligeross@gmail.com> | 2021-05-17 02:37:28 +0530 |
| committer | spkal01 <kalligeross@gmail.com> | 2021-05-17 02:37:28 +0530 |
| commit | 93b265ae2eba8d93d0ffa406958547232f3114c8 (patch) | |
| tree | c2f093aa144f732b5cf7bd8a0b45bf35eda42e1c /drivers | |
| parent | 0a82617b8fce8994076b518064e7d420af290ea8 (diff) | |
| parent | 016f4ba70bffb6d02725e778c3989fa542e6d12a (diff) | |
Diffstat (limited to 'drivers')
74 files changed, 1481 insertions, 717 deletions
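The headline change in this merge is binder freeze support: new BINDER_FREEZE and BINDER_GET_FROZEN_INFO ioctls, outstanding-transaction tracking, and oneway-spam detection in binder_alloc. As a rough illustration only (not part of the commit), the sketch below shows how a userspace freezer might drive the new ioctls. The field names are taken from the handler code in the diff; the uapi header path and the struct/ioctl definitions are assumed to be exported by matching kernel headers, so treat them as assumptions.

```c
/*
 * Hypothetical userspace sketch (not part of this commit) showing how a
 * freezer daemon might drive the new binder freeze ioctls.  Field names
 * follow the handler code in the diff; the header path and ioctl macros
 * are assumed to come from the matching uapi headers.
 */
#include <errno.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>   /* assumed uapi location of BINDER_FREEZE */

/* Freeze (or thaw) the binder state of the process identified by pid. */
static int binder_freeze_pid(int binder_fd, unsigned int pid, int freeze)
{
	struct binder_freeze_info info = {
		.pid = pid,
		.enable = freeze,	/* 1 = freeze, 0 = thaw */
		.timeout_ms = 100,	/* wait up to 100ms for outstanding txns */
	};

	/* Fails with EAGAIN if transactions are still outstanding after the timeout. */
	if (ioctl(binder_fd, BINDER_FREEZE, &info) < 0)
		return -errno;
	return 0;
}

/* Report whether the frozen process has received transactions since freezing. */
static void binder_report_frozen(int binder_fd, unsigned int pid)
{
	struct binder_frozen_status_info status = { .pid = pid };

	if (ioctl(binder_fd, BINDER_GET_FROZEN_INFO, &status) == 0)
		printf("pid %u: sync_recv=%u async_recv=%u\n",
		       pid, status.sync_recv, status.async_recv);
}
```

Note that, per the hlist walk over binder_procs in the BINDER_FREEZE handler below, the target is selected by info.pid rather than by the owner of the file descriptor, and every binder_proc with that pid (one per binder context the process has open) is frozen or thawed together.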
diff --git a/drivers/android/binder.c b/drivers/android/binder.c index afb4675d65f3..632308c48bf7 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -459,9 +459,19 @@ struct binder_priority { * (protected by binder_deferred_lock) * @deferred_work: bitmap of deferred work to perform * (protected by binder_deferred_lock) + * @outstanding_txns: number of transactions to be transmitted before + * processes in freeze_wait are woken up + * (protected by @inner_lock) * @is_dead: process is dead and awaiting free * when outstanding transactions are cleaned up * (protected by @inner_lock) + * @sync_recv: process received sync transactions since last frozen + * (protected by @inner_lock) + * @async_recv: process received async transactions since last frozen + * (protected by @inner_lock) + * @freeze_wait: waitqueue of processes waiting for all outstanding + * transactions to be processed + * (protected by @inner_lock) * @todo: list of work for this process * (protected by @inner_lock) * @stats: per-process binder statistics @@ -505,7 +515,12 @@ struct binder_proc { struct mutex files_lock; struct hlist_node deferred_work_node; int deferred_work; + int outstanding_txns; bool is_dead; + bool is_frozen; + bool sync_recv; + bool async_recv; + wait_queue_head_t freeze_wait; struct list_head todo; struct binder_stats stats; @@ -2076,6 +2091,10 @@ static void binder_free_transaction(struct binder_transaction *t) spin_unlock(&t->lock); binder_inner_proc_lock(target_proc); + target_proc->outstanding_txns--; + BUG_ON(target_proc->outstanding_txns < 0); + if (!target_proc->outstanding_txns && target_proc->is_frozen) + wake_up_interruptible_all(&target_proc->freeze_wait); if (t->buffer) t->buffer->transaction = NULL; binder_inner_proc_unlock(target_proc); @@ -2829,10 +2848,11 @@ static int binder_fixup_parent(struct binder_transaction *t, * If the @thread parameter is not NULL, the transaction is always queued * to the waitlist of that specific thread. * - * Return: true if the transactions was successfully queued - * false if the target process or thread is dead + * Return: 0 if the transaction was successfully queued + * BR_DEAD_REPLY if the target process or thread is dead + * BR_FROZEN_REPLY if the target process or thread is frozen */ -static bool binder_proc_transaction(struct binder_transaction *t, +static int binder_proc_transaction(struct binder_transaction *t, struct binder_proc *proc, struct binder_thread *thread) { @@ -2856,11 +2876,18 @@ static bool binder_proc_transaction(struct binder_transaction *t, } binder_inner_proc_lock(proc); + if (proc->is_frozen) { + proc->sync_recv |= !oneway; + proc->async_recv |= oneway; + } - if (proc->is_dead || (thread && thread->is_dead)) { + if ((proc->is_frozen && !oneway) || proc->is_dead || + (thread && thread->is_dead)) { + bool proc_is_dead = proc->is_dead + || (thread && thread->is_dead); binder_inner_proc_unlock(proc); binder_node_unlock(node); - return false; + return proc_is_dead ? 
BR_DEAD_REPLY : BR_FROZEN_REPLY; } if (!thread && !pending_async) @@ -2879,10 +2906,11 @@ static bool binder_proc_transaction(struct binder_transaction *t, if (!pending_async) binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */); + proc->outstanding_txns++; binder_inner_proc_unlock(proc); binder_node_unlock(node); - return true; + return 0; } /** @@ -3203,7 +3231,7 @@ static void binder_transaction(struct binder_proc *proc, t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size, tr->offsets_size, extra_buffers_size, - !reply && (t->flags & TF_ONE_WAY)); + !reply && (t->flags & TF_ONE_WAY), current->tgid); if (IS_ERR(t->buffer)) { /* * -ESRCH indicates VMA cleared. The target is dying. @@ -3231,6 +3259,7 @@ static void binder_transaction(struct binder_proc *proc, t->buffer->debug_id = t->debug_id; t->buffer->transaction = t; t->buffer->target_node = target_node; + t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF); trace_binder_transaction_alloc_buf(t->buffer); if (binder_alloc_copy_user_to_buffer( @@ -3476,13 +3505,16 @@ static void binder_transaction(struct binder_proc *proc, if (reply) { binder_enqueue_thread_work(thread, tcomplete); binder_inner_proc_lock(target_proc); - if (target_thread->is_dead) { + if (target_thread->is_dead || target_proc->is_frozen) { + return_error = target_thread->is_dead ? + BR_DEAD_REPLY : BR_FROZEN_REPLY; binder_inner_proc_unlock(target_proc); goto err_dead_proc_or_thread; } BUG_ON(t->buffer->async_transaction != 0); binder_pop_transaction_ilocked(target_thread, in_reply_to); binder_enqueue_thread_work_ilocked(target_thread, &t->work); + target_proc->outstanding_txns++; binder_inner_proc_unlock(target_proc); wake_up_interruptible_sync(&target_thread->wait); binder_restore_priority(current, in_reply_to->saved_priority); @@ -3502,7 +3534,9 @@ static void binder_transaction(struct binder_proc *proc, t->from_parent = thread->transaction_stack; thread->transaction_stack = t; binder_inner_proc_unlock(proc); - if (!binder_proc_transaction(t, target_proc, target_thread)) { + return_error = binder_proc_transaction(t, + target_proc, target_thread); + if (return_error) { binder_inner_proc_lock(proc); binder_pop_transaction_ilocked(thread, t); binder_inner_proc_unlock(proc); @@ -3512,7 +3546,8 @@ static void binder_transaction(struct binder_proc *proc, BUG_ON(target_node == NULL); BUG_ON(t->buffer->async_transaction != 1); binder_enqueue_thread_work(thread, tcomplete); - if (!binder_proc_transaction(t, target_proc, NULL)) + return_error = binder_proc_transaction(t, target_proc, NULL); + if (return_error) goto err_dead_proc_or_thread; } if (target_thread) @@ -3529,7 +3564,6 @@ static void binder_transaction(struct binder_proc *proc, return; err_dead_proc_or_thread: - return_error = BR_DEAD_REPLY; return_error_line = __LINE__; binder_dequeue_work(proc, tcomplete); err_translate_failed: @@ -4134,7 +4168,7 @@ static int binder_wait_for_work(struct binder_thread *thread, binder_inner_proc_lock(proc); list_del_init(&thread->waiting_thread_node); if (signal_pending(current)) { - ret = -ERESTARTSYS; + ret = -EINTR; break; } } @@ -4638,6 +4672,7 @@ static void binder_free_proc(struct binder_proc *proc) BUG_ON(!list_empty(&proc->todo)); BUG_ON(!list_empty(&proc->delivered_death)); + WARN_ON(proc->outstanding_txns); device = container_of(proc->context, struct binder_device, context); if (refcount_dec_and_test(&device->ref)) { kfree(proc->context->name); @@ -4698,6 +4733,7 @@ static int binder_thread_release(struct binder_proc *proc, (t->to_thread == 
thread) ? "in" : "out"); if (t->to_thread == thread) { + t->to_proc->outstanding_txns--; t->to_proc = NULL; t->to_thread = NULL; if (t->buffer) { @@ -4944,6 +4980,76 @@ static int binder_ioctl_get_node_debug_info(struct binder_proc *proc, return 0; } +static int binder_ioctl_freeze(struct binder_freeze_info *info, + struct binder_proc *target_proc) +{ + int ret = 0; + + if (!info->enable) { + binder_inner_proc_lock(target_proc); + target_proc->sync_recv = false; + target_proc->async_recv = false; + target_proc->is_frozen = false; + binder_inner_proc_unlock(target_proc); + return 0; + } + + /* + * Freezing the target. Prevent new transactions by + * setting frozen state. If timeout specified, wait + * for transactions to drain. + */ + binder_inner_proc_lock(target_proc); + target_proc->sync_recv = false; + target_proc->async_recv = false; + target_proc->is_frozen = true; + binder_inner_proc_unlock(target_proc); + + if (info->timeout_ms > 0) + ret = wait_event_interruptible_timeout( + target_proc->freeze_wait, + (!target_proc->outstanding_txns), + msecs_to_jiffies(info->timeout_ms)); + + if (!ret && target_proc->outstanding_txns) + ret = -EAGAIN; + + if (ret < 0) { + binder_inner_proc_lock(target_proc); + target_proc->is_frozen = false; + binder_inner_proc_unlock(target_proc); + } + + return ret; +} + +static int binder_ioctl_get_freezer_info( + struct binder_frozen_status_info *info) +{ + struct binder_proc *target_proc; + bool found = false; + + info->sync_recv = 0; + info->async_recv = 0; + + mutex_lock(&binder_procs_lock); + hlist_for_each_entry(target_proc, &binder_procs, proc_node) { + if (target_proc->pid == info->pid) { + found = true; + binder_inner_proc_lock(target_proc); + info->sync_recv |= target_proc->sync_recv; + info->async_recv |= target_proc->async_recv; + binder_inner_proc_unlock(target_proc); + } + } + mutex_unlock(&binder_procs_lock); + + if (!found) + return -EINVAL; + + return 0; +} + static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { int ret; @@ -5062,6 +5168,84 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) } break; } + case BINDER_FREEZE: { + struct binder_freeze_info info; + struct binder_proc **target_procs = NULL, *target_proc; + int target_procs_count = 0, i = 0; + + ret = 0; + + if (copy_from_user(&info, ubuf, sizeof(info))) { + ret = -EFAULT; + goto err; + } + + mutex_lock(&binder_procs_lock); + hlist_for_each_entry(target_proc, &binder_procs, proc_node) { + if (target_proc->pid == info.pid) + target_procs_count++; + } + + if (target_procs_count == 0) { + mutex_unlock(&binder_procs_lock); + ret = -EINVAL; + goto err; + } + + target_procs = kmalloc(sizeof(struct binder_proc *) * + target_procs_count, + GFP_KERNEL); + + if (!target_procs) { + mutex_unlock(&binder_procs_lock); + ret = -ENOMEM; + goto err; + } + + hlist_for_each_entry(target_proc, &binder_procs, proc_node) { + if (target_proc->pid != info.pid) + continue; + + binder_inner_proc_lock(target_proc); + atomic_inc(&target_proc->tmp_ref); + binder_inner_proc_unlock(target_proc); + + target_procs[i++] = target_proc; + } + mutex_unlock(&binder_procs_lock); + + for (i = 0; i < target_procs_count; i++) { + if (ret >= 0) + ret = binder_ioctl_freeze(&info, + target_procs[i]); + + binder_proc_dec_tmpref(target_procs[i]); + } + + kfree(target_procs); + + if (ret < 0) + goto err; + break; + } + case BINDER_GET_FROZEN_INFO: { + struct binder_frozen_status_info info; + + if (copy_from_user(&info, ubuf, sizeof(info))) { + ret = -EFAULT; + goto err; 
+ } + + ret = binder_ioctl_get_freezer_info(&info); + if (ret < 0) + goto err; + + if (copy_to_user(ubuf, &info, sizeof(info))) { + ret = -EFAULT; + goto err; + } + break; + } default: ret = -EINVAL; goto err; @@ -5176,6 +5360,7 @@ static int binder_open(struct inode *nodp, struct file *filp) proc->tsk = current->group_leader; mutex_init(&proc->files_lock); INIT_LIST_HEAD(&proc->todo); + init_waitqueue_head(&proc->freeze_wait); if (binder_supported_policy(current->policy)) { proc->default_priority.sched_policy = current->policy; proc->default_priority.prio = current->normal_prio; @@ -5397,6 +5582,9 @@ static void binder_deferred_release(struct binder_proc *proc) atomic_inc(&proc->tmp_ref); proc->is_dead = true; + proc->is_frozen = false; + proc->sync_recv = false; + proc->async_recv = false; threads = 0; active_transactions = 0; while ((n = rb_first(&proc->threads))) { diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c index 1b5a131278bc..a0856e920b2b 100644 --- a/drivers/android/binder_alloc.c +++ b/drivers/android/binder_alloc.c @@ -38,11 +38,12 @@ struct list_lru binder_alloc_lru; static DEFINE_MUTEX(binder_alloc_mmap_lock); enum { + BINDER_DEBUG_USER_ERROR = 1U << 0, BINDER_DEBUG_OPEN_CLOSE = 1U << 1, BINDER_DEBUG_BUFFER_ALLOC = 1U << 2, BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 3, }; -static uint32_t binder_alloc_debug_mask; +static uint32_t binder_alloc_debug_mask = 0; module_param_named(debug_mask, binder_alloc_debug_mask, uint, 0644); @@ -343,12 +344,50 @@ static inline struct vm_area_struct *binder_alloc_get_vma( return vma; } +static void debug_low_async_space_locked(struct binder_alloc *alloc, int pid) +{ + /* + * Find the amount and size of buffers allocated by the current caller; + * The idea is that once we cross the threshold, whoever is responsible + * for the low async space is likely to try to send another async txn, + * and at some point we'll catch them in the act. This is more efficient + * than keeping a map per pid. + */ + struct rb_node *n = alloc->free_buffers.rb_node; + struct binder_buffer *buffer; + size_t total_alloc_size = 0; + size_t num_buffers = 0; + + for (n = rb_first(&alloc->allocated_buffers); n != NULL; + n = rb_next(n)) { + buffer = rb_entry(n, struct binder_buffer, rb_node); + if (buffer->pid != pid) + continue; + if (!buffer->async_transaction) + continue; + total_alloc_size += binder_alloc_buffer_size(alloc, buffer) + + sizeof(struct binder_buffer); + num_buffers++; + } + + /* + * Warn if this pid has more than 50 transactions, or more than 50% of + * async space (which is 25% of total buffer size). + */ + if (num_buffers > 50 || total_alloc_size > alloc->buffer_size / 4) { + binder_alloc_debug(BINDER_DEBUG_USER_ERROR, + "%d: pid %d spamming oneway? 
%zd buffers allocated for a total size of %zd\n", + alloc->pid, pid, num_buffers, total_alloc_size); + } +} + static struct binder_buffer *binder_alloc_new_buf_locked( struct binder_alloc *alloc, size_t data_size, size_t offsets_size, size_t extra_buffers_size, - int is_async) + int is_async, + int pid) { struct rb_node *n = alloc->free_buffers.rb_node; struct binder_buffer *buffer; @@ -487,11 +526,20 @@ static struct binder_buffer *binder_alloc_new_buf_locked( buffer->offsets_size = offsets_size; buffer->async_transaction = is_async; buffer->extra_buffers_size = extra_buffers_size; + buffer->pid = pid; if (is_async) { alloc->free_async_space -= size + sizeof(struct binder_buffer); binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC, "%d: binder_alloc_buf size %zd async free %zd\n", alloc->pid, size, alloc->free_async_space); + if (alloc->free_async_space < alloc->buffer_size / 10) { + /* + * Start detecting spammers once we have less than 20% + * of async space left (which is less than 10% of total + * buffer size). + */ + debug_low_async_space_locked(alloc, pid); + } } return buffer; @@ -509,6 +557,7 @@ err_alloc_buf_struct_failed: * @offsets_size: user specified buffer offset * @extra_buffers_size: size of extra space for meta-data (eg, security context) * @is_async: buffer for async transaction + * @pid: pid to attribute allocation to (used for debugging) * * Allocate a new buffer given the requested sizes. Returns * the kernel version of the buffer pointer. The size allocated @@ -521,13 +570,14 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc, size_t data_size, size_t offsets_size, size_t extra_buffers_size, - int is_async) + int is_async, + int pid) { struct binder_buffer *buffer; mutex_lock(&alloc->mutex); buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size, - extra_buffers_size, is_async); + extra_buffers_size, is_async, pid); mutex_unlock(&alloc->mutex); return buffer; } @@ -647,6 +697,8 @@ static void binder_free_buf_locked(struct binder_alloc *alloc, binder_insert_free_buffer(alloc, buffer); } +static void binder_alloc_clear_buf(struct binder_alloc *alloc, + struct binder_buffer *buffer); /** * binder_alloc_free_buf() - free a binder buffer * @alloc: binder_alloc for this proc @@ -657,6 +709,18 @@ static void binder_free_buf_locked(struct binder_alloc *alloc, void binder_alloc_free_buf(struct binder_alloc *alloc, struct binder_buffer *buffer) { + /* + * We could eliminate the call to binder_alloc_clear_buf() + * from binder_alloc_deferred_release() by moving this to + * binder_alloc_free_buf_locked(). However, that could + * increase contention for the alloc mutex if clear_on_free + * is used frequently for large buffers. The mutex is not + * needed for correctness here. 
+ */ + if (buffer->clear_on_free) { + binder_alloc_clear_buf(alloc, buffer); + buffer->clear_on_free = false; + } mutex_lock(&alloc->mutex); binder_free_buf_locked(alloc, buffer); mutex_unlock(&alloc->mutex); @@ -749,6 +813,10 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc) /* Transaction should already have been freed */ BUG_ON(buffer->transaction); + if (buffer->clear_on_free) { + binder_alloc_clear_buf(alloc, buffer); + buffer->clear_on_free = false; + } binder_free_buf_locked(alloc, buffer); buffers++; } @@ -1076,6 +1144,36 @@ static struct page *binder_alloc_get_page(struct binder_alloc *alloc, } /** + * binder_alloc_clear_buf() - zero out buffer + * @alloc: binder_alloc for this proc + * @buffer: binder buffer to be cleared + * + * memset the given buffer to 0 + */ +static void binder_alloc_clear_buf(struct binder_alloc *alloc, + struct binder_buffer *buffer) +{ + size_t bytes = binder_alloc_buffer_size(alloc, buffer); + binder_size_t buffer_offset = 0; + + while (bytes) { + unsigned long size; + struct page *page; + pgoff_t pgoff; + void *kptr; + + page = binder_alloc_get_page(alloc, buffer, + buffer_offset, &pgoff); + size = min_t(size_t, bytes, PAGE_SIZE - pgoff); + kptr = kmap(page) + pgoff; + memset(kptr, 0, size); + kunmap(page); + bytes -= size; + buffer_offset += size; + } +} + +/** * binder_alloc_copy_user_to_buffer() - copy src user to tgt user * @alloc: binder_alloc for this proc * @buffer: binder buffer to be accessed diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h index b60d161b7a7a..fdb220eb55d1 100644 --- a/drivers/android/binder_alloc.h +++ b/drivers/android/binder_alloc.h @@ -32,6 +32,7 @@ struct binder_transaction; * @entry: entry alloc->buffers * @rb_node: node for allocated_buffers/free_buffers rb trees * @free: %true if buffer is free + * @clear_on_free: %true if buffer must be zeroed after use * @allow_user_free: %true if user is allowed to free buffer * @async_transaction: %true if buffer is in use for an async txn * @debug_id: unique ID for debugging @@ -41,6 +42,7 @@ struct binder_transaction; * @offsets_size: size of array of offsets * @extra_buffers_size: size of space for other objects (like sg lists) * @user_data: user pointer to base of buffer space + * @pid: pid to attribute the buffer to (caller) * * Bookkeeping structure for binder transaction buffers */ @@ -49,9 +51,10 @@ struct binder_buffer { struct rb_node rb_node; /* free entry by size or allocated entry */ /* by address */ unsigned free:1; + unsigned clear_on_free:1; unsigned allow_user_free:1; unsigned async_transaction:1; - unsigned debug_id:29; + unsigned debug_id:28; struct binder_transaction *transaction; @@ -60,6 +63,7 @@ struct binder_buffer { size_t offsets_size; size_t extra_buffers_size; void __user *user_data; + int pid; }; /** @@ -126,7 +130,8 @@ extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc, size_t data_size, size_t offsets_size, size_t extra_buffers_size, - int is_async); + int is_async, + int pid); extern void binder_alloc_init(struct binder_alloc *alloc); extern int binder_alloc_shrinker_init(void); extern void binder_alloc_vma_close(struct binder_alloc *alloc); diff --git a/drivers/android/binder_alloc_selftest.c b/drivers/android/binder_alloc_selftest.c index b72708918b06..c839c490fde3 100644 --- a/drivers/android/binder_alloc_selftest.c +++ b/drivers/android/binder_alloc_selftest.c @@ -128,7 +128,7 @@ static void binder_selftest_alloc_buf(struct binder_alloc *alloc, int i; for (i = 0; i < BUFFER_NUM; 
i++) { - buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0); + buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0, 0); if (IS_ERR(buffers[i]) || !check_buffer_pages_allocated(alloc, buffers[i], sizes[i])) { diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index 11027b2146bc..5a537093b9a6 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -435,6 +435,16 @@ static int cpu_uevent(struct device *dev, struct kobj_uevent_env *env) } #endif +static int cpu_dev_pm_unset_is_prepared(unsigned int cpu) +{ + struct device *cpu_dev = get_cpu_device(cpu); + + if (cpu_dev) + cpu_dev->power.is_prepared = false; + + return 0; +} + /* * register_cpu - Setup a sysfs device for a CPU. * @cpu - cpu->hotpluggable field set to 1 will generate a control file in @@ -469,7 +479,9 @@ int register_cpu(struct cpu *cpu, int num) register_cpu_under_node(num, cpu_to_node(num)); dev_pm_qos_expose_latency_limit(&cpu->dev, 0); - return 0; + return cpuhp_setup_state_nocalls(CPUHP_CPUDEV_PM_PREPARE, + "base/cpu/dev_pm:prepare", + cpu_dev_pm_unset_is_prepared, NULL); } struct device *get_cpu_device(unsigned cpu) diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c index 0ce6ea7f2a0f..267cef6eda27 100644 --- a/drivers/char/adsprpc.c +++ b/drivers/char/adsprpc.c @@ -3393,11 +3393,15 @@ static int fastrpc_set_process_info(struct fastrpc_file *fl) { int err = 0, buf_size = 0; char strpid[PID_SIZE]; + char cur_comm[TASK_COMM_LEN]; + + memcpy(cur_comm, current->comm, TASK_COMM_LEN); + cur_comm[TASK_COMM_LEN-1] = '\0'; fl->tgid = current->tgid; snprintf(strpid, PID_SIZE, "%d", current->pid); if (debugfs_root) { - buf_size = strlen(current->comm) + strlen("_") + buf_size = strlen(cur_comm) + strlen("_") + strlen(strpid) + 1; spin_lock(&fl->hlock); if (fl->debug_buf_alloced_attempted) { @@ -3411,13 +3415,13 @@ static int fastrpc_set_process_info(struct fastrpc_file *fl) err = -ENOMEM; return err; } - snprintf(fl->debug_buf, UL_SIZE, "%.10s%s%d", - current->comm, "_", current->pid); + snprintf(fl->debug_buf, buf_size, "%.10s%s%d", + cur_comm, "_", current->pid); fl->debugfs_file = debugfs_create_file(fl->debug_buf, 0644, debugfs_root, fl, &debugfs_fops); if (IS_ERR_OR_NULL(fl->debugfs_file)) { pr_warn("Error: %s: %s: failed to create debugfs file %s\n", - current->comm, __func__, fl->debug_buf); + cur_comm, __func__, fl->debug_buf); fl->debugfs_file = NULL; kfree(fl->debug_buf); fl->debug_buf = NULL; diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index e66ea8953792..b1ffb96bbe52 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c @@ -1302,7 +1302,8 @@ static int __init devfreq_init(void) return PTR_ERR(devfreq_class); } - devfreq_wq = create_freezable_workqueue("devfreq_wq"); + devfreq_wq = alloc_workqueue("devfreq_wq", WQ_HIGHPRI | WQ_FREEZABLE | + WQ_UNBOUND | WQ_MEM_RECLAIM, 1); if (!devfreq_wq) { class_destroy(devfreq_class); pr_err("%s: couldn't create workqueue\n", __FILE__); diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c index bf65e634590b..3bbce058bd4b 100644 --- a/drivers/dma-buf/sync_file.c +++ b/drivers/dma-buf/sync_file.c @@ -124,36 +124,6 @@ struct dma_fence *sync_file_get_fence(int fd) } EXPORT_SYMBOL(sync_file_get_fence); -/** - * sync_file_get_name - get the name of the sync_file - * @sync_file: sync_file to get the fence from - * @buf: destination buffer to copy sync_file name into - * @len: available size of destination buffer. 
- * - * Each sync_file may have a name assigned either by the user (when merging - * sync_files together) or created from the fence it contains. In the latter - * case construction of the name is deferred until use, and so requires - * sync_file_get_name(). - * - * Returns: a string representing the name. - */ -char *sync_file_get_name(struct sync_file *sync_file, char *buf, int len) -{ - if (sync_file->user_name[0]) { - strlcpy(buf, sync_file->user_name, len); - } else { - struct dma_fence *fence = sync_file->fence; - - snprintf(buf, len, "%s-%s%llu-%d", - fence->ops->get_driver_name(fence), - fence->ops->get_timeline_name(fence), - fence->context, - fence->seqno); - } - - return buf; -} - static int sync_file_set_fence(struct sync_file *sync_file, struct dma_fence **fences, int num_fences) { @@ -216,7 +186,7 @@ static void add_fence(struct dma_fence **fences, * @a and @b. @a and @b remain valid, independent sync_file. Returns the * new merged sync_file or NULL in case of error. */ -static struct sync_file *sync_file_merge(const char *name, struct sync_file *a, +static struct sync_file *sync_file_merge(struct sync_file *a, struct sync_file *b) { struct sync_file *sync_file; @@ -291,7 +261,6 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a, goto err; } - strlcpy(sync_file->user_name, name, sizeof(sync_file->user_name)); return sync_file; err: @@ -335,11 +304,14 @@ static long sync_file_ioctl_merge(struct sync_file *sync_file, int err; struct sync_file *fence2, *fence3; struct sync_merge_data data; + size_t len; if (fd < 0) return fd; - if (copy_from_user(&data, (void __user *)arg, sizeof(data))) { + arg += offsetof(typeof(data), fd2); + len = sizeof(data) - offsetof(typeof(data), fd2); + if (copy_from_user(&data.fd2, (void __user *)arg, len)) { err = -EFAULT; goto err_put_fd; } @@ -355,15 +327,14 @@ static long sync_file_ioctl_merge(struct sync_file *sync_file, goto err_put_fd; } - data.name[sizeof(data.name) - 1] = '\0'; - fence3 = sync_file_merge(data.name, sync_file, fence2); + fence3 = sync_file_merge(sync_file, fence2); if (!fence3) { err = -ENOMEM; goto err_put_fence2; } data.fence = fd; - if (copy_to_user((void __user *)arg, &data, sizeof(data))) { + if (copy_to_user((void __user *)arg, &data.fd2, len)) { err = -EFAULT; goto err_put_fence3; } @@ -386,11 +357,6 @@ err_put_fd: static int sync_fill_fence_info(struct dma_fence *fence, struct sync_fence_info *info) { - strlcpy(info->obj_name, fence->ops->get_timeline_name(fence), - sizeof(info->obj_name)); - strlcpy(info->driver_name, fence->ops->get_driver_name(fence), - sizeof(info->driver_name)); - info->status = dma_fence_get_status(fence); while (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) && !test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags)) @@ -407,12 +373,13 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file, unsigned long arg) { struct sync_file_info info; - struct sync_fence_info *fence_info = NULL; struct dma_fence **fences; - __u32 size; - int num_fences, ret, i; + size_t len, offset; + int num_fences, i; - if (copy_from_user(&info, (void __user *)arg, sizeof(info))) + arg += offsetof(typeof(info), status); + len = sizeof(info) - offsetof(typeof(info), status); + if (copy_from_user(&info.status, (void __user *)arg, len)) return -EFAULT; if (info.flags || info.pad) @@ -436,35 +403,31 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file, if (info.num_fences < num_fences) return -EINVAL; - size = num_fences * sizeof(*fence_info); - fence_info = 
kzalloc(size, GFP_KERNEL); - if (!fence_info) - return -ENOMEM; - + offset = offsetof(struct sync_fence_info, status); for (i = 0; i < num_fences; i++) { - int status = sync_fill_fence_info(fences[i], &fence_info[i]); + struct { + __s32 status; + __u32 flags; + __u64 timestamp_ns; + } fence_info; + struct sync_fence_info *finfo = (void *)&fence_info - offset; + int status = sync_fill_fence_info(fences[i], finfo); + u64 dest; + + /* Don't leak kernel memory to userspace via finfo->flags */ + finfo->flags = 0; info.status = info.status <= 0 ? info.status : status; - } - - if (copy_to_user(u64_to_user_ptr(info.sync_fence_info), fence_info, - size)) { - ret = -EFAULT; - goto out; + dest = info.sync_fence_info + i * sizeof(*finfo) + offset; + if (copy_to_user(u64_to_user_ptr(dest), &fence_info, + sizeof(fence_info))) + return -EFAULT; } no_fences: - sync_file_get_name(sync_file, info.name, sizeof(info.name)); info.num_fences = num_fences; - - if (copy_to_user((void __user *)arg, &info, sizeof(info))) - ret = -EFAULT; - else - ret = 0; - -out: - kfree(fence_info); - - return ret; + if (copy_to_user((void __user *)arg, &info.status, len)) + return -EFAULT; + return 0; } static long sync_file_ioctl(struct file *file, unsigned int cmd, diff --git a/drivers/dma/dw/Kconfig b/drivers/dma/dw/Kconfig index 04b9728c1d26..070860ec0ef1 100644 --- a/drivers/dma/dw/Kconfig +++ b/drivers/dma/dw/Kconfig @@ -8,6 +8,7 @@ config DW_DMAC_CORE config DW_DMAC tristate "Synopsys DesignWare AHB DMA platform driver" + depends on HAS_IOMEM select DW_DMAC_CORE help Support the Synopsys DesignWare AHB DMA controller. This @@ -16,6 +17,7 @@ config DW_DMAC config DW_DMAC_PCI tristate "Synopsys DesignWare AHB DMA PCI driver" depends on PCI + depends on HAS_IOMEM select DW_DMAC_CORE help Support the Synopsys DesignWare AHB DMA controller on the diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c index 243c213e89e6..b1d1432b1acd 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c @@ -980,36 +980,25 @@ error: return rc; } -static int dsi_ctrl_copy_and_pad_cmd(struct dsi_ctrl *dsi_ctrl, - const struct mipi_dsi_packet *packet, - u8 **buffer, - u32 *size) +static int dsi_ctrl_copy_and_pad_cmd(const struct mipi_dsi_packet *packet, + u8 *buf, size_t len) { int rc = 0; - u8 *buf = NULL; - u32 len, i; u8 cmd_type = 0; - len = packet->size; - len += 0x3; len &= ~0x03; /* Align to 32 bits */ - - buf = devm_kzalloc(&dsi_ctrl->pdev->dev, len * sizeof(u8), GFP_KERNEL); - if (!buf) - return -ENOMEM; + if (unlikely(len < packet->size)) + return -EINVAL; - for (i = 0; i < len; i++) { - if (i >= packet->size) - buf[i] = 0xFF; - else if (i < sizeof(packet->header)) - buf[i] = packet->header[i]; - else - buf[i] = packet->payload[i - sizeof(packet->header)]; - } + memcpy(buf, packet->header, sizeof(packet->header)); + if (packet->payload_length) + memcpy(buf + sizeof(packet->header), packet->payload, + packet->payload_length); + if (packet->size < len) + memset(buf + packet->size, 0xFF, len - packet->size); if (packet->payload_length > 0) buf[3] |= BIT(6); - /* send embedded BTA for read commands */ cmd_type = buf[2] & 0x3f; if ((cmd_type == MIPI_DSI_DCS_READ) || @@ -1018,9 +1007,6 @@ static int dsi_ctrl_copy_and_pad_cmd(struct dsi_ctrl *dsi_ctrl, (cmd_type == MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM)) buf[3] |= BIT(5); - *buffer = buf; - *size = len; - return rc; } @@ -1141,11 +1127,13 @@ int dsi_message_validate_tx_mode(struct dsi_ctrl *dsi_ctrl, 
pr_err("Cannot transfer,size is greater than 4096\n"); return -ENOTSUPP; } - } + } else if (*flags & DSI_CTRL_CMD_FETCH_MEMORY) { + const size_t transfer_size = dsi_ctrl->cmd_len + cmd_len + 4; - if (*flags & DSI_CTRL_CMD_FETCH_MEMORY) { - if ((dsi_ctrl->cmd_len + cmd_len + 4) > SZ_4K) { - pr_err("Cannot transfer,size is greater than 4096\n"); + if (transfer_size > DSI_EMBEDDED_MODE_DMA_MAX_SIZE_BYTES) { + pr_err("Cannot transfer, size: %zu is greater than %d\n", + transfer_size, + DSI_EMBEDDED_MODE_DMA_MAX_SIZE_BYTES); return -ENOTSUPP; } } @@ -1162,9 +1150,9 @@ static int dsi_message_tx(struct dsi_ctrl *dsi_ctrl, struct dsi_ctrl_cmd_dma_fifo_info cmd; struct dsi_ctrl_cmd_dma_info cmd_mem; u32 hw_flags = 0; - u32 length = 0; + u32 length; u8 *buffer = NULL; - u32 cnt = 0, line_no = 0x1; + u32 line_no = 0x1; u8 *cmdbuf; struct dsi_mode_info *timing; struct dsi_ctrl_hw_ops dsi_hw_ops = dsi_ctrl->hw.ops; @@ -1180,6 +1168,10 @@ static int dsi_message_tx(struct dsi_ctrl *dsi_ctrl, goto error; } + pr_debug("cmd tx type=%02x cmd=%02x len=%d last=%d\n", msg->type, + msg->tx_len ? *((u8 *)msg->tx_buf) : 0, msg->tx_len, + (msg->flags & MIPI_DSI_MSG_LASTCOMMAND) != 0); + if (flags & DSI_CTRL_CMD_NON_EMBEDDED_MODE) { cmd_mem.offset = dsi_ctrl->cmd_buffer_iova; cmd_mem.en_broadcast = (flags & DSI_CTRL_CMD_BROADCAST) ? @@ -1205,20 +1197,22 @@ static int dsi_message_tx(struct dsi_ctrl *dsi_ctrl, goto error; } - rc = dsi_ctrl_copy_and_pad_cmd(dsi_ctrl, - &packet, - &buffer, - &length); - if (rc) { - pr_err("[%s] failed to copy message, rc=%d\n", - dsi_ctrl->name, rc); - goto error; - } + length = ALIGN(packet.size, 4); if ((msg->flags & MIPI_DSI_MSG_LASTCOMMAND)) - buffer[3] |= BIT(7);//set the last cmd bit in header. + packet.header[3] |= BIT(7);//set the last cmd bit in header. if (flags & DSI_CTRL_CMD_FETCH_MEMORY) { + msm_gem_sync(dsi_ctrl->tx_cmd_buf); + cmdbuf = dsi_ctrl->vaddr + dsi_ctrl->cmd_len; + + rc = dsi_ctrl_copy_and_pad_cmd(&packet, cmdbuf, length); + if (rc) { + pr_err("[%s] failed to copy message, rc=%d\n", + dsi_ctrl->name, rc); + goto error; + } + /* Embedded mode config is selected */ cmd_mem.offset = dsi_ctrl->cmd_buffer_iova; cmd_mem.en_broadcast = (flags & DSI_CTRL_CMD_BROADCAST) ? @@ -1228,12 +1222,6 @@ static int dsi_message_tx(struct dsi_ctrl *dsi_ctrl, cmd_mem.use_lpm = (msg->flags & MIPI_DSI_MSG_USE_LPM) ? true : false; - cmdbuf = (u8 *)(dsi_ctrl->vaddr); - - msm_gem_sync(dsi_ctrl->tx_cmd_buf); - for (cnt = 0; cnt < length; cnt++) - cmdbuf[dsi_ctrl->cmd_len + cnt] = buffer[cnt]; - dsi_ctrl->cmd_len += length; if (!(msg->flags & MIPI_DSI_MSG_LASTCOMMAND)) { @@ -1244,6 +1232,20 @@ static int dsi_message_tx(struct dsi_ctrl *dsi_ctrl, } } else if (flags & DSI_CTRL_CMD_FIFO_STORE) { + buffer = devm_kzalloc(&dsi_ctrl->pdev->dev, length, + GFP_KERNEL); + if (!buffer) { + rc = -ENOMEM; + goto error; + } + + rc = dsi_ctrl_copy_and_pad_cmd(&packet, buffer, length); + if (rc) { + pr_err("[%s] failed to copy message, rc=%d\n", + dsi_ctrl->name, rc); + goto error; + } + cmd.command = (u32 *)buffer; cmd.size = length; cmd.en_broadcast = (flags & DSI_CTRL_CMD_BROADCAST) ? 
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c index c1e6cb5b4718..c19f9e22e75d 100644 --- a/drivers/gpu/drm/msm/msm_atomic.c +++ b/drivers/gpu/drm/msm/msm_atomic.c @@ -33,7 +33,10 @@ struct msm_commit { uint32_t crtc_mask; uint32_t plane_mask; bool nonblock; - struct kthread_work commit_work; + union { + struct kthread_work commit_work; + struct work_struct clean_work; + }; }; static BLOCKING_NOTIFIER_HEAD(msm_drm_notifier_list); @@ -117,7 +120,6 @@ static void end_atomic(struct msm_drm_private *priv, uint32_t crtc_mask, static void commit_destroy(struct msm_commit *c) { - end_atomic(c->dev->dev_private, c->crtc_mask, c->plane_mask); if (c->nonblock) kfree(c); } @@ -528,6 +530,16 @@ static void msm_atomic_helper_commit_modeset_enables(struct drm_device *dev, SDE_ATRACE_END("msm_enable"); } +static void complete_commit_cleanup(struct work_struct *work) +{ + struct msm_commit *c = container_of(work, typeof(*c), clean_work); + struct drm_atomic_state *state = c->state; + + drm_atomic_state_put(state); + + commit_destroy(c); +} + /* The (potentially) asynchronous part of the commit. At this point * nothing can fail short of armageddon. */ @@ -567,25 +579,24 @@ static void complete_commit(struct msm_commit *c) kms->funcs->complete_commit(kms, state); - drm_atomic_state_put(state); - - commit_destroy(c); + end_atomic(priv, c->crtc_mask, c->plane_mask); } static void _msm_drm_commit_work_cb(struct kthread_work *work) { - struct msm_commit *commit = NULL; - - if (!work) { - DRM_ERROR("%s: Invalid commit work data!\n", __func__); - return; - } - - commit = container_of(work, struct msm_commit, commit_work); + struct msm_commit *c = container_of(work, typeof(*c), commit_work); SDE_ATRACE_BEGIN("complete_commit"); - complete_commit(commit); + complete_commit(c); SDE_ATRACE_END("complete_commit"); + + if (c->nonblock) { + /* Offload the cleanup onto little CPUs (an unbound wq) */ + INIT_WORK(&c->clean_work, complete_commit_cleanup); + queue_work(system_unbound_wq, &c->clean_work); + } else { + complete_commit_cleanup(&c->clean_work); + } } static struct msm_commit *commit_init(struct drm_atomic_state *state, @@ -656,6 +667,7 @@ static void msm_atomic_commit_dispatch(struct drm_device *dev, */ DRM_ERROR("failed to dispatch commit to any CRTC\n"); complete_commit(commit); + complete_commit_cleanup(&commit->clean_work); } else if (!nonblock) { kthread_flush_work(&commit->commit_work); } diff --git a/drivers/gpu/drm/msm/sde/sde_core_irq.c b/drivers/gpu/drm/msm/sde/sde_core_irq.c index b082778a4590..c250fbb6d402 100644 --- a/drivers/gpu/drm/msm/sde/sde_core_irq.c +++ b/drivers/gpu/drm/msm/sde/sde_core_irq.c @@ -662,15 +662,6 @@ int sde_core_irq_domain_fini(struct sde_kms *sde_kms) irqreturn_t sde_core_irq(struct sde_kms *sde_kms) { /* - * Read interrupt status from all sources. Interrupt status are - * stored within hw_intr. - * Function will also clear the interrupt status after reading. - * Individual interrupt status bit will only get stored if it - * is enabled. - */ - sde_kms->hw_intr->ops.get_interrupt_statuses(sde_kms->hw_intr); - - /* * Dispatch to HW driver to handle interrupt lookup that is being * fired. When matching interrupt is located, HW driver will call to * sde_core_irq_callback_handler with the irq_idx from the lookup table. 
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c index 77dd68db9b4b..b5e41c826351 100644 --- a/drivers/gpu/drm/msm/sde/sde_crtc.c +++ b/drivers/gpu/drm/msm/sde/sde_crtc.c @@ -2251,9 +2251,11 @@ static void _sde_crtc_blend_setup(struct drm_crtc *crtc, mixer[i].hw_ctl); /* clear dim_layer settings */ - lm = mixer[i].hw_lm; - if (lm->ops.clear_dim_layer) - lm->ops.clear_dim_layer(lm); + if (sde_crtc_state->num_dim_layers > 0) { + lm = mixer[i].hw_lm; + if (lm->ops.clear_dim_layer) + lm->ops.clear_dim_layer(lm); + } } _sde_crtc_swap_mixers_for_right_partial_update(crtc); @@ -4395,8 +4397,6 @@ void sde_crtc_commit_kickoff(struct drm_crtc *crtc, if (_sde_crtc_commit_kickoff_rot(crtc, cstate)) is_error = true; - sde_vbif_clear_errors(sde_kms); - if (is_error) { _sde_crtc_remove_pipe_flush(crtc); _sde_crtc_blend_setup(crtc, old_state, false); diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c index 1e50e8ff76ee..655a48a225eb 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c @@ -663,9 +663,9 @@ static int _sde_encoder_phys_cmd_poll_write_pointer_started( } if (phys_enc->has_intf_te) - ret = hw_intf->ops.get_vsync_info(hw_intf, &info); + ret = hw_intf->ops.get_vsync_info(hw_intf, &info, false); else - ret = hw_pp->ops.get_vsync_info(hw_pp, &info); + ret = hw_pp->ops.get_vsync_info(hw_pp, &info, false); if (ret) return ret; @@ -714,13 +714,13 @@ static bool _sde_encoder_phys_cmd_is_ongoing_pptx( if (!hw_intf || !hw_intf->ops.get_vsync_info) return false; - hw_intf->ops.get_vsync_info(hw_intf, &info); + hw_intf->ops.get_vsync_info(hw_intf, &info, true); } else { hw_pp = phys_enc->hw_pp; if (!hw_pp || !hw_pp->ops.get_vsync_info) return false; - hw_pp->ops.get_vsync_info(hw_pp, &info); + hw_pp->ops.get_vsync_info(hw_pp, &info, true); } SDE_EVT32(DRMID(phys_enc->parent), @@ -1173,12 +1173,20 @@ static void sde_encoder_phys_cmd_enable(struct sde_encoder_phys *phys_enc) static bool sde_encoder_phys_cmd_is_autorefresh_enabled( struct sde_encoder_phys *phys_enc) { + struct sde_encoder_phys_cmd *cmd_enc; struct sde_hw_pingpong *hw_pp; struct sde_hw_intf *hw_intf; struct sde_hw_autorefresh cfg; int ret; - if (!phys_enc || !phys_enc->hw_pp || !phys_enc->hw_intf) + if (!phys_enc) + return 0; + + cmd_enc = to_sde_encoder_phys_cmd(phys_enc); + if (!cmd_enc->autorefresh.cfg.enable) + return 0; + + if (!phys_enc->hw_pp || !phys_enc->hw_intf) return 0; if (!sde_encoder_phys_cmd_is_master(phys_enc)) @@ -1271,14 +1279,14 @@ static int sde_encoder_phys_cmd_get_write_line_count( if (!hw_intf->ops.get_vsync_info) return -EINVAL; - if (hw_intf->ops.get_vsync_info(hw_intf, &info)) + if (hw_intf->ops.get_vsync_info(hw_intf, &info, true)) return -EINVAL; } else { hw_pp = phys_enc->hw_pp; if (!hw_pp->ops.get_vsync_info) return -EINVAL; - if (hw_pp->ops.get_vsync_info(hw_pp, &info)) + if (hw_pp->ops.get_vsync_info(hw_pp, &info, true)) return -EINVAL; } diff --git a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c index 0fc778e35f88..038b1fc53907 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c @@ -886,8 +886,6 @@ static void sde_hw_intr_dispatch_irq(struct sde_hw_intr *intr, */ spin_lock_irqsave(&intr->irq_lock, irq_flags); for (reg_idx = 0; reg_idx < intr->sde_irq_size; reg_idx++) { - irq_status = intr->save_irq_status[reg_idx]; - /* get the global offset in 'sde_irq_map' */ 
sde_irq_idx = intr->sde_irq_tbl[reg_idx].sde_irq_idx; if (sde_irq_idx < 0) @@ -904,6 +902,9 @@ static void sde_hw_intr_dispatch_irq(struct sde_hw_intr *intr, end_idx > ARRAY_SIZE(sde_irq_map)) continue; + irq_status = SDE_REG_READ(&intr->hw, + intr->sde_irq_tbl[reg_idx].status_off); + /* * Search through matching intr status from irq map. * start_idx and end_idx defined the search range in @@ -924,8 +925,9 @@ static void sde_hw_intr_dispatch_irq(struct sde_hw_intr *intr, if (cbfunc) cbfunc(arg, irq_idx); else - intr->ops.clear_intr_status_nolock( - intr, irq_idx); + SDE_REG_WRITE(&intr->hw, + intr->sde_irq_tbl[reg_idx].clr_off, + sde_irq_map[irq_idx].irq_mask); /* * When callback finish, clear the irq_status @@ -977,9 +979,6 @@ static int sde_hw_intr_enable_irq(struct sde_hw_intr *intr, int irq_idx) /* Enabling interrupts with the new mask */ SDE_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask); - /* ensure register write goes through */ - wmb(); - intr->cache_irq_mask[reg_idx] = cache_irq_mask; } spin_unlock_irqrestore(&intr->irq_lock, irq_flags); @@ -1115,40 +1114,6 @@ static int sde_hw_intr_get_interrupt_sources(struct sde_hw_intr *intr, return 0; } -static void sde_hw_intr_get_interrupt_statuses(struct sde_hw_intr *intr) -{ - int i; - u32 enable_mask; - unsigned long irq_flags; - - if (!intr) - return; - - spin_lock_irqsave(&intr->irq_lock, irq_flags); - for (i = 0; i < intr->sde_irq_size; i++) { - /* Read interrupt status */ - intr->save_irq_status[i] = SDE_REG_READ(&intr->hw, - intr->sde_irq_tbl[i].status_off); - - /* Read enable mask */ - enable_mask = SDE_REG_READ(&intr->hw, - intr->sde_irq_tbl[i].en_off); - - /* and clear the interrupt */ - if (intr->save_irq_status[i]) - SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[i].clr_off, - intr->save_irq_status[i]); - - /* Finally update IRQ status based on enable mask */ - intr->save_irq_status[i] &= enable_mask; - } - - /* ensure register writes go through */ - wmb(); - - spin_unlock_irqrestore(&intr->irq_lock, irq_flags); -} - static void sde_hw_intr_clear_intr_status_force_mask(struct sde_hw_intr *intr, int irq_idx, u32 irq_mask) { @@ -1200,12 +1165,20 @@ static void sde_hw_intr_clear_interrupt_status(struct sde_hw_intr *intr, int irq_idx) { unsigned long irq_flags; + int reg_idx; if (!intr) return; + reg_idx = sde_irq_map[irq_idx].reg_idx; + if (reg_idx < 0 || reg_idx > intr->sde_irq_size) { + pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx); + return; + } + spin_lock_irqsave(&intr->irq_lock, irq_flags); - sde_hw_intr_clear_intr_status_nolock(intr, irq_idx); + SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[reg_idx].clr_off, + sde_irq_map[irq_idx].irq_mask); spin_unlock_irqrestore(&intr->irq_lock, irq_flags); } @@ -1272,9 +1245,6 @@ static u32 sde_hw_intr_get_interrupt_status(struct sde_hw_intr *intr, SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[reg_idx].clr_off, intr_status); - /* ensure register writes go through */ - wmb(); - spin_unlock_irqrestore(&intr->irq_lock, irq_flags); return intr_status; @@ -1321,7 +1291,6 @@ static void __setup_intr_ops(struct sde_hw_intr_ops *ops) ops->disable_all_irqs = sde_hw_intr_disable_irqs; ops->get_valid_interrupts = sde_hw_intr_get_valid_interrupts; ops->get_interrupt_sources = sde_hw_intr_get_interrupt_sources; - ops->get_interrupt_statuses = sde_hw_intr_get_interrupt_statuses; ops->clear_interrupt_status = sde_hw_intr_clear_interrupt_status; ops->clear_intr_status_nolock = sde_hw_intr_clear_intr_status_nolock; ops->clear_intr_status_force_mask = @@ -1469,7 +1438,6 @@ void 
sde_hw_intr_destroy(struct sde_hw_intr *intr) if (intr) { kfree(intr->sde_irq_tbl); kfree(intr->cache_irq_mask); - kfree(intr->save_irq_status); kfree(intr); } } @@ -1578,13 +1546,6 @@ struct sde_hw_intr *sde_hw_intr_init(void __iomem *addr, goto exit; } - intr->save_irq_status = kcalloc(intr->sde_irq_size, sizeof(u32), - GFP_KERNEL); - if (intr->save_irq_status == NULL) { - ret = -ENOMEM; - goto exit; - } - spin_lock_init(&intr->irq_lock); return intr; @@ -1592,7 +1553,6 @@ struct sde_hw_intr *sde_hw_intr_init(void __iomem *addr, exit: kfree(intr->sde_irq_tbl); kfree(intr->cache_irq_mask); - kfree(intr->save_irq_status); kfree(intr); return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.h b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.h index 1d5a8427d0aa..e2de0e33bd59 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.h +++ b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.h @@ -189,14 +189,6 @@ struct sde_hw_intr_ops { void *arg); /** - * get_interrupt_statuses - Gets and store value from all interrupt - * status registers that are currently fired. - * @intr: HW interrupt handle - */ - void (*get_interrupt_statuses)( - struct sde_hw_intr *intr); - - /** * clear_interrupt_status - Clears HW interrupt status based on given * lookup IRQ index. * @intr: HW interrupt handle @@ -292,7 +284,6 @@ struct sde_hw_intr_ops { * @hw: virtual address mapping * @ops: function pointer mapping for IRQ handling * @cache_irq_mask: array of IRQ enable masks reg storage created during init - * @save_irq_status: array of IRQ status reg storage created during init * @irq_idx_tbl_size: total number of irq_idx mapped in the hw_interrupts * @irq_lock: spinlock for accessing IRQ resources * @sde_irq_size: total number of elements of the sde_irq_tbl @@ -303,7 +294,6 @@ struct sde_hw_intr { struct sde_hw_blk_reg_map hw; struct sde_hw_intr_ops ops; u32 *cache_irq_mask; - u32 *save_irq_status; u32 irq_idx_tbl_size; u32 sde_irq_size; struct sde_intr_reg *sde_irq_tbl; diff --git a/drivers/gpu/drm/msm/sde/sde_hw_intf.c b/drivers/gpu/drm/msm/sde/sde_hw_intf.c index 21b671164786..ba7510ab310d 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_intf.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_intf.c @@ -613,7 +613,7 @@ static int sde_hw_intf_connect_external_te(struct sde_hw_intf *intf, } static int sde_hw_intf_get_vsync_info(struct sde_hw_intf *intf, - struct sde_hw_pp_vsync_info *info) + struct sde_hw_pp_vsync_info *info, bool wr_ptr_only) { struct sde_hw_blk_reg_map *c = &intf->hw; u32 val; @@ -623,12 +623,14 @@ static int sde_hw_intf_get_vsync_info(struct sde_hw_intf *intf, c = &intf->hw; - val = SDE_REG_READ(c, INTF_TEAR_VSYNC_INIT_VAL); - info->rd_ptr_init_val = val & 0xffff; + if (!wr_ptr_only) { + val = SDE_REG_READ(c, INTF_TEAR_VSYNC_INIT_VAL); + info->rd_ptr_init_val = val & 0xffff; - val = SDE_REG_READ(c, INTF_TEAR_INT_COUNT_VAL); - info->rd_ptr_frame_count = (val & 0xffff0000) >> 16; - info->rd_ptr_line_count = val & 0xffff; + val = SDE_REG_READ(c, INTF_TEAR_INT_COUNT_VAL); + info->rd_ptr_frame_count = (val & 0xffff0000) >> 16; + info->rd_ptr_line_count = val & 0xffff; + } val = SDE_REG_READ(c, INTF_TEAR_LINE_COUNT); info->wr_ptr_line_count = val & 0xffff; diff --git a/drivers/gpu/drm/msm/sde/sde_hw_intf.h b/drivers/gpu/drm/msm/sde/sde_hw_intf.h index 413aac279619..ec38000c1970 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_intf.h +++ b/drivers/gpu/drm/msm/sde/sde_hw_intf.h @@ -141,7 +141,7 @@ struct sde_hw_intf_ops { * line_count */ int (*get_vsync_info)(struct sde_hw_intf *intf, - struct 
sde_hw_pp_vsync_info *info); + struct sde_hw_pp_vsync_info *info, bool wr_ptr_only); /** * configure and enable the autorefresh config diff --git a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c index 64c1a90f5952..e03eb3d8391f 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c @@ -404,7 +404,7 @@ static int sde_hw_pp_connect_external_te(struct sde_hw_pingpong *pp, } static int sde_hw_pp_get_vsync_info(struct sde_hw_pingpong *pp, - struct sde_hw_pp_vsync_info *info) + struct sde_hw_pp_vsync_info *info, bool wr_ptr_only) { struct sde_hw_blk_reg_map *c; u32 val; @@ -413,12 +413,14 @@ static int sde_hw_pp_get_vsync_info(struct sde_hw_pingpong *pp, return -EINVAL; c = &pp->hw; - val = SDE_REG_READ(c, PP_VSYNC_INIT_VAL); - info->rd_ptr_init_val = val & 0xffff; + if (!wr_ptr_only) { + val = SDE_REG_READ(c, PP_VSYNC_INIT_VAL); + info->rd_ptr_init_val = val & 0xffff; - val = SDE_REG_READ(c, PP_INT_COUNT_VAL); - info->rd_ptr_frame_count = (val & 0xffff0000) >> 16; - info->rd_ptr_line_count = val & 0xffff; + val = SDE_REG_READ(c, PP_INT_COUNT_VAL); + info->rd_ptr_frame_count = (val & 0xffff0000) >> 16; + info->rd_ptr_line_count = val & 0xffff; + } val = SDE_REG_READ(c, PP_LINE_COUNT); info->wr_ptr_line_count = val & 0xffff; diff --git a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h index 9cc755237c68..e83c26bf0eb0 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h +++ b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h @@ -72,7 +72,7 @@ struct sde_hw_pingpong_ops { * line_count */ int (*get_vsync_info)(struct sde_hw_pingpong *pp, - struct sde_hw_pp_vsync_info *info); + struct sde_hw_pp_vsync_info *info, bool wr_ptr_only); /** * configure and enable the autorefresh config diff --git a/drivers/gpu/msm/Makefile b/drivers/gpu/msm/Makefile index b1f075ce037d..623feb5a6cdf 100644 --- a/drivers/gpu/msm/Makefile +++ b/drivers/gpu/msm/Makefile @@ -30,16 +30,8 @@ msm_adreno-y += \ adreno_snapshot.o \ adreno_coresight.o \ adreno_trace.o \ - adreno_a3xx.o \ - adreno_a4xx.o \ - adreno_a5xx.o \ adreno_a6xx.o \ - adreno_a3xx_snapshot.o \ - adreno_a4xx_snapshot.o \ - adreno_a5xx_snapshot.o \ adreno_a6xx_snapshot.o \ - adreno_a4xx_preempt.o \ - adreno_a5xx_preempt.o \ adreno_a6xx_preempt.o \ adreno_a6xx_gmu.o \ adreno_a6xx_rgmu.o \ diff --git a/drivers/gpu/msm/adreno-gpulist.h b/drivers/gpu/msm/adreno-gpulist.h index 1a7df11bda16..f2adcb7fd1d5 100644 --- a/drivers/gpu/msm/adreno-gpulist.h +++ b/drivers/gpu/msm/adreno-gpulist.h @@ -14,6 +14,7 @@ #define ANY_ID (~0) static const struct adreno_gpu_core adreno_gpulist[] = { +#if 0 { .gpurev = ADRENO_REV_A306, .core = 3, @@ -335,6 +336,7 @@ static const struct adreno_gpu_core adreno_gpulist[] = { .num_protected_regs = 0x20, .busy_mask = 0xFFFFFFFE, }, +#endif { .gpurev = ADRENO_REV_A630, .core = 6, diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c index e4021fd73e4a..3b1ed485e80f 100644 --- a/drivers/gpu/msm/adreno.c +++ b/drivers/gpu/msm/adreno.c @@ -1686,13 +1686,6 @@ int adreno_set_unsecured_mode(struct adreno_device *adreno_dev, if (!adreno_is_a5xx(adreno_dev) && !adreno_is_a6xx(adreno_dev)) return -EINVAL; - if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_CRITICAL_PACKETS) && - adreno_is_a5xx(adreno_dev)) { - ret = a5xx_critical_packet_submit(adreno_dev, rb); - if (ret) - return ret; - } - /* GPU comes up in secured mode, make it unsecured by default */ if (adreno_dev->zap_handle_ptr) ret = adreno_switch_to_unsecure_mode(adreno_dev, 
rb); diff --git a/drivers/gpu/msm/adreno_cp_parser.h b/drivers/gpu/msm/adreno_cp_parser.h index 1fa46c147c3c..fa6a43ad33d6 100644 --- a/drivers/gpu/msm/adreno_cp_parser.h +++ b/drivers/gpu/msm/adreno_cp_parser.h @@ -134,15 +134,7 @@ static inline void adreno_ib_init_ib_obj(uint64_t gpuaddr, static inline int adreno_cp_parser_getreg(struct adreno_device *adreno_dev, enum adreno_cp_addr_regs reg_enum) { - if (reg_enum == ADRENO_CP_ADDR_MAX) - return -EEXIST; - - if (adreno_is_a3xx(adreno_dev)) - return a3xx_cp_addr_regs[reg_enum]; - else if (adreno_is_a4xx(adreno_dev)) - return a4xx_cp_addr_regs[reg_enum]; - else - return -EEXIST; + return -EEXIST; } /* @@ -160,19 +152,6 @@ static inline int adreno_cp_parser_regindex(struct adreno_device *adreno_dev, enum adreno_cp_addr_regs start, enum adreno_cp_addr_regs end) { - int i; - const unsigned int *regs; - - if (adreno_is_a4xx(adreno_dev)) - regs = a4xx_cp_addr_regs; - else if (adreno_is_a3xx(adreno_dev)) - regs = a3xx_cp_addr_regs; - else - return -EEXIST; - - for (i = start; i <= end && i < ADRENO_CP_ADDR_MAX; i++) - if (regs[i] == offset) - return i; return -EEXIST; } diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c index 0ced8a2450e5..b296299a6afd 100644 --- a/drivers/gpu/msm/kgsl.c +++ b/drivers/gpu/msm/kgsl.c @@ -5154,7 +5154,7 @@ static void kgsl_core_exit(void) static int __init kgsl_core_init(void) { int result = 0; - struct sched_param param = { .sched_priority = 2 }; + struct sched_param param = { .sched_priority = 16 }; place_marker("M - DRIVER KGSL Init"); diff --git a/drivers/hid/hid-alps.c b/drivers/hid/hid-alps.c index ed9c0ea5b026..1bc6ad0339d2 100644 --- a/drivers/hid/hid-alps.c +++ b/drivers/hid/hid-alps.c @@ -429,6 +429,7 @@ static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi) ret = input_register_device(data->input2); if (ret) { input_free_device(input2); + ret = -ENOENT; goto exit; } } diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index 8c0718b3754e..df89f490e552 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -2391,7 +2391,7 @@ static void wacom_wac_finger_slot(struct wacom_wac *wacom_wac, !wacom_wac->shared->is_touch_on) { if (!wacom_wac->shared->touch_down) return; - prox = 0; + prox = false; } wacom_wac->hid_data.num_received++; @@ -3346,8 +3346,6 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev, { struct wacom_features *features = &wacom_wac->features; - input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); - if (!(features->device_type & WACOM_DEVICETYPE_PEN)) return -ENODEV; @@ -3360,6 +3358,7 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev, /* setup has already been done */ return 0; + input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); __set_bit(BTN_TOUCH, input_dev->keybit); __set_bit(ABS_MISC, input_dev->absbit); @@ -3508,8 +3507,6 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev, { struct wacom_features *features = &wacom_wac->features; - input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); - if (!(features->device_type & WACOM_DEVICETYPE_TOUCH)) return -ENODEV; @@ -3522,6 +3519,7 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev, /* setup has already been done */ return 0; + input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); __set_bit(BTN_TOUCH, input_dev->keybit); if (features->touch_max == 1) { diff --git a/drivers/input/keyboard/nspire-keypad.c b/drivers/input/keyboard/nspire-keypad.c index c7f26fa3034c..cf138d836eec 
100644 --- a/drivers/input/keyboard/nspire-keypad.c +++ b/drivers/input/keyboard/nspire-keypad.c @@ -96,9 +96,15 @@ static irqreturn_t nspire_keypad_irq(int irq, void *dev_id) return IRQ_HANDLED; } -static int nspire_keypad_chip_init(struct nspire_keypad *keypad) +static int nspire_keypad_open(struct input_dev *input) { + struct nspire_keypad *keypad = input_get_drvdata(input); unsigned long val = 0, cycles_per_us, delay_cycles, row_delay_cycles; + int error; + + error = clk_prepare_enable(keypad->clk); + if (error) + return error; cycles_per_us = (clk_get_rate(keypad->clk) / 1000000); if (cycles_per_us == 0) @@ -124,30 +130,6 @@ static int nspire_keypad_chip_init(struct nspire_keypad *keypad) keypad->int_mask = 1 << 1; writel(keypad->int_mask, keypad->reg_base + KEYPAD_INTMSK); - /* Disable GPIO interrupts to prevent hanging on touchpad */ - /* Possibly used to detect touchpad events */ - writel(0, keypad->reg_base + KEYPAD_UNKNOWN_INT); - /* Acknowledge existing interrupts */ - writel(~0, keypad->reg_base + KEYPAD_UNKNOWN_INT_STS); - - return 0; -} - -static int nspire_keypad_open(struct input_dev *input) -{ - struct nspire_keypad *keypad = input_get_drvdata(input); - int error; - - error = clk_prepare_enable(keypad->clk); - if (error) - return error; - - error = nspire_keypad_chip_init(keypad); - if (error) { - clk_disable_unprepare(keypad->clk); - return error; - } - return 0; } @@ -155,6 +137,11 @@ static void nspire_keypad_close(struct input_dev *input) { struct nspire_keypad *keypad = input_get_drvdata(input); + /* Disable interrupts */ + writel(0, keypad->reg_base + KEYPAD_INTMSK); + /* Acknowledge existing interrupts */ + writel(~0, keypad->reg_base + KEYPAD_INT); + clk_disable_unprepare(keypad->clk); } @@ -215,6 +202,25 @@ static int nspire_keypad_probe(struct platform_device *pdev) return -ENOMEM; } + error = clk_prepare_enable(keypad->clk); + if (error) { + dev_err(&pdev->dev, "failed to enable clock\n"); + return error; + } + + /* Disable interrupts */ + writel(0, keypad->reg_base + KEYPAD_INTMSK); + /* Acknowledge existing interrupts */ + writel(~0, keypad->reg_base + KEYPAD_INT); + + /* Disable GPIO interrupts to prevent hanging on touchpad */ + /* Possibly used to detect touchpad events */ + writel(0, keypad->reg_base + KEYPAD_UNKNOWN_INT); + /* Acknowledge existing GPIO interrupts */ + writel(~0, keypad->reg_base + KEYPAD_UNKNOWN_INT_STS); + + clk_disable_unprepare(keypad->clk); + input_set_drvdata(input, keypad); input->id.bustype = BUS_HOST; diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index 0463ab79160b..f20e54f41dde 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -592,6 +592,7 @@ static const struct dmi_system_id i8042_dmi_noselftest_table[] = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */ }, + }, { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), DMI_MATCH(DMI_CHASSIS_TYPE, "31"), /* Convertible Notebook */ diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_fe_ver1.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_fe_ver1.c index 6f833b35287e..65f0c77bd217 100644 --- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_fe_ver1.c +++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_fe_ver1.c @@ -356,7 +356,6 @@ static int cam_vfe_fe_reg_dump( struct 
cam_isp_resource_node *fe_res) { struct cam_vfe_mux_fe_data *fe_priv; - struct cam_vfe_soc_private *soc_private; int rc = 0, i; uint32_t val = 0; @@ -370,7 +369,6 @@ static int cam_vfe_fe_reg_dump( return 0; fe_priv = (struct cam_vfe_mux_fe_data *)fe_res->res_priv; - soc_private = fe_priv->soc_info->soc_private; for (i = 0xA3C; i <= 0xA90; i += 4) { val = cam_io_r_mb(fe_priv->mem_base + i); CAM_INFO(CAM_ISP, "offset 0x%x val 0x%x", i, val); @@ -396,14 +394,6 @@ static int cam_vfe_fe_reg_dump( CAM_INFO(CAM_ISP, "offset 0x%x val 0x%x", i, val); } - cam_cpas_reg_read((uint32_t)soc_private->cpas_handle, - CAM_CPAS_REG_CAMNOC, 0x420, true, &val); - CAM_INFO(CAM_ISP, "IFE02_MAXWR_LOW offset 0x420 val 0x%x", val); - - cam_cpas_reg_read((uint32_t)soc_private->cpas_handle, - CAM_CPAS_REG_CAMNOC, 0x820, true, &val); - CAM_INFO(CAM_ISP, "IFE13_MAXWR_LOW offset 0x820 val 0x%x", val); - return rc; } diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_2_hwreg.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_2_hwreg.h index ac113d613fce..be66e39f5425 100644 --- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_2_hwreg.h +++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_2_hwreg.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -21,8 +21,8 @@ struct csiphy_reg_parms_t csiphy_v1_2 = { .mipi_csiphy_glbl_irq_cmd_addr = 0x828, .csiphy_common_array_size = 6, .csiphy_reset_array_size = 5, - .csiphy_2ph_config_array_size = 22, - .csiphy_3ph_config_array_size = 38, + .csiphy_2ph_config_array_size = 23, + .csiphy_3ph_config_array_size = 30, .csiphy_2ph_clock_lane = 0x1, .csiphy_2ph_combo_ck_ln = 0x10, }; @@ -30,8 +30,8 @@ struct csiphy_reg_parms_t csiphy_v1_2 = { struct csiphy_reg_t csiphy_common_reg_1_2[] = { {0x0814, 0xd5, 0x00, CSIPHY_LANE_ENABLE}, {0x0818, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x081C, 0x02, 0x00, CSIPHY_2PH_REGS}, - {0x081C, 0x52, 0x00, CSIPHY_3PH_REGS}, + {0x081C, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0800, 0x03, 0x01, CSIPHY_DEFAULT_PARAMS}, {0x0800, 0x02, 0x00, CSIPHY_2PH_REGS}, {0x0800, 0x0E, 0x00, CSIPHY_3PH_REGS}, }; @@ -64,34 +64,34 @@ csiphy_reg_t csiphy_2ph_v1_2_reg[MAX_LANES][MAX_SETTINGS_PER_LANE] = { {0x0030, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0904, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0910, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0900, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0908, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0900, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0908, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0904, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x00C4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x002C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0034, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0010, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x001C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0014, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0028, 0x00, 0x00, CSIPHY_DNP_PARAMS}, + {0x0028, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x003C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0000, 0x91, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0004, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0020, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0008, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, - {0x000c, 0x00, 0x00, 
CSIPHY_DNP_PARAMS}, {0x0010, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x005C, 0xC0, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0060, 0x0D, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0800, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0000, 0x00, 0x00, CSIPHY_DNP_PARAMS}, }, { {0x0730, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0C84, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0C90, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0C80, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0C88, 0x14, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C88, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0C84, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x07C4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x072C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0734, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0710, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS}, @@ -107,78 +107,83 @@ csiphy_reg_t csiphy_2ph_v1_2_reg[MAX_LANES][MAX_SETTINGS_PER_LANE] = { {0x0710, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0738, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0800, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0000, 0x00, 0x00, CSIPHY_DNP_PARAMS}, + {0x0000, 0x00, 0x00, CSIPHY_DNP_PARAMS}, }, { {0x0230, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0A04, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0A00, 0x0B, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0A08, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A00, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A08, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0A04, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x02C4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x022C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0234, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0210, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x021C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0214, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0228, 0x00, 0x00, CSIPHY_DNP_PARAMS}, + {0x0228, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x023C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0200, 0x91, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0204, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0220, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0208, 0x04, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, - {0x020c, 0x00, 0x00, CSIPHY_DNP_PARAMS}, {0x0210, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0238, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x025C, 0xC0, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0260, 0x0D, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0800, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0000, 0x00, 0x00, CSIPHY_DNP_PARAMS}, }, { {0x0430, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0B04, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0B10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0B00, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0B08, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0B00, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0B08, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0B04, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x04C4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x042C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0434, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0410, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x041C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0414, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0428, 0x00, 0x00, CSIPHY_DNP_PARAMS}, + {0x0428, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x043C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0400, 0x91, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0404, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0420, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0408, 0x04, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, - {0x040c, 0x00, 0x00, CSIPHY_DNP_PARAMS}, {0x0410, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x045C, 0xC0, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0460, 
0x0D, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0800, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0000, 0x00, 0x00, CSIPHY_DNP_PARAMS}, }, { {0x0630, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0C04, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0C10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0C00, 0x0E, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0C08, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C00, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C08, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0C04, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x06C4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x062C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0634, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0610, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x061C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0614, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0628, 0x00, 0x00, CSIPHY_DNP_PARAMS}, + {0x0628, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x063C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0600, 0x91, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0604, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0620, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0608, 0x04, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, - {0x060c, 0x00, 0x00, CSIPHY_DNP_PARAMS}, {0x0610, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0638, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x065C, 0xC0, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0660, 0x0D, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0800, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0000, 0x00, 0x00, CSIPHY_DNP_PARAMS}, }, }; @@ -188,34 +193,34 @@ struct csiphy_reg_t {0x0030, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0904, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0910, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0900, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0908, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0900, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0908, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0904, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x00C4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x002C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0034, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0010, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x001C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0014, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0028, 0x00, 0x00, CSIPHY_DNP_PARAMS}, {0x003C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0000, 0x91, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0004, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0020, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0008, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, - {0x000c, 0x00, 0x00, CSIPHY_DNP_PARAMS}, {0x0010, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0800, 0x00, 0x00, CSIPHY_DNP_PARAMS}, + {0x005C, 0xC0, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0060, 0x0D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0800, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0000, 0x00, 0x00, CSIPHY_DNP_PARAMS}, + {0x0000, 0x00, 0x00, CSIPHY_DNP_PARAMS}, }, { {0x0730, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0C84, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0C90, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0C80, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0C88, 0x14, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C88, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0C84, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x07C4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x072C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0734, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0710, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS}, @@ -230,40 +235,42 @@ struct csiphy_reg_t {0x070c, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0710, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0738, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0800, 0x00, 0x00, CSIPHY_DNP_PARAMS}, + {0x075C, 0xC0, 0x00, CSIPHY_DEFAULT_PARAMS}, + 
{0x0760, 0x0D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0800, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, }, { {0x0230, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0A04, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0A00, 0x0B, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0A08, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A00, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A08, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0A04, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x02C4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x022C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0234, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0210, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x021C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0214, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0228, 0x00, 0x00, CSIPHY_DNP_PARAMS}, {0x023C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0200, 0x91, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0204, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0220, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0208, 0x04, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, - {0x020c, 0x00, 0x00, CSIPHY_DNP_PARAMS}, {0x0210, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0238, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0800, 0x00, 0x00, CSIPHY_DNP_PARAMS}, + {0x025C, 0xC0, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0260, 0x0D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0800, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0000, 0x00, 0x00, CSIPHY_DNP_PARAMS}, + {0x0000, 0x00, 0x00, CSIPHY_DNP_PARAMS}, }, { {0x0430, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0B04, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0B10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0B00, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0B08, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0B00, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0B08, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0B04, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x04C4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x042C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0434, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0410, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS}, @@ -275,19 +282,20 @@ struct csiphy_reg_t {0x0404, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0420, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0408, 0x04, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, - {0x040c, 0x00, 0x00, CSIPHY_DNP_PARAMS}, {0x0410, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0800, 0x00, 0x00, CSIPHY_DNP_PARAMS}, + {0x045C, 0xC0, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0460, 0x0D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0800, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0000, 0x00, 0x00, CSIPHY_DNP_PARAMS}, }, { {0x0630, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0C04, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0C10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0C00, 0x0E, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0C08, 0x14, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C00, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C08, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0C04, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x06C4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x062C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0634, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0610, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS}, @@ -295,30 +303,29 @@ struct csiphy_reg_t {0x0614, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0628, 0x0E, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x063C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0600, 0x91, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0600, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0604, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0620, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0608, 0x04, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, {0x060c, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0610, 0x52, 0x00, 
CSIPHY_DEFAULT_PARAMS}, {0x0638, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0800, 0x00, 0x00, CSIPHY_DNP_PARAMS}, + {0x065C, 0xC0, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0660, 0x0D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0800, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, }, }; struct csiphy_reg_t csiphy_3ph_v1_2_reg[MAX_LANES][MAX_SETTINGS_PER_LANE] = { { - {0x015C, 0x66, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0990, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0994, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0998, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x015C, 0x46, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0990, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0994, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0998, 0x1A, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x098C, 0xAF, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0168, 0xAC, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x016C, 0xAD, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0168, 0xA0, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x016C, 0x25, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0104, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x010C, 0x07, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, {0x0108, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, @@ -338,27 +345,19 @@ csiphy_reg_t csiphy_3ph_v1_2_reg[MAX_LANES][MAX_SETTINGS_PER_LANE] = { {0x01CC, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0164, 0x33, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x01DC, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x09C0, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x09C4, 0x7D, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x09C8, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0984, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0988, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0980, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0984, 0xA0, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x09B0, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x09B4, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x09B4, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0800, 0x0E, 0x00, CSIPHY_DEFAULT_PARAMS}, }, { - {0x035C, 0x66, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0A90, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0A94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0A98, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x035C, 0x46, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0A90, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0A94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0A98, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0A8C, 0xBF, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0368, 0xAC, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x036C, 0xAD, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A94, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A98, 0x1A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A8C, 0xAF, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0368, 0xA0, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x036C, 0x25, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0304, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x030C, 0x07, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, {0x0308, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, @@ -378,27 +377,19 @@ csiphy_reg_t csiphy_3ph_v1_2_reg[MAX_LANES][MAX_SETTINGS_PER_LANE] = { {0x03CC, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0364, 0x33, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x03DC, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0AC0, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0AC4, 0x7D, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0AC8, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0A84, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0A88, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0A80, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A84, 0xA0, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0AB0, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0AB4, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0AB4, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0800, 0x0E, 0x00, CSIPHY_DEFAULT_PARAMS}, }, { - {0x055C, 0x66, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0B90, 0x00, 0x00, 
CSIPHY_DEFAULT_PARAMS}, - {0x0B94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0B98, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x055C, 0x46, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0B90, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0B94, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0B98, 0x1A, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0B8C, 0xAF, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0568, 0xAC, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x056C, 0xAD, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0568, 0xA0, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x056C, 0x25, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0504, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x050C, 0x07, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, {0x0508, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE}, @@ -418,14 +409,9 @@ csiphy_reg_t csiphy_3ph_v1_2_reg[MAX_LANES][MAX_SETTINGS_PER_LANE] = { {0x05CC, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0564, 0x33, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x05DC, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0BC0, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0BC4, 0x7D, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0BC8, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0B84, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0B88, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0B80, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0B84, 0xA0, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0BB0, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x0BB4, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0BB4, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x0800, 0x0E, 0x00, CSIPHY_DEFAULT_PARAMS}, }, }; @@ -436,82 +422,53 @@ struct data_rate_settings_t data_rate_delta_table_1_2 = { { /* (2.5 * 10**3 * 2.28) rounded value*/ .bandwidth = 5700000000, - .data_rate_reg_array_size = 12, + .data_rate_reg_array_size = 6, .csiphy_data_rate_regs = { - {0x15C, 0x66, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x35C, 0x66, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x55C, 0x66, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x9B4, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0xAB4, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0xBB4, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x144, 0x22, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x344, 0x22, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x544, 0x22, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x16C, 0xAD, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x36C, 0xAD, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x56C, 0xAD, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x984, 0xA0, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0xA84, 0xA0, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0xB84, 0xA0, 0x00, CSIPHY_DEFAULT_PARAMS}, } }, { /* (3.5 * 10**3 * 2.28) rounded value */ .bandwidth = 7980000000, - .data_rate_reg_array_size = 24, + .data_rate_reg_array_size = 12, .csiphy_data_rate_regs = { - {0x15C, 0x46, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x35C, 0x46, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x55C, 0x46, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x9B4, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0xAB4, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0xBB4, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x9B0, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0xAB0, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0xBB0, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x144, 0xA2, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x344, 0xA2, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x544, 0xA2, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x13C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x33C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x53C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x140, 0x81, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x340, 0x81, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x540, 0x81, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x168, 0xA0, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x368, 0xA0, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x568, 0xA0, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x16C, 0x25, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x36C, 0x25, 0x00, 
CSIPHY_DEFAULT_PARAMS}, - {0x56C, 0x25, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x984, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0xA84, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0xB84, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0988, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0980, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A88, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A80, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0B88, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0B80, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, }, }, { /* (4.5 * 10**3 * 2.28) rounded value */ .bandwidth = 10260000000, - .data_rate_reg_array_size = 24, + .data_rate_reg_array_size = 12, .csiphy_data_rate_regs = { - {0x15C, 0x46, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x35C, 0x46, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x55C, 0x46, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x9B4, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0xAB4, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0xBB4, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x9B0, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0xAB0, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0xBB0, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x144, 0xA2, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x344, 0xA2, 0x00, CSIPHY_DEFAULT_PARAMS}, {0x544, 0xA2, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x13C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x33C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x53C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x140, 0x81, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x340, 0x81, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x540, 0x81, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x168, 0xA0, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x368, 0xA0, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x568, 0xA0, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x16C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x36C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, - {0x56C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x984, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0xA84, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0xB84, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0988, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0980, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A88, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A80, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0B88, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0B80, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + }, } } diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr/cam_res_mgr.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr/cam_res_mgr.c index 91b19c8f7ff0..1fcb06105e5e 100644 --- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr/cam_res_mgr.c +++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr/cam_res_mgr.c @@ -675,6 +675,9 @@ static int cam_res_mgr_probe(struct platform_device *pdev) { int rc = 0; + if (cam_res) + return 0; + cam_res = kzalloc(sizeof(*cam_res), GFP_KERNEL); if (!cam_res) return -ENOMEM; diff --git a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c index 139884e95d84..f107d6b36ff3 100644 --- a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c +++ b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c @@ -176,6 +176,8 @@ struct cam_dma_buff_info { struct cam_sec_buff_info { struct dma_buf *buf; + struct dma_buf_attachment *attach; + struct sg_table *table; enum dma_data_direction dir; int ref_count; dma_addr_t paddr; @@ -2514,6 +2516,8 @@ static int cam_smmu_map_stage2_buffer_and_add_to_list(int idx, int ion_fd, mapping_info->dir = dma_dir; mapping_info->ref_count = 1; mapping_info->buf = dmabuf; + mapping_info->attach = attach; + mapping_info->table = table; CAM_DBG(CAM_SMMU, 
"idx=%d, ion_fd=%d, dev=%pK, paddr=%pK, len=%u", idx, ion_fd, @@ -2617,6 +2621,9 @@ static int cam_smmu_secure_unmap_buf_and_remove_from_list( CAM_ERR(CAM_SMMU, "Error: List doesn't exist"); return -EINVAL; } + dma_buf_unmap_attachment(mapping_info->attach, mapping_info->table, + mapping_info->dir); + dma_buf_detach(mapping_info->buf, mapping_info->attach); dma_buf_put(mapping_info->buf); list_del_init(&mapping_info->list); diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c index 7f60d17819ce..073184f15c64 100644 --- a/drivers/net/ethernet/amd/pcnet32.c +++ b/drivers/net/ethernet/amd/pcnet32.c @@ -1548,8 +1548,7 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent) } pci_set_master(pdev); - ioaddr = pci_resource_start(pdev, 0); - if (!ioaddr) { + if (!pci_resource_len(pdev, 0)) { if (pcnet32_debug & NETIF_MSG_PROBE) pr_err("card has no PCI IO resources, aborting\n"); return -ENODEV; @@ -1561,6 +1560,8 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent) pr_err("architecture does not support 32bit PCI busmaster DMA\n"); return err; } + + ioaddr = pci_resource_start(pdev, 0); if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) { if (pcnet32_debug & NETIF_MSG_PROBE) pr_err("io address range already allocated\n"); diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h b/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h index b248966837b4..7aad40b2aa73 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h +++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h @@ -412,7 +412,7 @@ | CN6XXX_INTR_M0UNWI_ERR \ | CN6XXX_INTR_M1UPB0_ERR \ | CN6XXX_INTR_M1UPWI_ERR \ - | CN6XXX_INTR_M1UPB0_ERR \ + | CN6XXX_INTR_M1UNB0_ERR \ | CN6XXX_INTR_M1UNWI_ERR \ | CN6XXX_INTR_INSTR_DB_OF_ERR \ | CN6XXX_INTR_SLIST_DB_OF_ERR \ diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index a339ea2fd496..8b07890b0b23 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c @@ -1482,8 +1482,10 @@ dm9000_probe(struct platform_device *pdev) /* Init network device */ ndev = alloc_etherdev(sizeof(struct board_info)); - if (!ndev) - return -ENOMEM; + if (!ndev) { + ret = -ENOMEM; + goto out_regulator_disable; + } SET_NETDEV_DEV(ndev, &pdev->dev); diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 4771dbee9681..66fddc4ba56b 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -891,19 +891,13 @@ static int __ibmvnic_open(struct net_device *netdev) rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP); if (rc) { - for (i = 0; i < adapter->req_rx_queues; i++) - napi_disable(&adapter->napi[i]); + ibmvnic_napi_disable(adapter); release_resources(adapter); return rc; } netif_tx_start_all_queues(netdev); - if (prev_state == VNIC_CLOSED) { - for (i = 0; i < adapter->req_rx_queues; i++) - napi_schedule(&adapter->napi[i]); - } - adapter->state = VNIC_OPEN; return rc; } @@ -1432,7 +1426,7 @@ static int do_reset(struct ibmvnic_adapter *adapter, struct ibmvnic_rwi *rwi, u32 reset_state) { struct net_device *netdev = adapter->netdev; - int i, rc; + int rc; netdev_dbg(adapter->netdev, "Re-setting driver (%d)\n", rwi->reset_reason); @@ -1497,10 +1491,6 @@ static int do_reset(struct ibmvnic_adapter *adapter, /* refresh device's multicast list */ ibmvnic_set_multi(netdev); - /* kick napi */ - for (i = 0; i < adapter->req_rx_queues; i++) - napi_schedule(&adapter->napi[i]); - if 
(adapter->reset_reason != VNIC_RESET_FAILOVER) netdev_notify_peers(netdev); diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 3f43e4f0d3b1..e25bb667fb59 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -8941,6 +8941,7 @@ static int i40e_sw_init(struct i40e_pf *pf) { int err = 0; int size; + u16 pow; /* Set default capability flags */ pf->flags = I40E_FLAG_RX_CSUM_ENABLED | @@ -8959,6 +8960,11 @@ static int i40e_sw_init(struct i40e_pf *pf) pf->rss_table_size = pf->hw.func_caps.rss_table_size; pf->rss_size_max = min_t(int, pf->rss_size_max, pf->hw.func_caps.num_tx_qp); + + /* find the next higher power-of-2 of num cpus */ + pow = roundup_pow_of_two(num_online_cpus()); + pf->rss_size_max = min_t(int, pf->rss_size_max, pow); + if (pf->hw.func_caps.rss) { pf->flags |= I40E_FLAG_RSS_ENABLED; pf->alloc_rss_size = min_t(int, pf->rss_size_max, diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index f48006c22a8a..2a9bb13ecb54 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -835,6 +835,9 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, __be16 df; int err; + if (!pskb_network_may_pull(skb, sizeof(struct iphdr))) + return -EINVAL; + sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info, geneve->info.key.tp_dst, sport); @@ -882,6 +885,9 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, __be16 sport; int err; + if (!pskb_network_may_pull(skb, sizeof(struct ipv6hdr))) + return -EINVAL; + sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info, geneve->info.key.tp_dst, sport); diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index 0e3d13e192e3..9ae6a1ccfbe1 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -626,7 +626,7 @@ static struct hso_serial *get_serial_by_index(unsigned index) return serial; } -static int get_free_serial_index(void) +static int obtain_minor(struct hso_serial *serial) { int index; unsigned long flags; @@ -634,8 +634,10 @@ static int get_free_serial_index(void) spin_lock_irqsave(&serial_table_lock, flags); for (index = 0; index < HSO_SERIAL_TTY_MINORS; index++) { if (serial_table[index] == NULL) { + serial_table[index] = serial->parent; + serial->minor = index; spin_unlock_irqrestore(&serial_table_lock, flags); - return index; + return 0; } } spin_unlock_irqrestore(&serial_table_lock, flags); @@ -644,15 +646,12 @@ static int get_free_serial_index(void) return -1; } -static void set_serial_by_index(unsigned index, struct hso_serial *serial) +static void release_minor(struct hso_serial *serial) { unsigned long flags; spin_lock_irqsave(&serial_table_lock, flags); - if (serial) - serial_table[index] = serial->parent; - else - serial_table[index] = NULL; + serial_table[serial->minor] = NULL; spin_unlock_irqrestore(&serial_table_lock, flags); } @@ -2241,6 +2240,7 @@ static int hso_stop_serial_device(struct hso_device *hso_dev) static void hso_serial_tty_unregister(struct hso_serial *serial) { tty_unregister_device(tty_drv, serial->minor); + release_minor(serial); } static void hso_serial_common_free(struct hso_serial *serial) @@ -2265,25 +2265,23 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs, int rx_size, int tx_size) { struct device *dev; - int minor; int i; tty_port_init(&serial->port); - minor = get_free_serial_index(); - 
if (minor < 0) + if (obtain_minor(serial)) goto exit2; /* register our minor number */ serial->parent->dev = tty_port_register_device_attr(&serial->port, - tty_drv, minor, &serial->parent->interface->dev, + tty_drv, serial->minor, &serial->parent->interface->dev, serial->parent, hso_serial_dev_groups); - if (IS_ERR(serial->parent->dev)) + if (IS_ERR(serial->parent->dev)) { + release_minor(serial); goto exit2; + } dev = serial->parent->dev; - /* fill in specific data for later use */ - serial->minor = minor; serial->magic = HSO_SERIAL_MAGIC; spin_lock_init(&serial->serial_lock); serial->num_rx_urbs = num_urbs; @@ -2676,9 +2674,6 @@ static struct hso_device *hso_create_bulk_serial_device( serial->write_data = hso_std_serial_write_data; - /* and record this serial */ - set_serial_by_index(serial->minor, serial); - /* setup the proc dirs and files if needed */ hso_log_port(hso_dev); @@ -2735,9 +2730,6 @@ struct hso_device *hso_create_mux_serial_device(struct usb_interface *interface, serial->shared_int->ref_count++; mutex_unlock(&serial->shared_int->shared_int_lock); - /* and record this serial */ - set_serial_by_index(serial->minor, serial); - /* setup the proc dirs and files if needed */ hso_log_port(hso_dev); @@ -3121,8 +3113,7 @@ static void hso_free_interface(struct usb_interface *interface) cancel_work_sync(&serial_table[i]->async_put_intf); cancel_work_sync(&serial_table[i]->async_get_intf); hso_serial_tty_unregister(serial); - kref_put(&serial_table[i]->ref, hso_serial_ref_free); - set_serial_by_index(i, NULL); + kref_put(&serial->parent->ref, hso_serial_ref_free); } } diff --git a/drivers/net/wireless/virt_wifi.c b/drivers/net/wireless/virt_wifi.c index 45ca9550ee24..c06b11c84732 100644 --- a/drivers/net/wireless/virt_wifi.c +++ b/drivers/net/wireless/virt_wifi.c @@ -12,6 +12,7 @@ #include <net/cfg80211.h> #include <net/rtnetlink.h> #include <linux/etherdevice.h> +#include <linux/math64.h> #include <linux/module.h> #include <net/cfg80211.h> @@ -173,11 +174,11 @@ static void virt_wifi_scan_result(struct work_struct *work) scan_result.work); struct wiphy *wiphy = priv_to_wiphy(priv); struct cfg80211_scan_info scan_info = { .aborted = false }; + u64 tsf = div_u64(ktime_get_boot_ns(), 1000); informed_bss = cfg80211_inform_bss(wiphy, &channel_5ghz, CFG80211_BSS_FTYPE_PRESP, - fake_router_bssid, - ktime_get_boot_ns(), + fake_router_bssid, tsf, WLAN_CAPABILITY_ESS, 0, (void *)&ssid, sizeof(ssid), DBM_TO_MBM(-50), GFP_KERNEL); diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c index 910322b442bd..9092b55e087f 100644 --- a/drivers/net/xen-netback/xenbus.c +++ b/drivers/net/xen-netback/xenbus.c @@ -1043,11 +1043,15 @@ static void connect(struct backend_info *be) xenvif_carrier_on(be->vif); unregister_hotplug_status_watch(be); - err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, NULL, - hotplug_status_changed, - "%s/%s", dev->nodename, "hotplug-status"); - if (!err) + if (xenbus_exists(XBT_NIL, dev->nodename, "hotplug-status")) { + err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, + NULL, hotplug_status_changed, + "%s/%s", dev->nodename, + "hotplug-status"); + if (err) + goto err; be->have_hotplug_status_watch = 1; + } netif_tx_wake_all_queues(be->vif->dev); diff --git a/drivers/pinctrl/intel/pinctrl-lewisburg.c b/drivers/pinctrl/intel/pinctrl-lewisburg.c index c2164db14e9c..9fdcae3260e8 100644 --- a/drivers/pinctrl/intel/pinctrl-lewisburg.c +++ b/drivers/pinctrl/intel/pinctrl-lewisburg.c @@ -300,9 +300,9 @@ static const struct pinctrl_pin_desc 
lbg_pins[] = { static const struct intel_community lbg_communities[] = { LBG_COMMUNITY(0, 0, 71), LBG_COMMUNITY(1, 72, 132), - LBG_COMMUNITY(3, 133, 144), - LBG_COMMUNITY(4, 145, 180), - LBG_COMMUNITY(5, 181, 246), + LBG_COMMUNITY(3, 133, 143), + LBG_COMMUNITY(4, 144, 178), + LBG_COMMUNITY(5, 179, 246), }; static const struct intel_pinctrl_soc_data lbg_soc_data = { diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c index 2c684f647c8d..7ee304336e91 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c @@ -7262,6 +7262,13 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p, result = -ENOMEM; goto fail_hdr_offset_cache; } + ipa3_ctx->fnr_stats_cache = kmem_cache_create("IPA_FNR_STATS", + sizeof(struct ipa_ioc_flt_rt_counter_alloc), 0, 0, NULL); + if (!ipa3_ctx->fnr_stats_cache) { + IPAERR(":ipa fnr stats cache create failed\n"); + result = -ENOMEM; + goto fail_fnr_stats_cache; + } ipa3_ctx->hdr_proc_ctx_cache = kmem_cache_create("IPA_HDR_PROC_CTX", sizeof(struct ipa3_hdr_proc_ctx_entry), 0, 0, NULL); if (!ipa3_ctx->hdr_proc_ctx_cache) { @@ -7519,6 +7526,8 @@ fail_rt_tbl_cache: fail_hdr_proc_ctx_offset_cache: kmem_cache_destroy(ipa3_ctx->hdr_proc_ctx_cache); fail_hdr_proc_ctx_cache: + kmem_cache_destroy(ipa3_ctx->fnr_stats_cache); +fail_fnr_stats_cache: kmem_cache_destroy(ipa3_ctx->hdr_offset_cache); fail_hdr_offset_cache: kmem_cache_destroy(ipa3_ctx->hdr_cache); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h index 7d3a95120462..bff5a7ffc3fe 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h @@ -1775,6 +1775,7 @@ struct ipa3_app_clock_vote { * @rt_rule_cache: routing rule cache * @hdr_cache: header cache * @hdr_offset_cache: header offset cache + * @fnr_stats_cache: FnR stats cache * @hdr_proc_ctx_cache: processing context cache * @hdr_proc_ctx_offset_cache: processing context offset cache * @rt_tbl_cache: routing table cache @@ -1870,6 +1871,7 @@ struct ipa3_context { struct kmem_cache *rt_rule_cache; struct kmem_cache *hdr_cache; struct kmem_cache *hdr_offset_cache; + struct kmem_cache *fnr_stats_cache; struct kmem_cache *hdr_proc_ctx_cache; struct kmem_cache *hdr_proc_ctx_offset_cache; struct kmem_cache *rt_tbl_cache; diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c b/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c index 350199cc60b1..f5c7e0662bb3 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. 
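The ipa3_pre_init() hunk above adds a dedicated slab cache for the filter/routing (FnR) counter objects and wires it into the existing error-unwind labels. As a loose, self-contained sketch of that kmem_cache lifecycle (the struct, cache name, and function names below are made up for illustration and are not the IPA driver's):

#include <linux/slab.h>
#include <linux/types.h>

struct fnr_stats_obj {                  /* hypothetical payload */
	u32 start_id;
	u32 num_counters;
};

static struct kmem_cache *fnr_cache;

static int fnr_cache_init(void)
{
	fnr_cache = kmem_cache_create("FNR_STATS_EXAMPLE",
				      sizeof(struct fnr_stats_obj), 0, 0, NULL);
	if (!fnr_cache)
		return -ENOMEM;         /* caller unwinds earlier caches, as ipa3_pre_init() does */
	return 0;
}

static struct fnr_stats_obj *fnr_obj_alloc(void)
{
	return kmem_cache_zalloc(fnr_cache, GFP_KERNEL);   /* zeroed object */
}

static void fnr_obj_free(struct fnr_stats_obj *obj)
{
	kmem_cache_free(fnr_cache, obj);
}

static void fnr_cache_exit(void)
{
	kmem_cache_destroy(fnr_cache);  /* every object must have been freed first */
}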
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -1254,14 +1254,15 @@ int ipa_pm_set_throughput(u32 hdl, int throughput) return -EINVAL; } + mutex_lock(&ipa_pm_ctx->client_mutex); if (hdl >= IPA_PM_MAX_CLIENTS || ipa_pm_ctx->clients[hdl] == NULL || throughput < 0) { IPA_PM_ERR("Invalid Params\n"); + mutex_unlock(&ipa_pm_ctx->client_mutex); return -EINVAL; } client = ipa_pm_ctx->clients[hdl]; - mutex_lock(&ipa_pm_ctx->client_mutex); if (client->group == IPA_PM_GROUP_DEFAULT) IPA_PM_DBG_LOW("Old throughput: %d\n", client->throughput); else @@ -1280,14 +1281,16 @@ int ipa_pm_set_throughput(u32 hdl, int throughput) client->group, ipa_pm_ctx->group_tput[client->group]); mutex_unlock(&ipa_pm_ctx->client_mutex); - spin_lock_irqsave(&client->state_lock, flags); - if (IPA_PM_STATE_ACTIVE(client->state) || (client->group != + if (ipa_pm_ctx->clients[hdl]) { + spin_lock_irqsave(&client->state_lock, flags); + if (IPA_PM_STATE_ACTIVE(client->state) || (client->group != IPA_PM_GROUP_DEFAULT)) { + spin_unlock_irqrestore(&client->state_lock, flags); + do_clk_scaling(); + return 0; + } spin_unlock_irqrestore(&client->state_lock, flags); - do_clk_scaling(); - return 0; } - spin_unlock_irqrestore(&client->state_lock, flags); return 0; } diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c index 653e1e694107..f404218fee76 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c @@ -6363,13 +6363,21 @@ static int __ipa3_alloc_counter_hdl return id; } -int ipa3_alloc_counter_id(struct ipa_ioc_flt_rt_counter_alloc *counter) +int ipa3_alloc_counter_id(struct ipa_ioc_flt_rt_counter_alloc *header) { int i, unused_cnt, unused_max, unused_start_id; + struct ipa_ioc_flt_rt_counter_alloc *counter; idr_preload(GFP_KERNEL); spin_lock(&ipa3_ctx->flt_rt_counters.hdl_lock); + counter = kmem_cache_zalloc(ipa3_ctx->fnr_stats_cache, GFP_KERNEL); + if (!counter) { + IPAERR_RL("failed to alloc fnr stats counter object\n"); + spin_unlock(&ipa3_ctx->flt_rt_counters.hdl_lock); + return -ENOMEM; + } + memcpy(counter, header, sizeof(struct ipa_ioc_flt_rt_counter_alloc)); /* allocate hw counters */ counter->hw_counter.start_id = 0; counter->hw_counter.end_id = 0; @@ -6469,7 +6477,7 @@ mark_hw_cnt: unused_start_id = counter->hw_counter.start_id; if (unused_start_id < 1 || unused_start_id > IPA_FLT_RT_HW_COUNTER) { - IPAERR("unexpected hw_counter start id %d\n", + IPAERR_RL("unexpected hw_counter start id %d\n", unused_start_id); goto err; } @@ -6484,7 +6492,7 @@ mark_sw_cnt: - IPA_FLT_RT_HW_COUNTER; if (unused_start_id < 1 || unused_start_id > IPA_FLT_RT_SW_COUNTER) { - IPAERR("unexpected sw_counter start id %d\n", + IPAERR_RL("unexpected sw_counter start id %d\n", unused_start_id); goto err; } @@ -6494,12 +6502,14 @@ mark_sw_cnt: done: /* get a handle from idr for dealloc */ counter->hdl = __ipa3_alloc_counter_hdl(counter); + memcpy(header, counter, sizeof(struct ipa_ioc_flt_rt_counter_alloc)); spin_unlock(&ipa3_ctx->flt_rt_counters.hdl_lock); idr_preload_end(); return 0; err: counter->hdl = -1; + kmem_cache_free(ipa3_ctx->fnr_stats_cache, counter); spin_unlock(&ipa3_ctx->flt_rt_counters.hdl_lock); idr_preload_end(); return -ENOMEM; @@ -6513,7 +6523,7 @@ void ipa3_counter_remove_hdl(int hdl) spin_lock(&ipa3_ctx->flt_rt_counters.hdl_lock); counter = idr_find(&ipa3_ctx->flt_rt_counters.hdl, hdl); if (counter == NULL) { - IPAERR("unexpected hdl 
%d\n", hdl); + IPAERR_RL("unexpected hdl %d\n", hdl); goto err; } /* remove counters belong to this hdl, set used back to 0 */ @@ -6523,7 +6533,7 @@ void ipa3_counter_remove_hdl(int hdl) memset(&ipa3_ctx->flt_rt_counters.used_hw + offset, 0, counter->hw_counter.num_counters * sizeof(bool)); } else { - IPAERR("unexpected hdl %d\n", hdl); + IPAERR_RL("unexpected hdl %d\n", hdl); goto err; } offset = counter->sw_counter.start_id - 1 - IPA_FLT_RT_HW_COUNTER; @@ -6532,11 +6542,12 @@ void ipa3_counter_remove_hdl(int hdl) memset(&ipa3_ctx->flt_rt_counters.used_sw + offset, 0, counter->sw_counter.num_counters * sizeof(bool)); } else { - IPAERR("unexpected hdl %d\n", hdl); + IPAERR_RL("unexpected hdl %d\n", hdl); goto err; } /* remove the handle */ idr_remove(&ipa3_ctx->flt_rt_counters.hdl, hdl); + kmem_cache_free(ipa3_ctx->fnr_stats_cache, counter); err: spin_unlock(&ipa3_ctx->flt_rt_counters.hdl_lock); } @@ -6553,8 +6564,10 @@ void ipa3_counter_id_remove_all(void) memset(&ipa3_ctx->flt_rt_counters.used_sw, 0, sizeof(ipa3_ctx->flt_rt_counters.used_sw)); /* remove all handles */ - idr_for_each_entry(&ipa3_ctx->flt_rt_counters.hdl, counter, hdl) + idr_for_each_entry(&ipa3_ctx->flt_rt_counters.hdl, counter, hdl) { idr_remove(&ipa3_ctx->flt_rt_counters.hdl, hdl); + kmem_cache_free(ipa3_ctx->fnr_stats_cache, counter); + } spin_unlock(&ipa3_ctx->flt_rt_counters.hdl_lock); } @@ -7079,7 +7092,7 @@ u32 ipa3_get_num_pipes(void) /** * ipa3_disable_apps_wan_cons_deaggr()- - * set ipa_ctx->ipa_client_apps_wan_cons_agg_gro + * set ipa3_ctx->ipa_client_apps_wan_cons_agg_gro * * Return value: 0 or negative in case of failure */ diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index 470e11b42820..9eb61a41be24 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c @@ -219,18 +219,17 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc) memcpy(task->ata_task.atapi_packet, qc->cdb, qc->dev->cdb_len); task->total_xfer_len = qc->nbytes; task->num_scatter = qc->n_elem; + task->data_dir = qc->dma_dir; + } else if (qc->tf.protocol == ATA_PROT_NODATA) { + task->data_dir = DMA_NONE; } else { for_each_sg(qc->sg, sg, qc->n_elem, si) xfer += sg_dma_len(sg); task->total_xfer_len = xfer; task->num_scatter = si; - } - - if (qc->tf.protocol == ATA_PROT_NODATA) - task->data_dir = DMA_NONE; - else task->data_dir = qc->dma_dir; + } task->scatter = qc->sg; task->ata_task.retry_count = 1; task->task_state_flags = SAS_TASK_STATE_PENDING; diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c index a0e35028ebda..118e764108f7 100644 --- a/drivers/scsi/scsi_transport_srp.c +++ b/drivers/scsi/scsi_transport_srp.c @@ -555,7 +555,7 @@ int srp_reconnect_rport(struct srp_rport *rport) res = mutex_lock_interruptible(&rport->mutex); if (res) goto out; - if (rport->state != SRP_RPORT_FAIL_FAST) + if (rport->state != SRP_RPORT_FAIL_FAST && rport->state != SRP_RPORT_LOST) /* * sdev state must be SDEV_TRANSPORT_OFFLINE, transition * to SDEV_BLOCK is illegal. Calling scsi_target_unblock() diff --git a/drivers/soc/qcom/pil-q6v5-mss.c b/drivers/soc/qcom/pil-q6v5-mss.c index f36835a90404..14170a88ea0e 100644 --- a/drivers/soc/qcom/pil-q6v5-mss.c +++ b/drivers/soc/qcom/pil-q6v5-mss.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. 
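The ipa3_counter_id_remove_all() hunk above now frees each counter object while walking the IDR instead of only dropping the handle. A generic sketch of that teardown pattern follows; my_idr, my_cache, and struct my_obj are placeholders, and the caller is assumed to hold whatever lock normally protects the IDR:

#include <linux/idr.h>
#include <linux/slab.h>

struct my_obj {                         /* hypothetical per-handle object */
	int payload;
};

static void my_remove_all(struct idr *my_idr, struct kmem_cache *my_cache)
{
	struct my_obj *obj;
	int id;

	idr_for_each_entry(my_idr, obj, id) {   /* visits every allocated id */
		idr_remove(my_idr, id);         /* invalidate the handle     */
		kmem_cache_free(my_cache, obj); /* then release the payload  */
	}
}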
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -166,9 +166,11 @@ static void modem_crash_shutdown(const struct subsys_desc *subsys) drv->crash_shutdown = true; if (!subsys_get_crash_status(drv->subsys) && - subsys->force_stop_bit) { + subsys->state) { qcom_smem_state_update_bits(subsys->state, - BIT(subsys->force_stop_bit), 1); + BIT(subsys->force_stop_bit), + BIT(subsys->force_stop_bit)); + drv->ignore_errors = true; msleep(STOP_ACK_TIMEOUT_MS); } } diff --git a/drivers/staging/fw-api/fw/htt.h b/drivers/staging/fw-api/fw/htt.h index 2d92b288a40e..df354344e1c1 100644 --- a/drivers/staging/fw-api/fw/htt.h +++ b/drivers/staging/fw-api/fw/htt.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011-2020 The Linux Foundation. All rights reserved. + * Copyright (c) 2011-2021 The Linux Foundation. All rights reserved. * * Previously licensed under the ISC license by Qualcomm Atheros, Inc. * @@ -209,9 +209,11 @@ * 3.85 Add HTT_RX_PEER_META_DATA defs. * 3.86 Add HTT_T2H_MSG_TYPE_FSE_CMEM_BASE_SEND def. * 3.87 Add on-chip AST index field to PEER_MAP_V2 msg. + * 3.88 Add HTT_H2T_MSG_TYPE_HOST_PADDR_SIZE def. + * 3.89 Add MSDU queue enumerations. */ #define HTT_CURRENT_VERSION_MAJOR 3 -#define HTT_CURRENT_VERSION_MINOR 87 +#define HTT_CURRENT_VERSION_MINOR 89 #define HTT_NUM_TX_FRAG_DESC 1024 @@ -523,6 +525,7 @@ enum htt_h2t_msg_type { HTT_H2T_MSG_TYPE_RX_FISA_CFG = 0x15, HTT_H2T_MSG_TYPE_3_TUPLE_HASH_CFG = 0x16, HTT_H2T_MSG_TYPE_RX_FULL_MONITOR_MODE = 0x17, + HTT_H2T_MSG_TYPE_HOST_PADDR_SIZE = 0x18, /* keep this last */ HTT_H2T_NUM_MSGS @@ -546,6 +549,9 @@ enum htt_h2t_msg_type { /** * @brief host -> target version number request message definition * + * MSG_TYPE => HTT_H2T_MSG_TYPE_VERSION_REQ + * + * * |31 24|23 16|15 8|7 0| * |----------------+----------------+----------------+----------------| * | reserved | msg type | @@ -566,7 +572,7 @@ enum htt_h2t_msg_type { * - MSG_TYPE * Bits 7:0 * Purpose: identifies this as a version number request message - * Value: 0x0 + * Value: 0x0 (HTT_H2T_MSG_TYPE_VERSION_REQ) */ #define HTT_VER_REQ_BYTES 4 @@ -578,6 +584,8 @@ enum htt_h2t_msg_type { /** * @brief HTT tx MSDU descriptor * + * MSG_TYPE => HTT_H2T_MSG_TYPE_TX_FRM + * * @details * The HTT tx MSDU descriptor is created by the host HTT SW for each * tx MSDU. 
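The modem_crash_shutdown() change above passes BIT(force_stop_bit) as both the mask and the value; the old code passed a literal 1 as the value, which only ever sets bit 0 of the SMEM state. A minimal sketch of the mask/value semantics, assuming state and force_stop_bit come from the subsystem descriptor as in the hunk (the 1000 ms sleep stands in for STOP_ACK_TIMEOUT_MS):

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/soc/qcom/smem_state.h>

/* qcom_smem_state_update_bits(state, mask, value) rewrites only the bits
 * selected by mask, taking their new contents from value, so the value
 * must carry the same BIT() as the mask to assert an arbitrary bit. */
static void force_stop_example(struct qcom_smem_state *state,
			       unsigned int force_stop_bit)
{
	qcom_smem_state_update_bits(state, BIT(force_stop_bit),
				    BIT(force_stop_bit));        /* assert force-stop */
	msleep(1000);                    /* stand-in for STOP_ACK_TIMEOUT_MS */
	qcom_smem_state_update_bits(state, BIT(force_stop_bit), 0); /* de-assert */
}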
The HTT tx MSDU descriptor contains the information that @@ -2718,7 +2726,13 @@ PREPACK struct htt_tx_flow_metadata { /** - * @brief Used in HTT_H2T_MSG_TYPE_ADD_WDS_ENTRY and HTT_H2T_MSG_TYPE_DELETE_WDS_ENTRY messages + * @brief host -> target ADD WDS Entry + * + * MSG_TYPE => HTT_H2T_MSG_TYPE_ADD_WDS_ENTRY + * + * @brief host -> target DELETE WDS Entry + * + * MSG_TYPE => HTT_H2T_MSG_TYPE_DELETE_WDS_ENTRY * * @details * HTT wds entry from source port learning @@ -2742,8 +2756,8 @@ PREPACK struct htt_tx_flow_metadata { * The message is interpreted as follows: * * dword0 - b'0:7 - msg_type: This will be set to - * HTT_H2T_MSG_TYPE_ADD_WDS_ENTRY or - * HTT_H2T_MSG_TYPE_DELETE_WDS_ENTRY + * 0xd (HTT_H2T_MSG_TYPE_ADD_WDS_ENTRY) or + * 0xe (HTT_H2T_MSG_TYPE_DELETE_WDS_ENTRY) * * dword0 - b'8:15 - vdev_id * @@ -2824,6 +2838,9 @@ PREPACK struct htt_wds_entry { /** * @brief MAC DMA rx ring setup specification + * + * MSG_TYPE => HTT_H2T_MSG_TYPE_RX_RING_CFG + * * @details * To allow for dynamic rx ring reconfiguration and to avoid race * conditions, the host SW never directly programs the MAC DMA rx ring(s) @@ -2865,7 +2882,7 @@ PREPACK struct htt_wds_entry { * - MSG_TYPE * Bits 7:0 * Purpose: identifies this as an rx ring configuration message - * Value: 0x2 + * Value: 0x2 (HTT_H2T_MSG_TYPE_RX_RING_CFG) * - NUM_RINGS * Bits 15:8 * Purpose: indicates whether the host is setting up one rx ring or two @@ -3333,6 +3350,8 @@ PREPACK struct htt_wds_entry { /** * @brief host -> target FW statistics retrieve * + * MSG_TYPE => HTT_H2T_MSG_TYPE_STATS_REQ + * * @details * The following field definitions describe the format of the HTT host * to target FW stats retrieve message. The message specifies the type of @@ -3354,7 +3373,7 @@ PREPACK struct htt_wds_entry { * - MSG_TYPE * Bits 7:0 * Purpose: identifies this is a stats upload request message - * Value: 0x3 + * Value: 0x3 (HTT_H2T_MSG_TYPE_STATS_REQ) * - UPLOAD_TYPES * Bits 31:8 * Purpose: identifies which types of FW statistics to upload @@ -3443,6 +3462,8 @@ PREPACK struct htt_wds_entry { /** * @brief host -> target HTT out-of-band sync request * + * MSG_TYPE => HTT_H2T_MSG_TYPE_SYNC + * * @details * The HTT SYNC tells the target to suspend processing of subsequent * HTT host-to-target messages until some other target agent locally @@ -3464,7 +3485,7 @@ PREPACK struct htt_wds_entry { * - MSG_TYPE * Bits 7:0 * Purpose: identifies this as a sync message - * Value: 0x4 + * Value: 0x4 (HTT_H2T_MSG_TYPE_SYNC) * - SYNC_COUNT * Bits 15:8 * Purpose: specifies what sync value the HTT FW will wait for from @@ -3492,7 +3513,9 @@ PREPACK struct htt_wds_entry { /** - * @brief HTT aggregation configuration + * @brief host -> target HTT aggregation configuration + * + * MSG_TYPE => HTT_H2T_MSG_TYPE_AGGR_CFG */ #define HTT_AGGR_CFG_MSG_SZ 4 @@ -3523,6 +3546,8 @@ PREPACK struct htt_wds_entry { /** * @brief host -> target HTT configure max amsdu info per vdev * + * MSG_TYPE => HTT_H2T_MSG_TYPE_AGGR_CFG_EX + * * @details * The HTT AGGR CFG EX tells the target to configure max_amsdu info per vdev * @@ -3534,7 +3559,7 @@ PREPACK struct htt_wds_entry { * - MSG_TYPE * Bits 7:0 * Purpose: identifies this as a aggr cfg ex message - * Value: 0xa + * Value: 0xa (HTT_H2T_MSG_TYPE_AGGR_CFG_EX) * - MAX_NUM_AMSDU_SUBFRM * Bits 15:8 * Purpose: max MSDUs per A-MSDU @@ -3570,6 +3595,8 @@ PREPACK struct htt_wds_entry { /** * @brief HTT WDI_IPA Config Message * + * MSG_TYPE => HTT_H2T_MSG_TYPE_WDI_IPA_CFG + * * @details * The HTT WDI_IPA config message is created/sent by host at driver 
* init time. It contains information about data structures used on @@ -3649,7 +3676,7 @@ PREPACK struct htt_wds_entry { * - MSG_TYPE * Bits 7:0 * Purpose: Identifies this as WDI_IPA config message - * value: = 0x8 + * value: = 0x8 (HTT_H2T_MSG_TYPE_WDI_IPA_CFG) * - TX_PKT_POOL_SIZE * Bits 15:0 * Purpose: Total number of TX packet buffer pool allocated by Host for @@ -4246,6 +4273,8 @@ enum htt_wdi_ipa_op_code { /** * @brief HTT WDI_IPA Operation Request Message * + * MSG_TYPE => HTT_H2T_MSG_TYPE_WDI_IPA_OP_REQ + * * @details * HTT WDI_IPA Operation Request message is sent by host * to either suspend or resume WDI_IPA TX or RX path. @@ -4258,7 +4287,7 @@ enum htt_wdi_ipa_op_code { * - MSG_TYPE * Bits 7:0 * Purpose: Identifies this as WDI_IPA Operation Request message - * value: = 0x9 + * value: = 0x9 (HTT_H2T_MSG_TYPE_WDI_IPA_OP_REQ) * - OP_CODE * Bits 31:16 * Purpose: Identifies operation host is requesting (e.g. TX suspend) @@ -4290,6 +4319,8 @@ PREPACK struct htt_wdi_ipa_op_request_t /* * @brief host -> target HTT_SRING_SETUP message * + * MSG_TYPE => HTT_H2T_MSG_TYPE_SRING_SETUP + * * @details * After target is booted up, Host can send SRING setup message for * each host facing LMAC SRING. Target setups up HW registers based @@ -4336,7 +4367,7 @@ PREPACK struct htt_wdi_ipa_op_request_t * * The message is interpreted as follows: * dword0 - b'0:7 - msg_type: This will be set to - * HTT_H2T_MSG_TYPE_SRING_SETUP + * 0xb (HTT_H2T_MSG_TYPE_SRING_SETUP) * b'8:15 - pdev_id: * 0 (for rings at SOC/UMAC level), * 1/2/3 mac id (for rings at LMAC level) @@ -4795,7 +4826,9 @@ enum htt_srng_ring_id { /** - * @brief HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG Message + * @brief host -> target RX ring selection config message + * + * MSG_TYPE => HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG * * @details * HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG message is sent by host to @@ -4839,7 +4872,7 @@ enum htt_srng_ring_id { * DT = drop_thresh_valid * The message is interpreted as follows: * dword0 - b'0:7 - msg_type: This will be set to - * HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG + * 0xc (HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG) * b'8:15 - pdev_id: * 0 (for rings at SOC/UMAC level), * 1/2/3 mac id (for rings at LMAC level) @@ -5637,7 +5670,10 @@ PREPACK struct htt_rx_ring_selection_cfg_t { word, HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_RX_##tlv) /** - * @brief HTT_H2T_MSG_TYPE_RFS_CONFIG + * @brief host --> target Receive Flow Steering configuration message definition + * + * MSG_TYPE => HTT_H2T_MSG_TYPE_RFS_CONFIG + * * host --> target Receive Flow Steering configuration message definition. * Host must send this message before sending HTT_H2T_MSG_TYPE_RX_RING_CFG. * The reason for this is we want RFS to be configured and ready before MAC @@ -5678,6 +5714,8 @@ PREPACK struct htt_rx_ring_selection_cfg_t { /** * @brief host -> target FW extended statistics retrieve * + * MSG_TYPE => HTT_H2T_MSG_TYPE_EXT_STATS_REQ + * * @details * The following field definitions describe the format of the HTT host * to target FW extended stats retrieve message. 
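Every host-to-target HTT message documented above carries its 8-bit msg_type in bits 7:0 of the first dword; the edits in these comment hunks simply spell out the numeric value next to each enum name. As an illustration only (htt.h does not define these helpers), stamping and reading that byte might look like:

static inline void htt_h2t_msg_type_set(A_UINT32 *msg_word, A_UINT32 type)
{
	/* clear bits 7:0, then write the message type */
	*msg_word = (*msg_word & ~(A_UINT32)0xff) | (type & 0xff);
}

static inline A_UINT32 htt_h2t_msg_type_get(A_UINT32 msg_word)
{
	return msg_word & 0xff;
}

/* e.g. htt_h2t_msg_type_set(&word0, HTT_H2T_MSG_TYPE_EXT_STATS_REQ) leaves
 * 0x10 in the low byte, matching the "Value: 0x10" note above. */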
@@ -5705,7 +5743,7 @@ PREPACK struct htt_rx_ring_selection_cfg_t { * - MSG_TYPE * Bits 7:0 * Purpose: identifies this is a extended stats upload request message - * Value: 0x10 + * Value: 0x10 (HTT_H2T_MSG_TYPE_EXT_STATS_REQ) * - PDEV_MASK * Bits 8:15 * Purpose: identifies the mask of PDEVs to retrieve stats from @@ -5794,6 +5832,8 @@ PREPACK struct htt_rx_ring_selection_cfg_t { /** * @brief host -> target FW PPDU_STATS request message * + * MSG_TYPE => HTT_H2T_MSG_TYPE_PPDU_STATS_CFG + * * @details * The following field definitions describe the format of the HTT host * to target FW for PPDU_STATS_CFG msg. @@ -5808,7 +5848,7 @@ PREPACK struct htt_rx_ring_selection_cfg_t { * - MSG_TYPE * Bits 7:0 * Purpose: identifies this is a req to configure ppdu_stats_ind from target - * Value: 0x11 + * Value: 0x11 (HTT_H2T_MSG_TYPE_PPDU_STATS_CFG) * - PDEV_MASK * Bits 8:15 * Purpose: identifies which pdevs this PPDU stats configuration applies to @@ -5854,6 +5894,9 @@ PREPACK struct htt_rx_ring_selection_cfg_t { /** * @brief Host-->target HTT RX FSE setup message + * + * MSG_TYPE => HTT_H2T_MSG_TYPE_RX_FSE_SETUP_CFG + * * @details * Through this message, the host will provide details of the flow tables * in host DDR along with hash keys. @@ -5868,7 +5911,7 @@ PREPACK struct htt_rx_ring_selection_cfg_t { * * Header fields: * dword0 - b'7:0 - msg_type: This will be set to - * HTT_H2T_MSG_TYPE_RX_FSE_SETUP_CFG + * 0x12 (HTT_H2T_MSG_TYPE_RX_FSE_SETUP_CFG) * b'15:8 - pdev_id: 0 indicates msg is for all LMAC rings, i.e. soc * 1, 2, 3 indicates pdev_id 0,1,2 and the msg is for that * pdev's LMAC ring. @@ -5945,6 +5988,9 @@ enum htt_ip_da_sa_prefix { /** * @brief Host-->target HTT RX FISA configure and enable + * + * MSG_TYPE => HTT_H2T_MSG_TYPE_RX_FISA_CFG + * * @details * The host will send this command down to configure and enable the FISA * operational params. @@ -5952,8 +5998,8 @@ enum htt_ip_da_sa_prefix { * register. * Should configure both the MACs. * - * dword0 - b'7:0 - msg_type: This will be set to HTT_H2T_MSG_TYPE_RX_FISA_CFG - * + * dword0 - b'7:0 - msg_type: + * This will be set to 0x15 (HTT_H2T_MSG_TYPE_RX_FISA_CFG) * b'15:8 - pdev_id: 0 indicates msg is for all LMAC rings, i.e. soc * 1, 2, 3 indicates pdev_id 0,1,2 and the msg is for that * pdev's LMAC ring. @@ -6404,6 +6450,9 @@ PREPACK struct htt_h2t_msg_rx_fse_setup_t { /** * @brief Host-->target HTT RX FSE operation message + * + * MSG_TYPE => HTT_H2T_MSG_TYPE_RX_FSE_OPERATION_CFG + * * @details * The host will send this Flow Search Engine (FSE) operation message for * every flow add/delete operation. @@ -6448,7 +6497,7 @@ PREPACK struct htt_h2t_msg_rx_fse_setup_t { * * Header fields: * dword0 - b'7:0 - msg_type: This will be set to - * HTT_H2T_MSG_TYPE_RX_FSE_OPERATION_CFG + * 0x13 (HTT_H2T_MSG_TYPE_RX_FSE_OPERATION_CFG) * b'15:8 - pdev_id: 0 indicates msg is for all LMAC rings, i.e. soc * 1, 2, 3 indicates pdev_id 0,1,2 and the msg is for the * specified pdev's LMAC ring. @@ -6527,6 +6576,9 @@ PREPACK struct htt_h2t_msg_rx_fse_operation_t { /** * @brief Host-->target HTT RX Full monitor mode register configuration message + * + * MSG_TYPE => HTT_H2T_MSG_TYPE_RX_FULL_MONITOR_MODE + * * @details * The host will send this Full monitor mode register configuration message. * This message can be sent per SOC or per PDEV which is differentiated @@ -6548,7 +6600,7 @@ PREPACK struct htt_h2t_msg_rx_fse_operation_t { * * Header fields: * dword0 - b'7:0 - msg_type: This will be set to - * HTT_H2T_MSG_TYPE_RX_FULL_MONITOR_MODE. 
+ * 0x17 (HTT_H2T_MSG_TYPE_RX_FULL_MONITOR_MODE) * b'15:8 - pdev_id: 0 indicates msg is for all LMAC rings, i.e. soc * 1, 2, 3 indicates pdev_id 0,1,2 and the msg is for the * specified pdev's LMAC ring. @@ -6779,8 +6831,9 @@ enum htt_rx_fse_operation { /** - * @brief HTT_H2T_MSG_TYPE_3_TUPLE_HASH_CFG - * host --> target Receive to configure the RxOLE 3-tuple Hash + * @brief host --> target Receive to configure the RxOLE 3-tuple Hash + * + * MSG_TYPE => HTT_H2T_MSG_TYPE_3_TUPLE_HASH_CFG * * |31 24|23 |15 8|7 2|1|0| * |----------------+----------------+----------------+----------------| @@ -6798,7 +6851,7 @@ enum htt_rx_fse_operation { * * Header fields: * dword0 - b'7:0 - msg_type: This will be set to - * HTT_H2T_MSG_TYPE_3_TUPLE_HASH_CFG + * 0x16 (HTT_H2T_MSG_TYPE_3_TUPLE_HASH_CFG) * b'15:8 - pdev_id: 0 indicates msg is for all LMAC rings, i.e. soc * 1, 2, 3 indicates pdev_id 0,1,2 and the msg is for the * specified pdev's LMAC ring. @@ -6869,6 +6922,83 @@ PREPACK struct htt_h2t_msg_rx_3_tuple_hash_cfg_t { #define HTT_3_TUPLE_HASH_CFG_REQ_BYTES 8 +/** + * @brief host --> target Host PA Address Size + * + * MSG_TYPE => HTT_H2T_MSG_TYPE_HOST_PADDR_SIZE + * + * @details + * The HTT_H2T_MSG_TYPE_HOST_PADDR_SIZE message is sent by the host to + * provide the physical start address and size of each of the memory + * areas within host DDR that the target FW may need to access. + * + * For example, the host can use this message to allow the target FW + * to set up access to the host's pools of TQM link descriptors. + * The message would appear as follows: + * + * |31 24|23 16|15 8|7 0| + * |----------------+----------------+----------------+----------------| + * | reserved | num_entries | msg_type | + * |-=-=-=-=-=-=-=-=+-=-=-=-=-=-=-=-=+=-=-=-=-=-=-=-=-+=-=-=-=-=-=-=-=-| + * | mem area 0 size | + * |----------------+----------------+----------------+----------------| + * | mem area 0 physical_address_lo | + * |----------------+----------------+----------------+----------------| + * | mem area 0 physical_address_hi | + * |-=-=-=-=-=-=-=-=+-=-=-=-=-=-=-=-=+=-=-=-=-=-=-=-=-+=-=-=-=-=-=-=-=-| + * | mem area 1 size | + * |----------------+----------------+----------------+----------------| + * | mem area 1 physical_address_lo | + * |----------------+----------------+----------------+----------------| + * | mem area 1 physical_address_hi | + * |----------------+----------------+----------------+----------------| + * ... + * |-=-=-=-=-=-=-=-=+-=-=-=-=-=-=-=-=+=-=-=-=-=-=-=-=-+=-=-=-=-=-=-=-=-| + * | mem area N size | + * |----------------+----------------+----------------+----------------| + * | mem area N physical_address_lo | + * |----------------+----------------+----------------+----------------| + * | mem area N physical_address_hi | + * |----------------+----------------+----------------+----------------| + * + * The message is interpreted as follows: + * dword0 - b'0:7 - msg_type: This will be set to + * 0x18 (HTT_H2T_MSG_TYPE_HOST_PADDR_SIZE) + * b'8:15 - number_entries: Indicated the number of host memory + * areas specified within the remainder of the message + * b'16:31 - reserved. + * dword1 - b'0:31 - memory area 0 size in bytes + * dword2 - b'0:31 - memory area 0 physical address, lower 32 bits + * dword3 - b'0:31 - memory area 0 physical address, upper 32 bits + * and similar for memory area 1 through memory area N. 
+ */ + +PREPACK struct htt_h2t_host_paddr_size { + A_UINT32 msg_type: 8, + num_entries: 8, + reserved: 16; +} POSTPACK; + +PREPACK struct htt_h2t_host_paddr_size_entry_t { + A_UINT32 size; + A_UINT32 physical_address_lo; + A_UINT32 physical_address_hi; +} POSTPACK; + +#define HTT_H2T_HOST_PADDR_SIZE_ENTRY_SIZE (sizeof(struct htt_h2t_host_paddr_size_entry_t)) + +#define HTT_H2T_HOST_PADDR_SIZE_NUM_ENTRIES_M 0x0000FF00 +#define HTT_H2T_HOST_PADDR_SIZE_NUM_ENTRIES_S 8 + +#define HTT_H2T_HOST_PADDR_SIZE_NUM_ENTRIES_GET(_var) \ + (((_var) & HTT_H2T_HOST_PADDR_SIZE_NUM_ENTRIES_M) >> \ + HTT_H2T_HOST_PADDR_SIZE_NUM_ENTRIES_S) + +#define HTT_H2T_HOST_PADDR_SIZE_NUM_ENTRIES_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_H2T_HOST_PADDR_SIZE_NUM_ENTRIES, _val); \ + ((_var) |= ((_val) << HTT_H2T_HOST_PADDR_SIZE_NUM_ENTRIES_S)); \ + } while (0) /*=== target -> host messages ===============================================*/ @@ -6945,6 +7075,8 @@ enum htt_t2h_msg_type { /** * @brief target -> host version number confirmation message definition * + * MSG_TYPE => HTT_T2H_MSG_TYPE_VERSION_CONF + * * |31 24|23 16|15 8|7 0| * |----------------+----------------+----------------+----------------| * | reserved | major number | minor number | msg type | @@ -6965,7 +7097,7 @@ enum htt_t2h_msg_type { * - MSG_TYPE * Bits 7:0 * Purpose: identifies this as a version number confirmation message - * Value: 0x0 + * Value: 0x0 (HTT_T2H_MSG_TYPE_VERSION_CONF) * - VER_MINOR * Bits 15:8 * Purpose: Specify the minor number of the HTT message library version @@ -7017,6 +7149,8 @@ enum htt_t2h_msg_type { /** * @brief - target -> host HTT Rx In order indication message * + * MSG_TYPE => HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND + * * @details * * |31 24|23 |15|14|13|12|11|10|9|8|7|6|5|4 0| @@ -7443,6 +7577,8 @@ A_COMPILE_TIME_ASSERT(HTT_RX_IND_hdr_size_quantum, /** * @brief target -> host rx indication message definition * + * MSG_TYPE => HTT_T2H_MSG_TYPE_RX_IND + * * @details * The following field definitions describe the format of the rx indication * message sent from the target to the host. @@ -7519,7 +7655,7 @@ A_COMPILE_TIME_ASSERT(HTT_RX_IND_hdr_size_quantum, * - MSG_TYPE * Bits 7:0 * Purpose: identifies this as an rx indication message - * Value: 0x1 + * Value: 0x1 (HTT_T2H_MSG_TYPE_RX_IND) * - EXT_TID * Bits 12:8 * Purpose: identify the traffic ID of the rx data, including @@ -8327,9 +8463,10 @@ PREPACK struct htt_chan_info_t (((word) & HTT_CHAN_INFO_PHY_MODE_M) >> HTT_CHAN_INFO_PHY_MODE_S) /* - * HTT_T2H_MSG_TYPE_TX_OFFLOAD_DELIVER_IND * @brief target -> host message definition for FW offloaded pkts * + * MSG_TYPE => HTT_T2H_MSG_TYPE_TX_OFFLOAD_DELIVER_IND + * * @details * The following field definitions describe the format of the firmware * offload deliver message sent from the target to the host. @@ -8655,6 +8792,8 @@ PREPACK struct htt_tx_offload_deliver_ind_hdr_t /* * @brief target -> host rx reorder flush message definition * + * MSG_TYPE => HTT_T2H_MSG_TYPE_RX_FLUSH + * * @details * The following field definitions describe the format of the rx flush * message sent from the target to the host. 
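For illustration only (this sketch is not part of the applied patch): the new HTT_H2T_MSG_TYPE_HOST_PADDR_SIZE definitions above describe a header dword followed by one size / paddr_lo / paddr_hi triplet per host memory area. A hypothetical host-side helper that fills such a message could look like the following; it assumes the documented msg_type value 0x18, that the entry array immediately follows the header dword in the message buffer, and that the helper name itself is not part of the FW API.

static void htt_h2t_fill_host_paddr_size_sketch(
        struct htt_h2t_host_paddr_size *hdr,
        const struct htt_h2t_host_paddr_size_entry_t *areas,
        A_UINT32 num_areas)
{
    /* entries are laid out directly after the header dword */
    struct htt_h2t_host_paddr_size_entry_t *entries =
        (struct htt_h2t_host_paddr_size_entry_t *)(hdr + 1);
    A_UINT32 i;

    hdr->msg_type = 0x18;        /* HTT_H2T_MSG_TYPE_HOST_PADDR_SIZE */
    hdr->num_entries = num_areas;
    hdr->reserved = 0;

    /* dword1 onwards: size / physical_address_lo / physical_address_hi
     * per memory area, as in the layout diagram above */
    for (i = 0; i < num_areas; i++)
        entries[i] = areas[i];
}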
@@ -8671,7 +8810,7 @@ PREPACK struct htt_tx_offload_deliver_ind_hdr_t * - MSG_TYPE * Bits 7:0 * Purpose: identifies this as an rx flush message - * Value: 0x2 + * Value: 0x2 (HTT_T2H_MSG_TYPE_RX_FLUSH) * - PEER_ID * Bits 23:8 (only bits 18:8 actually used) * Purpose: identify which peer's rx data is being flushed @@ -8774,6 +8913,8 @@ PREPACK struct htt_tx_offload_deliver_ind_hdr_t /* * @brief target -> host rx pn check indication message * + * MSG_TYPE => HTT_T2H_MSG_TYPE_RX_PN_IND + * * @details * The following field definitions describe the format of the Rx PN check * indication message sent from the target to the host. @@ -8795,7 +8936,7 @@ PREPACK struct htt_tx_offload_deliver_ind_hdr_t * - MSG_TYPE * Bits 7:0 * Purpose: Identifies this as an rx pn check indication message - * Value: 0x2 + * Value: 0x10 (HTT_T2H_MSG_TYPE_RX_PN_IND) * - PEER_ID * Bits 23:8 (only bits 18:8 actually used) * Purpose: identify which peer @@ -8900,6 +9041,8 @@ PREPACK struct htt_tx_offload_deliver_ind_hdr_t /* * @brief target -> host rx offload deliver message for LL system * + * MSG_TYPE => HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND + * * @details * In a low latency system this message is sent whenever the offload * manager flushes out the packets it has coalesced in its coalescing buffer. @@ -9015,6 +9158,8 @@ PREPACK struct htt_tx_offload_deliver_ind_hdr_t /** * @brief target -> host rx peer map/unmap message definition * + * MSG_TYPE => HTT_T2H_MSG_TYPE_PEER_MAP + * * @details * The following diagram shows the format of the rx peer map message sent * from the target to the host. This layout assumes the target operates @@ -9050,6 +9195,8 @@ PREPACK struct htt_tx_offload_deliver_ind_hdr_t * |-----------------------------------------------------------------------| * * + * MSG_TYPE => HTT_T2H_MSG_TYPE_PEER_UNMAP + * * The following diagram shows the format of the rx peer unmap message sent * from the target to the host. * @@ -9063,7 +9210,8 @@ PREPACK struct htt_tx_offload_deliver_ind_hdr_t * - MSG_TYPE * Bits 7:0 * Purpose: identifies this as an rx peer map or peer unmap message - * Value: peer map -> 0x3, peer unmap -> 0x4 + * Value: peer map -> 0x3 (HTT_T2H_MSG_TYPE_PEER_MAP), + * peer unmap -> 0x4 (HTT_T2H_MSG_TYPE_PEER_UNMAP) * - VDEV_ID * Bits 15:8 * Purpose: Indicates which virtual device the peer is associated @@ -9154,6 +9302,8 @@ PREPACK struct htt_tx_offload_deliver_ind_hdr_t /** * @brief target -> host rx peer map V2 message definition * + * MSG_TYPE => HTT_T2H_MSG_TYPE_PEER_MAP_V2 + * * @details * The following diagram shows the format of the rx peer map v2 message sent * from the target to the host. This layout assumes the target operates @@ -9211,7 +9361,7 @@ PREPACK struct htt_tx_offload_deliver_ind_hdr_t * - MSG_TYPE * Bits 7:0 * Purpose: identifies this as an rx peer map v2 message - * Value: peer map v2 -> 0x1e + * Value: peer map v2 -> 0x1e (HTT_T2H_MSG_TYPE_PEER_MAP_V2) * - VDEV_ID * Bits 15:8 * Purpose: Indicates which virtual device the peer is associated with. @@ -9482,6 +9632,7 @@ PREPACK struct htt_tx_offload_deliver_ind_hdr_t /** * @brief target -> host rx peer unmap V2 message definition * + * MSG_TYPE => HTT_T2H_MSG_TYPE_PEER_UNMAP_V2 * * The following diagram shows the format of the rx peer unmap message sent * from the target to the host. 
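For illustration only (this sketch is not part of the applied patch): the target->host hunks above annotate each message with its numeric msg_type (rx flush = 0x2, peer map = 0x3, peer unmap = 0x4, peer map v2 = 0x1e, and so on). A host-side receive path would typically switch on bits 7:0 of the first dword; the dispatch below is a hypothetical sketch and the handler bodies are placeholders.

static void htt_t2h_dispatch_sketch(const A_UINT32 *msg_words)
{
    A_UINT32 msg_type = msg_words[0] & 0xff;   /* bits 7:0 of dword0 */

    switch (msg_type) {
    case 0x2:   /* HTT_T2H_MSG_TYPE_RX_FLUSH */
        /* release the flushed rx reorder range */
        break;
    case 0x3:   /* HTT_T2H_MSG_TYPE_PEER_MAP */
    case 0x4:   /* HTT_T2H_MSG_TYPE_PEER_UNMAP */
        /* create / delete the peer id <-> MAC address binding */
        break;
    case 0x1e:  /* HTT_T2H_MSG_TYPE_PEER_MAP_V2 */
        /* extended peer map handling */
        break;
    default:
        /* unknown message types are ignored in this sketch */
        break;
    }
}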
@@ -9509,7 +9660,7 @@ PREPACK struct htt_tx_offload_deliver_ind_hdr_t * - MSG_TYPE * Bits 7:0 * Purpose: identifies this as an rx peer unmap v2 message - * Value: peer unmap v2 -> 0x1f + * Value: peer unmap v2 -> 0x1f (HTT_T2H_MSG_TYPE_PEER_UNMAP_V2) * - VDEV_ID * Bits 15:8 * Purpose: Indicates which virtual device the peer is associated @@ -9593,6 +9744,8 @@ PREPACK struct htt_tx_offload_deliver_ind_hdr_t /** * @brief target -> host message specifying security parameters * + * MSG_TYPE => HTT_T2H_MSG_TYPE_SEC_IND + * * @details * The following diagram shows the format of the security specification * message sent from the target to the host. @@ -9624,7 +9777,7 @@ PREPACK struct htt_tx_offload_deliver_ind_hdr_t * - MSG_TYPE * Bits 7:0 * Purpose: identifies this as a security specification message - * Value: 0xb + * Value: 0xb (HTT_T2H_MSG_TYPE_SEC_IND) * - SEC_TYPE * Bits 14:8 * Purpose: specifies which type of security applies to the peer @@ -9701,6 +9854,8 @@ PREPACK struct htt_tx_offload_deliver_ind_hdr_t /** * @brief target -> host rx ADDBA / DELBA message definitions * + * MSG_TYPE => HTT_T2H_MSG_TYPE_RX_ADDBA + * * @details * The following diagram shows the format of the rx ADDBA message sent * from the target to the host: @@ -9710,6 +9865,8 @@ PREPACK struct htt_tx_offload_deliver_ind_hdr_t * | peer ID | TID | window size | msg type | * |---------------------------------------------------------------------| * + * MSG_TYPE => HTT_T2H_MSG_TYPE_RX_DELBA + * * The following diagram shows the format of the rx DELBA message sent * from the target to the host: * @@ -9723,7 +9880,8 @@ PREPACK struct htt_tx_offload_deliver_ind_hdr_t * - MSG_TYPE * Bits 7:0 * Purpose: identifies this as an rx ADDBA or DELBA message - * Value: ADDBA -> 0x5, DELBA -> 0x6 + * Value: ADDBA -> 0x5 (HTT_T2H_MSG_TYPE_RX_ADDBA), + * DELBA -> 0x6 (HTT_T2H_MSG_TYPE_RX_DELBA) * - IR (initiator / recipient) * Bits 9:8 (DELBA only) * Purpose: specify whether the DELBA handshake was initiated by the @@ -9973,6 +10131,8 @@ PREPACK struct htt_txq_group { /** * @brief target -> host TX completion indication message definition * + * MSG_TYPE => HTT_T2H_MSG_TYPE_TX_COMPL_IND + * * @details * The following diagram shows the format of the TX completion indication sent * from the target to the host @@ -10021,7 +10181,7 @@ PREPACK struct htt_txq_group { * - msg_type * Bits 7:0 * Purpose: identifies this as HTT TX completion indication - * Value: 0x7 + * Value: 0x7 (HTT_T2H_MSG_TYPE_TX_COMPL_IND) * - status * Bits 10:8 * Purpose: the TX completion status of payload fragmentations descriptors @@ -10477,6 +10637,8 @@ PREPACK struct htt_tx_compl_ind_append_tx_tsf64 { /** * @brief target -> host rate-control update indication message * + * DEPRECATED (DEPRECATED_HTT_T2H_MSG_TYPE_RC_UPDATE_IND) + * * @details * The following diagram shows the format of the RC Update message * sent from the target to the host, while processing the tx-completion @@ -10566,6 +10728,8 @@ typedef struct { /** * @brief target -> host rx fragment indication message definition * + * MSG_TYPE => HTT_T2H_MSG_TYPE_RX_FRAG_IND + * * @details * The following field definitions describe the format of the rx fragment * indication message sent from the target to the host. 
@@ -10591,7 +10755,7 @@ typedef struct { * - MSG_TYPE * Bits 7:0 * Purpose: identifies this as an rx fragment indication message - * Value: 0xa + * Value: 0xa (HTT_T2H_MSG_TYPE_RX_FRAG_IND) * - EXT_TID * Bits 12:8 * Purpose: identify the traffic ID of the rx data, including @@ -10671,6 +10835,8 @@ typedef struct { /** * @brief target -> host test message definition * + * MSG_TYPE => HTT_T2H_MSG_TYPE_TEST + * * @details * The following field definitions describe the format of the test * message sent from the target to the host. @@ -10727,6 +10893,8 @@ typedef struct { /** * @brief target -> host packet log message * + * MSG_TYPE => HTT_T2H_MSG_TYPE_PKTLOG + * * @details * The following field definitions describe the format of the packet log * message sent from the target to the host. @@ -10742,7 +10910,7 @@ typedef struct { * - MSG_TYPE * Bits 7:0 * Purpose: identifies this as a pktlog message - * Value: HTT_T2H_MSG_TYPE_PKTLOG + * Value: 0x8 (HTT_T2H_MSG_TYPE_PKTLOG) * - mac_id * Bits 9:8 * Purpose: identifies which MAC/PHY instance generated this pktlog info @@ -11009,6 +11177,8 @@ enum htt_dbg_stats_status { /** * @brief target -> host statistics upload * + * MSG_TYPE => HTT_T2H_MSG_TYPE_STATS_CONF + * * @details * The following field definitions describe the format of the HTT target * to host stats upload confirmation message. @@ -11049,7 +11219,7 @@ enum htt_dbg_stats_status { * - MSG_TYPE * Bits 7:0 * Purpose: identifies this is a statistics upload confirmation message - * Value: 0x9 + * Value: 0x9 (HTT_T2H_MSG_TYPE_STATS_CONF) * - COOKIE_LSBS * Bits 31:0 * Purpose: Provide a mechanism to match a target->host stats confirmation @@ -11131,6 +11301,8 @@ enum htt_dbg_stats_status { /** * @brief host -> target FRAG DESCRIPTOR/MSDU_EXT DESC bank * + * MSG_TYPE => HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG + * * @details * The following field definitions describe the format of the HTT host * to target frag_desc/msdu_ext bank configuration message. @@ -11170,7 +11342,7 @@ enum htt_dbg_stats_status { * Header fields: * - MSG_TYPE * Bits 7:0 - * Value: 0x6 + * Value: 0x6 (HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG) * for systems with 64-bit format for bus addresses: * - BANKx_BASE_ADDRESS_LO * Bits 31:0 @@ -11317,6 +11489,8 @@ TEMPLATE_HTT_TX_FRAG_DESC_BANK_CFG_T(64, HTT_VAR_PADDR64_LE(bank_base_address)); /** * @brief target -> host HTT TX Credit total count update message definition * + * MSG_TYPE => HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND + * *|31 16|15|14 9| 8 |7 0 | *|---------------------+--+----------+-------+----------| *|cur htt credit delta | Q| reserved | sign | msg type | @@ -11326,7 +11500,7 @@ TEMPLATE_HTT_TX_FRAG_DESC_BANK_CFG_T(64, HTT_VAR_PADDR64_LE(bank_base_address)); * - MSG_TYPE * Bits 7:0 * Purpose: identifies this as a htt tx credit delta update message - * Value: 0xe + * Value: 0xf (HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND) * - SIGN * Bits 8 * identifies whether credit delta is positive or negative @@ -11392,6 +11566,8 @@ TEMPLATE_HTT_TX_FRAG_DESC_BANK_CFG_T(64, HTT_VAR_PADDR64_LE(bank_base_address)); /** * @brief HTT WDI_IPA Operation Response Message * + * MSG_TYPE => HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE + * * @details * HTT WDI_IPA Operation Response message is sent by target * to host confirming suspend or resume operation. 
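For illustration only (this sketch is not part of the applied patch): the TX credit total count update indication documented above packs the current credit delta into bits 31:16 of dword0 and a sign flag into bit 8. The decode below follows that layout; it assumes a set sign bit means the delta is negative, and the helper name is hypothetical.

static int htt_tx_credit_delta_sketch(A_UINT32 dword0)
{
    A_UINT32 delta = (dword0 >> 16) & 0xffff;  /* bits 31:16 - credit delta */
    A_UINT32 sign  = (dword0 >> 8) & 0x1;      /* bit 8 - delta sign */

    /* assumption: sign == 1 indicates a negative credit delta */
    return sign ? -(int)delta : (int)delta;
}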
@@ -11410,7 +11586,7 @@ TEMPLATE_HTT_TX_FRAG_DESC_BANK_CFG_T(64, HTT_VAR_PADDR64_LE(bank_base_address)); * - MSG_TYPE * Bits 7:0 * Purpose: Identifies this as WDI_IPA Operation Response message - * value: = 0x13 + * value: = 0x14 (HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE) * - OP_CODE * Bits 31:16 * Purpose: Identifies the operation target is responding to (e.g. TX suspend) @@ -11483,6 +11659,9 @@ enum htt_phy_mode { /** * @brief target -> host HTT channel change indication + * + * MSG_TYPE => HTT_T2H_MSG_TYPE_CHAN_CHANGE + * * @details * Specify when a channel change occurs. * This allows the host to precisely determine which rx frames arrived @@ -11505,7 +11684,7 @@ enum htt_phy_mode { * - MSG_TYPE * Bits 7:0 * Purpose: identifies this as a htt channel change indication message - * Value: 0x15 + * Value: 0x15 (HTT_T2H_MSG_TYPE_CHAN_CHANGE) * - PRIMARY_CHAN_CENTER_FREQ_MHZ * Bits 31:0 * Purpose: identify the (center of the) new 20 MHz primary channel @@ -11545,6 +11724,20 @@ PREPACK struct htt_chan_change_t A_UINT32 contig_chan2_center_freq_mhz; A_UINT32 phy_mode; } POSTPACK; +/* + * Due to historical / backwards-compatibility reasons, maintain the + * below htt_chan_change_msg struct definition, which needs to be + * consistent with the above htt_chan_change_t struct definition + * (aside from the htt_chan_change_t definition including the msg_type + * dword within the message, and the htt_chan_change_msg only containing + * the payload of the message that follows the msg_type dword). + */ +PREPACK struct htt_chan_change_msg { + A_UINT32 chan_mhz; /* frequency in mhz */ + A_UINT32 band_center_freq1; /* Center frequency 1 in MHz */ + A_UINT32 band_center_freq2; /* Center frequency 2 in MHz - valid only for 11acvht 80plus80 mode*/ + A_UINT32 chan_mode; /* WLAN_PHY_MODE of the channel defined in wlan_defs.h */ +} POSTPACK; #define HTT_CHAN_CHANGE_PRIMARY_CHAN_CENTER_FREQ_MHZ_M 0xffffffff #define HTT_CHAN_CHANGE_PRIMARY_CHAN_CENTER_FREQ_MHZ_S 0 @@ -11598,6 +11791,8 @@ PREPACK struct htt_chan_change_t /** * @brief rx offload packet error message * + * MSG_TYPE => HTT_T2H_MSG_TYPE_RX_OFLD_PKT_ERR + * * @details * HTT_RX_OFLD_PKT_ERR message is sent by target to host to indicate err * of target payload like mic err. 
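For illustration only (this sketch is not part of the applied patch): the backwards-compatibility note above keeps htt_chan_change_msg as a payload-only view of the channel change indication, while htt_chan_change_t also includes the leading msg_type dword. A hypothetical parse helper based on that note, assuming a dword-aligned message buffer:

static void htt_chan_change_parse_sketch(const A_UINT32 *msg_words,
                                         struct htt_chan_change_msg *chan_info)
{
    /* msg_words[0] is the msg_type dword
     * (0x15, HTT_T2H_MSG_TYPE_CHAN_CHANGE); per the note above,
     * the legacy payload view starts at the next dword. */
    const struct htt_chan_change_msg *payload =
        (const struct htt_chan_change_msg *)&msg_words[1];

    *chan_info = *payload;
}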
@@ -11828,7 +12023,9 @@ enum htt_rx_ofld_pkt_err_type { } while (0) /** - * @brief peer rate report message + * @brief target -> host peer rate report message + * + * MSG_TYPE => HTT_T2H_MSG_TYPE_RATE_REPORT * * @details * HTT_T2H_MSG_TYPE_RATE_REPORT message is sent by target to host to indicate the @@ -11942,7 +12139,9 @@ enum htt_peer_rate_report_phy_type { } while (0) /** - * @brief HTT_T2H_MSG_TYPE_FLOW_POOL_MAP Message + * @brief target -> host flow pool map message + * + * MSG_TYPE => HTT_T2H_MSG_TYPE_FLOW_POOL_MAP * * @details * HTT_T2H_MSG_TYPE_FLOW_POOL_MAP message is sent by the target when setting up @@ -11964,7 +12163,7 @@ enum htt_peer_rate_report_phy_type { * |-------------------------------------------------------------------| * * The header field is one DWORD long and is interpreted as follows: - * b'0:7 - msg_type: This will be set to HTT_T2H_MSG_TYPE_FLOW_POOL_MAP + * b'0:7 - msg_type: Set to 0x18 (HTT_T2H_MSG_TYPE_FLOW_POOL_MAP) * b'8-15 - num_flows: This will indicate the number of flows being setup in * this message * b'16-31 - reserved: These bits are reserved for future use @@ -12120,7 +12319,9 @@ PREPACK struct htt_flow_pool_map_payload_t { } while (0) /** - * @brief HTT_T2H_MSG_TYPE_FLOW_POOL_UNMAP Message + * @brief target -> host flow pool unmap message + * + * MSG_TYPE => HTT_T2H_MSG_TYPE_FLOW_POOL_UNMAP * * @details * HTT_T2H_MSG_TYPE_FLOW_POOL_UNMAP message is sent by the target when tearing @@ -12145,8 +12346,8 @@ PREPACK struct htt_flow_pool_map_payload_t { * |-------------------------------------------------------------------| * * The message is interpreted as follows: - * dword0 - b'0:7 - msg_type: This will be set to - * HTT_T2H_MSG_TYPE_FLOW_POOL_UNMAP + * dword0 - b'0:7 - msg_type: This will be set to 0x19 + * (HTT_T2H_MSG_TYPE_FLOW_POOL_UNMAP) * b'8:31 - reserved0: Reserved for future use * * dword1 - b'0:31 - flow_type: This indicates the type of the entity to which @@ -12215,7 +12416,9 @@ PREPACK struct htt_flow_pool_unmap_t { /** - * @brief HTT_T2H_MSG_TYPE_SRING_SETUP_DONE Message + * @brief target -> host SRING setup done message + * + * MSG_TYPE => HTT_T2H_MSG_TYPE_SRING_SETUP_DONE * * @details * HTT_T2H_MSG_TYPE_SRING_SETUP_DONE message is sent by the target when @@ -12232,8 +12435,8 @@ PREPACK struct htt_flow_pool_unmap_t { * |-------------------------------------------------------------------| * * The message is interpreted as follows: - * dword0 - b'0:7 - msg_type: This will be set to - * HTT_T2H_MSG_TYPE_SRING_SETUP_DONE + * dword0 - b'0:7 - msg_type: This will be set to 0x1a + * (HTT_T2H_MSG_TYPE_SRING_SETUP_DONE) * b'8:15 - pdev_id: * 0 (for rings at SOC/UMAC level), * 1/2/3 mac id (for rings at LMAC level) @@ -12292,7 +12495,9 @@ enum htt_ring_setup_status { /** - * @brief HTT_T2H_MSG_TYPE_MAP_FLOW_INFO Message + * @brief target -> flow map flow info + * + * MSG_TYPE => HTT_T2H_MSG_TYPE_MAP_FLOW_INFO * * @details * HTT TX map flow entry with tqm flow pointer @@ -12317,8 +12522,8 @@ enum htt_ring_setup_status { * * The message is interpreted as follows: * - * dword0 - b'0:7 - msg_type: This will be set to - * HTT_T2H_MSG_TYPE_MAP_FLOW_INFO + * dword0 - b'0:7 - msg_type: This will be set to 0x1b + * (HTT_T2H_MSG_TYPE_MAP_FLOW_INFO) * * dword0 - b'8:27 - fse_hsh_idx: Flow search table index provided by host * for this flow entry @@ -12419,6 +12624,8 @@ enum htt_dbg_ext_stats_status { /** * @brief target -> host ppdu stats upload * + * MSG_TYPE => HTT_T2H_MSG_TYPE_PPDU_STATS_IND + * * @details * The following field definitions describe the 
format of the HTT target * to host ppdu stats indication message. @@ -12442,7 +12649,7 @@ enum htt_dbg_ext_stats_status { * Bits 7:0 * Purpose: Identifies this is a PPDU STATS indication * message. - * Value: 0x1d + * Value: 0x1d (HTT_T2H_MSG_TYPE_PPDU_STATS_IND) * - mac_id * Bits 9:8 * Purpose: mac_id of this ppdu_id @@ -12532,6 +12739,8 @@ typedef struct { /** * @brief target -> host extended statistics upload * + * MSG_TYPE => HTT_T2H_MSG_TYPE_EXT_STATS_CONF + * * @details * The following field definitions describe the format of the HTT target * to host stats upload confirmation message. @@ -12565,7 +12774,7 @@ typedef struct { * Bits 7:0 * Purpose: Identifies this is a extended statistics upload confirmation * message. - * Value: 0x1c + * Value: 0x1c (HTT_T2H_MSG_TYPE_EXT_STATS_CONF) * - COOKIE_LSBS * Bits 31:0 * Purpose: Provide a mechanism to match a target->host stats confirmation @@ -12678,6 +12887,8 @@ typedef enum { /** * @brief target -> host monitor mac header indication message * + * MSG_TYPE => HTT_T2H_MSG_TYPE_MONITOR_MAC_HEADER_IND + * * @details * The following diagram shows the format of the monitor mac header message * sent from the target to the host. @@ -12706,7 +12917,7 @@ typedef enum { * - msg_type * Bits 7:0 * Purpose: Identifies this is a monitor mac header indication message. - * Value: 0x20 + * Value: 0x20 (HTT_T2H_MSG_TYPE_MONITOR_MAC_HEADER_IND) * - peer_id * Bits 31:16 * Purpose: Software peer id given by host during association, @@ -12751,7 +12962,9 @@ typedef enum { HTT_T2H_MONITOR_MAC_HEADER_NUM_MPDU_S) /** - * @brief HTT_T2H_MSG_TYPE_FLOW_POOL_RESIZE Message + * @brief target -> host flow pool resize Message + * + * MSG_TYPE => HTT_T2H_MSG_TYPE_FLOW_POOL_RESIZE * * @details * HTT_T2H_MSG_TYPE_FLOW_POOL_RESIZE message is sent by the target when @@ -12767,8 +12980,8 @@ typedef enum { * |-------------------------------------------------------------------| * * The message is interpreted as follows: - * b'0:7 - msg_type: This will be set to - * HTT_T2H_MSG_TYPE_FLOW_POOL_RESIZE + * b'0:7 - msg_type: This will be set to 0x21 + * (HTT_T2H_MSG_TYPE_FLOW_POOL_RESIZE) * * b'0:15 - flow pool ID: Existing flow pool ID * @@ -12813,53 +13026,7 @@ PREPACK struct htt_flow_pool_resize_t { ((_var) |= ((_val) << HTT_FLOW_POOL_RESIZE_FLOW_POOL_NEW_SIZE_S)); \ } while (0) -/** - * @brief host -> target channel change message - * - * @details - * the meesage is generated by FW every time FW changes channel. This will be used by host mainly - * to associate RX frames to correct channel they were received on. - * The following field definitions describe the format of the HTT target - * to host channel change message. - * |31 16|15 8|7 5|4 0| - * |------------------------------------------------------------| - * | reserved | MSG_TYPE | - * |------------------------------------------------------------| - * | CHAN_MHZ | - * |------------------------------------------------------------| - * | BAND_CENTER_FREQ1 | - * |------------------------------------------------------------| - * | BAND_CENTER_FREQ2 | - * |------------------------------------------------------------| - * | CHAN_PHY_MODE | - * |------------------------------------------------------------| - * Header fields: - * - MSG_TYPE - * Bits 7:0 - * Value: 0xf - * - CHAN_MHZ - * Bits 31:0 - * Purpose: frequency of the primary 20mhz channel. - * - BAND_CENTER_FREQ1 - * Bits 31:0 - * Purpose: centre frequency of the full channel. - * - BAND_CENTER_FREQ2 - * Bits 31:0 - * Purpose: centre frequency2 of the channel. 
is only valid for 11acvht 80plus80. - * - CHAN_PHY_MODE - * Bits 31:0 - * Purpose: phy mode of the channel. -*/ - -PREPACK struct htt_chan_change_msg { - A_UINT32 chan_mhz; /* frequency in mhz */ - A_UINT32 band_center_freq1; /* Center frequency 1 in MHz*/ - - A_UINT32 band_center_freq2; /* Center frequency 2 in MHz - valid only for 11acvht 80plus80 mode*/ - - A_UINT32 chan_mode; /* WLAN_PHY_MODE of the channel defined in wlan_defs.h */ -} POSTPACK; #define HTT_CFR_CAPTURE_MAGIC_PATTERN 0xCCCCCCCC #define HTT_CFR_CAPTURE_READ_INDEX_OFFSET 0 /* bytes */ @@ -12995,6 +13162,8 @@ typedef enum { * @brief target -> host CFR dump completion indication message definition * htt_cfr_dump_compl_ind when the version is HTT_PEER_CFR_CAPTURE_MSG_TYPE_1. * + * MSG_TYPE => HTT_T2H_MSG_TYPE_CFR_DUMP_COMPL_IND + * * @details * The following diagram shows the format of the Channel Frequency Response * (CFR) dump completion indication. This inidcation is sent to the Host when @@ -13067,7 +13236,7 @@ typedef enum { * - msg_type * Bits 7:0 * Purpose: Identifies this as CFR TX completion indication - * Value: HTT_T2H_MSG_TYPE_CFR_DUMP_COMPL_IND + * Value: 0x22 (HTT_T2H_MSG_TYPE_CFR_DUMP_COMPL_IND) * - payload_present * Bit 8 * Purpose: Identifies how CFR data is sent to host @@ -13337,7 +13506,9 @@ PREPACK struct htt_cfr_dump_compl_ind { /** * @brief target -> host peer (PPDU) stats message - * HTT_T2H_MSG_TYPE_PEER_STATS_IND + * + * MSG_TYPE => HTT_T2H_MSG_TYPE_PEER_STATS_IND + * * @details * This message is generated by FW when FW is sending stats to host * about one or more PPDUs that the FW has transmitted to one or more peers. @@ -13399,7 +13570,7 @@ PREPACK struct htt_cfr_dump_compl_ind { * * Header * ------ - * dword0 - b'0:7 - msg_type : HTT_T2H_MSG_TYPE_PEER_STATS_IND + * dword0 - b'0:7 - msg_type : 0x23 (HTT_T2H_MSG_TYPE_PEER_STATS_IND) * dword0 - b'8:31 - reserved : Reserved for future use * * payload include below peer_stats information @@ -13430,7 +13601,9 @@ PREPACK struct htt_cfr_dump_compl_ind { /** - * @brief HTT_T2H_MSG_TYPE_BKPRESSURE_EVENTID Message + * @brief target -> host backpressure event + * + * MSG_TYPE => HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND * * @details * HTT_T2H_MSG_TYPE_BKPRESSURE_EVENTID message is sent by the target when @@ -13456,8 +13629,8 @@ PREPACK struct htt_cfr_dump_compl_ind { * |-------------------------------------------------------------------| * * The message is interpreted as follows: - * dword0 - b'0:7 - msg_type: This will be set to - * HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND + * dword0 - b'0:7 - msg_type: This will be set to 0x24 + * (HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND) * b'8:15 - pdev_id: 0 indicates msg is for UMAC ring. * 1, 2, 3 indicates pdev_id 0,1,2 and the msg is for LMAC ring. 
@@ -13864,8 +14037,13 @@ enum HTT_UL_OFDMA_TRIG_TYPE { /** * @brief target -> host channel calibration data message + * + * MSG_TYPE => HTT_T2H_MSG_TYPE_CHAN_CALDATA + * * @brief host -> target channel calibration data message * + * MSG_TYPE => HTT_H2T_MSG_TYPE_CHAN_CALDATA + * * @details * The following field definitions describe the format of the channel * calibration data message sent from the target to the host when @@ -13890,8 +14068,8 @@ enum HTT_UL_OFDMA_TRIG_TYPE { * - MSG_TYPE * Bits 7:0 * Purpose: identifies this as a channel calibration data message - * Value: HTT_T2H_MSG_TYPE_CHAN_CALDATA (0x15) or - * HTT_H2T_MSG_TYPE_CHAN_CALDATA (0xb) + * Value: 0x25 (HTT_T2H_MSG_TYPE_CHAN_CALDATA) + * 0x14 (HTT_H2T_MSG_TYPE_CHAN_CALDATA) * - SUB_TYPE * Bits 11:8 * Purpose: T2H: indicates whether target is providing chan cal data @@ -14067,7 +14245,9 @@ PREPACK struct htt_chan_caldata_msg { /** - * @brief HTT_T2H_MSG_TYPE_FSE_CMEM_BASE_SEND Message + * @brief target -> host FSE CMEM based send + * + * MSG_TYPE => HTT_T2H_MSG_TYPE_FSE_CMEM_BASE_SEND * * @details * HTT_T2H_MSG_TYPE_FSE_CMEM_BASE_SEND message is sent by the target when @@ -14088,8 +14268,8 @@ PREPACK struct htt_chan_caldata_msg { * |-------------------------------------------------------------------| * * The message is interpreted as follows: - * dword0 - b'0:7 - msg_type: This will be set to - * HTT_T2H_MSG_TYPE_FSE_CMEM_BASE_SEND + * dword0 - b'0:7 - msg_type: This will be set to 0x27 + * (HTT_T2H_MSG_TYPE_FSE_CMEM_BASE_SEND) * b'8:15 - number_entries: Indicated the number of entries * programmed. * b'16:31 - reserved. @@ -14354,5 +14534,68 @@ PREPACK struct htt_rx_peer_metadata_v1 { ((_var) |= ((_val) << HTT_RX_PEER_META_DATA_V1_CHIP_ID_S)); \ } while (0) +/* + * In some systems, the host SW wants to specify priorities between + * different MSDU / flow queues within the same peer-TID. + * The below enums are used for the host to identify to the target + * which MSDU queue's priority it wants to adjust. + */ + +/* + * The MSDUQ index describe index of TCL HW, where each index is + * used for queuing particular types of MSDUs. + * The different MSDU queue types are defined in HTT_MSDU_QTYPE. + */ +enum HTT_MSDUQ_INDEX { + HTT_MSDUQ_INDEX_NON_UDP, /* NON UDP MSDUQ index */ + HTT_MSDUQ_INDEX_UDP, /* UDP MSDUQ index */ + + HTT_MSDUQ_INDEX_CUSTOM_PRIO_0, /* Latency priority 0 index */ + HTT_MSDUQ_INDEX_CUSTOM_PRIO_1, /* Latency priority 1 index */ + + HTT_MSDUQ_INDEX_CUSTOM_EXT_PRIO_0, /* High num TID cases/ MLO dedicate link cases */ + HTT_MSDUQ_INDEX_CUSTOM_EXT_PRIO_1, /* High num TID cases/ MLO dedicate link cases */ + + HTT_MSDUQ_INDEX_CUSTOM_EXT_PRIO_2, /* High num TID cases/ MLO dedicate link cases */ + HTT_MSDUQ_INDEX_CUSTOM_EXT_PRIO_3, /* High num TID cases/ MLO dedicate link cases */ + + HTT_MSDUQ_MAX_INDEX, +}; + +/* MSDU qtype definition */ +enum HTT_MSDU_QTYPE { + /* + * The LATENCY_CRIT_0 and LATENCY_CRIT_1 queue types don't have a fixed + * relative priority. Instead, the relative priority of CRIT_0 versus + * CRIT_1 is controlled by the FW, through the configuration parameters + * it applies to the queues. 
+ */ + HTT_MSDU_QTYPE_LATENCY_CRIT_0, /* Specified MSDUQ index used for latency critical 0 */ + HTT_MSDU_QTYPE_LATENCY_CRIT_1, /* Specified MSDUQ index used for latency critical 1 */ + HTT_MSDU_QTYPE_UDP, /* Specifies MSDUQ index used for UDP flow */ + HTT_MSDU_QTYPE_NON_UDP, /* Specifies MSDUQ index used for non-udp flow */ + HTT_MSDU_QTYPE_HOL, /* Specified MSDUQ index used for Head of Line */ + + + /* New MSDU_QTYPE should be added above this line */ + /* + * Below QTYPE_MAX will increase if additional QTYPEs are defined + * in the future. Hence HTT_MSDU_QTYPE_MAX can't be used in + * any host/target message definitions. The QTYPE_MAX value can + * only be used internally within the host or within the target. + * If host or target find a qtype value is >= HTT_MSDU_QTYPE_MAX + * it must regard the unexpected value as a default qtype value, + * or ignore it. + */ + HTT_MSDU_QTYPE_MAX, + HTT_MSDU_QTYPE_NOT_IN_USE = 255, /* corresponding MSDU index is not in use */ +}; + +enum HTT_MSDUQ_LEGACY_FLOW_INDEX { + HTT_MSDUQ_LEGACY_HI_PRI_FLOW_INDEX = 0, + HTT_MSDUQ_LEGACY_LO_PRI_FLOW_INDEX = 1, + HTT_MSDUQ_LEGACY_UDP_FLOW_INDEX = 2, + HTT_MSDUQ_LEGACY_NON_UDP_FLOW_INDEX = 3, +}; #endif diff --git a/drivers/staging/fw-api/fw/wlan_defs.h b/drivers/staging/fw-api/fw/wlan_defs.h index fee5cb68c06a..591ced86231e 100755 --- a/drivers/staging/fw-api/fw/wlan_defs.h +++ b/drivers/staging/fw-api/fw/wlan_defs.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2016, 2018-2020 The Linux Foundation. All rights reserved.* + * Copyright (c) 2013-2016, 2018-2021 The Linux Foundation. All rights reserved.* * * Previously licensed under the ISC license by Qualcomm Atheros, Inc. * @@ -351,6 +351,15 @@ enum { REGDMN_MODE_11AXA_HE80_BIT = 38, /* 5Ghz, HE80 */ REGDMN_MODE_11AXA_HE160_BIT = 39, /* 5Ghz, HE160 */ REGDMN_MODE_11AXA_HE80_80_BIT = 40, /* 5Ghz, HE80+80 */ + REGDMN_MODE_11BEG_EHT20_BIT = 41, /* 2Ghz, EHT20 */ + REGDMN_MODE_11BEA_EHT20_BIT = 42, /* 5Ghz, EHT20 */ + REGDMN_MODE_11BEG_EHT40PLUS_BIT = 43, /* 2Ghz, EHT40+ */ + REGDMN_MODE_11BEG_EHT40MINUS_BIT = 44, /* 2Ghz, EHT40- */ + REGDMN_MODE_11BEA_EHT40PLUS_BIT = 45, /* 5Ghz, EHT40+ */ + REGDMN_MODE_11BEA_EHT40MINUS_BIT = 46, /* 5Ghz, EHT40- */ + REGDMN_MODE_11BEA_EHT80_BIT = 47, /* 5Ghz, EHT80 */ + REGDMN_MODE_11BEA_EHT160_BIT = 48, /* 5Ghz, EHT160 */ + REGDMN_MODE_11BEA_EHT320_BIT = 49, /* 5Ghz, EHT320 */ }; enum { @@ -393,6 +402,15 @@ enum { REGDMN_MODE_U32_11AXA_HE80 = 1 << (REGDMN_MODE_11AXA_HE80_BIT - 32), REGDMN_MODE_U32_11AXA_HE160 = 1 << (REGDMN_MODE_11AXA_HE160_BIT - 32), REGDMN_MODE_U32_11AXA_HE80_80 = 1 << (REGDMN_MODE_11AXA_HE80_80_BIT - 32), + REGDMN_MODE_U32_11BEG_EHT20 = 1 << (REGDMN_MODE_11BEG_EHT20_BIT - 32), + REGDMN_MODE_U32_11BEA_EHT20 = 1 << (REGDMN_MODE_11BEA_EHT20_BIT - 32), + REGDMN_MODE_U32_11BEG_EHT40PLUS = 1 << (REGDMN_MODE_11BEG_EHT40PLUS_BIT - 32), + REGDMN_MODE_U32_11BEG_EHT40MINUS = 1 << (REGDMN_MODE_11BEG_EHT40MINUS_BIT - 32), + REGDMN_MODE_U32_11BEA_EHT40PLUS = 1 << (REGDMN_MODE_11BEA_EHT40PLUS_BIT - 32), + REGDMN_MODE_U32_11BEA_EHT40MINUS = 1 << (REGDMN_MODE_11BEA_EHT40MINUS_BIT - 32), + REGDMN_MODE_U32_11BEA_EHT80 = 1 << (REGDMN_MODE_11BEA_EHT80_BIT - 32), + REGDMN_MODE_U32_11BEA_EHT160 = 1 << (REGDMN_MODE_11BEA_EHT160_BIT - 32), + REGDMN_MODE_U32_11BEA_EHT320 = 1 << (REGDMN_MODE_11BEA_EHT320_BIT - 32), }; #define REGDMN_MODE_ALL (0xFFFFFFFF) /* REGDMN_MODE_ALL is defined out of the enum @@ -536,7 +554,7 @@ typedef struct { #define PROD_SCHED_BW_ENTRIES (NUM_SCHED_ENTRIES * NUM_DYN_BW) -#if NUM_DYN_BW > 4 +#if NUM_DYN_BW > 5 /* 
Extend rate table module first */ #error "Extend rate table module first" #endif diff --git a/drivers/staging/fw-api/fw/wlan_module_ids.h b/drivers/staging/fw-api/fw/wlan_module_ids.h index 3cf84c8913ff..d299b0a588e3 100644 --- a/drivers/staging/fw-api/fw/wlan_module_ids.h +++ b/drivers/staging/fw-api/fw/wlan_module_ids.h @@ -119,6 +119,7 @@ typedef enum { WLAN_MODULE_CODE_COVER, /* 0x55 */ /* code coverage */ WLAN_MODULE_SHO, /* 0x56 */ /* SAP HW offload */ WLAN_MODULE_MLO_MGR, /* 0x57 */ /* MLO manager */ + WLAN_MODULE_PEER_INIT, /* 0x58 */ /* peer init connection handling */ WLAN_MODULE_ID_MAX, diff --git a/drivers/staging/fw-api/fw/wmi_services.h b/drivers/staging/fw-api/fw/wmi_services.h index 6664ebfad107..fb5a5d1b34bf 100644 --- a/drivers/staging/fw-api/fw/wmi_services.h +++ b/drivers/staging/fw-api/fw/wmi_services.h @@ -528,6 +528,8 @@ typedef enum { WMI_SERVICE_ENABLE_LOWER_6G_EDGE_CH_SUPP = 283, /* Indicates FW support for enabling lower 6 GHz edge channel 5935 */ WMI_SERVICE_DISABLE_UPPER_6G_EDGE_CH_SUPP = 284, /* Indicates FW support for disabling upper 6 GHz edge channel 7115 */ WMI_SERVICE_FORCED_DTIM_SUPP = 285, /* Indicates FW supports forced DTIM configuration */ + WMI_SERVICE_DCS_AWGN_INT_SUPPORT = 286, /* Indicates FW supports AWGN Int */ + WMI_SERVICE_IGMP_OFFLOAD_SUPPORT = 287, /* FW supports igmp offload during APPS suspend */ WMI_MAX_EXT2_SERVICE diff --git a/drivers/staging/fw-api/fw/wmi_tlv_defs.h b/drivers/staging/fw-api/fw/wmi_tlv_defs.h index b6cfbcabe42a..98b9c19ade02 100644 --- a/drivers/staging/fw-api/fw/wmi_tlv_defs.h +++ b/drivers/staging/fw-api/fw/wmi_tlv_defs.h @@ -1141,6 +1141,15 @@ typedef enum { WMITLV_TAG_STRUC_wmi_pdev_get_dpd_status_evt_fixed_param, WMITLV_TAG_STRUC_wmi_eht_rate_set, WMITLV_TAG_STRUC_wmi_dcs_awgn_int_t, + WMITLV_TAG_STRUC_wmi_mlo_tx_send_params, + WMITLV_TAG_STRUC_wmi_partner_link_params, + WMITLV_TAG_STRUC_wmi_peer_assoc_mlo_partner_link_params, + WMITLV_TAG_STRUC_wmi_mlo_setup_cmd_fixed_param, + WMITLV_TAG_STRUC_wmi_mlo_setup_complete_event_fixed_param, + WMITLV_TAG_STRUC_wmi_mlo_ready_cmd_fixed_param, + WMITLV_TAG_STRUC_wmi_mlo_teardown_fixed_param, + WMITLV_TAG_STRUC_wmi_mlo_teardown_complete_fixed_param, + WMITLV_TAG_STRUC_wmi_igmp_offload_fixed_param, } WMITLV_TAG_ID; /* @@ -1599,6 +1608,10 @@ typedef enum { OP(WMI_PEER_TID_LATENCY_CONFIG_CMDID) \ OP(WMI_MLO_LINK_SET_ACTIVE_CMDID) \ OP(WMI_PDEV_GET_DPD_STATUS_CMDID) \ + OP(WMI_MLO_SETUP_CMDID) \ + OP(WMI_MLO_READY_CMDID) \ + OP(WMI_MLO_TEARDOWN_CMDID) \ + OP(WMI_VDEV_IGMP_OFFLOAD_CMDID) \ /* add new CMD_LIST elements above this line */ @@ -1862,6 +1875,8 @@ typedef enum { OP(WMI_TWT_NOTIFY_EVENTID) \ OP(WMI_MLO_LINK_SET_ACTIVE_RESP_EVENTID) \ OP(WMI_PDEV_GET_DPD_STATUS_EVENTID) \ + OP(WMI_MLO_SETUP_COMPLETE_EVENTID) \ + OP(WMI_MLO_TEARDOWN_COMPLETE_EVENTID) \ /* add new EVT_LIST elements above this line */ @@ -2241,7 +2256,8 @@ WMITLV_CREATE_PARAM_STRUC(WMI_PDEV_SET_WMM_PARAMS_CMDID); WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_vdev_start_request_cmd_fixed_param, wmi_vdev_start_request_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX) \ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_channel, wmi_channel, chan, WMITLV_SIZE_FIX) \ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, wmi_p2p_noa_descriptor, noa_descriptors, WMITLV_SIZE_VAR) \ - WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, wmi_vdev_start_mlo_params, mlo_params, WMITLV_SIZE_VAR) + WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, wmi_vdev_start_mlo_params, mlo_params, WMITLV_SIZE_VAR) \ + 
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, wmi_partner_link_params, partner_link_params, WMITLV_SIZE_VAR) WMITLV_CREATE_PARAM_STRUC(WMI_VDEV_START_REQUEST_CMDID); @@ -2336,7 +2352,8 @@ WMITLV_CREATE_PARAM_STRUC(WMI_VDEV_IPSEC_NATKEEPALIVE_FILTER_CMDID); WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_vht_rate_set, wmi_vht_rate_set, peer_vht_rates, WMITLV_SIZE_FIX) \ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, wmi_he_rate_set, peer_he_rates, WMITLV_SIZE_VAR) \ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, wmi_peer_assoc_mlo_params, mlo_params, WMITLV_SIZE_VAR) \ - WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, wmi_eht_rate_set, peer_eht_rates, WMITLV_SIZE_VAR) + WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, wmi_eht_rate_set, peer_eht_rates, WMITLV_SIZE_VAR) \ + WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, wmi_peer_assoc_mlo_partner_link_params, partner_link_params, WMITLV_SIZE_VAR) WMITLV_CREATE_PARAM_STRUC(WMI_PEER_ASSOC_CMDID); @@ -2708,7 +2725,8 @@ WMITLV_CREATE_PARAM_STRUC(WMI_MGMT_TX_CMDID); #define WMITLV_TABLE_WMI_MGMT_TX_SEND_CMDID(id,op,buf,len) \ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_mgmt_tx_send_cmd_fixed_param, wmi_mgmt_tx_send_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX) \ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_BYTE, A_UINT8, bufp, WMITLV_SIZE_VAR) \ - WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_tx_send_params, wmi_tx_send_params, tx_send_params, WMITLV_SIZE_FIX) + WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_tx_send_params, wmi_tx_send_params, tx_send_params, WMITLV_SIZE_FIX) \ + WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, wmi_mlo_tx_send_params, mlo_tx_send_params, WMITLV_SIZE_VAR) WMITLV_CREATE_PARAM_STRUC(WMI_MGMT_TX_SEND_CMDID); @@ -4610,6 +4628,28 @@ WMITLV_CREATE_PARAM_STRUC(WMI_MLO_LINK_SET_ACTIVE_CMDID); WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_pdev_get_dpd_status_cmd_fixed_param, wmi_pdev_get_dpd_status_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX) WMITLV_CREATE_PARAM_STRUC(WMI_PDEV_GET_DPD_STATUS_CMDID); +/** WMI cmd used to indicate hw_links part of MLO */ +#define WMITLV_TABLE_WMI_MLO_SETUP_CMDID(id,op,buf,len) \ + WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_mlo_setup_cmd_fixed_param, wmi_mlo_setup_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX) \ + WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_UINT32, A_UINT32, hw_link_ids, WMITLV_SIZE_VAR) +WMITLV_CREATE_PARAM_STRUC(WMI_MLO_SETUP_CMDID); + +/** WMI cmd used for init synchronization of hw_links part of MLO */ +#define WMITLV_TABLE_WMI_MLO_READY_CMDID(id,op,buf,len) \ + WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_mlo_ready_cmd_fixed_param, wmi_mlo_ready_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX) +WMITLV_CREATE_PARAM_STRUC(WMI_MLO_READY_CMDID); + +/** WMI cmd used for tearing down a hw_link part of MLO */ +#define WMITLV_TABLE_WMI_MLO_TEARDOWN_CMDID(id,op,buf,len) \ + WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_mlo_teardown_fixed_param, wmi_mlo_teardown_fixed_param, fixed_param, WMITLV_SIZE_FIX) +WMITLV_CREATE_PARAM_STRUC(WMI_MLO_TEARDOWN_CMDID); + +/* Mcast ipv4 address filter list cmd */ +#define WMITLV_TABLE_WMI_VDEV_IGMP_OFFLOAD_CMDID(id,op,buf,len) \ + WMITLV_ELEM(id, op, buf, len, WMITLV_TAG_STRUC_wmi_igmp_offload_fixed_param, wmi_igmp_offload_fixed_param, fixed_param, WMITLV_SIZE_FIX) \ + WMITLV_ELEM(id, op, buf, len, WMITLV_TAG_ARRAY_FIXED_STRUC, WMI_IPV4_ADDR, mc_ipv4_list, WMITLV_SIZE_VAR) +WMITLV_CREATE_PARAM_STRUC(WMI_VDEV_IGMP_OFFLOAD_CMDID); + /************************** TLV definitions of WMI events 
*******************************/ @@ -6224,6 +6264,16 @@ WMITLV_CREATE_PARAM_STRUC(WMI_MLO_LINK_SET_ACTIVE_RESP_EVENTID); WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_pdev_get_dpd_status_evt_fixed_param, wmi_pdev_get_dpd_status_evt_fixed_param, fixed_param, WMITLV_SIZE_FIX) WMITLV_CREATE_PARAM_STRUC(WMI_PDEV_GET_DPD_STATUS_EVENTID); +/* Response event for MLO setup cmd */ +#define WMITLV_TABLE_WMI_MLO_SETUP_COMPLETE_EVENTID(id,op,buf,len) \ + WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_mlo_setup_complete_event_fixed_param, wmi_mlo_setup_complete_event_fixed_param, fixed_param, WMITLV_SIZE_FIX) +WMITLV_CREATE_PARAM_STRUC(WMI_MLO_SETUP_COMPLETE_EVENTID); + +/* Response event for MLO teardown cmd */ +#define WMITLV_TABLE_WMI_MLO_TEARDOWN_COMPLETE_EVENTID(id,op,buf,len) \ + WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_mlo_teardown_complete_fixed_param, wmi_mlo_teardown_complete_fixed_param, fixed_param, WMITLV_SIZE_FIX) +WMITLV_CREATE_PARAM_STRUC(WMI_MLO_TEARDOWN_COMPLETE_EVENTID); + #ifdef __cplusplus } diff --git a/drivers/staging/fw-api/fw/wmi_unified.h b/drivers/staging/fw-api/fw/wmi_unified.h index cab8a0313443..94f6cdee48ae 100644 --- a/drivers/staging/fw-api/fw/wmi_unified.h +++ b/drivers/staging/fw-api/fw/wmi_unified.h @@ -535,6 +535,8 @@ typedef enum { WMI_VDEV_GET_BIG_DATA_P2_CMDID, /** set TPC PSD/non-PSD power */ WMI_VDEV_SET_TPC_POWER_CMDID, + /** IGMP OFFLOAD */ + WMI_VDEV_IGMP_OFFLOAD_CMDID, /* peer specific commands */ @@ -1366,6 +1368,12 @@ typedef enum { /** WMI commands specific to MLO **/ /** MLO link active / inactive Request command */ WMI_MLO_LINK_SET_ACTIVE_CMDID = WMI_CMD_GRP_START_ID(WMI_GRP_MLO), + /** WMI cmd used to indicate hw_links part of MLO */ + WMI_MLO_SETUP_CMDID, + /** WMI cmd used for init synchronization of hw_links part of MLO */ + WMI_MLO_READY_CMDID, + /** WMI cmd used for tearing down a hw_link part of MLO */ + WMI_MLO_TEARDOWN_CMDID, } WMI_CMD_ID; typedef enum { @@ -2077,6 +2085,10 @@ typedef enum { /** WMI event specific to MLO **/ /** MLO link active / inactive response event */ WMI_MLO_LINK_SET_ACTIVE_RESP_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_MLO), + /* Response event for MLO setup cmd */ + WMI_MLO_SETUP_COMPLETE_EVENTID, + /* Response event for MLO teardown cmd */ + WMI_MLO_TEARDOWN_COMPLETE_EVENTID, } WMI_EVT_ID; /* defines for OEM message sub-types */ @@ -5660,6 +5672,14 @@ typedef struct { } wmi_tx_send_params; typedef struct { + A_UINT32 tlv_header; /* TLV tag (WMITLV_TAG_STRUC_wmi_mlo_tx_send_params) and len */ + A_UINT32 hw_link_id; /** Unique link id across SOCs, provided by QMI handshake. 
+ * If 0xFFFF then the frame will be queued in the MLO queue + * If valid hw_link_id + */ +} wmi_mlo_tx_send_params; + +typedef struct { A_UINT32 tlv_header; /* TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_mgmt_tx_send_cmd_fixed_param */ A_UINT32 vdev_id; A_UINT32 desc_id; /* echoed in tx_compl_event */ @@ -5699,6 +5719,7 @@ typedef struct { */ /* This TLV is followed by wmi_tx_send_params * wmi_tx_send_params tx_send_params; + * wmi_mlo_tx_send_params mlo_tx_send_params[]; */ } wmi_mgmt_tx_send_cmd_fixed_param; @@ -10352,6 +10373,8 @@ typedef struct { * bits 0 - mlo enable flag; * bits 1 - assoc link flag; * bits 2 - primary_umac flag; + * bits 3 - is logical link index valid + * bits 4 - is mlo peer id valid */ #define WMI_MLO_FLAGS_GET_ENABLED(mlo_flags) WMI_GET_BITS(mlo_flags, 0, 1) #define WMI_MLO_FLAGS_SET_ENABLED(mlo_flags, value) WMI_SET_BITS(mlo_flags, 0, 1, value) @@ -10359,6 +10382,10 @@ typedef struct { #define WMI_MLO_FLAGS_SET_ASSOC_LINK(mlo_flags, value) WMI_SET_BITS(mlo_flags, 1, 1, value) #define WMI_MLO_FLAGS_GET_PRIMARY_UMAC(mlo_flags) WMI_GET_BITS(mlo_flags, 2, 1) #define WMI_MLO_FLAGS_SET_PRIMARY_UMAC(mlo_flags, value) WMI_SET_BITS(mlo_flags, 2, 1, value) +#define WMI_MLO_FLAGS_GET_LINK_INDEX_VALID(mlo_flags) WMI_GET_BITS(mlo_flags, 3, 1) +#define WMI_MLO_FLAGS_SET_LINK_INDEX_VALID(mlo_flags, value) WMI_SET_BITS(mlo_flags, 3, 1, value) +#define WMI_MLO_FLAGS_GET_PEER_ID_VALID(mlo_flags) WMI_GET_BITS(mlo_flags, 4, 1) +#define WMI_MLO_FLAGS_SET_PEER_ID_VALID(mlo_flags, value) WMI_SET_BITS(mlo_flags, 4, 1, value) /* this structure used for pass mlo flags*/ typedef struct { @@ -10367,12 +10394,21 @@ typedef struct { A_UINT32 mlo_enabled:1, /* indicate is MLO enabled */ mlo_assoc_link:1, /* indicate is the link used to initialize the association of mlo connection */ mlo_primary_umac:1, /* indicate is the link on primary UMAC, WIN only flag */ - unused: 29; + mlo_logical_link_index_valid:1, /* indicate if the logial link index in wmi_peer_assoc_mlo_params is valid */ + mlo_peer_id_valid:1, /* indicate if the mlo peer id in wmi_peer_assoc_mlo_params is valid */ + unused: 27; }; A_UINT32 mlo_flags; }; } wmi_mlo_flags; +typedef struct { + A_UINT32 tlv_header;/** TLV tag (WMITLV_TAG_STRUC_wmi_partner_link_params) and len;*/ + A_UINT32 vdev_id; /** partner vdev_id */ + A_UINT32 hw_link_id; /** hw_link_id: Unique link id across SOCs, got as part of QMI handshake */ + wmi_mac_addr vdev_macaddr; /** VDEV MAC address */ +} wmi_partner_link_params; + /* this TLV structure used for pass mlo parameters on vdev create*/ typedef struct { A_UINT32 tlv_header; /** TLV tag and len; */ @@ -11147,6 +11183,9 @@ typedef struct { * wmi_vdev_start_mlo_params mlo_params[0,1]; <-- vdev start MLO parameters * optional TLV, only present for MLO vdevs, * If the vdev is non-MLO the array length should be 0. + * wmi_partner_link_info link_info[]; <-- partner link info + * optional TLV, only present for MLO vdevs, + * If the vdev is non-MLO the array length should be 0. */ } wmi_vdev_start_request_cmd_fixed_param; @@ -14234,6 +14273,12 @@ enum WMI_PEER_STA_TYPE { #define WMI_PEER_ASSOC_GET_BSS_MAX_IDLE_PERIOD(_dword) \ WMI_GET_BITS(_dword, WMI_PEER_ASSOC_BSS_MAX_IDLE_PERIOD_BITPOS, 16) +typedef struct { + A_UINT32 tlv_header; /** TLV tag (MITLV_TAG_STRUC_wmi_peer_assoc_mlo_partner_link_params) and len */ + A_UINT32 vdev_id; /** unique id identifying the VDEV, generated by the caller */ + A_UINT32 hw_mld_link_id; /** Unique link id across SOCs, got as part of QMI handshake. 
*/ +} wmi_peer_assoc_mlo_partner_link_params; + /* This TLV structure used to pass mlo Parameters on peer assoc, only apply for mlo-peers */ typedef struct { A_UINT32 tlv_header; /** TLV tag and len; */ @@ -14241,6 +14286,13 @@ typedef struct { wmi_mlo_flags mlo_flags; /** MLD MAC address */ wmi_mac_addr mld_macaddr; + /** Unique index for links of the mlo. Starts with Zero */ + A_UINT32 logical_link_index; + /** ML Peer ID + * In WIN systems, mld_peer_id is generated by Host. + * In MCL systems, mld_peer_id will be set to invalid peer id. + */ + A_UINT32 mld_peer_id; } wmi_peer_assoc_mlo_params; typedef struct { @@ -14383,6 +14435,7 @@ typedef struct { * Only present for MLO peers. * For non-MLO peers the array length should be 0. * wmi_eht_rate_set_peer_eht_rates; <-- EHT capabilities of the peer + * wmi_peer_assoc_mlo_partner_link_params link_info[] <-- partner link info */ } wmi_peer_assoc_complete_cmd_fixed_param; @@ -26907,7 +26960,7 @@ typedef enum wmi_hw_mode_config_type { /* * Per HW mode MLO capability flags - * use bits 31:28 of A_UINT32 hw_mode_config_type for Per HW mode MLO + * use bits 31:27 of A_UINT32 hw_mode_config_type for Per HW mode MLO * capability flags... * WMI_MLO_CAP_FLAG_NONE: Do not support MLO for the specific HW mode * WMI_MLO_CAP_FLAG_NON_STR_IN_DBS: Support STR MLO when DBS for the specific @@ -26918,22 +26971,35 @@ typedef enum wmi_hw_mode_config_type { * HW mode * WMI_MLO_CAP_FLAG_STR_IN_SBS: Support Non-STR MLO when SBS for the * specific HW mode + * WMI_MLO_CAP_FLAG_STR: Support STR for the specific HW mode. */ -#define WMI_MLO_CAP_FLAG_NONE 0x0 -#define WMI_MLO_CAP_FLAG_NON_STR_IN_DBS 0x1 -#define WMI_MLO_CAP_FLAG_STR_IN_DBS 0x2 -#define WMI_MLO_CAP_FLAG_NON_STR_IN_SBS 0x4 -#define WMI_MLO_CAP_FLAG_STR_IN_SBS 0x8 +#define WMI_MLO_CAP_FLAG_NONE 0x00 +#define WMI_MLO_CAP_FLAG_NON_STR_IN_DBS 0x01 +#define WMI_MLO_CAP_FLAG_STR_IN_DBS 0x02 +#define WMI_MLO_CAP_FLAG_NON_STR_IN_SBS 0x04 +#define WMI_MLO_CAP_FLAG_STR_IN_SBS 0x08 +#define WMI_MLO_CAP_FLAG_STR 0x10 /* * hw_mode_config_type sub-fields for chips that support 802.11BE/MLO: - * bits 28:0 - hw_mode_config - * bits 31:28 - per HW mode MLO capability flags + * bits 26:0 - hw_mode_config + * bits 31:27 - per HW mode MLO capability flags + */ +#define WMI_BECAP_PHY_GET_HW_MODE_CFG(hw_mode_config_type) WMI_GET_BITS(hw_mode_config_type, 0, 27) +#define WMI_BECAP_PHY_SET_HW_MODE_CFG(hw_mode_config_type, value) WMI_SET_BITS(hw_mode_config_type, 0, 27, value) +#define WMI_BECAP_PHY_GET_MLO_CAP(hw_mode_config_type) WMI_GET_BITS(hw_mode_config_type, 27, 5) +#define WMI_BECAP_PHY_SET_MLO_CAP(hw_mode_config_type, value) WMI_SET_BITS(hw_mode_config_type, 27, 5, value) + +/* + * pdev_id sub-fields for chips that support 802.11BE/MLO + * as part of WMI_MAC_PHY_CAPABILITIES and WMI_MAC_PHY_CAPABILITIES_EXT: + * bits 16:0 - pdev_id + * bits 32:16 - Unique link id across SOCs, got as part of QMI handshake. 
*/ -#define WMI_BECAP_PHY_GET_HW_MODE_CFG(hw_mode_config_type) WMI_GET_BITS(hw_mode_config_type, 0, 28) -#define WMI_BECAP_PHY_SET_HW_MODE_CFG(hw_mode_config_type, value) WMI_SET_BITS(hw_mode_config_type, 0, 28, value) -#define WMI_BECAP_PHY_GET_MLO_CAP(hw_mode_config_type) WMI_GET_BITS(hw_mode_config_type, 28, 4) -#define WMI_BECAP_PHY_SET_MLO_CAP(hw_mode_config_type, value) WMI_SET_BITS(hw_mode_config_type, 28, 4, value) +#define WMI_PHY_GET_PDEV_ID(pdev_id) WMI_GET_BITS(pdev_id, 0, 16) +#define WMI_PHY_SET_PDEV_ID(pdev_id, value) WMI_SET_BITS(pdev_id, 0, 16, value) +#define WMI_PHY_GET_HW_LINK_ID(pdev_id) WMI_GET_BITS(pdev_id, 16, 16) +#define WMI_PHY_SET_HW_LINK_ID(pdev_id, value) WMI_SET_BITS(pdev_id, 16, 16, value) #define WMI_SUPPORT_11B_GET(flags) WMI_GET_BITS(flags, 0, 1) #define WMI_SUPPORT_11B_SET(flags, value) WMI_SET_BITS(flags, 0, 1, value) @@ -27035,8 +27101,20 @@ typedef struct { * No particular ordering of WMI_MAC_PHY_CAPABILITIES elements should be assumed, * though in practice the elements may always be ordered by hw_mode_id */ A_UINT32 hw_mode_id; - /* pdev_id starts with 1. pdev_id 1 => phy_id 0, pdev_id 2 => phy_id 1 */ - A_UINT32 pdev_id; + /* + * pdev_id starts with 1. pdev_id 1 => phy_id 0, pdev_id 2 => phy_id 1 + * hw_link_id: Unique link id across SOCs, got as part of QMI handshake. + * For legacy chips which do not support MLO, these top bits will always + * be set to 0, so it won't impact the legacy chips which treat pdev_id + * as 32 bits. + */ + union { + struct { + A_UINT32 pdev_id:16, + hw_link_id:16; + } wmi_pdev_to_link_map; + A_UINT32 pdev_id; + }; /* phy id. Starts with 0 */ A_UINT32 phy_id; /* supported modulations and number of MU beamformees */ @@ -27211,8 +27289,20 @@ typedef struct { * No particular ordering of WMI_MAC_PHY_CAPABILITIES elements should be assumed, * though in practice the elements may always be ordered by hw_mode_id */ A_UINT32 hw_mode_id; - /* pdev_id starts with 1. pdev_id 1 => phy_id 0, pdev_id 2 => phy_id 1 */ - A_UINT32 pdev_id; + /* + * pdev_id starts with 1. pdev_id 1 => phy_id 0, pdev_id 2 => phy_id 1 + * hw_link_id: Unique link id across SOCs, got as part of QMI handshake. + * For legacy chips which do not support MLO, these top bits will always + * be set to 0, so it won't impact the legacy chips which treat pdev_id + * as 32 bits. + */ + union { + struct { + A_UINT32 pdev_id:16, + hw_link_id:16; + } wmi_pdev_to_link_map; + A_UINT32 pdev_id; + }; /* phy id. Starts with 0 */ A_UINT32 phy_id; A_UINT32 wireless_modes_ext; /* REGDMN MODE EXT, see REGDMN_MODE_ enum */ @@ -27253,7 +27343,7 @@ typedef struct { * Identify a particular type of HW mode such as SBS, DBS etc. * Refer to WMI_HW_MODE_CONFIG_TYPE values. * - * Use bits 31:28 of hw_mode_config_type for Per HW mode MLO capability + * Use bits 31:27 of hw_mode_config_type for Per HW mode MLO capability * flags. * Refer to WMI_MLO_CAP_FLAG_XXX. 
For legacy chips which do not support * MLO, these top bits will always be set to 0, so it won't impact the @@ -27261,8 +27351,8 @@ typedef struct { */ union { struct { - A_UINT32 hw_mode_config :28, - mlo_cap_flag :4; /* see WMI_MLO_CAP_FLAG_ defs */ + A_UINT32 hw_mode_config :27, + mlo_cap_flag :5; /* see WMI_MLO_CAP_FLAG_ defs */ }; A_UINT32 hw_mode_config_type; }; @@ -28380,6 +28470,10 @@ static INLINE A_UINT8 *wmi_id_to_name(A_UINT32 wmi_command) WMI_RETURN_STRING(WMI_PEER_TID_LATENCY_CONFIG_CMDID); WMI_RETURN_STRING(WMI_MLO_LINK_SET_ACTIVE_CMDID); WMI_RETURN_STRING(WMI_PDEV_GET_DPD_STATUS_CMDID); + WMI_RETURN_STRING(WMI_MLO_SETUP_CMDID); + WMI_RETURN_STRING(WMI_MLO_READY_CMDID); + WMI_RETURN_STRING(WMI_MLO_TEARDOWN_CMDID); + WMI_RETURN_STRING(WMI_VDEV_IGMP_OFFLOAD_CMDID); } return "Invalid WMI cmd"; @@ -28506,6 +28600,7 @@ typedef enum { WMI_REGULATORY_PHYMODE_NO11N = 0x0008, /* NO 11N */ WMI_REGULATORY_PHYMODE_NO11AC = 0x0010, /* NO 11AC */ WMI_REGULATORY_PHYMODE_NO11AX = 0x0020, /* NO 11AX */ + WMI_REGULATORY_PHYMODE_NO11BE = 0x0040, /* NO 11BE */ } WMI_REGULATORY_PHYBITMAP; typedef enum { @@ -29387,6 +29482,8 @@ typedef enum _WMI_ADD_TWT_STATUS_T { WMI_ADD_TWT_STATUS_UNKNOWN_ERROR, /* adding TWT dialog failed with an unknown reason */ WMI_ADD_TWT_STATUS_AP_PARAMS_NOT_IN_RANGE, /* peer AP wake interval, duration not in range */ WMI_ADD_TWT_STATUS_AP_IE_VALIDATION_FAILED, /* peer AP IE Validation Failed */ + WMI_ADD_TWT_STATUS_ROAM_IN_PROGRESS, /* Roaming in progress */ + WMI_ADD_TWT_STATUS_CHAN_SW_IN_PROGRESS, /* Channel switch in progress */ } WMI_ADD_TWT_STATUS_T; typedef struct { @@ -29423,6 +29520,7 @@ typedef struct { * Refer to 11ax spec session "9.4.2.199 TWT element" for more info. */ A_UINT32 b_twt_persistence; + A_UINT32 is_bcast_twt; } wmi_twt_del_dialog_cmd_fixed_param; /* status code of deleting TWT dialog */ @@ -29436,6 +29534,8 @@ typedef enum _WMI_DEL_TWT_STATUS_T { WMI_DEL_TWT_STATUS_UNKNOWN_ERROR, /* deleting TWT dialog failed with an unknown reason */ WMI_DEL_TWT_STATUS_PEER_INIT_TEARDOWN, /* Peer Initiated Teardown */ WMI_DEL_TWT_STATUS_ROAMING, /* Reason Roaming Start*/ + WMI_DEL_TWT_STATUS_CONCURRENCY, /* Teardown due to concurrency */ + WMI_DEL_TWT_STATUS_CHAN_SW_IN_PROGRESS, /* Channel switch in progress */ } WMI_DEL_TWT_STATUS_T; typedef struct { @@ -29464,6 +29564,7 @@ typedef enum _WMI_PAUSE_TWT_STATUS_T { WMI_PAUSE_TWT_STATUS_UNKNOWN_ERROR, /* pausing TWT dialog failed with an unknown reason */ WMI_PAUSE_TWT_STATUS_ALREADY_PAUSED, /* The TWT dialog is already paused */ WMI_PAUSE_TWT_STATUS_TWT_INFO_FRM_NOT_SUPPORTED, /* TWT information frame is not supported by AP */ + WMI_PAUSE_TWT_STATUS_CHAN_SW_IN_PROGRESS, /* Channel switch in progress */ } WMI_PAUSE_TWT_STATUS_T; typedef struct { @@ -29494,6 +29595,7 @@ typedef enum _WMI_RESUME_TWT_STATUS_T { WMI_RESUME_TWT_STATUS_NO_ACK, /* peer AP/STA did not ACK the request/response frame */ WMI_RESUME_TWT_STATUS_UNKNOWN_ERROR, /* resuming TWT dialog failed with an unknown reason */ WMI_RESUME_TWT_STATUS_TWT_INFO_FRM_NOT_SUPPORTED, /* TWT information frame is not supported by AP */ + WMI_RESUME_TWT_STATUS_CHAN_SW_IN_PROGRESS, /* Channel switch in progress */ } WMI_RESUME_TWT_STATUS_T; typedef struct { @@ -29524,6 +29626,7 @@ typedef enum _WMI_TWT_NUDGE_STATUS_T { WMI_NUDGE_TWT_STATUS_UNKNOWN_ERROR, /* nudging TWT dialog failed with an unknown reason */ WMI_NUDGE_TWT_STATUS_ALREADY_PAUSED, /* The TWT dialog is already paused */ WMI_NUDGE_TWT_STATUS_TWT_INFO_FRM_NOT_SUPPORTED, /* TWT information frame is not 
supported by AP */ + WMI_NUDGE_TWT_STATUS_CHAN_SW_IN_PROGRESS, /* Channel switch in progress */ } WMI_TWT_NUDGE_STATUS_T; typedef struct { @@ -33817,6 +33920,75 @@ typedef struct wmi_mlo_link_set_active_resp_event */ } wmi_mlo_link_set_active_resp_event_fixed_param; +typedef struct { + /** TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_mlo_setup_cmd_fixed_param; */ + A_UINT32 tlv_header; + /** Unique ID reprsenting the hw_links part of the MLD */ + A_UINT32 mld_group_id; + /** pdev_id for identifying the MAC, See macros starting with WMI_PDEV_ID_ for values. */ + A_UINT32 pdev_id; +/* + * Followed by TLVs: + * A_UINT32 hw_link_ids[]; + */ +} wmi_mlo_setup_cmd_fixed_param; + +typedef struct { + /** TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_mlo_setup_complete_event_fixed_param; */ + A_UINT32 tlv_header; + /** pdev_id for identifying the MAC, See macros starting with WMI_PDEV_ID_ for values. */ + A_UINT32 pdev_id; + /** Return status. 0 for success, non-zero otherwise */ + A_UINT32 status; +} wmi_mlo_setup_complete_event_fixed_param; + +typedef struct { + /** TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_mlo_ready_cmd_fixed_param; */ + A_UINT32 tlv_header; + /** pdev_id for identifying the MAC, See macros starting with WMI_PDEV_ID_ for values. */ + A_UINT32 pdev_id; +} wmi_mlo_ready_cmd_fixed_param; + +typedef enum wmi_mlo_tear_down_reason_code_type { + WMI_MLO_TEARDOWN_SSR_REASON, +} WMI_MLO_TEARDOWN_REASON_TYPE; + +typedef struct { + /** TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_mlo_teardown_fixed_param; */ + A_UINT32 tlv_header; + /** pdev_id for identifying the MAC, See macros starting with WMI_PDEV_ID_ for values. */ + A_UINT32 pdev_id; + /** reason_code: of type WMI_TEARDOWN_REASON_TYPE */ + A_UINT32 reason_code; +} wmi_mlo_teardown_fixed_param; + +typedef struct { + /** TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_mlo_teardown_complete_fixed_param; */ + A_UINT32 tlv_header; + /** pdev_id for identifying the MAC, See macros starting with WMI_PDEV_ID_ for values. */ + A_UINT32 pdev_id; + /** Return status. 0 for success, non-zero otherwise */ + A_UINT32 status; +} wmi_mlo_teardown_complete_fixed_param; + +#define WMI_IGMP_OFFLOAD_SUPPORT_DISABLE_BITMASK 0x0 +#define WMI_IGMP_V1_OFFLOAD_SUPPORT_BITMASK 0x1 +#define WMI_IGMP_V2_OFFLOAD_SUPPORT_BITMASK 0x2 +#define WMI_IGMP_V3_OFFLOAD_SUPPORT_BITMASK 0x4 +#define WMI_IGMP_OFFLOAD_SUPPORT_ALL_VERSION 0x7 + +typedef struct { + A_UINT32 tlv_header; /** TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_igmp_offload_fixed_param */ + A_UINT32 vdev_id; /** VDEV identifier */ + A_UINT32 enable; /** IGMP offload support enable/disable */ + A_UINT32 version_support_bitmask; /** IGMP version support v1, v2 and/or v3*/ + +/* Following this structure are the TLVs: + * WMI_IPV4_ADDR grp_ip_address[num_mcast_ipv4_addr]; + */ +} wmi_igmp_offload_fixed_param; + + /* ADD NEW DEFS HERE */ diff --git a/drivers/staging/fw-api/fw/wmi_version.h b/drivers/staging/fw-api/fw/wmi_version.h index 17f8b86f01da..385e76d06272 100644 --- a/drivers/staging/fw-api/fw/wmi_version.h +++ b/drivers/staging/fw-api/fw/wmi_version.h @@ -36,7 +36,7 @@ #define __WMI_VER_MINOR_ 0 /** WMI revision number has to be incremented when there is a * change that may or may not break compatibility. */ -#define __WMI_REVISION_ 973 +#define __WMI_REVISION_ 980 /** The Version Namespace should not be normally changed. 
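Illustration of the IGMP offload parameters defined earlier in this hunk. This is a shape sketch only: it assumes the wmi_igmp_offload_fixed_param layout and WMI_IGMP_* bitmask defines from wmi_unified.h above, and it omits the TLV header setup, buffer allocation, wmi_unified_cmd_send() plumbing and the trailing WMI_IPV4_ADDR group-address TLVs.

/* Assumes the firmware API headers above are included. */
static void fill_igmp_offload_params(wmi_igmp_offload_fixed_param *cmd,
                                     A_UINT32 vdev_id, A_UINT32 enable)
{
        cmd->vdev_id = vdev_id;
        cmd->enable = enable ? 1 : 0;
        /* Ask firmware to handle IGMPv2 and IGMPv3 queries/reports;
         * v1 is left to the host in this example. */
        cmd->version_support_bitmask = enable ?
                (WMI_IGMP_V2_OFFLOAD_SUPPORT_BITMASK |
                 WMI_IGMP_V3_OFFLOAD_SUPPORT_BITMASK) :
                WMI_IGMP_OFFLOAD_SUPPORT_DISABLE_BITMASK;
}

The diff continues below with the remainder of the wmi_version.h comment.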
Only * host and firmware of the same WMI namespace will work diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/direct_buf_rx/src/target_if_direct_buf_rx_main.c b/drivers/staging/qca-wifi-host-cmn/target_if/direct_buf_rx/src/target_if_direct_buf_rx_main.c index dc0f3d772cde..b2e99da459e2 100644 --- a/drivers/staging/qca-wifi-host-cmn/target_if/direct_buf_rx/src/target_if_direct_buf_rx_main.c +++ b/drivers/staging/qca-wifi-host-cmn/target_if/direct_buf_rx/src/target_if_direct_buf_rx_main.c @@ -1620,6 +1620,12 @@ static QDF_STATUS target_if_get_dbr_data(struct wlan_objmgr_pdev *pdev, *cookie = WMI_HOST_DBR_DATA_ADDR_HI_HOST_DATA_GET( dbr_rsp->dbr_entries[idx].paddr_hi); dbr_data->vaddr = target_if_dbr_vaddr_lookup(mod_param, paddr, *cookie); + + if (!dbr_data->vaddr) { + direct_buf_rx_err("dbr vaddr lookup failed, vaddr NULL"); + return QDF_STATUS_E_FAILURE; + } + dbr_data->cookie = *cookie; dbr_data->paddr = paddr; direct_buf_rx_debug("Cookie = %d Vaddr look up = %pK", diff --git a/drivers/staging/qcacld-3.0/components/mlme/dispatcher/inc/wlan_mlme_ucfg_api.h b/drivers/staging/qcacld-3.0/components/mlme/dispatcher/inc/wlan_mlme_ucfg_api.h index 9f2c4dcfdeac..d19b2678d0d9 100644 --- a/drivers/staging/qcacld-3.0/components/mlme/dispatcher/inc/wlan_mlme_ucfg_api.h +++ b/drivers/staging/qcacld-3.0/components/mlme/dispatcher/inc/wlan_mlme_ucfg_api.h @@ -4114,4 +4114,15 @@ ucfg_mlme_set_roam_reason_vsie_status(struct wlan_objmgr_psoc *psoc, } #endif + +/** + * ucfg_is_roaming_enabled() - Check if roaming enabled + * to firmware. + * @psoc: psoc context + * @vdev_id: vdev id + * + * Return: True if Roam state machine is in + * WLAN_ROAM_RSO_ENABLED/WLAN_ROAMING_IN_PROG/WLAN_ROAM_SYNCH_IN_PROG + */ +bool ucfg_is_roaming_enabled(struct wlan_objmgr_pdev *pdev, uint8_t vdev_id); #endif /* _WLAN_MLME_UCFG_API_H_ */ diff --git a/drivers/staging/qcacld-3.0/components/mlme/dispatcher/src/wlan_mlme_ucfg_api.c b/drivers/staging/qcacld-3.0/components/mlme/dispatcher/src/wlan_mlme_ucfg_api.c index b511cf4c5efd..19910b21e64f 100644 --- a/drivers/staging/qcacld-3.0/components/mlme/dispatcher/src/wlan_mlme_ucfg_api.c +++ b/drivers/staging/qcacld-3.0/components/mlme/dispatcher/src/wlan_mlme_ucfg_api.c @@ -1911,3 +1911,13 @@ bool ucfg_mlme_validate_scan_period(uint32_t roam_scan_period) return is_valid; } + +bool ucfg_is_roaming_enabled(struct wlan_objmgr_pdev *pdev, uint8_t vdev_id) +{ + struct wlan_objmgr_psoc *psoc = wlan_pdev_get_psoc(pdev); + + if (mlme_get_roam_state(psoc, vdev_id) == ROAM_RSO_STARTED) + return true; + + return false; +} diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_cfg80211.c b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_cfg80211.c index 2a81b83f87cb..52723d2f6f4b 100644 --- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_cfg80211.c +++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_cfg80211.c @@ -13161,6 +13161,7 @@ static int __wlan_hdd_cfg80211_set_fast_roaming(struct wiphy *wiphy, struct hdd_station_ctx *hdd_sta_ctx = WLAN_HDD_GET_STATION_CTX_PTR(adapter); mac_handle_t mac_handle; + bool roaming_enabled; hdd_enter_dev(dev); @@ -13191,6 +13192,13 @@ static int __wlan_hdd_cfg80211_set_fast_roaming(struct wiphy *wiphy, tb[QCA_WLAN_VENDOR_ATTR_ROAMING_POLICY]); hdd_debug("isFastRoamEnabled %d", is_fast_roam_enabled); + /* + * Get current roaming state and decide whether to wait for RSO_STOP + * response or not. 
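The comment above is the crux of the HDD change that follows: the roam state is sampled before sme_config_fast_roaming() flips it, and the wait for the firmware's disable-LFR event is skipped when RSO was never started, since firmware would send nothing and the wait would only time out. A hedged sketch of that decision; should_wait_for_roam_disable() is not a real HDD helper and only ucfg_is_roaming_enabled() comes from the hunks above. The usual qcacld headers are assumed.

static bool should_wait_for_roam_disable(struct wlan_objmgr_pdev *pdev,
                                         uint8_t vdev_id,
                                         bool disabling_fast_roam,
                                         bool associated)
{
        /* Sample the state before sme_config_fast_roaming() changes it. */
        bool was_roaming = ucfg_is_roaming_enabled(pdev, vdev_id);

        /* Wait only if RSO was actually running and is being turned off. */
        return associated && was_roaming && disabling_fast_roam;
}

The hunk resumes below with exactly this check wired into __wlan_hdd_cfg80211_set_fast_roaming().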
+ */ + roaming_enabled = ucfg_is_roaming_enabled(hdd_ctx->pdev, + adapter->vdev_id); + /* Update roaming */ mac_handle = hdd_ctx->mac_handle; qdf_status = sme_config_fast_roaming(mac_handle, adapter->vdev_id, @@ -13201,6 +13209,7 @@ static int __wlan_hdd_cfg80211_set_fast_roaming(struct wiphy *wiphy, ret = qdf_status_to_os_return(qdf_status); if (eConnectionState_Associated == hdd_sta_ctx->conn_info.conn_state && + roaming_enabled && QDF_IS_STATUS_SUCCESS(qdf_status) && !is_fast_roam_enabled) { INIT_COMPLETION(adapter->lfr_fw_status.disable_lfr_event); diff --git a/drivers/staging/qcacld-3.0/core/mac/inc/ani_system_defs.h b/drivers/staging/qcacld-3.0/core/mac/inc/ani_system_defs.h index f47ad239c020..63a1b772d2a8 100644 --- a/drivers/staging/qcacld-3.0/core/mac/inc/ani_system_defs.h +++ b/drivers/staging/qcacld-3.0/core/mac/inc/ani_system_defs.h @@ -71,10 +71,8 @@ enum ani_akm_type { ANI_AKM_TYPE_FT_RSN_PSK, ANI_AKM_TYPE_RSN_PSK_SHA256, ANI_AKM_TYPE_RSN_8021X_SHA256, -#ifdef WLAN_FEATURE_SAE ANI_AKM_TYPE_SAE, ANI_AKM_TYPE_FT_SAE, -#endif ANI_AKM_TYPE_SUITEB_EAP_SHA256, ANI_AKM_TYPE_SUITEB_EAP_SHA384, ANI_AKM_TYPE_FT_SUITEB_EAP_SHA384, diff --git a/drivers/staging/qcacld-3.0/core/mac/inc/qwlan_version.h b/drivers/staging/qcacld-3.0/core/mac/inc/qwlan_version.h index e66c82a8f7b5..797693511b7f 100644 --- a/drivers/staging/qcacld-3.0/core/mac/inc/qwlan_version.h +++ b/drivers/staging/qcacld-3.0/core/mac/inc/qwlan_version.h @@ -32,9 +32,9 @@ #define QWLAN_VERSION_MAJOR 5 #define QWLAN_VERSION_MINOR 2 #define QWLAN_VERSION_PATCH 022 -#define QWLAN_VERSION_EXTRA "B" +#define QWLAN_VERSION_EXTRA "F" #define QWLAN_VERSION_BUILD 7 -#define QWLAN_VERSIONSTR "5.2.022.7B" +#define QWLAN_VERSIONSTR "5.2.022.7F" #endif /* QWLAN_VERSION_H */ diff --git a/drivers/staging/qcacld-3.0/core/mac/inc/sir_api.h b/drivers/staging/qcacld-3.0/core/mac/inc/sir_api.h index 70c9d6a2f86c..8f2eae634b2e 100644 --- a/drivers/staging/qcacld-3.0/core/mac/inc/sir_api.h +++ b/drivers/staging/qcacld-3.0/core/mac/inc/sir_api.h @@ -3023,6 +3023,7 @@ struct roam_offload_synch_ind { #ifdef WLAN_FEATURE_ROAM_OFFLOAD struct handoff_failure_ind { uint8_t vdev_id; + struct qdf_mac_addr bssid; }; struct roam_offload_synch_fail { diff --git a/drivers/staging/qcacld-3.0/core/mac/src/include/parser_api.h b/drivers/staging/qcacld-3.0/core/mac/src/include/parser_api.h index 32079b16a0c9..7c98e4213563 100644 --- a/drivers/staging/qcacld-3.0/core/mac/src/include/parser_api.h +++ b/drivers/staging/qcacld-3.0/core/mac/src/include/parser_api.h @@ -1229,6 +1229,20 @@ QDF_STATUS populate_dot11f_twt_extended_caps(struct mac_context *mac_ctx, #endif /** + * populate_dot11f_btm_caps() - populate btm extended capabilities + * @mac_ctx: Global MAC context. + * @pe_session: Pointer to the PE session. + * @dot11f: Pointer to the extended capabilities of the session. 
+ * + * Disable btm for SAE types for Helium firmware limit + * + * Return: QDF_STATUS Success or Failure + */ +QDF_STATUS populate_dot11f_btm_caps(struct mac_context *mac_ctx, + struct pe_session *pe_session, + struct sDot11fIEExtCap *dot11f); + +/** * lim_truncate_ppet: truncates ppet of trailling zeros * @ppet: ppet to truncate * max_len: max length of ppet diff --git a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_message_queue.c b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_message_queue.c index 381bcbdfe99d..3e7dd69581ac 100644 --- a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_message_queue.c +++ b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_message_queue.c @@ -524,8 +524,7 @@ static bool def_msg_decision(struct mac_context *mac_ctx, if (mac_ctx->lim.gLimSmeState == eLIM_SME_OFFLINE_STATE) { /* Defer processing this message */ if (lim_defer_msg(mac_ctx, lim_msg) != TX_SUCCESS) { - QDF_TRACE(QDF_MODULE_ID_PE, LOGE, - FL("Unable to Defer Msg")); + pe_err_rl("Unable to Defer Msg"); lim_log_session_states(mac_ctx); lim_handle_defer_msg_error(mac_ctx, lim_msg); } @@ -1012,7 +1011,8 @@ uint32_t lim_defer_msg(struct mac_context *mac, struct scheduler_msg *pMsg) (mac, NO_SESSION, LIM_TRACE_MAKE_RXMSG(pMsg->type, LIM_MSG_DEFERRED))); } else { - pe_err("Dropped lim message (0x%X) Message %s", pMsg->type, lim_msg_str(pMsg->type)); + pe_err_rl("Dropped lim message (0x%X) Message %s", pMsg->type, + lim_msg_str(pMsg->type)); MTRACE(mac_trace_msg_rx (mac, NO_SESSION, LIM_TRACE_MAKE_RXMSG(pMsg->type, LIM_MSG_DROPPED))); diff --git a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_send_management_frames.c b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_send_management_frames.c index ca42c29be526..c6aafe9ee806 100644 --- a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_send_management_frames.c +++ b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_send_management_frames.c @@ -2204,6 +2204,8 @@ lim_send_assoc_req_mgmt_frame(struct mac_context *mac_ctx, lim_merge_extcap_struct(&frm->ExtCap, &bcn_ext_cap, false); } + + populate_dot11f_btm_caps(mac_ctx, pe_session, &frm->ExtCap); /* * TWT extended capabilities should be populated after the * intersection of beacon caps and self caps is done because diff --git a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_utils.c b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_utils.c index 5877893c207f..f67e6392948c 100644 --- a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_utils.c +++ b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_utils.c @@ -769,8 +769,8 @@ uint8_t lim_write_deferred_msg_q(struct mac_context *mac_ctx, * We reach the quota for management frames, * drop this one */ - pe_warn("Too many queue->MsgQ Msg: %d count: %d", - lim_msg->type, count); + pe_warn_rl("Too many queue->MsgQ Msg: %d count: %d", + lim_msg->type, count); /* Return error, caller knows what to do */ return TX_QUEUE_FULL; } diff --git a/drivers/staging/qcacld-3.0/core/mac/src/sys/legacy/src/utils/src/parser_api.c b/drivers/staging/qcacld-3.0/core/mac/src/sys/legacy/src/utils/src/parser_api.c index 8c35a9d49e92..b17c583e66c0 100644 --- a/drivers/staging/qcacld-3.0/core/mac/src/sys/legacy/src/utils/src/parser_api.c +++ b/drivers/staging/qcacld-3.0/core/mac/src/sys/legacy/src/utils/src/parser_api.c @@ -45,6 +45,7 @@ #include "wlan_mlme_public_struct.h" #include "wlan_mlme_ucfg_api.h" #include "wlan_mlme_api.h" +#include "wlan_crypto_global_api.h" #define RSN_OUI_SIZE 4 /* 
////////////////////////////////////////////////////////////////////// */ @@ -6246,4 +6247,28 @@ QDF_STATUS populate_dot11f_twt_extended_caps(struct mac_context *mac_ctx, } #endif +QDF_STATUS populate_dot11f_btm_caps(struct mac_context *mac_ctx, + struct pe_session *pe_session, + struct sDot11fIEExtCap *dot11f) +{ + struct s_ext_cap *p_ext_cap; + uint32_t fw_akm_bitmap; + bool sae_can_roam; + + dot11f->num_bytes = DOT11F_IE_EXTCAP_MAX_LEN; + p_ext_cap = (struct s_ext_cap *)dot11f->bytes; + fw_akm_bitmap = mac_ctx->mlme_cfg->lfr.fw_akm_bitmap; + sae_can_roam = (((fw_akm_bitmap) & (1 << AKM_SAE)) ? true : false); + + if (pe_session->connected_akm == ANI_AKM_TYPE_SAE && + !sae_can_roam) { + p_ext_cap->bss_transition = 0; + pe_debug("Disable btm cap for SAE roam not supported"); + } + + dot11f->num_bytes = lim_compute_ext_cap_ie_length(dot11f); + + return QDF_STATUS_SUCCESS; +} + /* parser_api.c ends here. */ diff --git a/drivers/staging/qcacld-3.0/core/sme/src/csr/csr_api_roam.c b/drivers/staging/qcacld-3.0/core/sme/src/csr/csr_api_roam.c index f6d79dd8473a..2eb4d5c7548e 100644 --- a/drivers/staging/qcacld-3.0/core/sme/src/csr/csr_api_roam.c +++ b/drivers/staging/qcacld-3.0/core/sme/src/csr/csr_api_roam.c @@ -21204,6 +21204,7 @@ void csr_process_ho_fail_ind(struct mac_context *mac_ctx, void *msg_buf) struct handoff_failure_ind *pSmeHOFailInd = msg_buf; struct mlme_roam_after_data_stall *vdev_roam_params; struct wlan_objmgr_vdev *vdev; + struct reject_ap_info ap_info; uint32_t sessionId; if (!pSmeHOFailInd) { @@ -21212,6 +21213,12 @@ void csr_process_ho_fail_ind(struct mac_context *mac_ctx, void *msg_buf) } sessionId = pSmeHOFailInd->vdev_id; + ap_info.bssid = pSmeHOFailInd->bssid; + ap_info.reject_ap_type = DRIVER_AVOID_TYPE; + ap_info.reject_reason = REASON_ROAM_HO_FAILURE; + ap_info.source = ADDED_BY_DRIVER; + wlan_blm_add_bssid_to_reject_list(mac_ctx->pdev, &ap_info); + /* Roaming is supported only on Infra STA Mode. 
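The populate_dot11f_btm_caps() body above boils down to clearing one extended-capability bit when the firmware AKM bitmap says SAE roaming is unsupported. A self-contained restatement of that pattern; the struct and the AKM_SAE bit position here are illustrative stand-ins, not the driver's definitions:

#include <stdbool.h>
#include <stdint.h>

struct toy_ext_cap {
        uint8_t bss_transition : 1;   /* stand-in for s_ext_cap.bss_transition */
};

/* AKM_SAE's real value comes from the crypto definitions; 2 is illustrative. */
#define TOY_AKM_SAE_BIT  (1U << 2)

static void clamp_btm_for_sae(struct toy_ext_cap *cap,
                              uint32_t fw_akm_bitmap,
                              bool connected_akm_is_sae)
{
        bool sae_can_roam = !!(fw_akm_bitmap & TOY_AKM_SAE_BIT);

        if (connected_akm_is_sae && !sae_can_roam)
                cap->bss_transition = 0;   /* stop advertising BTM support */
}

The csr_api_roam.c hunk continues below.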
*/ if (!csr_roam_is_sta_mode(mac_ctx, sessionId)) { diff --git a/drivers/staging/qcacld-3.0/core/wma/src/wma_scan_roam.c b/drivers/staging/qcacld-3.0/core/wma/src/wma_scan_roam.c index c82271375379..b97be88d38dc 100644 --- a/drivers/staging/qcacld-3.0/core/wma/src/wma_scan_roam.c +++ b/drivers/staging/qcacld-3.0/core/wma/src/wma_scan_roam.c @@ -4167,19 +4167,14 @@ wma_roam_ho_fail_handler(tp_wma_handle wma, uint32_t vdev_id, struct handoff_failure_ind *ho_failure_ind; struct scheduler_msg sme_msg = { 0 }; QDF_STATUS qdf_status; - struct reject_ap_info ap_info; - - ap_info.bssid = bssid; - ap_info.reject_ap_type = DRIVER_AVOID_TYPE; - ap_info.reject_reason = REASON_ROAM_HO_FAILURE; - ap_info.source = ADDED_BY_DRIVER; - wlan_blm_add_bssid_to_reject_list(wma->pdev, &ap_info); ho_failure_ind = qdf_mem_malloc(sizeof(*ho_failure_ind)); if (!ho_failure_ind) return; ho_failure_ind->vdev_id = vdev_id; + ho_failure_ind->bssid = bssid; + sme_msg.type = eWNI_SME_HO_FAIL_IND; sme_msg.bodyptr = ho_failure_ind; sme_msg.bodyval = 0; diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index b2f67d7ace6d..22a7f67e70e7 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -1696,12 +1696,13 @@ static int acm_resume(struct usb_interface *intf) struct urb *urb; int rv = 0; - acm_unpoison_urbs(acm); spin_lock_irq(&acm->write_lock); if (--acm->susp_count) goto out; + acm_unpoison_urbs(acm); + if (tty_port_initialized(&acm->port)) { rv = usb_submit_urb(acm->ctrlurb, GFP_ATOMIC); diff --git a/drivers/usb/gadget/function/u_bam_dmux.c b/drivers/usb/gadget/function/u_bam_dmux.c index 57aaebfcfd64..9ac3332dbf9e 100644 --- a/drivers/usb/gadget/function/u_bam_dmux.c +++ b/drivers/usb/gadget/function/u_bam_dmux.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2011-2018, 2020, Linux Foundation. All rights reserved. +/* Copyright (c) 2011-2018, 2020-2021, Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -274,7 +274,7 @@ static void gbam_free_rx_skb_idle_list(struct gbam_port *port) return; d = &port->data_ch; - gadget = port->port_usb->cdev->gadget; + gadget = port->gadget; while (d->rx_skb_idle.qlen > 0) { skb = __skb_dequeue(&d->rx_skb_idle); @@ -1028,6 +1028,7 @@ static void gbam_port_free(enum bam_dmux_func_type func) if (port) { platform_driver_unregister(pdrv); + gbam_free_rx_skb_idle_list(port); kfree(port); bam_ports[func].port = NULL; } diff --git a/drivers/usb/usbip/vudc_sysfs.c b/drivers/usb/usbip/vudc_sysfs.c index f44d98eeb36a..51cc5258b63e 100644 --- a/drivers/usb/usbip/vudc_sysfs.c +++ b/drivers/usb/usbip/vudc_sysfs.c @@ -187,7 +187,7 @@ static ssize_t store_sockfd(struct device *dev, udc->ud.tcp_socket = socket; udc->ud.tcp_rx = tcp_rx; - udc->ud.tcp_rx = tcp_tx; + udc->ud.tcp_tx = tcp_tx; udc->ud.status = SDEV_ST_USED; spin_unlock_irq(&udc->ud.lock); |
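On the acm_resume() hunk above: the point of moving acm_unpoison_urbs() is that URBs must only be re-enabled by the last resumer, after the susp_count check and under write_lock. An illustrative reduction of that ordering with toy types, not the driver's actual resume path:

#include <linux/spinlock.h>

struct toy_acm {
        spinlock_t write_lock;
        int susp_count;
};

/* Placeholders for the real acm_unpoison_urbs()/URB submission paths. */
static inline void toy_unpoison_urbs(struct toy_acm *acm) { }
static inline void toy_submit_pending(struct toy_acm *acm) { }

static int toy_resume(struct toy_acm *acm)
{
        spin_lock_irq(&acm->write_lock);
        if (--acm->susp_count) {
                /* Another suspend reference is still held: keep the URBs
                 * poisoned and submit nothing yet. */
                spin_unlock_irq(&acm->write_lock);
                return 0;
        }
        toy_unpoison_urbs(acm);     /* last resumer: safe to re-enable I/O */
        toy_submit_pending(acm);
        spin_unlock_irq(&acm->write_lock);
        return 0;
}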
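Looking back at the ani_system_defs.h hunk, dropping the #ifdef WLAN_FEATURE_SAE guard keeps the ANI_AKM_TYPE_* numbering identical across build configurations; conditional members shift every later enumerator, which breaks any consumer compiled with a different feature set. A toy example of the difference, with WLAN_FEATURE_X as a hypothetical stand-in for such a guard:

enum akm_conditional {
        AKM_A,
#ifdef WLAN_FEATURE_X
        AKM_B,          /* present only when the feature is compiled in   */
#endif
        AKM_C,          /* value 2 with the flag, 1 without: they diverge */
};

enum akm_unconditional {
        AKM2_A,
        AKM2_B,         /* slot always reserved, even if unused           */
        AKM2_C,         /* stable value 2 in every configuration          */
};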
