Merge branch 'akpm/master'
author		Stephen Rothwell <sfr@canb.auug.org.au>		Tue, 13 Sep 2016 03:57:12 +0000 (13:57 +1000)
committer	Stephen Rothwell <sfr@canb.auug.org.au>		Tue, 13 Sep 2016 03:57:12 +0000 (13:57 +1000)
33 files changed:
Documentation/RCU/lockdep-splat.txt
Documentation/dev-tools/kmemleak.rst
arch/x86/kernel/machine_kexec_64.c
arch/x86/kvm/i8254.c
crypto/crypto_engine.c
drivers/block/loop.c
drivers/infiniband/sw/rdmavt/cq.c
drivers/md/dm-rq.c
drivers/md/dm.c
drivers/media/pci/ivtv/ivtv-driver.c
drivers/media/pci/ivtv/ivtv-irq.c
drivers/net/ethernet/microchip/encx24j600.c
drivers/net/wireless/intel/iwlwifi/dvm/calib.c
drivers/spi/spi.c
drivers/tty/serial/sc16is7xx.c
include/linux/kexec.h
include/linux/kmemleak.h
include/linux/kthread.h
include/linux/mlx5/device.h
kernel/hung_task.c
kernel/kthread.c
kernel/smpboot.c
kernel/workqueue.c
mm/bootmem.c
mm/cma.c
mm/kmemleak.c
mm/memblock.c
mm/nobootmem.c
scripts/tags.sh
sound/soc/intel/baytrail/sst-baytrail-ipc.c
sound/soc/intel/common/sst-ipc.c
sound/soc/intel/haswell/sst-haswell-ipc.c
sound/soc/intel/skylake/skl-sst-ipc.c

diff --git a/Documentation/RCU/lockdep-splat.txt b/Documentation/RCU/lockdep-splat.txt
index bf9061142827fca8d7f19c6cb013f322b47bc4e2..238e9f61352f6187670675cb2457af282cc03af6 100644
--- a/Documentation/RCU/lockdep-splat.txt
+++ b/Documentation/RCU/lockdep-splat.txt
@@ -57,7 +57,7 @@ Call Trace:
  [<ffffffff817db154>] kernel_thread_helper+0x4/0x10
  [<ffffffff81066430>] ? finish_task_switch+0x80/0x110
  [<ffffffff817d9c04>] ? retint_restore_args+0xe/0xe
- [<ffffffff81097510>] ? __init_kthread_worker+0x70/0x70
+ [<ffffffff81097510>] ? __kthread_init_worker+0x70/0x70
  [<ffffffff817db150>] ? gs_change+0xb/0xb
 
 Line 2776 of block/cfq-iosched.c in v3.0-rc5 is as follows:
diff --git a/Documentation/dev-tools/kmemleak.rst b/Documentation/dev-tools/kmemleak.rst
index 1788722d549503c3164e43cdbe9b26e091d2f919..b2391b8291691b9ac6752a450a1222898c1b29a1 100644
--- a/Documentation/dev-tools/kmemleak.rst
+++ b/Documentation/dev-tools/kmemleak.rst
@@ -162,6 +162,15 @@ See the include/linux/kmemleak.h header for the functions prototype.
 - ``kmemleak_alloc_recursive`` - as kmemleak_alloc but checks the recursiveness
 - ``kmemleak_free_recursive``   - as kmemleak_free but checks the recursiveness
 
+The following functions take a physical address as the object pointer
+and only perform the corresponding action if the address has a lowmem
+mapping:
+
+- ``kmemleak_alloc_phys``
+- ``kmemleak_free_part_phys``
+- ``kmemleak_not_leak_phys``
+- ``kmemleak_ignore_phys``
+
 Dealing with false positives/negatives
 --------------------------------------
 
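
A minimal usage sketch of the physical-address helpers documented above (not part of this patch): the reserving function, alignment and flag values are assumptions made for the example; only the kmemleak_*_phys() calls are the documented API.

    #include <linux/init.h>
    #include <linux/kmemleak.h>
    #include <linux/memblock.h>

    /*
     * Sketch only: reserve a region early in boot and let kmemleak track it
     * by physical address.  memblock_alloc() returning a phys_addr_t is
     * assumed (as in this kernel era); min_count = 0 and gfp = 0 are
     * placeholder choices for the example.
     */
    static phys_addr_t __init example_reserve(size_t size)
    {
    	phys_addr_t phys = memblock_alloc(size, PAGE_SIZE);

    	if (phys)
    		/* No-op unless the address has a lowmem mapping. */
    		kmemleak_alloc_phys(phys, size, 0, 0);

    	return phys;
    }

    static void __init example_trim(phys_addr_t phys, size_t size, size_t keep)
    {
    	/* Stop tracking the tail that is handed back to the system. */
    	kmemleak_free_part_phys(phys + keep, size - keep);
    }
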
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index fc3389fc47a2f3dfacc7bff0665f1d6c4b75a245..b1f15a2dab65cc299fd5fb134ef429080e78a55e 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -338,6 +338,9 @@ void arch_crash_save_vmcoreinfo(void)
        vmcoreinfo_append_str("KERNELOFFSET=%lx\n",
                              kaslr_offset());
        VMCOREINFO_PHYS_BASE(phys_base);
+       VMCOREINFO_PAGE_OFFSET(PAGE_OFFSET);
+       VMCOREINFO_VMALLOC_START(VMALLOC_START);
+       VMCOREINFO_VMEMMAP_START(VMEMMAP_START);
 }
 
 /* arch-dependent functionality related to kexec file-based syscall */
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 5fb6c620180e19ebae73bee9cf6e83e3c65854e8..16a7134eedacce123c55c82c0e8f31b095bc14f0 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -212,7 +212,7 @@ static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian)
         */
        smp_mb();
        if (atomic_dec_if_positive(&ps->pending) > 0)
-               queue_kthread_work(&pit->worker, &pit->expired);
+               kthread_queue_work(&pit->worker, &pit->expired);
 }
 
 void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
@@ -233,7 +233,7 @@ void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
 static void destroy_pit_timer(struct kvm_pit *pit)
 {
        hrtimer_cancel(&pit->pit_state.timer);
-       flush_kthread_work(&pit->expired);
+       kthread_flush_work(&pit->expired);
 }
 
 static void pit_do_work(struct kthread_work *work)
@@ -272,7 +272,7 @@ static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
        if (atomic_read(&ps->reinject))
                atomic_inc(&ps->pending);
 
-       queue_kthread_work(&pt->worker, &pt->expired);
+       kthread_queue_work(&pt->worker, &pt->expired);
 
        if (ps->is_periodic) {
                hrtimer_add_expires_ns(&ps->timer, ps->period);
@@ -324,7 +324,7 @@ static void create_pit_timer(struct kvm_pit *pit, u32 val, int is_period)
 
        /* TODO The new value only affected after the retriggered */
        hrtimer_cancel(&ps->timer);
-       flush_kthread_work(&pit->expired);
+       kthread_flush_work(&pit->expired);
        ps->period = interval;
        ps->is_periodic = is_period;
 
@@ -667,13 +667,13 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
        pid_nr = pid_vnr(pid);
        put_pid(pid);
 
-       init_kthread_worker(&pit->worker);
+       kthread_init_worker(&pit->worker);
        pit->worker_task = kthread_run(kthread_worker_fn, &pit->worker,
                                       "kvm-pit/%d", pid_nr);
        if (IS_ERR(pit->worker_task))
                goto fail_kthread;
 
-       init_kthread_work(&pit->expired, pit_do_work);
+       kthread_init_work(&pit->expired, pit_do_work);
 
        pit->kvm = kvm;
 
@@ -730,7 +730,7 @@ void kvm_free_pit(struct kvm *kvm)
                kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &pit->speaker_dev);
                kvm_pit_set_reinject(pit, false);
                hrtimer_cancel(&pit->pit_state.timer);
-               flush_kthread_work(&pit->expired);
+               kthread_flush_work(&pit->expired);
                kthread_stop(pit->worker_task);
                kvm_free_irq_source_id(kvm, pit->irq_source_id);
                kfree(pit);
diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c
index bfb92ace2c91a9a3786ab3c2cedb00224d2b62f0..6989ba0046df275cb6b8d14470b8618caf40beb4 100644
--- a/crypto/crypto_engine.c
+++ b/crypto/crypto_engine.c
@@ -47,7 +47,7 @@ static void crypto_pump_requests(struct crypto_engine *engine,
 
        /* If another context is idling then defer */
        if (engine->idling) {
-               queue_kthread_work(&engine->kworker, &engine->pump_requests);
+               kthread_queue_work(&engine->kworker, &engine->pump_requests);
                goto out;
        }
 
@@ -58,7 +58,7 @@ static void crypto_pump_requests(struct crypto_engine *engine,
 
                /* Only do teardown in the thread */
                if (!in_kthread) {
-                       queue_kthread_work(&engine->kworker,
+                       kthread_queue_work(&engine->kworker,
                                           &engine->pump_requests);
                        goto out;
                }
@@ -189,7 +189,7 @@ int crypto_transfer_cipher_request(struct crypto_engine *engine,
        ret = ablkcipher_enqueue_request(&engine->queue, req);
 
        if (!engine->busy && need_pump)
-               queue_kthread_work(&engine->kworker, &engine->pump_requests);
+               kthread_queue_work(&engine->kworker, &engine->pump_requests);
 
        spin_unlock_irqrestore(&engine->queue_lock, flags);
        return ret;
@@ -231,7 +231,7 @@ int crypto_transfer_hash_request(struct crypto_engine *engine,
        ret = ahash_enqueue_request(&engine->queue, req);
 
        if (!engine->busy && need_pump)
-               queue_kthread_work(&engine->kworker, &engine->pump_requests);
+               kthread_queue_work(&engine->kworker, &engine->pump_requests);
 
        spin_unlock_irqrestore(&engine->queue_lock, flags);
        return ret;
@@ -284,7 +284,7 @@ void crypto_finalize_cipher_request(struct crypto_engine *engine,
 
        req->base.complete(&req->base, err);
 
-       queue_kthread_work(&engine->kworker, &engine->pump_requests);
+       kthread_queue_work(&engine->kworker, &engine->pump_requests);
 }
 EXPORT_SYMBOL_GPL(crypto_finalize_cipher_request);
 
@@ -321,7 +321,7 @@ void crypto_finalize_hash_request(struct crypto_engine *engine,
 
        req->base.complete(&req->base, err);
 
-       queue_kthread_work(&engine->kworker, &engine->pump_requests);
+       kthread_queue_work(&engine->kworker, &engine->pump_requests);
 }
 EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);
 
@@ -345,7 +345,7 @@ int crypto_engine_start(struct crypto_engine *engine)
        engine->running = true;
        spin_unlock_irqrestore(&engine->queue_lock, flags);
 
-       queue_kthread_work(&engine->kworker, &engine->pump_requests);
+       kthread_queue_work(&engine->kworker, &engine->pump_requests);
 
        return 0;
 }
@@ -422,7 +422,7 @@ struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
        crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN);
        spin_lock_init(&engine->queue_lock);
 
-       init_kthread_worker(&engine->kworker);
+       kthread_init_worker(&engine->kworker);
        engine->kworker_task = kthread_run(kthread_worker_fn,
                                           &engine->kworker, "%s",
                                           engine->name);
@@ -430,7 +430,7 @@ struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
                dev_err(dev, "failed to create crypto request pump task\n");
                return NULL;
        }
-       init_kthread_work(&engine->pump_requests, crypto_pump_work);
+       kthread_init_work(&engine->pump_requests, crypto_pump_work);
 
        if (engine->rt) {
                dev_info(dev, "will run requests pump with realtime priority\n");
@@ -455,7 +455,7 @@ int crypto_engine_exit(struct crypto_engine *engine)
        if (ret)
                return ret;
 
-       flush_kthread_worker(&engine->kworker);
+       kthread_flush_worker(&engine->kworker);
        kthread_stop(engine->kworker_task);
 
        return 0;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index c9f2107f7095148dc5e4027d7cb8fdc4794aad01..9a21c024c390461fe09eeae6fb8b286f2508b7fc 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -840,13 +840,13 @@ static void loop_config_discard(struct loop_device *lo)
 
 static void loop_unprepare_queue(struct loop_device *lo)
 {
-       flush_kthread_worker(&lo->worker);
+       kthread_flush_worker(&lo->worker);
        kthread_stop(lo->worker_task);
 }
 
 static int loop_prepare_queue(struct loop_device *lo)
 {
-       init_kthread_worker(&lo->worker);
+       kthread_init_worker(&lo->worker);
        lo->worker_task = kthread_run(kthread_worker_fn,
                        &lo->worker, "loop%d", lo->lo_number);
        if (IS_ERR(lo->worker_task))
@@ -1658,7 +1658,7 @@ static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
                break;
        }
 
-       queue_kthread_work(&lo->worker, &cmd->work);
+       kthread_queue_work(&lo->worker, &cmd->work);
 
        return BLK_MQ_RQ_QUEUE_OK;
 }
@@ -1696,7 +1696,7 @@ static int loop_init_request(void *data, struct request *rq,
        struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
 
        cmd->rq = rq;
-       init_kthread_work(&cmd->work, loop_queue_work);
+       kthread_init_work(&cmd->work, loop_queue_work);
 
        return 0;
 }
diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c
index f2f229efbe64d7d8ddcdf4f985ad23be76ec6977..6d9904a4a0abe7efb9e882942cd76f96dc920806 100644
--- a/drivers/infiniband/sw/rdmavt/cq.c
+++ b/drivers/infiniband/sw/rdmavt/cq.c
@@ -129,7 +129,7 @@ void rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited)
                if (likely(worker)) {
                        cq->notify = RVT_CQ_NONE;
                        cq->triggered++;
-                       queue_kthread_work(worker, &cq->comptask);
+                       kthread_queue_work(worker, &cq->comptask);
                }
        }
 
@@ -265,7 +265,7 @@ struct ib_cq *rvt_create_cq(struct ib_device *ibdev,
        cq->ibcq.cqe = entries;
        cq->notify = RVT_CQ_NONE;
        spin_lock_init(&cq->lock);
-       init_kthread_work(&cq->comptask, send_complete);
+       kthread_init_work(&cq->comptask, send_complete);
        cq->queue = wc;
 
        ret = &cq->ibcq;
@@ -295,7 +295,7 @@ int rvt_destroy_cq(struct ib_cq *ibcq)
        struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
        struct rvt_dev_info *rdi = cq->rdi;
 
-       flush_kthread_work(&cq->comptask);
+       kthread_flush_work(&cq->comptask);
        spin_lock(&rdi->n_cqs_lock);
        rdi->n_cqs_allocated--;
        spin_unlock(&rdi->n_cqs_lock);
@@ -514,7 +514,7 @@ int rvt_driver_cq_init(struct rvt_dev_info *rdi)
        rdi->worker = kzalloc(sizeof(*rdi->worker), GFP_KERNEL);
        if (!rdi->worker)
                return -ENOMEM;
-       init_kthread_worker(rdi->worker);
+       kthread_init_worker(rdi->worker);
        task = kthread_create_on_node(
                kthread_worker_fn,
                rdi->worker,
@@ -547,7 +547,7 @@ void rvt_cq_exit(struct rvt_dev_info *rdi)
        /* blocks future queuing from send_complete() */
        rdi->worker = NULL;
        smp_wmb(); /* See rdi_cq_enter */
-       flush_kthread_worker(worker);
+       kthread_flush_worker(worker);
        kthread_stop(worker->task);
        kfree(worker);
 }
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 1ca7463e8bb2b26c799f63f5d54a4a33d570704c..917dde872d6b4bb6ad5372f3f936012c7b98642e 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -553,7 +553,7 @@ static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
        if (!md->init_tio_pdu)
                memset(&tio->info, 0, sizeof(tio->info));
        if (md->kworker_task)
-               init_kthread_work(&tio->work, map_tio_request);
+               kthread_init_work(&tio->work, map_tio_request);
 }
 
 static struct dm_rq_target_io *dm_old_prep_tio(struct request *rq,
@@ -801,7 +801,7 @@ static void dm_old_request_fn(struct request_queue *q)
                tio = tio_from_request(rq);
                /* Establish tio->ti before queuing work (map_tio_request) */
                tio->ti = ti;
-               queue_kthread_work(&md->kworker, &tio->work);
+               kthread_queue_work(&md->kworker, &tio->work);
                BUG_ON(!irqs_disabled());
        }
 }
@@ -823,7 +823,7 @@ int dm_old_init_request_queue(struct mapped_device *md)
        blk_queue_prep_rq(md->queue, dm_old_prep_fn);
 
        /* Initialize the request-based DM worker thread */
-       init_kthread_worker(&md->kworker);
+       kthread_init_worker(&md->kworker);
        md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
                                       "kdmwork-%s", dm_device_name(md));
        if (IS_ERR(md->kworker_task))
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index fa9b1cb4438a6ff15b71d2fdf253834c553bd5f8..8a0cae004dfc9ad1492bbd170416e678c63a0ad0 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1884,7 +1884,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
        spin_unlock(&_minor_lock);
 
        if (dm_request_based(md) && md->kworker_task)
-               flush_kthread_worker(&md->kworker);
+               kthread_flush_worker(&md->kworker);
 
        /*
         * Take suspend_lock so that presuspend and postsuspend methods
@@ -2139,7 +2139,7 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
        if (dm_request_based(md)) {
                dm_stop_queue(md->queue);
                if (md->kworker_task)
-                       flush_kthread_worker(&md->kworker);
+                       kthread_flush_worker(&md->kworker);
        }
 
        flush_workqueue(md->wq);
diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
index 374033a5bdaf5384afd22f3415f7c5add20d25ee..ee48c3e09de41d5987612afe7efb2962804195d1 100644
--- a/drivers/media/pci/ivtv/ivtv-driver.c
+++ b/drivers/media/pci/ivtv/ivtv-driver.c
@@ -750,7 +750,7 @@ static int ivtv_init_struct1(struct ivtv *itv)
        spin_lock_init(&itv->lock);
        spin_lock_init(&itv->dma_reg_lock);
 
-       init_kthread_worker(&itv->irq_worker);
+       kthread_init_worker(&itv->irq_worker);
        itv->irq_worker_task = kthread_run(kthread_worker_fn, &itv->irq_worker,
                                           "%s", itv->v4l2_dev.name);
        if (IS_ERR(itv->irq_worker_task)) {
@@ -760,7 +760,7 @@ static int ivtv_init_struct1(struct ivtv *itv)
        /* must use the FIFO scheduler as it is realtime sensitive */
        sched_setscheduler(itv->irq_worker_task, SCHED_FIFO, &param);
 
-       init_kthread_work(&itv->irq_work, ivtv_irq_work_handler);
+       kthread_init_work(&itv->irq_work, ivtv_irq_work_handler);
 
        /* Initial settings */
        itv->cxhdl.port = CX2341X_PORT_MEMORY;
@@ -1441,7 +1441,7 @@ static void ivtv_remove(struct pci_dev *pdev)
        del_timer_sync(&itv->dma_timer);
 
        /* Kill irq worker */
-       flush_kthread_worker(&itv->irq_worker);
+       kthread_flush_worker(&itv->irq_worker);
        kthread_stop(itv->irq_worker_task);
 
        ivtv_streams_cleanup(itv);
diff --git a/drivers/media/pci/ivtv/ivtv-irq.c b/drivers/media/pci/ivtv/ivtv-irq.c
index 36ca2d67c812189ffe9f1cc3b0a85c1251e4cbf3..6efe1f71262c76459a07457b8e58867cd73b5e5f 100644
--- a/drivers/media/pci/ivtv/ivtv-irq.c
+++ b/drivers/media/pci/ivtv/ivtv-irq.c
@@ -1062,7 +1062,7 @@ irqreturn_t ivtv_irq_handler(int irq, void *dev_id)
        }
 
        if (test_and_clear_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags)) {
-               queue_kthread_work(&itv->irq_worker, &itv->irq_work);
+               kthread_queue_work(&itv->irq_worker, &itv->irq_work);
        }
 
        spin_unlock(&itv->dma_reg_lock);
diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c
index 42e34076d2de650399a134fb21e64c119470ee64..b14f0305aa318023a530856ad03a04b962cfd539 100644
--- a/drivers/net/ethernet/microchip/encx24j600.c
+++ b/drivers/net/ethernet/microchip/encx24j600.c
@@ -821,7 +821,7 @@ static void encx24j600_set_multicast_list(struct net_device *dev)
        }
 
        if (oldfilter != priv->rxfilter)
-               queue_kthread_work(&priv->kworker, &priv->setrx_work);
+               kthread_queue_work(&priv->kworker, &priv->setrx_work);
 }
 
 static void encx24j600_hw_tx(struct encx24j600_priv *priv)
@@ -879,7 +879,7 @@ static netdev_tx_t encx24j600_tx(struct sk_buff *skb, struct net_device *dev)
        /* Remember the skb for deferred processing */
        priv->tx_skb = skb;
 
-       queue_kthread_work(&priv->kworker, &priv->tx_work);
+       kthread_queue_work(&priv->kworker, &priv->tx_work);
 
        return NETDEV_TX_OK;
 }
@@ -1037,9 +1037,9 @@ static int encx24j600_spi_probe(struct spi_device *spi)
                goto out_free;
        }
 
-       init_kthread_worker(&priv->kworker);
-       init_kthread_work(&priv->tx_work, encx24j600_tx_proc);
-       init_kthread_work(&priv->setrx_work, encx24j600_setrx_proc);
+       kthread_init_worker(&priv->kworker);
+       kthread_init_work(&priv->tx_work, encx24j600_tx_proc);
+       kthread_init_work(&priv->setrx_work, encx24j600_setrx_proc);
 
        priv->kworker_task = kthread_run(kthread_worker_fn, &priv->kworker,
                                         "encx24j600");
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/calib.c b/drivers/net/wireless/intel/iwlwifi/dvm/calib.c
index e9cef9de9ed85fffbf02fd71219ffd57323e4a9a..c96f9b1d948ab3662c0e9db5f8a7752b47b54265 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/calib.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/calib.c
@@ -900,8 +900,7 @@ static void iwlagn_gain_computation(struct iwl_priv *priv,
 
                /* bound gain by 2 bits value max, 3rd bit is sign */
                data->delta_gain_code[i] =
-                       min(abs(delta_g),
-                       (s32) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
+                       min(abs(delta_g), CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
 
                if (delta_g < 0)
                        /*
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 8146ccd35a1ac890aaa021beb955b4309be0fa56..5787b723b593f79bb5e55f3b68abcb2f19d4b5cb 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1112,7 +1112,7 @@ static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
 
        /* If another context is idling the device then defer */
        if (master->idling) {
-               queue_kthread_work(&master->kworker, &master->pump_messages);
+               kthread_queue_work(&master->kworker, &master->pump_messages);
                spin_unlock_irqrestore(&master->queue_lock, flags);
                return;
        }
@@ -1126,7 +1126,7 @@ static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
 
                /* Only do teardown in the thread */
                if (!in_kthread) {
-                       queue_kthread_work(&master->kworker,
+                       kthread_queue_work(&master->kworker,
                                           &master->pump_messages);
                        spin_unlock_irqrestore(&master->queue_lock, flags);
                        return;
@@ -1250,7 +1250,7 @@ static int spi_init_queue(struct spi_master *master)
        master->running = false;
        master->busy = false;
 
-       init_kthread_worker(&master->kworker);
+       kthread_init_worker(&master->kworker);
        master->kworker_task = kthread_run(kthread_worker_fn,
                                           &master->kworker, "%s",
                                           dev_name(&master->dev));
@@ -1258,7 +1258,7 @@ static int spi_init_queue(struct spi_master *master)
                dev_err(&master->dev, "failed to create message pump task\n");
                return PTR_ERR(master->kworker_task);
        }
-       init_kthread_work(&master->pump_messages, spi_pump_messages);
+       kthread_init_work(&master->pump_messages, spi_pump_messages);
 
        /*
         * Master config will indicate if this controller should run the
@@ -1331,7 +1331,7 @@ void spi_finalize_current_message(struct spi_master *master)
        spin_lock_irqsave(&master->queue_lock, flags);
        master->cur_msg = NULL;
        master->cur_msg_prepared = false;
-       queue_kthread_work(&master->kworker, &master->pump_messages);
+       kthread_queue_work(&master->kworker, &master->pump_messages);
        spin_unlock_irqrestore(&master->queue_lock, flags);
 
        trace_spi_message_done(mesg);
@@ -1357,7 +1357,7 @@ static int spi_start_queue(struct spi_master *master)
        master->cur_msg = NULL;
        spin_unlock_irqrestore(&master->queue_lock, flags);
 
-       queue_kthread_work(&master->kworker, &master->pump_messages);
+       kthread_queue_work(&master->kworker, &master->pump_messages);
 
        return 0;
 }
@@ -1404,7 +1404,7 @@ static int spi_destroy_queue(struct spi_master *master)
        ret = spi_stop_queue(master);
 
        /*
-        * flush_kthread_worker will block until all work is done.
+        * kthread_flush_worker will block until all work is done.
         * If the reason that stop_queue timed out is that the work will never
         * finish, then it does no good to call flush/stop thread, so
         * return anyway.
@@ -1414,7 +1414,7 @@ static int spi_destroy_queue(struct spi_master *master)
                return ret;
        }
 
-       flush_kthread_worker(&master->kworker);
+       kthread_flush_worker(&master->kworker);
        kthread_stop(master->kworker_task);
 
        return 0;
@@ -1438,7 +1438,7 @@ static int __spi_queued_transfer(struct spi_device *spi,
 
        list_add_tail(&msg->queue, &master->queue);
        if (!master->busy && need_pump)
-               queue_kthread_work(&master->kworker, &master->pump_messages);
+               kthread_queue_work(&master->kworker, &master->pump_messages);
 
        spin_unlock_irqrestore(&master->queue_lock, flags);
        return 0;
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index a9d94f7cf683d54ecdb6b5e7b4e7c1a8cfa3f241..2675792a8f5963a37b82d708b0ce87f8f070d5dd 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -708,7 +708,7 @@ static irqreturn_t sc16is7xx_irq(int irq, void *dev_id)
 {
        struct sc16is7xx_port *s = (struct sc16is7xx_port *)dev_id;
 
-       queue_kthread_work(&s->kworker, &s->irq_work);
+       kthread_queue_work(&s->kworker, &s->irq_work);
 
        return IRQ_HANDLED;
 }
@@ -784,7 +784,7 @@ static void sc16is7xx_ier_clear(struct uart_port *port, u8 bit)
 
        one->config.flags |= SC16IS7XX_RECONF_IER;
        one->config.ier_clear |= bit;
-       queue_kthread_work(&s->kworker, &one->reg_work);
+       kthread_queue_work(&s->kworker, &one->reg_work);
 }
 
 static void sc16is7xx_stop_tx(struct uart_port *port)
@@ -802,7 +802,7 @@ static void sc16is7xx_start_tx(struct uart_port *port)
        struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
        struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
 
-       queue_kthread_work(&s->kworker, &one->tx_work);
+       kthread_queue_work(&s->kworker, &one->tx_work);
 }
 
 static unsigned int sc16is7xx_tx_empty(struct uart_port *port)
@@ -828,7 +828,7 @@ static void sc16is7xx_set_mctrl(struct uart_port *port, unsigned int mctrl)
        struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
 
        one->config.flags |= SC16IS7XX_RECONF_MD;
-       queue_kthread_work(&s->kworker, &one->reg_work);
+       kthread_queue_work(&s->kworker, &one->reg_work);
 }
 
 static void sc16is7xx_break_ctl(struct uart_port *port, int break_state)
@@ -957,7 +957,7 @@ static int sc16is7xx_config_rs485(struct uart_port *port,
 
        port->rs485 = *rs485;
        one->config.flags |= SC16IS7XX_RECONF_RS485;
-       queue_kthread_work(&s->kworker, &one->reg_work);
+       kthread_queue_work(&s->kworker, &one->reg_work);
 
        return 0;
 }
@@ -1030,7 +1030,7 @@ static void sc16is7xx_shutdown(struct uart_port *port)
 
        sc16is7xx_power(port, 0);
 
-       flush_kthread_worker(&s->kworker);
+       kthread_flush_worker(&s->kworker);
 }
 
 static const char *sc16is7xx_type(struct uart_port *port)
@@ -1176,8 +1176,8 @@ static int sc16is7xx_probe(struct device *dev,
        s->devtype = devtype;
        dev_set_drvdata(dev, s);
 
-       init_kthread_worker(&s->kworker);
-       init_kthread_work(&s->irq_work, sc16is7xx_ist);
+       kthread_init_worker(&s->kworker);
+       kthread_init_work(&s->irq_work, sc16is7xx_ist);
        s->kworker_task = kthread_run(kthread_worker_fn, &s->kworker,
                                      "sc16is7xx");
        if (IS_ERR(s->kworker_task)) {
@@ -1234,8 +1234,8 @@ static int sc16is7xx_probe(struct device *dev,
                                     SC16IS7XX_EFCR_RXDISABLE_BIT |
                                     SC16IS7XX_EFCR_TXDISABLE_BIT);
                /* Initialize kthread work structs */
-               init_kthread_work(&s->p[i].tx_work, sc16is7xx_tx_proc);
-               init_kthread_work(&s->p[i].reg_work, sc16is7xx_reg_proc);
+               kthread_init_work(&s->p[i].tx_work, sc16is7xx_tx_proc);
+               kthread_init_work(&s->p[i].reg_work, sc16is7xx_reg_proc);
                /* Register port */
                uart_add_one_port(&sc16is7xx_uart, &s->p[i].port);
 
@@ -1301,7 +1301,7 @@ static int sc16is7xx_remove(struct device *dev)
                sc16is7xx_power(&s->p[i].port, 0);
        }
 
-       flush_kthread_worker(&s->kworker);
+       kthread_flush_worker(&s->kworker);
        kthread_stop(s->kworker_task);
 
        if (!IS_ERR(s->clk))
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index d3ae4292931b790415b449d0fad19c017823f4ca..cd3874c21b3cfd849c239db5e719e7e9b3aeec4e 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -261,6 +261,12 @@ phys_addr_t paddr_vmcoreinfo_note(void);
        vmcoreinfo_append_str("CONFIG_%s=y\n", #name)
 #define VMCOREINFO_PHYS_BASE(value) \
        vmcoreinfo_append_str("PHYS_BASE=%lx\n", (unsigned long)value)
+#define VMCOREINFO_PAGE_OFFSET(value) \
+       vmcoreinfo_append_str("PAGE_OFFSET=%lx\n", (unsigned long)value)
+#define VMCOREINFO_VMALLOC_START(value) \
+       vmcoreinfo_append_str("VMALLOC_START=%lx\n", (unsigned long)value)
+#define VMCOREINFO_VMEMMAP_START(value) \
+       vmcoreinfo_append_str("VMEMMAP_START=%lx\n", (unsigned long)value)
 
 extern struct kimage *kexec_image;
 extern struct kimage *kexec_crash_image;
diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
index 4894c6888bc6cfb95e50d43366784769faf9b281..1c2a328296200ac391ed5385d2427ffa55d225a5 100644
--- a/include/linux/kmemleak.h
+++ b/include/linux/kmemleak.h
@@ -38,6 +38,11 @@ extern void kmemleak_not_leak(const void *ptr) __ref;
 extern void kmemleak_ignore(const void *ptr) __ref;
 extern void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) __ref;
 extern void kmemleak_no_scan(const void *ptr) __ref;
+extern void kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
+                               gfp_t gfp) __ref;
+extern void kmemleak_free_part_phys(phys_addr_t phys, size_t size) __ref;
+extern void kmemleak_not_leak_phys(phys_addr_t phys) __ref;
+extern void kmemleak_ignore_phys(phys_addr_t phys) __ref;
 
 static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
                                            int min_count, unsigned long flags,
@@ -106,6 +111,19 @@ static inline void kmemleak_erase(void **ptr)
 static inline void kmemleak_no_scan(const void *ptr)
 {
 }
+static inline void kmemleak_alloc_phys(phys_addr_t phys, size_t size,
+                                      int min_count, gfp_t gfp)
+{
+}
+static inline void kmemleak_free_part_phys(phys_addr_t phys, size_t size)
+{
+}
+static inline void kmemleak_not_leak_phys(phys_addr_t phys)
+{
+}
+static inline void kmemleak_ignore_phys(phys_addr_t phys)
+{
+}
 
 #endif /* CONFIG_DEBUG_KMEMLEAK */
 
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index e691b6a23f72230bf50652b0ebf40a3fe5ef54c9..a6e82a69c363bb3d7af21e358aaef84bc2f265aa 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -10,6 +10,17 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
                                           int node,
                                           const char namefmt[], ...);
 
+/**
+ * kthread_create - create a kthread on the current node
+ * @threadfn: the function to run in the thread
+ * @data: data pointer for @threadfn()
+ * @namefmt: printf-style format string for the thread name
+ * @...: arguments for @namefmt.
+ *
+ * This macro will create a kthread on the current node, leaving it in
+ * the stopped state.  This is just a helper for kthread_create_on_node();
+ * see the documentation there for more details.
+ */
 #define kthread_create(threadfn, data, namefmt, arg...) \
        kthread_create_on_node(threadfn, data, NUMA_NO_NODE, namefmt, ##arg)
 
@@ -44,7 +55,7 @@ bool kthread_should_stop(void);
 bool kthread_should_park(void);
 bool kthread_freezable_should_stop(bool *was_frozen);
 void *kthread_data(struct task_struct *k);
-void *probe_kthread_data(struct task_struct *k);
+void *kthread_probe_data(struct task_struct *k);
 int kthread_park(struct task_struct *k);
 void kthread_unpark(struct task_struct *k);
 void kthread_parkme(void);
@@ -57,16 +68,23 @@ extern int tsk_fork_get_node(struct task_struct *tsk);
  * Simple work processor based on kthread.
  *
  * This provides easier way to make use of kthreads.  A kthread_work
- * can be queued and flushed using queue/flush_kthread_work()
+ * can be queued and flushed using queue/kthread_flush_work()
+ * can be queued and flushed using queue/kthread_flush_work()
  * respectively.  Queued kthread_works are processed by a kthread
  * running kthread_worker_fn().
  */
 struct kthread_work;
 typedef void (*kthread_work_func_t)(struct kthread_work *work);
+void kthread_delayed_work_timer_fn(unsigned long __data);
+
+enum {
+       KTW_FREEZABLE           = 1 << 0,       /* freeze during suspend */
+};
 
 struct kthread_worker {
+       unsigned int            flags;
        spinlock_t              lock;
        struct list_head        work_list;
+       struct list_head        delayed_work_list;
        struct task_struct      *task;
        struct kthread_work     *current_work;
 };
@@ -75,11 +93,19 @@ struct kthread_work {
        struct list_head        node;
        kthread_work_func_t     func;
        struct kthread_worker   *worker;
+       /* Number of canceling calls that are running at the moment. */
+       int                     canceling;
+};
+
+struct kthread_delayed_work {
+       struct kthread_work work;
+       struct timer_list timer;
 };
 
 #define KTHREAD_WORKER_INIT(worker)    {                               \
        .lock = __SPIN_LOCK_UNLOCKED((worker).lock),                    \
        .work_list = LIST_HEAD_INIT((worker).work_list),                \
+       .delayed_work_list = LIST_HEAD_INIT((worker).delayed_work_list),\
        }
 
 #define KTHREAD_WORK_INIT(work, fn)    {                               \
@@ -87,46 +113,88 @@ struct kthread_work {
        .func = (fn),                                                   \
        }
 
+#define KTHREAD_DELAYED_WORK_INIT(dwork, fn) {                         \
+       .work = KTHREAD_WORK_INIT((dwork).work, (fn)),                  \
+       .timer = __TIMER_INITIALIZER(kthread_delayed_work_timer_fn,     \
+                                    0, (unsigned long)&(dwork),        \
+                                    TIMER_IRQSAFE),                    \
+       }
+
 #define DEFINE_KTHREAD_WORKER(worker)                                  \
        struct kthread_worker worker = KTHREAD_WORKER_INIT(worker)
 
 #define DEFINE_KTHREAD_WORK(work, fn)                                  \
        struct kthread_work work = KTHREAD_WORK_INIT(work, fn)
 
+#define DEFINE_KTHREAD_DELAYED_WORK(dwork, fn)                         \
+       struct kthread_delayed_work dwork =                             \
+               KTHREAD_DELAYED_WORK_INIT(dwork, fn)
+
 /*
  * kthread_worker.lock needs its own lockdep class key when defined on
  * stack with lockdep enabled.  Use the following macros in such cases.
  */
 #ifdef CONFIG_LOCKDEP
 # define KTHREAD_WORKER_INIT_ONSTACK(worker)                           \
-       ({ init_kthread_worker(&worker); worker; })
+       ({ kthread_init_worker(&worker); worker; })
 # define DEFINE_KTHREAD_WORKER_ONSTACK(worker)                         \
        struct kthread_worker worker = KTHREAD_WORKER_INIT_ONSTACK(worker)
 #else
 # define DEFINE_KTHREAD_WORKER_ONSTACK(worker) DEFINE_KTHREAD_WORKER(worker)
 #endif
 
-extern void __init_kthread_worker(struct kthread_worker *worker,
+extern void __kthread_init_worker(struct kthread_worker *worker,
                        const char *name, struct lock_class_key *key);
 
-#define init_kthread_worker(worker)                                    \
+#define kthread_init_worker(worker)                                    \
        do {                                                            \
                static struct lock_class_key __key;                     \
-               __init_kthread_worker((worker), "("#worker")->lock", &__key); \
+               __kthread_init_worker((worker), "("#worker")->lock", &__key); \
        } while (0)
 
-#define init_kthread_work(work, fn)                                    \
+#define kthread_init_work(work, fn)                                    \
        do {                                                            \
                memset((work), 0, sizeof(struct kthread_work));         \
                INIT_LIST_HEAD(&(work)->node);                          \
                (work)->func = (fn);                                    \
        } while (0)
 
+#define kthread_init_delayed_work(dwork, fn)                           \
+       do {                                                            \
+               kthread_init_work(&(dwork)->work, (fn));                \
+               __setup_timer(&(dwork)->timer,                          \
+                             kthread_delayed_work_timer_fn,            \
+                             (unsigned long)(dwork),                   \
+                             TIMER_IRQSAFE);                           \
+       } while (0)
+
 int kthread_worker_fn(void *worker_ptr);
 
-bool queue_kthread_work(struct kthread_worker *worker,
+__printf(2, 3)
+struct kthread_worker *
+kthread_create_worker(unsigned int flags, const char namefmt[], ...);
+
+struct kthread_worker *
+kthread_create_worker_on_cpu(int cpu, unsigned int flags,
+                            const char namefmt[], ...);
+
+bool kthread_queue_work(struct kthread_worker *worker,
                        struct kthread_work *work);
-void flush_kthread_work(struct kthread_work *work);
-void flush_kthread_worker(struct kthread_worker *worker);
+
+bool kthread_queue_delayed_work(struct kthread_worker *worker,
+                               struct kthread_delayed_work *dwork,
+                               unsigned long delay);
+
+bool kthread_mod_delayed_work(struct kthread_worker *worker,
+                             struct kthread_delayed_work *dwork,
+                             unsigned long delay);
+
+void kthread_flush_work(struct kthread_work *work);
+void kthread_flush_worker(struct kthread_worker *worker);
+
+bool kthread_cancel_work_sync(struct kthread_work *work);
+bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *work);
+
+void kthread_destroy_worker(struct kthread_worker *worker);
 
 #endif /* _LINUX_KTHREAD_H */
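
A usage sketch of the delayed-work worker API declared above (not part of this patch): the device structure, callback body and 100 ms period are assumptions made for the example; only the kthread_*() calls come from the declarations in this header.

    #include <linux/err.h>
    #include <linux/jiffies.h>
    #include <linux/kernel.h>
    #include <linux/kthread.h>

    struct my_dev {
    	struct kthread_worker *worker;
    	struct kthread_delayed_work poll_work;
    };

    /* Work callback: poll the hardware and re-arm itself after ~100 ms. */
    static void my_dev_poll(struct kthread_work *work)
    {
    	struct my_dev *dev = container_of(work, struct my_dev, poll_work.work);

    	/* ... poll the hardware ... */

    	kthread_queue_delayed_work(dev->worker, &dev->poll_work,
    				   msecs_to_jiffies(100));
    }

    static int my_dev_start(struct my_dev *dev)
    {
    	/* Create a dedicated worker; flags could be KTW_FREEZABLE if needed. */
    	dev->worker = kthread_create_worker(0, "my_dev-poller");
    	if (IS_ERR(dev->worker))
    		return PTR_ERR(dev->worker);

    	kthread_init_delayed_work(&dev->poll_work, my_dev_poll);
    	kthread_queue_delayed_work(dev->worker, &dev->poll_work, 0);
    	return 0;
    }

    static void my_dev_stop(struct my_dev *dev)
    {
    	kthread_cancel_delayed_work_sync(&dev->poll_work);
    	kthread_destroy_worker(dev->worker);
    }
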
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 77c141797152ef80af0a9e2d0070bfc56e796f27..37a130eff270710c120801b28bff1c0add7fda66 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -93,8 +93,6 @@ __mlx5_mask(typ, fld))
 })
 
 #define MLX5_SET64(typ, p, fld, v) do { \
-       BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) != 64); \
-       BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \
        *((__be64 *)(p) + __mlx5_64_off(typ, fld)) = cpu_to_be64(v); \
 } while (0)
 
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index 432c3d71d19536b0f468ec84ecc15d2300752103..2b59c82cc3e1bb0813088cf5e22307d81230e13e 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -98,26 +98,26 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
 
        trace_sched_process_hang(t);
 
-       if (!sysctl_hung_task_warnings)
+       if (!sysctl_hung_task_warnings && !sysctl_hung_task_panic)
                return;
 
-       if (sysctl_hung_task_warnings > 0)
-               sysctl_hung_task_warnings--;
-
        /*
         * Ok, the task did not get scheduled for more than 2 minutes,
         * complain:
         */
-       pr_err("INFO: task %s:%d blocked for more than %ld seconds.\n",
-               t->comm, t->pid, timeout);
-       pr_err("      %s %s %.*s\n",
-               print_tainted(), init_utsname()->release,
-               (int)strcspn(init_utsname()->version, " "),
-               init_utsname()->version);
-       pr_err("\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\""
-               " disables this message.\n");
-       sched_show_task(t);
-       debug_show_all_locks();
+       if (sysctl_hung_task_warnings) {
+               sysctl_hung_task_warnings--;
+               pr_err("INFO: task %s:%d blocked for more than %ld seconds.\n",
+                       t->comm, t->pid, timeout);
+               pr_err("      %s %s %.*s\n",
+                       print_tainted(), init_utsname()->release,
+                       (int)strcspn(init_utsname()->version, " "),
+                       init_utsname()->version);
+               pr_err("\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\""
+                       " disables this message.\n");
+               sched_show_task(t);
+               debug_show_all_locks();
+       }
 
        touch_nmi_watchdog();
 
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 9ff173dca1aef5e09fd640dc6757fee99c7a956c..9443ddc1b96755b8c89b90b58efaa85a1b792b14 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -138,7 +138,7 @@ void *kthread_data(struct task_struct *task)
 }
 
 /**
- * probe_kthread_data - speculative version of kthread_data()
+ * kthread_probe_data - speculative version of kthread_data()
  * @task: possible kthread task in question
  *
  * @task could be a kthread task.  Return the data value specified when it
@@ -146,7 +146,7 @@ void *kthread_data(struct task_struct *task)
  * inaccessible for any reason, %NULL is returned.  This function requires
  * that @task itself is safe to dereference.
  */
-void *probe_kthread_data(struct task_struct *task)
+void *kthread_probe_data(struct task_struct *task)
 {
        struct kthread *kthread = to_kthread(task);
        void *data = NULL;
@@ -244,33 +244,10 @@ static void create_kthread(struct kthread_create_info *create)
        }
 }
 
-/**
- * kthread_create_on_node - create a kthread.
- * @threadfn: the function to run until signal_pending(current).
- * @data: data ptr for @threadfn.
- * @node: task and thread structures for the thread are allocated on this node
- * @namefmt: printf-style name for the thread.
- *
- * Description: This helper function creates and names a kernel
- * thread.  The thread will be stopped: use wake_up_process() to start
- * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy and
- * is affine to all CPUs.
- *
- * If thread is going to be bound on a particular cpu, give its node
- * in @node, to get NUMA affinity for kthread stack, or else give NUMA_NO_NODE.
- * When woken, the thread will run @threadfn() with @data as its
- * argument. @threadfn() can either call do_exit() directly if it is a
- * standalone thread for which no one will call kthread_stop(), or
- * return when 'kthread_should_stop()' is true (which means
- * kthread_stop() has been called).  The return value should be zero
- * or a negative error number; it will be passed to kthread_stop().
- *
- * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
- */
-struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
-                                          void *data, int node,
-                                          const char namefmt[],
-                                          ...)
+static struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
+                                                   void *data, int node,
+                                                   const char namefmt[],
+                                                   va_list args)
 {
        DECLARE_COMPLETION_ONSTACK(done);
        struct task_struct *task;
@@ -311,11 +288,8 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
        task = create->result;
        if (!IS_ERR(task)) {
                static const struct sched_param param = { .sched_priority = 0 };
-               va_list args;
 
-               va_start(args, namefmt);
                vsnprintf(task->comm, sizeof(task->comm), namefmt, args);
-               va_end(args);
                /*
                 * root may have changed our (kthreadd's) priority or CPU mask.
                 * The kernel thread should not inherit these properties.
@@ -326,6 +300,44 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
        kfree(create);
        return task;
 }
+
+/**
+ * kthread_create_on_node - create a kthread.
+ * @threadfn: the function to run until signal_pending(current).
+ * @data: data ptr for @threadfn.
+ * @node: task and thread structures for the thread are allocated on this node
+ * @namefmt: printf-style name for the thread.
+ *
+ * Description: This helper function creates and names a kernel
+ * thread.  The thread will be stopped: use wake_up_process() to start
+ * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy and
+ * is affine to all CPUs.
+ *
+ * If thread is going to be bound on a particular cpu, give its node
+ * in @node, to get NUMA affinity for kthread stack, or else give NUMA_NO_NODE.
+ * When woken, the thread will run @threadfn() with @data as its
+ * argument. @threadfn() can either call do_exit() directly if it is a
+ * standalone thread for which no one will call kthread_stop(), or
+ * return when 'kthread_should_stop()' is true (which means
+ * kthread_stop() has been called).  The return value should be zero
+ * or a negative error number; it will be passed to kthread_stop().
+ *
+ * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
+ */
+struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
+                                          void *data, int node,
+                                          const char namefmt[],
+                                          ...)
+{
+       struct task_struct *task;
+       va_list args;
+
+       va_start(args, namefmt);
+       task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
+       va_end(args);
+
+       return task;
+}
 EXPORT_SYMBOL(kthread_create_on_node);
 
 static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, long state)
@@ -390,10 +402,10 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
                                   cpu);
        if (IS_ERR(p))
                return p;
+       kthread_bind(p, cpu);
+       /* CPU hotplug need to bind once again when unparking the thread. */
        set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
        to_kthread(p)->cpu = cpu;
-       /* Park the thread to get it out of TASK_UNINTERRUPTIBLE state */
-       kthread_park(p);
        return p;
 }
 
@@ -407,6 +419,10 @@ static void __kthread_unpark(struct task_struct *k, struct kthread *kthread)
         * which might be about to be cleared.
         */
        if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
+               /*
+                * Newly created kthread was parked when the CPU was offline.
+                * The binding was lost and we need to set it again.
+                */
                if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
                        __kthread_bind(k, kthread->cpu, TASK_PARKED);
                wake_up_state(k, TASK_PARKED);
@@ -536,39 +552,48 @@ int kthreadd(void *unused)
        return 0;
 }
 
-void __init_kthread_worker(struct kthread_worker *worker,
+void __kthread_init_worker(struct kthread_worker *worker,
                                const char *name,
                                struct lock_class_key *key)
 {
+       memset(worker, 0, sizeof(struct kthread_worker));
        spin_lock_init(&worker->lock);
        lockdep_set_class_and_name(&worker->lock, key, name);
        INIT_LIST_HEAD(&worker->work_list);
-       worker->task = NULL;
+       INIT_LIST_HEAD(&worker->delayed_work_list);
 }
-EXPORT_SYMBOL_GPL(__init_kthread_worker);
+EXPORT_SYMBOL_GPL(__kthread_init_worker);
 
 /**
  * kthread_worker_fn - kthread function to process kthread_worker
  * @worker_ptr: pointer to initialized kthread_worker
  *
- * This function can be used as @threadfn to kthread_create() or
- * kthread_run() with @worker_ptr argument pointing to an initialized
- * kthread_worker.  The started kthread will process work_list until
- * the it is stopped with kthread_stop().  A kthread can also call
- * this function directly after extra initialization.
+ * This function implements the main cycle of kthread worker. It processes
+ * work_list until it is stopped with kthread_stop(). It sleeps when the queue
+ * is empty.
+ *
+ * Works must not hold any locks or keep preemption or interrupts disabled
+ * when they finish. A safe point for freezing is provided after one work
+ * finishes and before the next one starts.
  *
- * Different kthreads can be used for the same kthread_worker as long
- * as there's only one kthread attached to it at any given time.  A
- * kthread_worker without an attached kthread simply collects queued
- * kthread_works.
+ * Also the works must not be handled by more than one worker at the same time,
+ * see also kthread_queue_work().
  */
 int kthread_worker_fn(void *worker_ptr)
 {
        struct kthread_worker *worker = worker_ptr;
        struct kthread_work *work;
 
-       WARN_ON(worker->task);
+       /*
+        * FIXME: Update the check and remove the assignment when all kthread
+        * worker users are created using kthread_create_worker*() functions.
+        */
+       WARN_ON(worker->task && worker->task != current);
        worker->task = current;
+
+       if (worker->flags & KTW_FREEZABLE)
+               set_freezable();
+
 repeat:
        set_current_state(TASK_INTERRUPTIBLE);  /* mb paired w/ kthread_stop */
 
@@ -601,12 +626,131 @@ repeat:
 }
 EXPORT_SYMBOL_GPL(kthread_worker_fn);
 
-/* insert @work before @pos in @worker */
-static void insert_kthread_work(struct kthread_worker *worker,
-                              struct kthread_work *work,
-                              struct list_head *pos)
+static struct kthread_worker *
+__kthread_create_worker(int cpu, unsigned int flags,
+                       const char namefmt[], va_list args)
+{
+       struct kthread_worker *worker;
+       struct task_struct *task;
+
+       worker = kzalloc(sizeof(*worker), GFP_KERNEL);
+       if (!worker)
+               return ERR_PTR(-ENOMEM);
+
+       kthread_init_worker(worker);
+
+       if (cpu >= 0) {
+               char name[TASK_COMM_LEN];
+
+               /*
+                * kthread_create_worker_on_cpu() accepts a generic namefmt,
+                * unlike kthread_create_on_cpu(), so the name has to be
+                * formatted here.
+                */
+               vsnprintf(name, sizeof(name), namefmt, args);
+               task = kthread_create_on_cpu(kthread_worker_fn, worker,
+                                            cpu, name);
+       } else {
+               task = __kthread_create_on_node(kthread_worker_fn, worker,
+                                               -1, namefmt, args);
+       }
+
+       if (IS_ERR(task))
+               goto fail_task;
+
+       worker->flags = flags;
+       worker->task = task;
+       wake_up_process(task);
+       return worker;
+
+fail_task:
+       kfree(worker);
+       return ERR_CAST(task);
+}
+
+/**
+ * kthread_create_worker - create a kthread worker
+ * @flags: flags modifying the default behavior of the worker
+ * @namefmt: printf-style name for the kthread worker (task).
+ *
+ * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
+ * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
+ * when the worker was SIGKILLed.
+ */
+struct kthread_worker *
+kthread_create_worker(unsigned int flags, const char namefmt[], ...)
+{
+       struct kthread_worker *worker;
+       va_list args;
+
+       va_start(args, namefmt);
+       worker = __kthread_create_worker(-1, flags, namefmt, args);
+       va_end(args);
+
+       return worker;
+}
+EXPORT_SYMBOL(kthread_create_worker);
+
+/**
+ * kthread_create_worker_on_cpu - create a kthread worker and bind it
+ *     to a given CPU and the associated NUMA node.
+ * @cpu: CPU number
+ * @flags: flags modifying the default behavior of the worker
+ * @namefmt: printf-style name for the kthread worker (task).
+ *
+ * Use a valid CPU number if you want to bind the kthread worker
+ * to the given CPU and the associated NUMA node.
+ *
+ * A good practice is to add the cpu number also into the worker name.
+ * For example, use kthread_create_worker_on_cpu(cpu, "helper/%d", cpu).
+ *
+ * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
+ * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
+ * when the worker was SIGKILLed.
+ */
+struct kthread_worker *
+kthread_create_worker_on_cpu(int cpu, unsigned int flags,
+                            const char namefmt[], ...)
+{
+       struct kthread_worker *worker;
+       va_list args;
+
+       va_start(args, namefmt);
+       worker = __kthread_create_worker(cpu, flags, namefmt, args);
+       va_end(args);
+
+       return worker;
+}
+EXPORT_SYMBOL(kthread_create_worker_on_cpu);
+
+/*
+ * Returns true when the work could not be queued at the moment.
+ * It happens when it is already pending in a worker list
+ * or when it is being cancelled.
+ */
+static inline bool queuing_blocked(struct kthread_worker *worker,
+                                  struct kthread_work *work)
+{
+       lockdep_assert_held(&worker->lock);
+
+       return !list_empty(&work->node) || work->canceling;
+}
+
+static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
+                                            struct kthread_work *work)
 {
        lockdep_assert_held(&worker->lock);
+       WARN_ON_ONCE(!list_empty(&work->node));
+       /* Do not use a work with >1 worker, see kthread_queue_work() */
+       WARN_ON_ONCE(work->worker && work->worker != worker);
+}
+
+/* insert @work before @pos in @worker */
+static void kthread_insert_work(struct kthread_worker *worker,
+                               struct kthread_work *work,
+                               struct list_head *pos)
+{
+       kthread_insert_work_sanity_check(worker, work);
 
        list_add_tail(&work->node, pos);
        work->worker = worker;
@@ -615,29 +759,133 @@ static void insert_kthread_work(struct kthread_worker *worker,
 }
 
 /**
- * queue_kthread_work - queue a kthread_work
+ * kthread_queue_work - queue a kthread_work
  * @worker: target kthread_worker
  * @work: kthread_work to queue
  *
  * Queue @work to work processor @task for async execution.  @task
  * must have been created with kthread_worker_create().  Returns %true
  * if @work was successfully queued, %false if it was already pending.
+ *
+ * Reinitialize the work if it needs to be used by another worker.
+ * For example, when the worker was stopped and started again.
  */
-bool queue_kthread_work(struct kthread_worker *worker,
+bool kthread_queue_work(struct kthread_worker *worker,
                        struct kthread_work *work)
 {
        bool ret = false;
        unsigned long flags;
 
        spin_lock_irqsave(&worker->lock, flags);
-       if (list_empty(&work->node)) {
-               insert_kthread_work(worker, work, &worker->work_list);
+       if (!queuing_blocked(worker, work)) {
+               kthread_insert_work(worker, work, &worker->work_list);
+               ret = true;
+       }
+       spin_unlock_irqrestore(&worker->lock, flags);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(kthread_queue_work);
+
+/**
+ * kthread_delayed_work_timer_fn - callback that queues the associated kthread
+ *     delayed work when the timer expires.
+ * @__data: pointer to the data associated with the timer
+ *
+ * The format of the function is defined by struct timer_list.
+ * It should have been called from irqsafe timer with irq already off.
+ */
+void kthread_delayed_work_timer_fn(unsigned long __data)
+{
+       struct kthread_delayed_work *dwork =
+               (struct kthread_delayed_work *)__data;
+       struct kthread_work *work = &dwork->work;
+       struct kthread_worker *worker = work->worker;
+
+       /*
+        * This might happen when a pending work is reinitialized.
+        * It means that it is used a wrong way.
+        */
+       if (WARN_ON_ONCE(!worker))
+               return;
+
+       spin_lock(&worker->lock);
+       /* Work must not be used with >1 worker, see kthread_queue_work(). */
+       WARN_ON_ONCE(work->worker != worker);
+
+       /* Move the work from worker->delayed_work_list. */
+       WARN_ON_ONCE(list_empty(&work->node));
+       list_del_init(&work->node);
+       kthread_insert_work(worker, work, &worker->work_list);
+
+       spin_unlock(&worker->lock);
+}
+EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
+
+void __kthread_queue_delayed_work(struct kthread_worker *worker,
+                                 struct kthread_delayed_work *dwork,
+                                 unsigned long delay)
+{
+       struct timer_list *timer = &dwork->timer;
+       struct kthread_work *work = &dwork->work;
+
+       WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn ||
+                    timer->data != (unsigned long)dwork);
+
+       /*
+        * If @delay is 0, queue @dwork->work immediately.  This is for
+        * both optimization and correctness.  The earliest @timer can
+        * expire is on the closest next tick and delayed_work users depend
+        * on that there's no such delay when @delay is 0.
+        */
+       if (!delay) {
+               kthread_insert_work(worker, work, &worker->work_list);
+               return;
+       }
+
+       /* Be paranoid and try to detect possible races already now. */
+       kthread_insert_work_sanity_check(worker, work);
+
+       list_add(&work->node, &worker->delayed_work_list);
+       work->worker = worker;
+       timer_stats_timer_set_start_info(&dwork->timer);
+       timer->expires = jiffies + delay;
+       add_timer(timer);
+}
+
+/**
+ * kthread_queue_delayed_work - queue the associated kthread work
+ *     after a delay.
+ * @worker: target kthread_worker
+ * @dwork: kthread_delayed_work to queue
+ * @delay: number of jiffies to wait before queuing
+ *
+ * If the work has not been pending it starts a timer that will queue
+ * the work after the given @delay. If @delay is zero, it queues the
+ * work immediately.
+ *
+ * Return: %false if the @work has already been pending. It means that
+ * either the timer was running or the work was queued. It returns %true
+ * otherwise.
+ */
+bool kthread_queue_delayed_work(struct kthread_worker *worker,
+                               struct kthread_delayed_work *dwork,
+                               unsigned long delay)
+{
+       struct kthread_work *work = &dwork->work;
+       unsigned long flags;
+       bool ret = false;
+
+       spin_lock_irqsave(&worker->lock, flags);
+
+       if (!queuing_blocked(worker, work)) {
+               __kthread_queue_delayed_work(worker, dwork, delay);
                ret = true;
        }
+
        spin_unlock_irqrestore(&worker->lock, flags);
        return ret;
 }
-EXPORT_SYMBOL_GPL(queue_kthread_work);
+EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
 
 struct kthread_flush_work {
        struct kthread_work     work;
@@ -652,12 +900,12 @@ static void kthread_flush_work_fn(struct kthread_work *work)
 }
 
 /**
- * flush_kthread_work - flush a kthread_work
+ * kthread_flush_work - flush a kthread_work
  * @work: work to flush
  *
  * If @work is queued or executing, wait for it to finish execution.
  */
-void flush_kthread_work(struct kthread_work *work)
+void kthread_flush_work(struct kthread_work *work)
 {
        struct kthread_flush_work fwork = {
                KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
@@ -666,21 +914,19 @@ void flush_kthread_work(struct kthread_work *work)
        struct kthread_worker *worker;
        bool noop = false;
 
-retry:
        worker = work->worker;
        if (!worker)
                return;
 
        spin_lock_irq(&worker->lock);
-       if (work->worker != worker) {
-               spin_unlock_irq(&worker->lock);
-               goto retry;
-       }
+       /* Work must not be used with >1 worker, see kthread_queue_work(). */
+       WARN_ON_ONCE(work->worker != worker);
 
        if (!list_empty(&work->node))
-               insert_kthread_work(worker, &fwork.work, work->node.next);
+               kthread_insert_work(worker, &fwork.work, work->node.next);
        else if (worker->current_work == work)
-               insert_kthread_work(worker, &fwork.work, worker->work_list.next);
+               kthread_insert_work(worker, &fwork.work,
+                                   worker->work_list.next);
        else
                noop = true;
 
@@ -689,23 +935,214 @@ retry:
        if (!noop)
                wait_for_completion(&fwork.done);
 }
-EXPORT_SYMBOL_GPL(flush_kthread_work);
+EXPORT_SYMBOL_GPL(kthread_flush_work);
+
+/*
+ * This function removes the work from the worker queue. It also makes sure
+ * that the work won't get queued later via the delayed work's timer.
+ *
+ * The work might still be in use when this function finishes. See the
+ * worker->current_work field for the work that is currently being processed.
+ *
+ * Return: %true if @work was pending and successfully canceled,
+ *     %false if @work was not pending
+ */
+static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
+                                 unsigned long *flags)
+{
+       /* Try to cancel the timer if it exists. */
+       if (is_dwork) {
+               struct kthread_delayed_work *dwork =
+                       container_of(work, struct kthread_delayed_work, work);
+               struct kthread_worker *worker = work->worker;
+
+               /*
+                * del_timer_sync() must be called to make sure that the timer
+                * callback is not running. The lock must be temporarily released
+                * to avoid a deadlock with the callback. In the meantime,
+                * any queuing is blocked by setting the canceling counter.
+                */
+               work->canceling++;
+               spin_unlock_irqrestore(&worker->lock, *flags);
+               del_timer_sync(&dwork->timer);
+               spin_lock_irqsave(&worker->lock, *flags);
+               work->canceling--;
+       }
+
+       /*
+        * Try to remove the work from a worker list. It might either
+        * be from worker->work_list or from worker->delayed_work_list.
+        */
+       if (!list_empty(&work->node)) {
+               list_del_init(&work->node);
+               return true;
+       }
+
+       return false;
+}
+
+/**
+ * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
+ * @worker: kthread worker to use
+ * @dwork: kthread delayed work to queue
+ * @delay: number of jiffies to wait before queuing
+ *
+ * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
+ * modify @dwork's timer so that it expires after @delay. If @delay is zero,
+ * @work is guaranteed to be queued immediately.
+ *
+ * Return: %true if @dwork was pending and its timer was modified,
+ * %false otherwise.
+ *
+ * A special case is when the work is being canceled in parallel.
+ * It might be caused either by a real kthread_cancel_delayed_work_sync()
+ * or by another kthread_mod_delayed_work() call. We let the other command
+ * win and return %false here. The caller is supposed to synchronize these
+ * operations in a reasonable way.
+ *
+ * This function is safe to call from any context including IRQ handler.
+ * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
+ * for details.
+ */
+bool kthread_mod_delayed_work(struct kthread_worker *worker,
+                             struct kthread_delayed_work *dwork,
+                             unsigned long delay)
+{
+       struct kthread_work *work = &dwork->work;
+       unsigned long flags;
+       int ret = false;
+
+       spin_lock_irqsave(&worker->lock, flags);
+
+       /* Do not bother with canceling when never queued. */
+       if (!work->worker)
+               goto fast_queue;
+
+       /* Work must not be used with >1 worker, see kthread_queue_work() */
+       WARN_ON_ONCE(work->worker != worker);
+
+       /* Do not fight with another command that is canceling this work. */
+       if (work->canceling)
+               goto out;
+
+       ret = __kthread_cancel_work(work, true, &flags);
+fast_queue:
+       __kthread_queue_delayed_work(worker, dwork, delay);
+out:
+       spin_unlock_irqrestore(&worker->lock, flags);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
+
+static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
+{
+       struct kthread_worker *worker = work->worker;
+       unsigned long flags;
+       int ret = false;
+
+       if (!worker)
+               goto out;
+
+       spin_lock_irqsave(&worker->lock, flags);
+       /* Work must not be used with >1 worker, see kthread_queue_work(). */
+       WARN_ON_ONCE(work->worker != worker);
+
+       ret = __kthread_cancel_work(work, is_dwork, &flags);
+
+       if (worker->current_work != work)
+               goto out_fast;
+
+       /*
+        * The work is in progress and we need to wait with the lock released.
+        * In the meantime, block any queuing by setting the canceling counter.
+        */
+       work->canceling++;
+       spin_unlock_irqrestore(&worker->lock, flags);
+       kthread_flush_work(work);
+       spin_lock_irqsave(&worker->lock, flags);
+       work->canceling--;
+
+out_fast:
+       spin_unlock_irqrestore(&worker->lock, flags);
+out:
+       return ret;
+}
+
+/**
+ * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
+ * @work: the kthread work to cancel
+ *
+ * Cancel @work and wait for its execution to finish.  This function
+ * can be used even if the work re-queues itself. On return from this
+ * function, @work is guaranteed to be not pending or executing on any CPU.
+ *
+ * kthread_cancel_work_sync(&delayed_work->work) must not be used for
+ * a kthread_delayed_work. Use kthread_cancel_delayed_work_sync() instead.
+ *
+ * The caller must ensure that the worker on which @work was last
+ * queued can't be destroyed before this function returns.
+ *
+ * Return: %true if @work was pending, %false otherwise.
+ */
+bool kthread_cancel_work_sync(struct kthread_work *work)
+{
+       return __kthread_cancel_work_sync(work, false);
+}
+EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);
+
+/**
+ * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
+ *     wait for it to finish.
+ * @dwork: the kthread delayed work to cancel
+ *
+ * This is kthread_cancel_work_sync() for delayed works.
+ *
+ * Return: %true if @dwork was pending, %false otherwise.
+ */
+bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
+{
+       return __kthread_cancel_work_sync(&dwork->work, true);
+}
+EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);
 
 /**
- * flush_kthread_worker - flush all current works on a kthread_worker
+ * kthread_flush_worker - flush all current works on a kthread_worker
  * @worker: worker to flush
  *
  * Wait until all currently executing or pending works on @worker are
  * finished.
  */
-void flush_kthread_worker(struct kthread_worker *worker)
+void kthread_flush_worker(struct kthread_worker *worker)
 {
        struct kthread_flush_work fwork = {
                KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
                COMPLETION_INITIALIZER_ONSTACK(fwork.done),
        };
 
-       queue_kthread_work(worker, &fwork.work);
+       kthread_queue_work(worker, &fwork.work);
        wait_for_completion(&fwork.done);
 }
-EXPORT_SYMBOL_GPL(flush_kthread_worker);
+EXPORT_SYMBOL_GPL(kthread_flush_worker);
+
+/**
+ * kthread_destroy_worker - destroy a kthread worker
+ * @worker: worker to be destroyed
+ *
+ * Flush and destroy @worker.  The simple flush is enough because the kthread
+ * worker API is used only in trivial scenarios.  No multi-step state
+ * machines are needed.
+ */
+void kthread_destroy_worker(struct kthread_worker *worker)
+{
+       struct task_struct *task;
+
+       task = worker->task;
+       if (WARN_ON(!task))
+               return;
+
+       kthread_flush_worker(worker);
+       kthread_stop(task);
+       WARN_ON(!list_empty(&worker->work_list));
+       kfree(worker);
+}
+EXPORT_SYMBOL(kthread_destroy_worker);
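
Continuing the sketch above, the rescheduling and cancellation entry points
added in this file would typically be used as follows (again hypothetical
code, not from the patch; kthread_mod_delayed_work() is safe even from IRQ
context per its kernel-doc):

/* Reschedule: if example_dwork is still pending, push its timer out to
 * roughly five seconds from now; otherwise queue it afresh. */
static void example_reschedule(void)
{
        kthread_mod_delayed_work(&example_worker, &example_dwork, 5 * HZ);
}

/* Teardown: make sure the delayed work is neither pending nor running,
 * drain anything left on the worker, then stop its thread. */
static void example_teardown(void)
{
        kthread_cancel_delayed_work_sync(&example_dwork);
        kthread_flush_worker(&example_worker);
        kthread_stop(example_task);
}

Note that kthread_destroy_worker() additionally kfree()s the worker, so it
only applies to workers that were dynamically allocated (by the create helper
this series adds elsewhere); a statically defined worker such as
example_worker above is flushed and stopped by hand instead.
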
index 13bc43d1fb227f8ee0c55a411460200a2ac3b067..4a5c6e73ecd41e7107a89b098f4eea9fefc647f1 100644 (file)
@@ -186,6 +186,11 @@ __smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
                kfree(td);
                return PTR_ERR(tsk);
        }
+       /*
+        * Park the thread so that it can start directly on the target CPU
+        * as soon as that CPU becomes available.
+        */
+       kthread_park(tsk);
        get_task_struct(tsk);
        *per_cpu_ptr(ht->store, cpu) = tsk;
        if (ht->create) {
index 9debb7c4df648776e0346211cb4ad7644e8f7b47..17dad7639b4a3f7569ca219a5bb27e95d7605507 100644 (file)
@@ -4263,7 +4263,7 @@ void print_worker_info(const char *log_lvl, struct task_struct *task)
         * This function is called without any synchronization and @task
         * could be in any state.  Be careful with dereferences.
         */
-       worker = probe_kthread_data(task);
+       worker = kthread_probe_data(task);
 
        /*
         * Carefully copy the associated workqueue's workfn and name.  Keep
index a869f84f44d38a8905619b71052bfadc0de81dea..e8a55a3c9febae80fab4627bc8f0846f4326c56a 100644 (file)
@@ -155,7 +155,7 @@ void __init free_bootmem_late(unsigned long physaddr, unsigned long size)
 {
        unsigned long cursor, end;
 
-       kmemleak_free_part(__va(physaddr), size);
+       kmemleak_free_part_phys(physaddr, size);
 
        cursor = PFN_UP(physaddr);
        end = PFN_DOWN(physaddr + size);
@@ -399,7 +399,7 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
 {
        unsigned long start, end;
 
-       kmemleak_free_part(__va(physaddr), size);
+       kmemleak_free_part_phys(physaddr, size);
 
        start = PFN_UP(physaddr);
        end = PFN_DOWN(physaddr + size);
@@ -420,7 +420,7 @@ void __init free_bootmem(unsigned long physaddr, unsigned long size)
 {
        unsigned long start, end;
 
-       kmemleak_free_part(__va(physaddr), size);
+       kmemleak_free_part_phys(physaddr, size);
 
        start = PFN_UP(physaddr);
        end = PFN_DOWN(physaddr + size);
index bd0e1412475eb872dd354999d3120a55cce925be..384c2cb51b56bf75ab2c132d0087e3757a71c276 100644 (file)
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -336,7 +336,7 @@ int __init cma_declare_contiguous(phys_addr_t base,
                 * kmemleak scans/reads tracked objects for pointers to other
                 * objects but this address isn't mapped and accessible
                 */
-               kmemleak_ignore(phys_to_virt(addr));
+               kmemleak_ignore_phys(addr);
                base = addr;
        }
 
index 086292f7c59d1a69881068758150c2140feb8253..a5e453cf05c499cf5c7eeb9b66ce14936d4494fd 100644 (file)
@@ -90,6 +90,8 @@
 #include <linux/cache.h>
 #include <linux/percpu.h>
 #include <linux/hardirq.h>
+#include <linux/bootmem.h>
+#include <linux/pfn.h>
 #include <linux/mmzone.h>
 #include <linux/slab.h>
 #include <linux/thread_info.h>
@@ -1121,6 +1123,51 @@ void __ref kmemleak_no_scan(const void *ptr)
 }
 EXPORT_SYMBOL(kmemleak_no_scan);
 
+/**
+ * kmemleak_alloc_phys - similar to kmemleak_alloc but taking a physical
+ *                      address argument
+ */
+void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
+                              gfp_t gfp)
+{
+       if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
+               kmemleak_alloc(__va(phys), size, min_count, gfp);
+}
+EXPORT_SYMBOL(kmemleak_alloc_phys);
+
+/**
+ * kmemleak_free_part_phys - similar to kmemleak_free_part but taking a
+ *                          physical address argument
+ */
+void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
+{
+       if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
+               kmemleak_free_part(__va(phys), size);
+}
+EXPORT_SYMBOL(kmemleak_free_part_phys);
+
+/**
+ * kmemleak_not_leak_phys - similar to kmemleak_not_leak but taking a physical
+ *                         address argument
+ */
+void __ref kmemleak_not_leak_phys(phys_addr_t phys)
+{
+       if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
+               kmemleak_not_leak(__va(phys));
+}
+EXPORT_SYMBOL(kmemleak_not_leak_phys);
+
+/**
+ * kmemleak_ignore_phys - similar to kmemleak_ignore but taking a physical
+ *                       address argument
+ */
+void __ref kmemleak_ignore_phys(phys_addr_t phys)
+{
+       if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
+               kmemleak_ignore(__va(phys));
+}
+EXPORT_SYMBOL(kmemleak_ignore_phys);
+
 /*
  * Update an object's checksum and return true if it was modified.
  */
index c8dfa430342be77ad35cea98dfad746c36aa26b3..7608bc305936177f03dcf92eda29c1eb818cdc2f 100644 (file)
@@ -723,7 +723,7 @@ int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
                     (unsigned long long)base + size - 1,
                     (void *)_RET_IP_);
 
-       kmemleak_free_part(__va(base), size);
+       kmemleak_free_part_phys(base, size);
        return memblock_remove_range(&memblock.reserved, base, size);
 }
 
@@ -1152,7 +1152,7 @@ static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
                 * The min_count is set to 0 so that memblock allocations are
                 * never reported as leaks.
                 */
-               kmemleak_alloc(__va(found), size, 0, 0);
+               kmemleak_alloc_phys(found, size, 0, 0);
                return found;
        }
        return 0;
@@ -1399,7 +1399,7 @@ void __init __memblock_free_early(phys_addr_t base, phys_addr_t size)
        memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
                     __func__, (u64)base, (u64)base + size - 1,
                     (void *)_RET_IP_);
-       kmemleak_free_part(__va(base), size);
+       kmemleak_free_part_phys(base, size);
        memblock_remove_range(&memblock.reserved, base, size);
 }
 
@@ -1419,7 +1419,7 @@ void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
        memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
                     __func__, (u64)base, (u64)base + size - 1,
                     (void *)_RET_IP_);
-       kmemleak_free_part(__va(base), size);
+       kmemleak_free_part_phys(base, size);
        cursor = PFN_UP(base);
        end = PFN_DOWN(base + size);
 
index 490d46abddad2732346465befa7a3349bccc9e75..b001384dfbdd952c522ae6da1619af28697c0a88 100644 (file)
@@ -84,7 +84,7 @@ void __init free_bootmem_late(unsigned long addr, unsigned long size)
 {
        unsigned long cursor, end;
 
-       kmemleak_free_part(__va(addr), size);
+       kmemleak_free_part_phys(addr, size);
 
        cursor = PFN_UP(addr);
        end = PFN_DOWN(addr + size);
index b3775a9604eac9ff7d7e4b667003969408d3d491..a2ff3388e5ea396f478870b47e79faadb73cca02 100755 (executable)
@@ -263,7 +263,8 @@ exuberant()
        -I EXPORT_SYMBOL,EXPORT_SYMBOL_GPL,ACPI_EXPORT_SYMBOL   \
        -I DEFINE_TRACE,EXPORT_TRACEPOINT_SYMBOL,EXPORT_TRACEPOINT_SYMBOL_GPL \
        -I static,const                                         \
-       --extra=+f --c-kinds=+px --langmap=c:+.h "${regex[@]}"
+       --extra=+fq --c-kinds=+px --fields=+iaS --langmap=c:+.h \
+       "${regex[@]}"
 
        setup_regex exuberant kconfig
        all_kconfigs | xargs $1 -a                              \
index c8455b47388bcd901290e6a70ec178d4e48aa11a..7ab14ce65a73bff98d42d51f4479d280aee72a17 100644 (file)
@@ -338,7 +338,7 @@ static irqreturn_t sst_byt_irq_thread(int irq, void *context)
        spin_unlock_irqrestore(&sst->spinlock, flags);
 
        /* continue to send any remaining messages... */
-       queue_kthread_work(&ipc->kworker, &ipc->kwork);
+       kthread_queue_work(&ipc->kworker, &ipc->kwork);
 
        return IRQ_HANDLED;
 }
index a12c7bb08d3b88b891e30cf2d920b39929028c45..6c672ac79cce7b3b12e1a7cb65fdb8e8c4456f15 100644 (file)
@@ -111,7 +111,7 @@ static int ipc_tx_message(struct sst_generic_ipc *ipc, u64 header,
        list_add_tail(&msg->list, &ipc->tx_list);
        spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
 
-       queue_kthread_work(&ipc->kworker, &ipc->kwork);
+       kthread_queue_work(&ipc->kworker, &ipc->kwork);
 
        if (wait)
                return tx_wait_done(ipc, msg, rx_data);
@@ -281,7 +281,7 @@ int sst_ipc_init(struct sst_generic_ipc *ipc)
                return -ENOMEM;
 
        /* start the IPC message thread */
-       init_kthread_worker(&ipc->kworker);
+       kthread_init_worker(&ipc->kworker);
        ipc->tx_thread = kthread_run(kthread_worker_fn,
                                        &ipc->kworker, "%s",
                                        dev_name(ipc->dev));
@@ -292,7 +292,7 @@ int sst_ipc_init(struct sst_generic_ipc *ipc)
                return ret;
        }
 
-       init_kthread_work(&ipc->kwork, ipc_tx_msgs);
+       kthread_init_work(&ipc->kwork, ipc_tx_msgs);
        return 0;
 }
 EXPORT_SYMBOL_GPL(sst_ipc_init);
index 91565229d07422414418dc7495a3f655f321e7a1..e432a31fd9f2cbcd7a4f126bc32c048e2ba0a4ee 100644 (file)
@@ -818,7 +818,7 @@ static irqreturn_t hsw_irq_thread(int irq, void *context)
        spin_unlock_irqrestore(&sst->spinlock, flags);
 
        /* continue to send any remaining messages... */
-       queue_kthread_work(&ipc->kworker, &ipc->kwork);
+       kthread_queue_work(&ipc->kworker, &ipc->kwork);
 
        return IRQ_HANDLED;
 }
index 74dbecc3afaab7b41b9202c502b6c3be5cd4ccf9..8c5d7f5b8b8ac7bfdb882923f2ed381041ba4f52 100644 (file)
@@ -464,7 +464,7 @@ irqreturn_t skl_dsp_irq_thread_handler(int irq, void *context)
        skl_ipc_int_enable(dsp);
 
        /* continue to send any remaining messages... */
-       queue_kthread_work(&ipc->kworker, &ipc->kwork);
+       kthread_queue_work(&ipc->kworker, &ipc->kwork);
 
        return IRQ_HANDLED;
 }