author    | Dan Williams <dan.j.williams@intel.com> | 2008-03-13 17:45:28 -0700
committer | Dan Williams <dan.j.williams@intel.com> | 2008-03-13 10:57:10 -0700
commit    | 3280ab3e8815d60cea483d49b21261972e2785d6 (patch)
tree      | 6f74b532ce482fc8bcdb0fdbca3a823053b6cc37 /crypto
parent    | 3d9b525b69bc3302d8355e5f5cf081a856c211e0 (diff)
download  | kernel_samsung_crespo-3280ab3e8815d60cea483d49b21261972e2785d6.zip
          | kernel_samsung_crespo-3280ab3e8815d60cea483d49b21261972e2785d6.tar.gz
          | kernel_samsung_crespo-3280ab3e8815d60cea483d49b21261972e2785d6.tar.bz2
async_tx: checkpatch says s/__FUNCTION__/__func__/g
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
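
For context: `__FUNCTION__` is a GCC-specific spelling that predates C99, while `__func__` is the standard predefined identifier, so checkpatch.pl flags the former; the commit is a pure rename with no functional change. Below is a minimal standalone sketch (ordinary userspace C, not kernel code; the `demo` function and the printed length are invented for illustration) showing that `__func__` expands to the enclosing function's name, exactly as the kernel's `pr_debug("%s: ...", __func__)` calls rely on.

```c
#include <stdio.h>

/* Standalone illustration only: __func__ is the C99 predefined
 * identifier for the enclosing function's name. The compiler treats
 * it as if `static const char __func__[] = "demo";` were declared at
 * the top of the function, which is why it slots into printf-style
 * format strings the same way __FUNCTION__ did.
 */
static void demo(void)
{
	/* Mirrors the kernel's pr_debug("%s: (sync) len: %zu\n", __func__, len) pattern. */
	printf("%s: (sync) len: %zu\n", __func__, (size_t)4096);
}

int main(void)
{
	demo();	/* prints: demo: (sync) len: 4096 */
	return 0;
}
```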
Diffstat (limited to 'crypto')
-rw-r--r-- | crypto/async_tx/async_memcpy.c |  6
-rw-r--r-- | crypto/async_tx/async_memset.c |  6
-rw-r--r-- | crypto/async_tx/async_tx.c     |  6
-rw-r--r-- | crypto/async_tx/async_xor.c    | 12
4 files changed, 15 insertions, 15 deletions
diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
index 0f62822..84caa4e 100644
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -66,11 +66,11 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
 	}
 
 	if (tx) {
-		pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
+		pr_debug("%s: (async) len: %zu\n", __func__, len);
 		async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
 	} else {
 		void *dest_buf, *src_buf;
-		pr_debug("%s: (sync) len: %zu\n", __FUNCTION__, len);
+		pr_debug("%s: (sync) len: %zu\n", __func__, len);
 
 		/* wait for any prerequisite operations */
 		if (depend_tx) {
@@ -80,7 +80,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
 			BUG_ON(depend_tx->ack);
 			if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
 				panic("%s: DMA_ERROR waiting for depend_tx\n",
-					__FUNCTION__);
+					__func__);
 		}
 
 		dest_buf = kmap_atomic(dest, KM_USER0) + dest_offset;
diff --git a/crypto/async_tx/async_memset.c b/crypto/async_tx/async_memset.c
index 09c0e83..f5ff3906 100644
--- a/crypto/async_tx/async_memset.c
+++ b/crypto/async_tx/async_memset.c
@@ -63,11 +63,11 @@ async_memset(struct page *dest, int val, unsigned int offset,
 	}
 
 	if (tx) {
-		pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
+		pr_debug("%s: (async) len: %zu\n", __func__, len);
 		async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
 	} else { /* run the memset synchronously */
 		void *dest_buf;
-		pr_debug("%s: (sync) len: %zu\n", __FUNCTION__, len);
+		pr_debug("%s: (sync) len: %zu\n", __func__, len);
 
 		dest_buf = (void *) (((char *) page_address(dest)) + offset);
 
@@ -79,7 +79,7 @@ async_memset(struct page *dest, int val, unsigned int offset,
 			BUG_ON(depend_tx->ack);
 			if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
 				panic("%s: DMA_ERROR waiting for depend_tx\n",
-					__FUNCTION__);
+					__func__);
 		}
 
 		memset(dest_buf, val, len);
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index 56288218..2be3bae 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -472,11 +472,11 @@ async_trigger_callback(enum async_tx_flags flags,
 		tx = NULL;
 
 	if (tx) {
-		pr_debug("%s: (async)\n", __FUNCTION__);
+		pr_debug("%s: (async)\n", __func__);
 
 		async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
 	} else {
-		pr_debug("%s: (sync)\n", __FUNCTION__);
+		pr_debug("%s: (sync)\n", __func__);
 
 		/* wait for any prerequisite operations */
 		if (depend_tx) {
@@ -486,7 +486,7 @@ async_trigger_callback(enum async_tx_flags flags,
 			BUG_ON(depend_tx->ack);
 			if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
 				panic("%s: DMA_ERROR waiting for depend_tx\n",
-					__FUNCTION__);
+					__func__);
 		}
 
 		async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param);
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index 2259a4f..7a9db35 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -47,7 +47,7 @@ do_async_xor(struct dma_device *device,
 	int i;
 	unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
 
-	pr_debug("%s: len: %zu\n", __FUNCTION__, len);
+	pr_debug("%s: len: %zu\n", __func__, len);
 
 	dma_dest = dma_map_page(device->dev, dest, offset, len,
 				DMA_FROM_DEVICE);
@@ -86,7 +86,7 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
 	void *_dest;
 	int i;
 
-	pr_debug("%s: len: %zu\n", __FUNCTION__, len);
+	pr_debug("%s: len: %zu\n", __func__, len);
 
 	/* reuse the 'src_list' array to convert to buffer pointers */
 	for (i = 0; i < src_cnt; i++)
@@ -196,7 +196,7 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
 				DMA_ERROR)
 				panic("%s: DMA_ERROR waiting for "
 					"depend_tx\n",
-					__FUNCTION__);
+					__func__);
 		}
 
 		do_sync_xor(dest, &src_list[src_off], offset,
@@ -276,7 +276,7 @@ async_xor_zero_sum(struct page *dest, struct page **src_list,
 	unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
 	int i;
 
-	pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
+	pr_debug("%s: (async) len: %zu\n", __func__, len);
 
 	for (i = 0; i < src_cnt; i++)
 		dma_src[i] = dma_map_page(device->dev, src_list[i],
@@ -299,7 +299,7 @@ async_xor_zero_sum(struct page *dest, struct page **src_list,
 	} else {
 		unsigned long xor_flags = flags;
 
-		pr_debug("%s: (sync) len: %zu\n", __FUNCTION__, len);
+		pr_debug("%s: (sync) len: %zu\n", __func__, len);
 
 		xor_flags |= ASYNC_TX_XOR_DROP_DST;
 		xor_flags &= ~ASYNC_TX_ACK;
@@ -310,7 +310,7 @@ async_xor_zero_sum(struct page *dest, struct page **src_list,
 		if (tx) {
 			if (dma_wait_for_async_tx(tx) == DMA_ERROR)
 				panic("%s: DMA_ERROR waiting for tx\n",
-					__FUNCTION__);
+					__func__);
 			async_tx_ack(tx);
 		}
 