commit cdd30ebb1b

Clean up the existing export namespace code along the same lines of
commit 33def8498f ("treewide: Convert macro and uses of __section(foo)
to __section("foo")") and for the same reason: it is not desired for the
namespace argument to be a macro expansion itself.

Scripted using
  git grep -l -e MODULE_IMPORT_NS -e EXPORT_SYMBOL_NS | while read file;
  do
    awk -i inplace '
      /^#define EXPORT_SYMBOL_NS/ {
        gsub(/__stringify\(ns\)/, "ns");
        print;
        next;
      }
      /^#define MODULE_IMPORT_NS/ {
        gsub(/__stringify\(ns\)/, "ns");
        print;
        next;
      }
      /MODULE_IMPORT_NS/ {
        $0 = gensub(/MODULE_IMPORT_NS\(([^)]*)\)/, "MODULE_IMPORT_NS(\"\\1\")", "g");
      }
      /EXPORT_SYMBOL_NS/ {
        if ($0 ~ /(EXPORT_SYMBOL_NS[^(]*)\(([^,]+),/) {
          if ($0 !~ /(EXPORT_SYMBOL_NS[^(]*)\(([^,]+), ([^)]+)\)/ &&
              $0 !~ /(EXPORT_SYMBOL_NS[^(]*)\(\)/ &&
              $0 !~ /^my/) {
            getline line;
            gsub(/[[:space:]]*\\$/, "");
            gsub(/[[:space:]]/, "", line);
            $0 = $0 " " line;
          }
          $0 = gensub(/(EXPORT_SYMBOL_NS[^(]*)\(([^,]+), ([^)]+)\)/,
                      "\\1(\\2, \"\\3\")", "g");
        }
      }
      { print }' $file;
  done
Requested-by: Masahiro Yamada <masahiroy@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://mail.google.com/mail/u/2/#inbox/FMfcgzQXKWgMmjdFwwdsfgxzKpVHWPlc
Acked-by: Greg KH <gregkh@linuxfoundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
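As a concrete illustration of the rewrite (a sketch, not part of the commit message; it mirrors the exports in the file below), the namespace argument goes from a bare token that the macros used to pass through __stringify() to an ordinary string literal:

  /* Before: namespace given as a bare token, stringified inside the macro. */
  EXPORT_SYMBOL_NS_GPL(crypto_cipher_setkey, CRYPTO_INTERNAL);
  MODULE_IMPORT_NS(CRYPTO_INTERNAL);

  /* After: namespace given as a plain string literal. */
  EXPORT_SYMBOL_NS_GPL(crypto_cipher_setkey, "CRYPTO_INTERNAL");
  MODULE_IMPORT_NS("CRYPTO_INTERNAL");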
119 lines · 3.2 KiB · C
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Cryptographic API.
 *
 * Single-block cipher operations.
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <crypto/algapi.h>
#include <crypto/internal/cipher.h>
#include <linux/kernel.h>
#include <linux/crypto.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "internal.h"

static int setkey_unaligned(struct crypto_cipher *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct cipher_alg *cia = crypto_cipher_alg(tfm);
	unsigned long alignmask = crypto_cipher_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cia->cia_setkey(crypto_cipher_tfm(tfm), alignbuffer, keylen);
	kfree_sensitive(buffer);
	return ret;

}

int crypto_cipher_setkey(struct crypto_cipher *tfm,
			 const u8 *key, unsigned int keylen)
{
	struct cipher_alg *cia = crypto_cipher_alg(tfm);
	unsigned long alignmask = crypto_cipher_alignmask(tfm);

	if (keylen < cia->cia_min_keysize || keylen > cia->cia_max_keysize)
		return -EINVAL;

	if ((unsigned long)key & alignmask)
		return setkey_unaligned(tfm, key, keylen);

	return cia->cia_setkey(crypto_cipher_tfm(tfm), key, keylen);
}
EXPORT_SYMBOL_NS_GPL(crypto_cipher_setkey, "CRYPTO_INTERNAL");

static inline void cipher_crypt_one(struct crypto_cipher *tfm,
				    u8 *dst, const u8 *src, bool enc)
{
	unsigned long alignmask = crypto_cipher_alignmask(tfm);
	struct cipher_alg *cia = crypto_cipher_alg(tfm);
	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
		enc ? cia->cia_encrypt : cia->cia_decrypt;

	if (unlikely(((unsigned long)dst | (unsigned long)src) & alignmask)) {
		unsigned int bs = crypto_cipher_blocksize(tfm);
		u8 buffer[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK];
		u8 *tmp = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);

		memcpy(tmp, src, bs);
		fn(crypto_cipher_tfm(tfm), tmp, tmp);
		memcpy(dst, tmp, bs);
	} else {
		fn(crypto_cipher_tfm(tfm), dst, src);
	}
}

void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
			       u8 *dst, const u8 *src)
{
	cipher_crypt_one(tfm, dst, src, true);
}
EXPORT_SYMBOL_NS_GPL(crypto_cipher_encrypt_one, "CRYPTO_INTERNAL");

void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
			       u8 *dst, const u8 *src)
{
	cipher_crypt_one(tfm, dst, src, false);
}
EXPORT_SYMBOL_NS_GPL(crypto_cipher_decrypt_one, "CRYPTO_INTERNAL");

struct crypto_cipher *crypto_clone_cipher(struct crypto_cipher *cipher)
{
	struct crypto_tfm *tfm = crypto_cipher_tfm(cipher);
	struct crypto_alg *alg = tfm->__crt_alg;
	struct crypto_cipher *ncipher;
	struct crypto_tfm *ntfm;

	if (alg->cra_init)
		return ERR_PTR(-ENOSYS);

	if (unlikely(!crypto_mod_get(alg)))
		return ERR_PTR(-ESTALE);

	ntfm = __crypto_alloc_tfmgfp(alg, CRYPTO_ALG_TYPE_CIPHER,
				     CRYPTO_ALG_TYPE_MASK, GFP_ATOMIC);
	if (IS_ERR(ntfm)) {
		crypto_mod_put(alg);
		return ERR_CAST(ntfm);
	}

	ntfm->crt_flags = tfm->crt_flags;

	ncipher = __crypto_cipher_cast(ntfm);

	return ncipher;
}
EXPORT_SYMBOL_GPL(crypto_clone_cipher);
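For context, a minimal consumer sketch (a hypothetical module, not part of this file): any module that calls the CRYPTO_INTERNAL exports above must import the namespace, and after this commit that import also takes a string literal. The module name, init function, algorithm name, and key below are made-up placeholders.

/* Hypothetical consumer module; names, algorithm, and key are illustrative only. */
#include <crypto/internal/cipher.h>
#include <linux/err.h>
#include <linux/module.h>

static int __init cipher_ns_demo_init(void)
{
	static const u8 key[16];	/* dummy all-zero AES-128 key */
	u8 in[16] = {}, out[16];
	struct crypto_cipher *tfm;
	int err;

	tfm = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* Both calls below resolve to symbols exported in the CRYPTO_INTERNAL namespace. */
	err = crypto_cipher_setkey(tfm, key, sizeof(key));
	if (!err)
		crypto_cipher_encrypt_one(tfm, out, in);

	crypto_free_cipher(tfm);
	return err;
}
module_init(cipher_ns_demo_init);

/*
 * Without this import, modpost complains that the module uses
 * CRYPTO_INTERNAL symbols without importing the namespace.
 */
MODULE_IMPORT_NS("CRYPTO_INTERNAL");
MODULE_LICENSE("GPL");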