Diffstat (limited to 'drivers/s390')
69 files changed, 753 insertions, 1462 deletions
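Two conversions account for much of the churn in the diff below. On the block side, the dcssblk, scm_blk and xpram drivers move from the old alloc_disk() + blk_alloc_queue() / blk_mq_init_queue() pairing to the combined blk_alloc_disk() / blk_mq_alloc_disk() allocators, and tear down with blk_cleanup_disk() instead of blk_cleanup_queue() + put_disk(). The sketch below shows the new-style setup and teardown for a simple bio-based driver; it is an illustration only, with made-up mydrv_* names rather than code from the patch.

#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/module.h>

/* All mydrv_* names are hypothetical, for illustration only. */
struct mydrv_dev {
	struct gendisk *disk;
};

static int mydrv_major;

static const struct block_device_operations mydrv_fops = {
	.owner = THIS_MODULE,
	/* a real bio-based driver would also set .submit_bio here */
};

static int mydrv_setup(struct mydrv_dev *dev, sector_t capacity)
{
	struct gendisk *disk;

	/* One call now allocates the gendisk together with its queue. */
	disk = blk_alloc_disk(NUMA_NO_NODE);
	if (!disk)
		return -ENOMEM;

	disk->major = mydrv_major;
	disk->first_minor = 0;
	disk->minors = 1;		/* minors must be set explicitly now */
	disk->fops = &mydrv_fops;
	disk->private_data = dev;
	blk_queue_logical_block_size(disk->queue, 4096);
	blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
	sprintf(disk->disk_name, "mydrv0");
	set_capacity(disk, capacity);
	dev->disk = disk;
	add_disk(disk);
	return 0;
}

static void mydrv_teardown(struct mydrv_dev *dev)
{
	del_gendisk(dev->disk);
	/* Replaces the former blk_cleanup_queue() + put_disk() pair. */
	blk_cleanup_disk(dev->disk);
}

For the request-based scm_blk driver the equivalent in the diff is blk_mq_alloc_disk(&bdev->tag_set, scmdev), which likewise hands back a gendisk with its queue already attached.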
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index c8df75e99f4c..e34c6cc61983 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -621,7 +621,6 @@ void dasd_set_target_state(struct dasd_device *device, int target) mutex_unlock(&device->state_mutex); dasd_put_device(device); } -EXPORT_SYMBOL(dasd_set_target_state); /* * Enable devices with device numbers in [from..to]. diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c index 1b9e1442e6a5..6bb775236c16 100644 --- a/drivers/s390/block/dasd_diag.c +++ b/drivers/s390/block/dasd_diag.c @@ -69,25 +69,24 @@ static const u8 DASD_DIAG_CMS1[] = { 0xc3, 0xd4, 0xe2, 0xf1 };/* EBCDIC CMS1 */ * resulting condition code and DIAG return code. */ static inline int __dia250(void *iob, int cmd) { - register unsigned long reg2 asm ("2") = (unsigned long) iob; + union register_pair rx = { .even = (unsigned long)iob, }; typedef union { struct dasd_diag_init_io init_io; struct dasd_diag_rw_io rw_io; } addr_type; - int rc; + int cc; - rc = 3; + cc = 3; asm volatile( - " diag 2,%2,0x250\n" - "0: ipm %0\n" - " srl %0,28\n" - " or %0,3\n" + " diag %[rx],%[cmd],0x250\n" + "0: ipm %[cc]\n" + " srl %[cc],28\n" "1:\n" EX_TABLE(0b,1b) - : "+d" (rc), "=m" (*(addr_type *) iob) - : "d" (cmd), "d" (reg2), "m" (*(addr_type *) iob) - : "3", "cc"); - return rc; + : [cc] "+&d" (cc), [rx] "+&d" (rx.pair), "+m" (*(addr_type *)iob) + : [cmd] "d" (cmd) + : "cc"); + return cc | rx.odd; } static inline int dia250(void *iob, int cmd) @@ -642,12 +641,18 @@ static void dasd_diag_setup_blk_queue(struct dasd_block *block) blk_queue_segment_boundary(q, PAGE_SIZE - 1); } +static int dasd_diag_pe_handler(struct dasd_device *device, + __u8 tbvpm, __u8 fcsecpm) +{ + return dasd_generic_verify_path(device, tbvpm); +} + static struct dasd_discipline dasd_diag_discipline = { .owner = THIS_MODULE, .name = "DIAG", .ebcname = "DIAG", .check_device = dasd_diag_check_device, - .verify_path = dasd_generic_verify_path, + .pe_handler = dasd_diag_pe_handler, .fill_geometry = dasd_diag_fill_geometry, .setup_blk_queue = dasd_diag_setup_blk_queue, .start_IO = dasd_start_diag, diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index a6ac505cbdd7..0de1a463c509 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -746,7 +746,7 @@ static void create_uid(struct dasd_eckd_private *private) memcpy(uid->vendor, private->ned->HDA_manufacturer, sizeof(uid->vendor) - 1); EBCASC(uid->vendor, sizeof(uid->vendor) - 1); - memcpy(uid->serial, private->ned->HDA_location, + memcpy(uid->serial, &private->ned->serial, sizeof(uid->serial) - 1); EBCASC(uid->serial, sizeof(uid->serial) - 1); uid->ssid = private->gneq->subsystemID; diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h index 73651211789f..65e4630ad2ae 100644 --- a/drivers/s390/block/dasd_eckd.h +++ b/drivers/s390/block/dasd_eckd.h @@ -332,8 +332,10 @@ struct dasd_ned { __u8 dev_type[6]; __u8 dev_model[3]; __u8 HDA_manufacturer[3]; - __u8 HDA_location[2]; - __u8 HDA_seqno[12]; + struct { + __u8 HDA_location[2]; + __u8 HDA_seqno[12]; + } serial; __u8 ID; __u8 unit_addr; } __attribute__ ((packed)); diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c index 4789410885e4..3ad319aee51e 100644 --- a/drivers/s390/block/dasd_fba.c +++ b/drivers/s390/block/dasd_fba.c @@ -794,13 +794,19 @@ static void dasd_fba_setup_blk_queue(struct dasd_block *block) blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); } +static int 
dasd_fba_pe_handler(struct dasd_device *device, + __u8 tbvpm, __u8 fcsecpm) +{ + return dasd_generic_verify_path(device, tbvpm); +} + static struct dasd_discipline dasd_fba_discipline = { .owner = THIS_MODULE, .name = "FBA ", .ebcname = "FBA ", .check_device = dasd_fba_check_characteristics, .do_analysis = dasd_fba_do_analysis, - .verify_path = dasd_generic_verify_path, + .pe_handler = dasd_fba_pe_handler, .setup_blk_queue = dasd_fba_setup_blk_queue, .fill_geometry = dasd_fba_fill_geometry, .start_IO = dasd_start_IO, diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c index 8d6587ec73e2..493e8469893c 100644 --- a/drivers/s390/block/dasd_genhd.c +++ b/drivers/s390/block/dasd_genhd.c @@ -109,9 +109,9 @@ int dasd_scan_partitions(struct dasd_block *block) return -ENODEV; } - mutex_lock(&bdev->bd_mutex); - rc = bdev_disk_changed(bdev, false); - mutex_unlock(&bdev->bd_mutex); + mutex_lock(&block->gdp->open_mutex); + rc = bdev_disk_changed(block->gdp, false); + mutex_unlock(&block->gdp->open_mutex); if (rc) DBF_DEV_EVENT(DBF_ERR, block->base, "scan partitions error, rc %d", rc); @@ -145,9 +145,9 @@ void dasd_destroy_partitions(struct dasd_block *block) bdev = block->bdev; block->bdev = NULL; - mutex_lock(&bdev->bd_mutex); - bdev_disk_changed(bdev, true); - mutex_unlock(&bdev->bd_mutex); + mutex_lock(&bdev->bd_disk->open_mutex); + bdev_disk_changed(bdev->bd_disk, true); + mutex_unlock(&bdev->bd_disk->open_mutex); /* Matching blkdev_put to the blkdev_get in dasd_scan_partitions. */ blkdev_put(bdev, FMODE_READ); diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h index 1c59b0e86a9f..155428bfed8a 100644 --- a/drivers/s390/block/dasd_int.h +++ b/drivers/s390/block/dasd_int.h @@ -297,7 +297,6 @@ struct dasd_discipline { * e.g. verify that new path is compatible with the current * configuration. 
*/ - int (*verify_path)(struct dasd_device *, __u8); int (*pe_handler)(struct dasd_device *, __u8, __u8); /* diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index da33cb4cba28..29180bdf0977 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c @@ -17,7 +17,6 @@ #include <linux/blkdev.h> #include <linux/completion.h> #include <linux/interrupt.h> -#include <linux/platform_device.h> #include <linux/pfn_t.h> #include <linux/uio.h> #include <linux/dax.h> @@ -90,7 +89,6 @@ struct dcssblk_dev_info { int segment_type; unsigned char save_pending; unsigned char is_shared; - struct request_queue *dcssblk_queue; int num_of_segments; struct list_head seg_list; struct dax_device *dax_dev; @@ -429,9 +427,7 @@ removeseg: kill_dax(dev_info->dax_dev); put_dax(dev_info->dax_dev); del_gendisk(dev_info->gd); - blk_cleanup_queue(dev_info->dcssblk_queue); - dev_info->gd->queue = NULL; - put_disk(dev_info->gd); + blk_cleanup_disk(dev_info->gd); up_write(&dcssblk_devices_sem); if (device_remove_file_self(dev, attr)) { @@ -644,18 +640,17 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char dev_info->dev.release = dcssblk_release_segment; dev_info->dev.groups = dcssblk_dev_attr_groups; INIT_LIST_HEAD(&dev_info->lh); - dev_info->gd = alloc_disk(DCSSBLK_MINORS_PER_DISK); + dev_info->gd = blk_alloc_disk(NUMA_NO_NODE); if (dev_info->gd == NULL) { rc = -ENOMEM; goto seg_list_del; } dev_info->gd->major = dcssblk_major; + dev_info->gd->minors = DCSSBLK_MINORS_PER_DISK; dev_info->gd->fops = &dcssblk_devops; - dev_info->dcssblk_queue = blk_alloc_queue(NUMA_NO_NODE); - dev_info->gd->queue = dev_info->dcssblk_queue; dev_info->gd->private_data = dev_info; - blk_queue_logical_block_size(dev_info->dcssblk_queue, 4096); - blk_queue_flag_set(QUEUE_FLAG_DAX, dev_info->dcssblk_queue); + blk_queue_logical_block_size(dev_info->gd->queue, 4096); + blk_queue_flag_set(QUEUE_FLAG_DAX, dev_info->gd->queue); seg_byte_size = (dev_info->end - dev_info->start + 1); set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors @@ -719,9 +714,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char put_dev: list_del(&dev_info->lh); - blk_cleanup_queue(dev_info->dcssblk_queue); - dev_info->gd->queue = NULL; - put_disk(dev_info->gd); + blk_cleanup_disk(dev_info->gd); list_for_each_entry(seg_info, &dev_info->seg_list, lh) { segment_unload(seg_info->segment_name); } @@ -731,9 +724,7 @@ put_dev: dev_list_del: list_del(&dev_info->lh); release_gd: - blk_cleanup_queue(dev_info->dcssblk_queue); - dev_info->gd->queue = NULL; - put_disk(dev_info->gd); + blk_cleanup_disk(dev_info->gd); up_write(&dcssblk_devices_sem); seg_list_del: if (dev_info == NULL) @@ -801,9 +792,7 @@ dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const ch kill_dax(dev_info->dax_dev); put_dax(dev_info->dax_dev); del_gendisk(dev_info->gd); - blk_cleanup_queue(dev_info->dcssblk_queue); - dev_info->gd->queue = NULL; - put_disk(dev_info->gd); + blk_cleanup_disk(dev_info->gd); /* unload all related segments */ list_for_each_entry(entry, &dev_info->seg_list, lh) @@ -994,94 +983,11 @@ dcssblk_check_params(void) } /* - * Suspend / Resume - */ -static int dcssblk_freeze(struct device *dev) -{ - struct dcssblk_dev_info *dev_info; - int rc = 0; - - list_for_each_entry(dev_info, &dcssblk_devices, lh) { - switch (dev_info->segment_type) { - case SEG_TYPE_SR: - case SEG_TYPE_ER: - case SEG_TYPE_SC: - if (!dev_info->is_shared) - rc = -EINVAL; - break; - default: - rc = 
-EINVAL; - break; - } - if (rc) - break; - } - if (rc) - pr_err("Suspending the system failed because DCSS device %s " - "is writable\n", - dev_info->segment_name); - return rc; -} - -static int dcssblk_restore(struct device *dev) -{ - struct dcssblk_dev_info *dev_info; - struct segment_info *entry; - unsigned long start, end; - int rc = 0; - - list_for_each_entry(dev_info, &dcssblk_devices, lh) { - list_for_each_entry(entry, &dev_info->seg_list, lh) { - segment_unload(entry->segment_name); - rc = segment_load(entry->segment_name, SEGMENT_SHARED, - &start, &end); - if (rc < 0) { -// TODO in_use check ? - segment_warning(rc, entry->segment_name); - goto out_panic; - } - if (start != entry->start || end != entry->end) { - pr_err("The address range of DCSS %s changed " - "while the system was suspended\n", - entry->segment_name); - goto out_panic; - } - } - } - return 0; -out_panic: - panic("fatal dcssblk resume error\n"); -} - -static int dcssblk_thaw(struct device *dev) -{ - return 0; -} - -static const struct dev_pm_ops dcssblk_pm_ops = { - .freeze = dcssblk_freeze, - .thaw = dcssblk_thaw, - .restore = dcssblk_restore, -}; - -static struct platform_driver dcssblk_pdrv = { - .driver = { - .name = "dcssblk", - .pm = &dcssblk_pm_ops, - }, -}; - -static struct platform_device *dcssblk_pdev; - - -/* * The init/exit functions. */ static void __exit dcssblk_exit(void) { - platform_device_unregister(dcssblk_pdev); - platform_driver_unregister(&dcssblk_pdrv); root_device_unregister(dcssblk_root_dev); unregister_blkdev(dcssblk_major, DCSSBLK_NAME); } @@ -1091,22 +997,9 @@ dcssblk_init(void) { int rc; - rc = platform_driver_register(&dcssblk_pdrv); - if (rc) - return rc; - - dcssblk_pdev = platform_device_register_simple("dcssblk", -1, NULL, - 0); - if (IS_ERR(dcssblk_pdev)) { - rc = PTR_ERR(dcssblk_pdev); - goto out_pdrv; - } - dcssblk_root_dev = root_device_register("dcssblk"); - if (IS_ERR(dcssblk_root_dev)) { - rc = PTR_ERR(dcssblk_root_dev); - goto out_pdev; - } + if (IS_ERR(dcssblk_root_dev)) + return PTR_ERR(dcssblk_root_dev); rc = device_create_file(dcssblk_root_dev, &dev_attr_add); if (rc) goto out_root; @@ -1124,10 +1017,7 @@ dcssblk_init(void) out_root: root_device_unregister(dcssblk_root_dev); -out_pdev: - platform_device_unregister(dcssblk_pdev); -out_pdrv: - platform_driver_unregister(&dcssblk_pdrv); + return rc; } diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c index a4f6f2e62b1d..88cba6212ee2 100644 --- a/drivers/s390/block/scm_blk.c +++ b/drivers/s390/block/scm_blk.c @@ -462,12 +462,12 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev) if (ret) goto out; - rq = blk_mq_init_queue(&bdev->tag_set); - if (IS_ERR(rq)) { - ret = PTR_ERR(rq); + bdev->gendisk = blk_mq_alloc_disk(&bdev->tag_set, scmdev); + if (IS_ERR(bdev->gendisk)) { + ret = PTR_ERR(bdev->gendisk); goto out_tag; } - bdev->rq = rq; + rq = bdev->rq = bdev->gendisk->queue; nr_max_blk = min(scmdev->nr_max_block, (unsigned int) (PAGE_SIZE / sizeof(struct aidaw))); @@ -477,17 +477,11 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev) blk_queue_flag_set(QUEUE_FLAG_NONROT, rq); blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, rq); - bdev->gendisk = alloc_disk(SCM_NR_PARTS); - if (!bdev->gendisk) { - ret = -ENOMEM; - goto out_queue; - } - rq->queuedata = scmdev; bdev->gendisk->private_data = scmdev; bdev->gendisk->fops = &scm_blk_devops; - bdev->gendisk->queue = rq; bdev->gendisk->major = scm_major; bdev->gendisk->first_minor = devindex * SCM_NR_PARTS; + 
bdev->gendisk->minors = SCM_NR_PARTS; len = snprintf(bdev->gendisk->disk_name, DISK_NAME_LEN, "scm"); if (devindex > 25) { @@ -504,8 +498,6 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev) device_add_disk(&scmdev->dev, bdev->gendisk, NULL); return 0; -out_queue: - blk_cleanup_queue(rq); out_tag: blk_mq_free_tag_set(&bdev->tag_set); out: @@ -516,9 +508,8 @@ out: void scm_blk_dev_cleanup(struct scm_blk_dev *bdev) { del_gendisk(bdev->gendisk); - blk_cleanup_queue(bdev->gendisk->queue); + blk_cleanup_disk(bdev->gendisk); blk_mq_free_tag_set(&bdev->tag_set); - put_disk(bdev->gendisk); } void scm_blk_set_available(struct scm_blk_dev *bdev) diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c index d1ed39162943..ce98fab4d43c 100644 --- a/drivers/s390/block/xpram.c +++ b/drivers/s390/block/xpram.c @@ -39,8 +39,6 @@ #include <linux/hdreg.h> /* HDIO_GETGEO */ #include <linux/device.h> #include <linux/bio.h> -#include <linux/suspend.h> -#include <linux/platform_device.h> #include <linux/gfp.h> #include <linux/uaccess.h> @@ -56,7 +54,6 @@ typedef struct { static xpram_device_t xpram_devices[XPRAM_MAX_DEVS]; static unsigned int xpram_sizes[XPRAM_MAX_DEVS]; static struct gendisk *xpram_disks[XPRAM_MAX_DEVS]; -static struct request_queue *xpram_queues[XPRAM_MAX_DEVS]; static unsigned int xpram_pages; static int xpram_devs; @@ -141,7 +138,7 @@ static long xpram_page_out (unsigned long page_addr, unsigned int xpage_index) /* * Check if xpram is available. */ -static int xpram_present(void) +static int __init xpram_present(void) { unsigned long mem_page; int rc; @@ -157,7 +154,7 @@ static int xpram_present(void) /* * Return index of the last available xpram page. */ -static unsigned long xpram_highest_page_index(void) +static unsigned long __init xpram_highest_page_index(void) { unsigned int page_index, add_bit; unsigned long mem_page; @@ -341,17 +338,13 @@ static int __init xpram_setup_blkdev(void) int i, rc = -ENOMEM; for (i = 0; i < xpram_devs; i++) { - xpram_disks[i] = alloc_disk(1); + xpram_disks[i] = blk_alloc_disk(NUMA_NO_NODE); if (!xpram_disks[i]) goto out; - xpram_queues[i] = blk_alloc_queue(NUMA_NO_NODE); - if (!xpram_queues[i]) { - put_disk(xpram_disks[i]); - goto out; - } - blk_queue_flag_set(QUEUE_FLAG_NONROT, xpram_queues[i]); - blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, xpram_queues[i]); - blk_queue_logical_block_size(xpram_queues[i], 4096); + blk_queue_flag_set(QUEUE_FLAG_NONROT, xpram_disks[i]->queue); + blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, + xpram_disks[i]->queue); + blk_queue_logical_block_size(xpram_disks[i]->queue, 4096); } /* @@ -373,9 +366,9 @@ static int __init xpram_setup_blkdev(void) offset += xpram_devices[i].size; disk->major = XPRAM_MAJOR; disk->first_minor = i; + disk->minors = 1; disk->fops = &xpram_devops; disk->private_data = &xpram_devices[i]; - disk->queue = xpram_queues[i]; sprintf(disk->disk_name, "slram%d", i); set_capacity(disk, xpram_sizes[i] << 1); add_disk(disk); @@ -383,50 +376,12 @@ static int __init xpram_setup_blkdev(void) return 0; out: - while (i--) { - blk_cleanup_queue(xpram_queues[i]); - put_disk(xpram_disks[i]); - } + while (i--) + blk_cleanup_disk(xpram_disks[i]); return rc; } /* - * Resume failed: Print error message and call panic. - */ -static void xpram_resume_error(const char *message) -{ - pr_err("Resuming the system failed: %s\n", message); - panic("xpram resume error\n"); -} - -/* - * Check if xpram setup changed between suspend and resume. 
- */ -static int xpram_restore(struct device *dev) -{ - if (!xpram_pages) - return 0; - if (xpram_present() != 0) - xpram_resume_error("xpram disappeared"); - if (xpram_pages != xpram_highest_page_index() + 1) - xpram_resume_error("Size of xpram changed"); - return 0; -} - -static const struct dev_pm_ops xpram_pm_ops = { - .restore = xpram_restore, -}; - -static struct platform_driver xpram_pdrv = { - .driver = { - .name = XPRAM_NAME, - .pm = &xpram_pm_ops, - }, -}; - -static struct platform_device *xpram_pdev; - -/* * Finally, the init/exit functions. */ static void __exit xpram_exit(void) @@ -434,12 +389,9 @@ static void __exit xpram_exit(void) int i; for (i = 0; i < xpram_devs; i++) { del_gendisk(xpram_disks[i]); - blk_cleanup_queue(xpram_queues[i]); - put_disk(xpram_disks[i]); + blk_cleanup_disk(xpram_disks[i]); } unregister_blkdev(XPRAM_MAJOR, XPRAM_NAME); - platform_device_unregister(xpram_pdev); - platform_driver_unregister(&xpram_pdrv); } static int __init xpram_init(void) @@ -457,24 +409,7 @@ static int __init xpram_init(void) rc = xpram_setup_sizes(xpram_pages); if (rc) return rc; - rc = platform_driver_register(&xpram_pdrv); - if (rc) - return rc; - xpram_pdev = platform_device_register_simple(XPRAM_NAME, -1, NULL, 0); - if (IS_ERR(xpram_pdev)) { - rc = PTR_ERR(xpram_pdev); - goto fail_platform_driver_unregister; - } - rc = xpram_setup_blkdev(); - if (rc) - goto fail_platform_device_unregister; - return 0; - -fail_platform_device_unregister: - platform_device_unregister(xpram_pdev); -fail_platform_driver_unregister: - platform_driver_unregister(&xpram_pdrv); - return rc; + return xpram_setup_blkdev(); } module_init(xpram_init); diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c index 1fd5bca9fa20..67c0009ca545 100644 --- a/drivers/s390/char/con3215.c +++ b/drivers/s390/char/con3215.c @@ -19,6 +19,7 @@ #include <linux/console.h> #include <linux/interrupt.h> #include <linux/err.h> +#include <linux/panic_notifier.h> #include <linux/reboot.h> #include <linux/serial.h> /* ASYNC_* flags */ #include <linux/slab.h> @@ -924,7 +925,7 @@ static void tty3215_close(struct tty_struct *tty, struct file * filp) /* * Returns the amount of free space in the output buffer. 
*/ -static int tty3215_write_room(struct tty_struct *tty) +static unsigned int tty3215_write_room(struct tty_struct *tty) { struct raw3215_info *raw = tty->driver_data; @@ -980,7 +981,7 @@ static void tty3215_flush_chars(struct tty_struct *tty) /* * Returns the number of characters in the output buffer */ -static int tty3215_chars_in_buffer(struct tty_struct *tty) +static unsigned int tty3215_chars_in_buffer(struct tty_struct *tty) { struct raw3215_info *raw = tty->driver_data; diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c index e21962c0fd94..87cdbace1453 100644 --- a/drivers/s390/char/con3270.c +++ b/drivers/s390/char/con3270.c @@ -13,6 +13,7 @@ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/list.h> +#include <linux/panic_notifier.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/err.h> diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c index 7bc616b253f1..9fa92e45e0ee 100644 --- a/drivers/s390/char/monreader.c +++ b/drivers/s390/char/monreader.c @@ -21,7 +21,6 @@ #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/poll.h> -#include <linux/device.h> #include <linux/slab.h> #include <net/iucv/iucv.h> #include <linux/uaccess.h> @@ -79,8 +78,6 @@ static u8 user_data_sever[16] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, }; -static struct device *monreader_device; - /****************************************************************************** * helper functions * *****************************************************************************/ @@ -319,7 +316,6 @@ static int mon_open(struct inode *inode, struct file *filp) goto out_path; } filp->private_data = monpriv; - dev_set_drvdata(monreader_device, monpriv); return nonseekable_open(inode, filp); out_path: @@ -354,7 +350,6 @@ static int mon_close(struct inode *inode, struct file *filp) atomic_set(&monpriv->msglim_count, 0); monpriv->write_index = 0; monpriv->read_index = 0; - dev_set_drvdata(monreader_device, NULL); for (i = 0; i < MON_MSGLIM; i++) kfree(monpriv->msg_array[i]); @@ -456,94 +451,6 @@ static struct miscdevice mon_dev = { .minor = MISC_DYNAMIC_MINOR, }; - -/****************************************************************************** - * suspend / resume * - *****************************************************************************/ -static int monreader_freeze(struct device *dev) -{ - struct mon_private *monpriv = dev_get_drvdata(dev); - int rc; - - if (!monpriv) - return 0; - if (monpriv->path) { - rc = iucv_path_sever(monpriv->path, user_data_sever); - if (rc) - pr_warn("Disconnecting the z/VM *MONITOR system service failed with rc=%i\n", - rc); - iucv_path_free(monpriv->path); - } - atomic_set(&monpriv->iucv_severed, 0); - atomic_set(&monpriv->iucv_connected, 0); - atomic_set(&monpriv->read_ready, 0); - atomic_set(&monpriv->msglim_count, 0); - monpriv->write_index = 0; - monpriv->read_index = 0; - monpriv->path = NULL; - return 0; -} - -static int monreader_thaw(struct device *dev) -{ - struct mon_private *monpriv = dev_get_drvdata(dev); - int rc; - - if (!monpriv) - return 0; - rc = -ENOMEM; - monpriv->path = iucv_path_alloc(MON_MSGLIM, IUCV_IPRMDATA, GFP_KERNEL); - if (!monpriv->path) - goto out; - rc = iucv_path_connect(monpriv->path, &monreader_iucv_handler, - MON_SERVICE, NULL, user_data_connect, monpriv); - if (rc) { - pr_err("Connecting to the z/VM *MONITOR system service " - "failed with rc=%i\n", rc); - goto out_path; - } - wait_event(mon_conn_wait_queue, - atomic_read(&monpriv->iucv_connected) 
|| - atomic_read(&monpriv->iucv_severed)); - if (atomic_read(&monpriv->iucv_severed)) - goto out_path; - return 0; -out_path: - rc = -EIO; - iucv_path_free(monpriv->path); - monpriv->path = NULL; -out: - atomic_set(&monpriv->iucv_severed, 1); - return rc; -} - -static int monreader_restore(struct device *dev) -{ - int rc; - - segment_unload(mon_dcss_name); - rc = segment_load(mon_dcss_name, SEGMENT_SHARED, - &mon_dcss_start, &mon_dcss_end); - if (rc < 0) { - segment_warning(rc, mon_dcss_name); - panic("fatal monreader resume error: no monitor dcss\n"); - } - return monreader_thaw(dev); -} - -static const struct dev_pm_ops monreader_pm_ops = { - .freeze = monreader_freeze, - .thaw = monreader_thaw, - .restore = monreader_restore, -}; - -static struct device_driver monreader_driver = { - .name = "monreader", - .bus = &iucv_bus, - .pm = &monreader_pm_ops, -}; - - /****************************************************************************** * module init/exit * *****************************************************************************/ @@ -567,36 +474,16 @@ static int __init mon_init(void) return rc; } - rc = driver_register(&monreader_driver); - if (rc) - goto out_iucv; - monreader_device = kzalloc(sizeof(struct device), GFP_KERNEL); - if (!monreader_device) { - rc = -ENOMEM; - goto out_driver; - } - - dev_set_name(monreader_device, "monreader-dev"); - monreader_device->bus = &iucv_bus; - monreader_device->parent = iucv_root; - monreader_device->driver = &monreader_driver; - monreader_device->release = (void (*)(struct device *))kfree; - rc = device_register(monreader_device); - if (rc) { - put_device(monreader_device); - goto out_driver; - } - rc = segment_type(mon_dcss_name); if (rc < 0) { segment_warning(rc, mon_dcss_name); - goto out_device; + goto out_iucv; } if (rc != SEG_TYPE_SC) { pr_err("The specified *MONITOR DCSS %s does not have the " "required type SC\n", mon_dcss_name); rc = -EINVAL; - goto out_device; + goto out_iucv; } rc = segment_load(mon_dcss_name, SEGMENT_SHARED, @@ -604,7 +491,7 @@ static int __init mon_init(void) if (rc < 0) { segment_warning(rc, mon_dcss_name); rc = -EINVAL; - goto out_device; + goto out_iucv; } dcss_mkname(mon_dcss_name, &user_data_connect[8]); @@ -619,10 +506,6 @@ static int __init mon_init(void) out: segment_unload(mon_dcss_name); -out_device: - device_unregister(monreader_device); -out_driver: - driver_unregister(&monreader_driver); out_iucv: iucv_unregister(&monreader_iucv_handler, 1); return rc; @@ -632,8 +515,6 @@ static void __exit mon_exit(void) { segment_unload(mon_dcss_name); misc_deregister(&mon_dev); - device_unregister(monreader_device); - driver_unregister(&monreader_driver); iucv_unregister(&monreader_iucv_handler, 1); return; } diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c index fdc0c0b7a6f5..9cd1ea92d619 100644 --- a/drivers/s390/char/monwriter.c +++ b/drivers/s390/char/monwriter.c @@ -20,7 +20,6 @@ #include <linux/ctype.h> #include <linux/poll.h> #include <linux/mutex.h> -#include <linux/platform_device.h> #include <linux/slab.h> #include <linux/uaccess.h> #include <asm/ebcdic.h> @@ -40,10 +39,7 @@ struct mon_buf { char *data; }; -static LIST_HEAD(mon_priv_list); - struct mon_private { - struct list_head priv_list; struct list_head list; struct monwrite_hdr hdr; size_t hdr_to_read; @@ -199,7 +195,6 @@ static int monwrite_open(struct inode *inode, struct file *filp) monpriv->hdr_to_read = sizeof(monpriv->hdr); mutex_init(&monpriv->thread_mutex); filp->private_data = monpriv; - 
list_add_tail(&monpriv->priv_list, &mon_priv_list); return nonseekable_open(inode, filp); } @@ -217,7 +212,6 @@ static int monwrite_close(struct inode *inode, struct file *filp) kfree(entry->data); kfree(entry); } - list_del(&monpriv->priv_list); kfree(monpriv); return 0; } @@ -294,105 +288,23 @@ static struct miscdevice mon_dev = { }; /* - * suspend/resume - */ - -static int monwriter_freeze(struct device *dev) -{ - struct mon_private *monpriv; - struct mon_buf *monbuf; - - list_for_each_entry(monpriv, &mon_priv_list, priv_list) { - list_for_each_entry(monbuf, &monpriv->list, list) { - if (monbuf->hdr.mon_function != MONWRITE_GEN_EVENT) - monwrite_diag(&monbuf->hdr, monbuf->data, - APPLDATA_STOP_REC); - } - } - return 0; -} - -static int monwriter_restore(struct device *dev) -{ - struct mon_private *monpriv; - struct mon_buf *monbuf; - - list_for_each_entry(monpriv, &mon_priv_list, priv_list) { - list_for_each_entry(monbuf, &monpriv->list, list) { - if (monbuf->hdr.mon_function == MONWRITE_START_INTERVAL) - monwrite_diag(&monbuf->hdr, monbuf->data, - APPLDATA_START_INTERVAL_REC); - if (monbuf->hdr.mon_function == MONWRITE_START_CONFIG) - monwrite_diag(&monbuf->hdr, monbuf->data, - APPLDATA_START_CONFIG_REC); - } - } - return 0; -} - -static int monwriter_thaw(struct device *dev) -{ - return monwriter_restore(dev); -} - -static const struct dev_pm_ops monwriter_pm_ops = { - .freeze = monwriter_freeze, - .thaw = monwriter_thaw, - .restore = monwriter_restore, -}; - -static struct platform_driver monwriter_pdrv = { - .driver = { - .name = "monwriter", - .pm = &monwriter_pm_ops, - }, -}; - -static struct platform_device *monwriter_pdev; - -/* * module init/exit */ static int __init mon_init(void) { - int rc; - if (!MACHINE_IS_VM) return -ENODEV; - - rc = platform_driver_register(&monwriter_pdrv); - if (rc) - return rc; - - monwriter_pdev = platform_device_register_simple("monwriter", -1, NULL, - 0); - if (IS_ERR(monwriter_pdev)) { - rc = PTR_ERR(monwriter_pdev); - goto out_driver; - } - /* * misc_register() has to be the last action in module_init(), because * file operations will be available right after this. 
*/ - rc = misc_register(&mon_dev); - if (rc) - goto out_device; - return 0; - -out_device: - platform_device_unregister(monwriter_pdev); -out_driver: - platform_driver_unregister(&monwriter_pdrv); - return rc; + return misc_register(&mon_dev); } static void __exit mon_exit(void) { misc_deregister(&mon_dev); - platform_device_unregister(monwriter_pdev); - platform_driver_unregister(&monwriter_pdrv); } module_init(mon_init); diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c index 986bbbc23d0a..792b4bfa6d9a 100644 --- a/drivers/s390/char/sclp.c +++ b/drivers/s390/char/sclp.c @@ -11,14 +11,13 @@ #include <linux/kernel_stat.h> #include <linux/module.h> #include <linux/err.h> +#include <linux/panic_notifier.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/timer.h> #include <linux/reboot.h> #include <linux/jiffies.h> #include <linux/init.h> -#include <linux/suspend.h> -#include <linux/completion.h> #include <linux/platform_device.h> #include <asm/types.h> #include <asm/irq.h> @@ -48,9 +47,6 @@ static struct sclp_req sclp_init_req; static void *sclp_read_sccb; static struct init_sccb *sclp_init_sccb; -/* Suspend request */ -static DECLARE_COMPLETION(sclp_request_queue_flushed); - /* Number of console pages to allocate, used by sclp_con.c and sclp_vt220.c */ int sclp_console_pages = SCLP_CONSOLE_PAGES; /* Flag to indicate if buffer pages are dropped on buffer full condition */ @@ -58,11 +54,6 @@ int sclp_console_drop = 1; /* Number of times the console dropped buffer pages */ unsigned long sclp_console_full; -static void sclp_suspend_req_cb(struct sclp_req *req, void *data) -{ - complete(&sclp_request_queue_flushed); -} - static int __init sclp_setup_console_pages(char *str) { int pages, rc; @@ -87,8 +78,6 @@ static int __init sclp_setup_console_drop(char *str) __setup("sclp_con_drop=", sclp_setup_console_drop); -static struct sclp_req sclp_suspend_req; - /* Timer for request retries. */ static struct timer_list sclp_request_timer; @@ -122,12 +111,6 @@ static volatile enum sclp_mask_state_t { sclp_mask_state_initializing } sclp_mask_state = sclp_mask_state_idle; -/* Internal state: is the driver suspended? 
*/ -static enum sclp_suspend_state_t { - sclp_suspend_state_running, - sclp_suspend_state_suspended, -} sclp_suspend_state = sclp_suspend_state_running; - /* Maximum retry counts */ #define SCLP_INIT_RETRY 3 #define SCLP_MASK_RETRY 3 @@ -313,8 +296,6 @@ sclp_process_queue(void) del_timer(&sclp_request_timer); while (!list_empty(&sclp_req_queue)) { req = list_entry(sclp_req_queue.next, struct sclp_req, list); - if (!req->sccb) - goto do_post; rc = __sclp_start_request(req); if (rc == 0) break; @@ -326,7 +307,6 @@ sclp_process_queue(void) sclp_request_timeout_normal); break; } -do_post: /* Post-processing for aborted request */ list_del(&req->list); if (req->callback) { @@ -340,10 +320,8 @@ do_post: static int __sclp_can_add_request(struct sclp_req *req) { - if (req == &sclp_suspend_req || req == &sclp_init_req) + if (req == &sclp_init_req) return 1; - if (sclp_suspend_state != sclp_suspend_state_running) - return 0; if (sclp_init_state != sclp_init_state_initialized) return 0; if (sclp_activation_state != sclp_activation_state_active) @@ -377,16 +355,10 @@ sclp_add_request(struct sclp_req *req) /* Start if request is first in list */ if (sclp_running_state == sclp_running_state_idle && req->list.prev == &sclp_req_queue) { - if (!req->sccb) { - list_del(&req->list); - rc = -ENODATA; - goto out; - } rc = __sclp_start_request(req); if (rc) list_del(&req->list); } -out: spin_unlock_irqrestore(&sclp_lock, flags); return rc; } @@ -692,7 +664,6 @@ sclp_register(struct sclp_register *reg) /* Trigger initial state change callback */ reg->sclp_receive_mask = 0; reg->sclp_send_mask = 0; - reg->pm_event_posted = 0; list_add(®->list, &sclp_reg_list); spin_unlock_irqrestore(&sclp_lock, flags); rc = sclp_init_mask(1); @@ -1010,112 +981,6 @@ static struct notifier_block sclp_reboot_notifier = { .notifier_call = sclp_reboot_event }; -/* - * Suspend/resume SCLP notifier implementation - */ - -static void sclp_pm_event(enum sclp_pm_event sclp_pm_event, int rollback) -{ - struct sclp_register *reg; - unsigned long flags; - - if (!rollback) { - spin_lock_irqsave(&sclp_lock, flags); - list_for_each_entry(reg, &sclp_reg_list, list) - reg->pm_event_posted = 0; - spin_unlock_irqrestore(&sclp_lock, flags); - } - do { - spin_lock_irqsave(&sclp_lock, flags); - list_for_each_entry(reg, &sclp_reg_list, list) { - if (rollback && reg->pm_event_posted) - goto found; - if (!rollback && !reg->pm_event_posted) - goto found; - } - spin_unlock_irqrestore(&sclp_lock, flags); - return; -found: - spin_unlock_irqrestore(&sclp_lock, flags); - if (reg->pm_event_fn) - reg->pm_event_fn(reg, sclp_pm_event); - reg->pm_event_posted = rollback ? 
0 : 1; - } while (1); -} - -/* - * Susend/resume callbacks for platform device - */ - -static int sclp_freeze(struct device *dev) -{ - unsigned long flags; - int rc; - - sclp_pm_event(SCLP_PM_EVENT_FREEZE, 0); - - spin_lock_irqsave(&sclp_lock, flags); - sclp_suspend_state = sclp_suspend_state_suspended; - spin_unlock_irqrestore(&sclp_lock, flags); - - /* Init supend data */ - memset(&sclp_suspend_req, 0, sizeof(sclp_suspend_req)); - sclp_suspend_req.callback = sclp_suspend_req_cb; - sclp_suspend_req.status = SCLP_REQ_FILLED; - init_completion(&sclp_request_queue_flushed); - - rc = sclp_add_request(&sclp_suspend_req); - if (rc == 0) - wait_for_completion(&sclp_request_queue_flushed); - else if (rc != -ENODATA) - goto fail_thaw; - - rc = sclp_deactivate(); - if (rc) - goto fail_thaw; - return 0; - -fail_thaw: - spin_lock_irqsave(&sclp_lock, flags); - sclp_suspend_state = sclp_suspend_state_running; - spin_unlock_irqrestore(&sclp_lock, flags); - sclp_pm_event(SCLP_PM_EVENT_THAW, 1); - return rc; -} - -static int sclp_undo_suspend(enum sclp_pm_event event) -{ - unsigned long flags; - int rc; - - rc = sclp_reactivate(); - if (rc) - return rc; - - spin_lock_irqsave(&sclp_lock, flags); - sclp_suspend_state = sclp_suspend_state_running; - spin_unlock_irqrestore(&sclp_lock, flags); - - sclp_pm_event(event, 0); - return 0; -} - -static int sclp_thaw(struct device *dev) -{ - return sclp_undo_suspend(SCLP_PM_EVENT_THAW); -} - -static int sclp_restore(struct device *dev) -{ - return sclp_undo_suspend(SCLP_PM_EVENT_RESTORE); -} - -static const struct dev_pm_ops sclp_pm_ops = { - .freeze = sclp_freeze, - .thaw = sclp_thaw, - .restore = sclp_restore, -}; - static ssize_t con_pages_show(struct device_driver *dev, char *buf) { return sprintf(buf, "%i\n", sclp_console_pages); @@ -1154,13 +1019,10 @@ static const struct attribute_group *sclp_drv_attr_groups[] = { static struct platform_driver sclp_pdrv = { .driver = { .name = "sclp", - .pm = &sclp_pm_ops, .groups = sclp_drv_attr_groups, }, }; -static struct platform_device *sclp_pdev; - /* Initialize SCLP driver. Return zero if driver is operational, non-zero * otherwise. */ static int @@ -1214,23 +1076,6 @@ fail_unlock: return rc; } -/* - * SCLP panic notifier: If we are suspended, we thaw SCLP in order to be able - * to print the panic message. 
- */ -static int sclp_panic_notify(struct notifier_block *self, - unsigned long event, void *data) -{ - if (sclp_suspend_state == sclp_suspend_state_suspended) - sclp_undo_suspend(SCLP_PM_EVENT_THAW); - return NOTIFY_OK; -} - -static struct notifier_block sclp_on_panic_nb = { - .notifier_call = sclp_panic_notify, - .priority = SCLP_PANIC_PRIO, -}; - static __init int sclp_initcall(void) { int rc; @@ -1239,23 +1084,7 @@ static __init int sclp_initcall(void) if (rc) return rc; - sclp_pdev = platform_device_register_simple("sclp", -1, NULL, 0); - rc = PTR_ERR_OR_ZERO(sclp_pdev); - if (rc) - goto fail_platform_driver_unregister; - - rc = atomic_notifier_chain_register(&panic_notifier_list, - &sclp_on_panic_nb); - if (rc) - goto fail_platform_device_unregister; - return sclp_init(); - -fail_platform_device_unregister: - platform_device_unregister(sclp_pdev); -fail_platform_driver_unregister: - platform_driver_unregister(&sclp_pdrv); - return rc; } arch_initcall(sclp_initcall); diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h index 6de919944a39..8dd8ad83b78b 100644 --- a/drivers/s390/char/sclp.h +++ b/drivers/s390/char/sclp.h @@ -81,15 +81,6 @@ typedef unsigned int sclp_cmdw_t; #define GDS_KEY_SELFDEFTEXTMSG 0x31 -enum sclp_pm_event { - SCLP_PM_EVENT_FREEZE, - SCLP_PM_EVENT_THAW, - SCLP_PM_EVENT_RESTORE, -}; - -#define SCLP_PANIC_PRIO 1 -#define SCLP_PANIC_PRIO_CLIENT 0 - typedef u64 sccb_mask_t; struct sccb_header { @@ -293,10 +284,6 @@ struct sclp_register { void (*state_change_fn)(struct sclp_register *); /* called for events in cp_receive_mask/sclp_receive_mask */ void (*receiver_fn)(struct evbuf_header *); - /* called for power management events */ - void (*pm_event_fn)(struct sclp_register *, enum sclp_pm_event); - /* pm event posted flag */ - int pm_event_posted; }; /* externals from sclp.c */ diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c index d41bc144c183..ab0518cfdcfe 100644 --- a/drivers/s390/char/sclp_cmd.c +++ b/drivers/s390/char/sclp_cmd.c @@ -20,7 +20,6 @@ #include <linux/mmzone.h> #include <linux/memory.h> #include <linux/module.h> -#include <linux/platform_device.h> #include <asm/ctl_reg.h> #include <asm/chpid.h> #include <asm/setup.h> @@ -168,7 +167,6 @@ static DEFINE_MUTEX(sclp_mem_mutex); static LIST_HEAD(sclp_mem_list); static u8 sclp_max_storage_id; static DECLARE_BITMAP(sclp_storage_ids, 256); -static int sclp_mem_state_changed; struct memory_increment { struct list_head list; @@ -359,8 +357,6 @@ static int sclp_mem_notifier(struct notifier_block *nb, rc = -EINVAL; break; } - if (!rc) - sclp_mem_state_changed = 1; mutex_unlock(&sclp_mem_mutex); return rc ? 
NOTIFY_BAD : NOTIFY_OK; } @@ -456,28 +452,8 @@ static void __init insert_increment(u16 rn, int standby, int assigned) list_add(&new_incr->list, prev); } -static int sclp_mem_freeze(struct device *dev) -{ - if (!sclp_mem_state_changed) - return 0; - pr_err("Memory hotplug state changed, suspend refused.\n"); - return -EPERM; -} - -static const struct dev_pm_ops sclp_mem_pm_ops = { - .freeze = sclp_mem_freeze, -}; - -static struct platform_driver sclp_mem_pdrv = { - .driver = { - .name = "sclp_mem", - .pm = &sclp_mem_pm_ops, - }, -}; - static int __init sclp_detect_standby_memory(void) { - struct platform_device *sclp_pdev; struct read_storage_sccb *sccb; int i, id, assigned, rc; @@ -530,17 +506,7 @@ static int __init sclp_detect_standby_memory(void) rc = register_memory_notifier(&sclp_mem_nb); if (rc) goto out; - rc = platform_driver_register(&sclp_mem_pdrv); - if (rc) - goto out; - sclp_pdev = platform_device_register_simple("sclp_mem", -1, NULL, 0); - rc = PTR_ERR_OR_ZERO(sclp_pdev); - if (rc) - goto out_driver; sclp_add_standby_memory(); - goto out; -out_driver: - platform_driver_unregister(&sclp_mem_pdrv); out: free_page((unsigned long) sccb); return rc; diff --git a/drivers/s390/char/sclp_con.c b/drivers/s390/char/sclp_con.c index 9b852a47ccc1..de028868c6f4 100644 --- a/drivers/s390/char/sclp_con.c +++ b/drivers/s390/char/sclp_con.c @@ -10,6 +10,7 @@ #include <linux/kmod.h> #include <linux/console.h> #include <linux/init.h> +#include <linux/panic_notifier.h> #include <linux/timer.h> #include <linux/jiffies.h> #include <linux/termios.h> @@ -35,8 +36,6 @@ static LIST_HEAD(sclp_con_outqueue); static struct sclp_buffer *sclp_conbuf; /* Timer for delayed output of console messages */ static struct timer_list sclp_con_timer; -/* Suspend mode flag */ -static int sclp_con_suspended; /* Flag that output queue is currently running */ static int sclp_con_queue_running; @@ -63,7 +62,7 @@ sclp_conbuf_callback(struct sclp_buffer *buffer, int rc) if (!list_empty(&sclp_con_outqueue)) buffer = list_first_entry(&sclp_con_outqueue, struct sclp_buffer, list); - if (!buffer || sclp_con_suspended) { + if (!buffer) { sclp_con_queue_running = 0; spin_unlock_irqrestore(&sclp_con_lock, flags); break; @@ -85,7 +84,7 @@ static void sclp_conbuf_emit(void) if (sclp_conbuf) list_add_tail(&sclp_conbuf->list, &sclp_con_outqueue); sclp_conbuf = NULL; - if (sclp_con_queue_running || sclp_con_suspended) + if (sclp_con_queue_running) goto out_unlock; if (list_empty(&sclp_con_outqueue)) goto out_unlock; @@ -179,8 +178,6 @@ sclp_console_write(struct console *console, const char *message, if (list_empty(&sclp_con_pages)) sclp_console_full++; while (list_empty(&sclp_con_pages)) { - if (sclp_con_suspended) - goto out; if (sclp_console_drop_buffer()) break; spin_unlock_irqrestore(&sclp_con_lock, flags); @@ -213,7 +210,6 @@ sclp_console_write(struct console *console, const char *message, !timer_pending(&sclp_con_timer)) { mod_timer(&sclp_con_timer, jiffies + HZ / 10); } -out: spin_unlock_irqrestore(&sclp_con_lock, flags); } @@ -234,32 +230,6 @@ sclp_console_flush(void) sclp_console_sync_queue(); } -/* - * Resume console: If there are cached messages, emit them. 
- */ -static void sclp_console_resume(void) -{ - unsigned long flags; - - spin_lock_irqsave(&sclp_con_lock, flags); - sclp_con_suspended = 0; - spin_unlock_irqrestore(&sclp_con_lock, flags); - sclp_conbuf_emit(); -} - -/* - * Suspend console: Set suspend flag and flush console - */ -static void sclp_console_suspend(void) -{ - unsigned long flags; - - spin_lock_irqsave(&sclp_con_lock, flags); - sclp_con_suspended = 1; - spin_unlock_irqrestore(&sclp_con_lock, flags); - sclp_console_flush(); -} - static int sclp_console_notify(struct notifier_block *self, unsigned long event, void *data) { @@ -269,7 +239,7 @@ static int sclp_console_notify(struct notifier_block *self, static struct notifier_block on_panic_nb = { .notifier_call = sclp_console_notify, - .priority = SCLP_PANIC_PRIO_CLIENT, + .priority = 1, }; static struct notifier_block on_reboot_nb = { @@ -291,22 +261,6 @@ static struct console sclp_console = }; /* - * This function is called for SCLP suspend and resume events. - */ -void sclp_console_pm_event(enum sclp_pm_event sclp_pm_event) -{ - switch (sclp_pm_event) { - case SCLP_PM_EVENT_FREEZE: - sclp_console_suspend(); - break; - case SCLP_PM_EVENT_RESTORE: - case SCLP_PM_EVENT_THAW: - sclp_console_resume(); - break; - } -} - -/* * called by console_init() in drivers/char/tty_io.c at boot-time. */ static int __init diff --git a/drivers/s390/char/sclp_ftp.c b/drivers/s390/char/sclp_ftp.c index dfdd6c8fd17e..1e9de99dcd02 100644 --- a/drivers/s390/char/sclp_ftp.c +++ b/drivers/s390/char/sclp_ftp.c @@ -231,7 +231,6 @@ static struct sclp_register sclp_ftp_event = { .receive_mask = EVTYP_DIAG_TEST_MASK, /* want rx events */ .receiver_fn = sclp_ftp_rxcb, /* async callback (rx) */ .state_change_fn = NULL, - .pm_event_fn = NULL, }; /** diff --git a/drivers/s390/char/sclp_quiesce.c b/drivers/s390/char/sclp_quiesce.c index 76956c2131cd..ade721467804 100644 --- a/drivers/s390/char/sclp_quiesce.c +++ b/drivers/s390/char/sclp_quiesce.c @@ -18,10 +18,6 @@ #include "sclp.h" -static void (*old_machine_restart)(char *); -static void (*old_machine_halt)(void); -static void (*old_machine_power_off)(void); - /* Shutdown handler. Signal completion of shutdown by loading special PSW. */ static void do_machine_quiesce(void) { @@ -37,42 +33,15 @@ static void do_machine_quiesce(void) /* Handler for quiesce event. Start shutdown procedure. 
*/ static void sclp_quiesce_handler(struct evbuf_header *evbuf) { - if (_machine_restart != (void *) do_machine_quiesce) { - old_machine_restart = _machine_restart; - old_machine_halt = _machine_halt; - old_machine_power_off = _machine_power_off; - _machine_restart = (void *) do_machine_quiesce; - _machine_halt = do_machine_quiesce; - _machine_power_off = do_machine_quiesce; - } + _machine_restart = (void *) do_machine_quiesce; + _machine_halt = do_machine_quiesce; + _machine_power_off = do_machine_quiesce; ctrl_alt_del(); } -/* Undo machine restart/halt/power_off modification on resume */ -static void sclp_quiesce_pm_event(struct sclp_register *reg, - enum sclp_pm_event sclp_pm_event) -{ - switch (sclp_pm_event) { - case SCLP_PM_EVENT_RESTORE: - if (old_machine_restart) { - _machine_restart = old_machine_restart; - _machine_halt = old_machine_halt; - _machine_power_off = old_machine_power_off; - old_machine_restart = NULL; - old_machine_halt = NULL; - old_machine_power_off = NULL; - } - break; - case SCLP_PM_EVENT_FREEZE: - case SCLP_PM_EVENT_THAW: - break; - } -} - static struct sclp_register sclp_quiesce_event = { .receive_mask = EVTYP_SIGQUIESCE_MASK, .receiver_fn = sclp_quiesce_handler, - .pm_event_fn = sclp_quiesce_pm_event }; /* Initialize quiesce driver. */ diff --git a/drivers/s390/char/sclp_rw.c b/drivers/s390/char/sclp_rw.c index d6c84e354df5..1690326553b1 100644 --- a/drivers/s390/char/sclp_rw.c +++ b/drivers/s390/char/sclp_rw.c @@ -26,16 +26,9 @@ */ #define MAX_SCCB_ROOM (PAGE_SIZE - sizeof(struct sclp_buffer)) -static void sclp_rw_pm_event(struct sclp_register *reg, - enum sclp_pm_event sclp_pm_event) -{ - sclp_console_pm_event(sclp_pm_event); -} - /* Event type structure for write message and write priority message */ static struct sclp_register sclp_rw_event = { .send_mask = EVTYP_MSG_MASK, - .pm_event_fn = sclp_rw_pm_event, }; /* @@ -325,10 +318,10 @@ sclp_buffer_space(struct sclp_buffer *buffer) /* * Return number of characters in buffer */ -int +unsigned int sclp_chars_in_buffer(struct sclp_buffer *buffer) { - int count; + unsigned int count; count = buffer->char_sum; if (buffer->current_line != NULL) diff --git a/drivers/s390/char/sclp_rw.h b/drivers/s390/char/sclp_rw.h index 93d706e4935c..9b779ee4f979 100644 --- a/drivers/s390/char/sclp_rw.h +++ b/drivers/s390/char/sclp_rw.h @@ -86,12 +86,6 @@ void *sclp_unmake_buffer(struct sclp_buffer *); int sclp_buffer_space(struct sclp_buffer *); int sclp_write(struct sclp_buffer *buffer, const unsigned char *, int); int sclp_emit_buffer(struct sclp_buffer *,void (*)(struct sclp_buffer *,int)); -int sclp_chars_in_buffer(struct sclp_buffer *); - -#ifdef CONFIG_SCLP_CONSOLE -void sclp_console_pm_event(enum sclp_pm_event sclp_pm_event); -#else -static inline void sclp_console_pm_event(enum sclp_pm_event sclp_pm_event) { } -#endif +unsigned int sclp_chars_in_buffer(struct sclp_buffer *); #endif /* __SCLP_RW_H__ */ diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c index 4456ceb23bd2..6be9de8ed37d 100644 --- a/drivers/s390/char/sclp_tty.c +++ b/drivers/s390/char/sclp_tty.c @@ -86,12 +86,12 @@ sclp_tty_close(struct tty_struct *tty, struct file *filp) * a string of newlines. Every newline creates a new message which * needs 82 bytes. 
*/ -static int +static unsigned int sclp_tty_write_room (struct tty_struct *tty) { unsigned long flags; struct list_head *l; - int count; + unsigned int count; spin_lock_irqsave(&sclp_tty_lock, flags); count = 0; @@ -280,20 +280,17 @@ sclp_tty_flush_chars(struct tty_struct *tty) * characters in the write buffer (will not be written as long as there is a * final line feed missing). */ -static int +static unsigned int sclp_tty_chars_in_buffer(struct tty_struct *tty) { unsigned long flags; - struct list_head *l; struct sclp_buffer *t; - int count; + unsigned int count = 0; spin_lock_irqsave(&sclp_tty_lock, flags); - count = 0; if (sclp_ttybuf != NULL) count = sclp_chars_in_buffer(sclp_ttybuf); - list_for_each(l, &sclp_tty_outqueue) { - t = list_entry(l, struct sclp_buffer, list); + list_for_each_entry(t, &sclp_tty_outqueue, list) { count += sclp_chars_in_buffer(t); } spin_unlock_irqrestore(&sclp_tty_lock, flags); diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c index 7f4445b0f819..da2496306c04 100644 --- a/drivers/s390/char/sclp_vt220.c +++ b/drivers/s390/char/sclp_vt220.c @@ -9,6 +9,7 @@ #include <linux/module.h> #include <linux/spinlock.h> +#include <linux/panic_notifier.h> #include <linux/list.h> #include <linux/wait.h> #include <linux/timer.h> @@ -35,8 +36,8 @@ #define SCLP_VT220_MINOR 65 #define SCLP_VT220_DRIVER_NAME "sclp_vt220" #define SCLP_VT220_DEVICE_NAME "ttysclp" -#define SCLP_VT220_CONSOLE_NAME "ttyS" -#define SCLP_VT220_CONSOLE_INDEX 1 /* console=ttyS1 */ +#define SCLP_VT220_CONSOLE_NAME "ttysclp" +#define SCLP_VT220_CONSOLE_INDEX 0 /* console=ttysclp0 */ /* Representation of a single write request */ struct sclp_vt220_request { @@ -69,9 +70,6 @@ static LIST_HEAD(sclp_vt220_empty); /* List of pending requests */ static LIST_HEAD(sclp_vt220_outqueue); -/* Suspend mode flag */ -static int sclp_vt220_suspended; - /* Flag that output queue is currently running */ static int sclp_vt220_queue_running; @@ -95,15 +93,12 @@ static int __initdata sclp_vt220_init_count; static int sclp_vt220_flush_later; static void sclp_vt220_receiver_fn(struct evbuf_header *evbuf); -static void sclp_vt220_pm_event_fn(struct sclp_register *reg, - enum sclp_pm_event sclp_pm_event); static int __sclp_vt220_emit(struct sclp_vt220_request *request); static void sclp_vt220_emit_current(void); /* Registration structure for SCLP output event buffers */ static struct sclp_register sclp_vt220_register = { .send_mask = EVTYP_VT220MSG_MASK, - .pm_event_fn = sclp_vt220_pm_event_fn, }; /* Registration structure for SCLP input event buffers */ @@ -135,7 +130,7 @@ sclp_vt220_process_queue(struct sclp_vt220_request *request) if (!list_empty(&sclp_vt220_outqueue)) request = list_entry(sclp_vt220_outqueue.next, struct sclp_vt220_request, list); - if (!request || sclp_vt220_suspended) { + if (!request) { sclp_vt220_queue_running = 0; spin_unlock_irqrestore(&sclp_vt220_lock, flags); break; @@ -241,7 +236,7 @@ sclp_vt220_emit_current(void) } sclp_vt220_flush_later = 0; } - if (sclp_vt220_queue_running || sclp_vt220_suspended) + if (sclp_vt220_queue_running) goto out_unlock; if (list_empty(&sclp_vt220_outqueue)) goto out_unlock; @@ -420,7 +415,7 @@ __sclp_vt220_write(const unsigned char *buf, int count, int do_schedule, if (list_empty(&sclp_vt220_empty)) sclp_console_full++; while (list_empty(&sclp_vt220_empty)) { - if (may_fail || sclp_vt220_suspended) + if (may_fail) goto out; if (sclp_vt220_drop_buffer()) break; @@ -609,12 +604,12 @@ sclp_vt220_flush_chars(struct tty_struct *tty) * to change as 
output buffers get emptied, or if the output flow * control is acted. */ -static int +static unsigned int sclp_vt220_write_room(struct tty_struct *tty) { unsigned long flags; struct list_head *l; - int count; + unsigned int count; spin_lock_irqsave(&sclp_vt220_lock, flags); count = 0; @@ -629,16 +624,15 @@ sclp_vt220_write_room(struct tty_struct *tty) /* * Return number of buffered chars. */ -static int +static unsigned int sclp_vt220_chars_in_buffer(struct tty_struct *tty) { unsigned long flags; struct list_head *l; struct sclp_vt220_request *r; - int count; + unsigned int count = 0; spin_lock_irqsave(&sclp_vt220_lock, flags); - count = 0; if (sclp_vt220_current_request != NULL) count = sclp_vt220_chars_stored(sclp_vt220_current_request); list_for_each(l, &sclp_vt220_outqueue) { @@ -791,46 +785,6 @@ static void __sclp_vt220_flush_buffer(void) spin_unlock_irqrestore(&sclp_vt220_lock, flags); } -/* - * Resume console: If there are cached messages, emit them. - */ -static void sclp_vt220_resume(void) -{ - unsigned long flags; - - spin_lock_irqsave(&sclp_vt220_lock, flags); - sclp_vt220_suspended = 0; - spin_unlock_irqrestore(&sclp_vt220_lock, flags); - sclp_vt220_emit_current(); -} - -/* - * Suspend console: Set suspend flag and flush console - */ -static void sclp_vt220_suspend(void) -{ - unsigned long flags; - - spin_lock_irqsave(&sclp_vt220_lock, flags); - sclp_vt220_suspended = 1; - spin_unlock_irqrestore(&sclp_vt220_lock, flags); - __sclp_vt220_flush_buffer(); -} - -static void sclp_vt220_pm_event_fn(struct sclp_register *reg, - enum sclp_pm_event sclp_pm_event) -{ - switch (sclp_pm_event) { - case SCLP_PM_EVENT_FREEZE: - sclp_vt220_suspend(); - break; - case SCLP_PM_EVENT_RESTORE: - case SCLP_PM_EVENT_THAW: - sclp_vt220_resume(); - break; - } -} - #ifdef CONFIG_SCLP_VT220_CONSOLE static void diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c index 8abb42923307..cc8237afeffa 100644 --- a/drivers/s390/char/tape_char.c +++ b/drivers/s390/char/tape_char.c @@ -371,8 +371,6 @@ __tapechar_ioctl(struct tape_device *device, case MTSEEK: if (device->required_tapemarks) tape_std_terminate_write(device); - default: - ; } rc = tape_mtop(device, op.mt_op, op.mt_count); diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c index 307a80f85c07..adc33846bf8e 100644 --- a/drivers/s390/char/tty3270.c +++ b/drivers/s390/char/tty3270.c @@ -1071,7 +1071,7 @@ static void tty3270_cleanup(struct tty_struct *tty) /* * We always have room. */ -static int +static unsigned int tty3270_write_room(struct tty_struct *tty) { return INT_MAX; @@ -1640,7 +1640,7 @@ tty3270_do_write(struct tty3270 *tp, struct tty_struct *tty, int i_msg, i; spin_lock_bh(&tp->view.lock); - for (i_msg = 0; !tty->stopped && i_msg < count; i_msg++) { + for (i_msg = 0; !tty->flow.stopped && i_msg < count; i_msg++) { if (tp->esc_state != 0) { /* Continue escape sequence. */ tty3270_escape_sequence(tp, buf[i_msg]); @@ -1757,22 +1757,6 @@ tty3270_flush_chars(struct tty_struct *tty) } /* - * Returns the number of characters in the output buffer. This is - * used in tty_wait_until_sent to wait until all characters have - * appeared on the screen. 
- */ -static int -tty3270_chars_in_buffer(struct tty_struct *tty) -{ - return 0; -} - -static void -tty3270_flush_buffer(struct tty_struct *tty) -{ -} - -/* * Check for visible/invisible input switches */ static void @@ -1892,8 +1876,6 @@ static const struct tty_operations tty3270_ops = { .put_char = tty3270_put_char, .flush_chars = tty3270_flush_chars, .write_room = tty3270_write_room, - .chars_in_buffer = tty3270_chars_in_buffer, - .flush_buffer = tty3270_flush_buffer, .throttle = tty3270_throttle, .unthrottle = tty3270_unthrottle, .hangup = tty3270_hangup, diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c index 58333cb4503f..ed970ecfafdf 100644 --- a/drivers/s390/char/vmlogrdr.c +++ b/drivers/s390/char/vmlogrdr.c @@ -679,34 +679,10 @@ static const struct attribute_group *vmlogrdr_attr_groups[] = { NULL, }; -static int vmlogrdr_pm_prepare(struct device *dev) -{ - int rc; - struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev); - - rc = 0; - if (priv) { - spin_lock_bh(&priv->priv_lock); - if (priv->dev_in_use) - rc = -EBUSY; - spin_unlock_bh(&priv->priv_lock); - } - if (rc) - pr_err("vmlogrdr: device %s is busy. Refuse to suspend.\n", - dev_name(dev)); - return rc; -} - - -static const struct dev_pm_ops vmlogrdr_pm_ops = { - .prepare = vmlogrdr_pm_prepare, -}; - static struct class *vmlogrdr_class; static struct device_driver vmlogrdr_driver = { .name = "vmlogrdr", .bus = &iucv_bus, - .pm = &vmlogrdr_pm_ops, .groups = vmlogrdr_drv_attr_groups, }; diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c index bd3c724bf695..b5b0848da93b 100644 --- a/drivers/s390/char/zcore.c +++ b/drivers/s390/char/zcore.c @@ -15,6 +15,7 @@ #include <linux/init.h> #include <linux/slab.h> #include <linux/debugfs.h> +#include <linux/panic_notifier.h> #include <linux/reboot.h> #include <asm/asm-offsets.h> diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c index cb466ed7eb5e..e56535c99888 100644 --- a/drivers/s390/cio/airq.c +++ b/drivers/s390/cio/airq.c @@ -93,7 +93,7 @@ static irqreturn_t do_airq_interrupt(int irq, void *dummy) struct hlist_head *head; set_cpu_flag(CIF_NOHZ_DELAY); - tpi_info = (struct tpi_info *) &get_irq_regs()->int_code; + tpi_info = &get_irq_regs()->tpi_info; trace_s390_cio_adapter_int(tpi_info); head = &airq_lists[tpi_info->isc]; rcu_read_lock(); diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c index 444385da5792..9748165e08e9 100644 --- a/drivers/s390/cio/ccwgroup.c +++ b/drivers/s390/cio/ccwgroup.c @@ -45,27 +45,6 @@ static void __ccwgroup_remove_symlinks(struct ccwgroup_device *gdev) } } -/* - * Remove references from ccw devices to ccw group device and from - * ccw group device to ccw devices. 
- */ -static void __ccwgroup_remove_cdev_refs(struct ccwgroup_device *gdev) -{ - struct ccw_device *cdev; - int i; - - for (i = 0; i < gdev->count; i++) { - cdev = gdev->cdev[i]; - if (!cdev) - continue; - spin_lock_irq(cdev->ccwlock); - dev_set_drvdata(&cdev->dev, NULL); - spin_unlock_irq(cdev->ccwlock); - gdev->cdev[i] = NULL; - put_device(&cdev->dev); - } -} - /** * ccwgroup_set_online() - enable a ccwgroup device * @gdev: target ccwgroup device @@ -175,7 +154,6 @@ static void ccwgroup_ungroup(struct ccwgroup_device *gdev) if (device_is_registered(&gdev->dev)) { __ccwgroup_remove_symlinks(gdev); device_unregister(&gdev->dev); - __ccwgroup_remove_cdev_refs(gdev); } mutex_unlock(&gdev->reg_mutex); } @@ -228,7 +206,23 @@ static void ccwgroup_ungroup_workfn(struct work_struct *work) static void ccwgroup_release(struct device *dev) { - kfree(to_ccwgroupdev(dev)); + struct ccwgroup_device *gdev = to_ccwgroupdev(dev); + unsigned int i; + + for (i = 0; i < gdev->count; i++) { + struct ccw_device *cdev = gdev->cdev[i]; + unsigned long flags; + + if (cdev) { + spin_lock_irqsave(cdev->ccwlock, flags); + if (dev_get_drvdata(&cdev->dev) == gdev) + dev_set_drvdata(&cdev->dev, NULL); + spin_unlock_irqrestore(cdev->ccwlock, flags); + put_device(&cdev->dev); + } + } + + kfree(gdev); } static int __ccwgroup_create_symlinks(struct ccwgroup_device *gdev) @@ -396,15 +390,6 @@ int ccwgroup_create_dev(struct device *parent, struct ccwgroup_driver *gdrv, mutex_unlock(&gdev->reg_mutex); return 0; error: - for (i = 0; i < num_devices; i++) - if (gdev->cdev[i]) { - spin_lock_irq(gdev->cdev[i]->ccwlock); - if (dev_get_drvdata(&gdev->cdev[i]->dev) == gdev) - dev_set_drvdata(&gdev->cdev[i]->dev, NULL); - spin_unlock_irq(gdev->cdev[i]->ccwlock); - put_device(&gdev->cdev[i]->dev); - gdev->cdev[i] = NULL; - } mutex_unlock(&gdev->reg_mutex); put_device(&gdev->dev); return rc; @@ -416,7 +401,7 @@ static int ccwgroup_notifier(struct notifier_block *nb, unsigned long action, { struct ccwgroup_device *gdev = to_ccwgroupdev(data); - if (action == BUS_NOTIFY_UNBIND_DRIVER) { + if (action == BUS_NOTIFY_UNBOUND_DRIVER) { get_device(&gdev->dev); schedule_work(&gdev->ungroup_work); } @@ -514,15 +499,6 @@ EXPORT_SYMBOL(ccwgroup_driver_register); */ void ccwgroup_driver_unregister(struct ccwgroup_driver *cdriver) { - struct device *dev; - - /* We don't want ccwgroup devices to live longer than their driver. */ - while ((dev = driver_find_next_device(&cdriver->driver, NULL))) { - struct ccwgroup_device *gdev = to_ccwgroupdev(dev); - - ccwgroup_ungroup(gdev); - put_device(dev); - } driver_unregister(&cdriver->driver); } EXPORT_SYMBOL(ccwgroup_driver_unregister); diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c index e42113825415..1097e76982a5 100644 --- a/drivers/s390/cio/chp.c +++ b/drivers/s390/cio/chp.c @@ -255,6 +255,9 @@ static ssize_t chp_status_write(struct device *dev, if (!num_args) return count; + /* Wait until previous actions have settled. */ + css_wait_for_slow_path(); + if (!strncasecmp(cmd, "on", 2) || !strcmp(cmd, "1")) { mutex_lock(&cp->lock); error = s390_vary_chpid(cp->chpid, 1); diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index c22d9ee27ba1..297fb399363c 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c @@ -801,8 +801,6 @@ int chsc_chp_vary(struct chp_id chpid, int on) { struct channel_path *chp = chpid_to_chp(chpid); - /* Wait until previous actions have settled. 
*/ - css_wait_for_slow_path(); /* * Redo PathVerification on the devices the chpid connects to */ diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index 6d716db2a46a..923f5ca4f5e6 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c @@ -536,7 +536,7 @@ static irqreturn_t do_cio_interrupt(int irq, void *dummy) struct irb *irb; set_cpu_flag(CIF_NOHZ_DELAY); - tpi_info = (struct tpi_info *) &get_irq_regs()->int_code; + tpi_info = &get_irq_regs()->tpi_info; trace_s390_cio_interrupt(tpi_info); irb = this_cpu_ptr(&cio_irb); sch = (struct subchannel *)(unsigned long) tpi_info->intparm; diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h index dcdaba689b20..1cb9daf9c645 100644 --- a/drivers/s390/cio/cio.h +++ b/drivers/s390/cio/cio.h @@ -9,6 +9,7 @@ #include <asm/cio.h> #include <asm/fcx.h> #include <asm/schid.h> +#include <asm/tpi.h> #include "chsc.h" /* @@ -46,18 +47,6 @@ struct pmcw { /* ... in an operand exception. */ } __attribute__ ((packed)); -/* I/O-Interruption Code as stored by TEST PENDING INTERRUPTION (TPI). */ -struct tpi_info { - struct subchannel_id schid; - u32 intparm; - u32 adapter_IO:1; - u32 directed_irq:1; - u32 isc:3; - u32 :27; - u32 type:3; - u32 :12; -} __packed __aligned(4); - /* Target SCHIB configuration. */ struct schib_config { u64 mba; diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c index b7b590646d58..5584aa46c94e 100644 --- a/drivers/s390/cio/cmf.c +++ b/drivers/s390/cio/cmf.c @@ -163,13 +163,14 @@ static inline u64 time_to_avg_nsec(u32 value, u32 count) */ static inline void cmf_activate(void *area, unsigned int onoff) { - register void * __gpr2 asm("2"); - register long __gpr1 asm("1"); - - __gpr2 = area; - __gpr1 = onoff; /* activate channel measurement */ - asm("schm" : : "d" (__gpr2), "d" (__gpr1) ); + asm volatile( + " lgr 1,%[r1]\n" + " lgr 2,%[mbo]\n" + " schm\n" + : + : [r1] "d" ((unsigned long)onoff), [mbo] "d" (area) + : "1", "2"); } static int set_schib(struct ccw_device *cdev, u32 mme, int mbfc, diff --git a/drivers/s390/cio/ioasm.c b/drivers/s390/cio/ioasm.c index 4c5244d6052b..180913007824 100644 --- a/drivers/s390/cio/ioasm.c +++ b/drivers/s390/cio/ioasm.c @@ -16,18 +16,19 @@ static inline int __stsch(struct subchannel_id schid, struct schib *addr) { - register struct subchannel_id reg1 asm ("1") = schid; + unsigned long r1 = *(unsigned int *)&schid; int ccode = -EIO; asm volatile( - " stsch 0(%3)\n" - "0: ipm %0\n" - " srl %0,28\n" + " lgr 1,%[r1]\n" + " stsch %[addr]\n" + "0: ipm %[cc]\n" + " srl %[cc],28\n" "1:\n" EX_TABLE(0b, 1b) - : "+d" (ccode), "=m" (*addr) - : "d" (reg1), "a" (addr) - : "cc"); + : [cc] "+&d" (ccode), [addr] "=Q" (*addr) + : [r1] "d" (r1) + : "cc", "1"); return ccode; } @@ -44,18 +45,19 @@ EXPORT_SYMBOL(stsch); static inline int __msch(struct subchannel_id schid, struct schib *addr) { - register struct subchannel_id reg1 asm ("1") = schid; + unsigned long r1 = *(unsigned int *)&schid; int ccode = -EIO; asm volatile( - " msch 0(%2)\n" - "0: ipm %0\n" - " srl %0,28\n" + " lgr 1,%[r1]\n" + " msch %[addr]\n" + "0: ipm %[cc]\n" + " srl %[cc],28\n" "1:\n" EX_TABLE(0b, 1b) - : "+d" (ccode) - : "d" (reg1), "a" (addr), "m" (*addr) - : "cc"); + : [cc] "+&d" (ccode) + : [r1] "d" (r1), [addr] "Q" (*addr) + : "cc", "1"); return ccode; } @@ -71,16 +73,17 @@ int msch(struct subchannel_id schid, struct schib *addr) static inline int __tsch(struct subchannel_id schid, struct irb *addr) { - register struct subchannel_id reg1 asm ("1") = schid; + unsigned long r1 = *(unsigned int *)&schid; int 
ccode; asm volatile( - " tsch 0(%3)\n" - " ipm %0\n" - " srl %0,28" - : "=d" (ccode), "=m" (*addr) - : "d" (reg1), "a" (addr) - : "cc"); + " lgr 1,%[r1]\n" + " tsch %[addr]\n" + " ipm %[cc]\n" + " srl %[cc],28" + : [cc] "=&d" (ccode), [addr] "=Q" (*addr) + : [r1] "d" (r1) + : "cc", "1"); return ccode; } @@ -96,18 +99,19 @@ int tsch(struct subchannel_id schid, struct irb *addr) static inline int __ssch(struct subchannel_id schid, union orb *addr) { - register struct subchannel_id reg1 asm("1") = schid; + unsigned long r1 = *(unsigned int *)&schid; int ccode = -EIO; asm volatile( - " ssch 0(%2)\n" - "0: ipm %0\n" - " srl %0,28\n" + " lgr 1,%[r1]\n" + " ssch %[addr]\n" + "0: ipm %[cc]\n" + " srl %[cc],28\n" "1:\n" EX_TABLE(0b, 1b) - : "+d" (ccode) - : "d" (reg1), "a" (addr), "m" (*addr) - : "cc", "memory"); + : [cc] "+&d" (ccode) + : [r1] "d" (r1), [addr] "Q" (*addr) + : "cc", "memory", "1"); return ccode; } @@ -124,16 +128,17 @@ EXPORT_SYMBOL(ssch); static inline int __csch(struct subchannel_id schid) { - register struct subchannel_id reg1 asm("1") = schid; + unsigned long r1 = *(unsigned int *)&schid; int ccode; asm volatile( + " lgr 1,%[r1]\n" " csch\n" - " ipm %0\n" - " srl %0,28" - : "=d" (ccode) - : "d" (reg1) - : "cc"); + " ipm %[cc]\n" + " srl %[cc],28\n" + : [cc] "=&d" (ccode) + : [r1] "d" (r1) + : "cc", "1"); return ccode; } @@ -153,11 +158,11 @@ int tpi(struct tpi_info *addr) int ccode; asm volatile( - " tpi 0(%2)\n" - " ipm %0\n" - " srl %0,28" - : "=d" (ccode), "=m" (*addr) - : "a" (addr) + " tpi %[addr]\n" + " ipm %[cc]\n" + " srl %[cc],28" + : [cc] "=&d" (ccode), [addr] "=Q" (*addr) + : : "cc"); trace_s390_cio_tpi(addr, ccode); @@ -170,13 +175,13 @@ int chsc(void *chsc_area) int cc = -EIO; asm volatile( - " .insn rre,0xb25f0000,%2,0\n" - "0: ipm %0\n" - " srl %0,28\n" + " .insn rre,0xb25f0000,%[chsc_area],0\n" + "0: ipm %[cc]\n" + " srl %[cc],28\n" "1:\n" EX_TABLE(0b, 1b) - : "+d" (cc), "=m" (*(addr_type *) chsc_area) - : "d" (chsc_area), "m" (*(addr_type *) chsc_area) + : [cc] "+&d" (cc), "+m" (*(addr_type *)chsc_area) + : [chsc_area] "d" (chsc_area) : "cc"); trace_s390_cio_chsc(chsc_area, cc); @@ -186,17 +191,17 @@ EXPORT_SYMBOL(chsc); static inline int __rsch(struct subchannel_id schid) { - register struct subchannel_id reg1 asm("1") = schid; + unsigned long r1 = *(unsigned int *)&schid; int ccode; asm volatile( + " lgr 1,%[r1]\n" " rsch\n" - " ipm %0\n" - " srl %0,28" - : "=d" (ccode) - : "d" (reg1) - : "cc", "memory"); - + " ipm %[cc]\n" + " srl %[cc],28\n" + : [cc] "=&d" (ccode) + : [r1] "d" (r1) + : "cc", "memory", "1"); return ccode; } @@ -212,16 +217,17 @@ int rsch(struct subchannel_id schid) static inline int __hsch(struct subchannel_id schid) { - register struct subchannel_id reg1 asm("1") = schid; + unsigned long r1 = *(unsigned int *)&schid; int ccode; asm volatile( + " lgr 1,%[r1]\n" " hsch\n" - " ipm %0\n" - " srl %0,28" - : "=d" (ccode) - : "d" (reg1) - : "cc"); + " ipm %[cc]\n" + " srl %[cc],28\n" + : [cc] "=&d" (ccode) + : [r1] "d" (r1) + : "cc", "1"); return ccode; } @@ -238,16 +244,17 @@ EXPORT_SYMBOL(hsch); static inline int __xsch(struct subchannel_id schid) { - register struct subchannel_id reg1 asm("1") = schid; + unsigned long r1 = *(unsigned int *)&schid; int ccode; asm volatile( + " lgr 1,%[r1]\n" " xsch\n" - " ipm %0\n" - " srl %0,28" - : "=d" (ccode) - : "d" (reg1) - : "cc"); + " ipm %[cc]\n" + " srl %[cc],28\n" + : [cc] "=&d" (ccode) + : [r1] "d" (r1) + : "cc", "1"); return ccode; } @@ -266,11 +273,11 @@ static inline int __stcrw(struct crw *crw) 
int ccode; asm volatile( - " stcrw 0(%2)\n" - " ipm %0\n" - " srl %0,28\n" - : "=d" (ccode), "=m" (*crw) - : "a" (crw) + " stcrw %[crw]\n" + " ipm %[cc]\n" + " srl %[cc],28\n" + : [cc] "=&d" (ccode), [crw] "=Q" (*crw) + : : "cc"); return ccode; } diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h index 0e0044d70844..f69ffbb8edc9 100644 --- a/drivers/s390/cio/qdio.h +++ b/drivers/s390/cio/qdio.h @@ -88,15 +88,15 @@ enum qdio_irq_states { static inline int do_sqbs(u64 token, unsigned char state, int queue, int *start, int *count) { - register unsigned long _ccq asm ("0") = *count; - register unsigned long _token asm ("1") = token; unsigned long _queuestart = ((unsigned long)queue << 32) | *start; + unsigned long _ccq = *count; asm volatile( - " .insn rsy,0xeb000000008A,%1,0,0(%2)" - : "+d" (_ccq), "+d" (_queuestart) - : "d" ((unsigned long)state), "d" (_token) - : "memory", "cc"); + " lgr 1,%[token]\n" + " .insn rsy,0xeb000000008a,%[qs],%[ccq],0(%[state])" + : [ccq] "+&d" (_ccq), [qs] "+&d" (_queuestart) + : [state] "d" ((unsigned long)state), [token] "d" (token) + : "memory", "cc", "1"); *count = _ccq & 0xff; *start = _queuestart & 0xff; @@ -106,16 +106,17 @@ static inline int do_sqbs(u64 token, unsigned char state, int queue, static inline int do_eqbs(u64 token, unsigned char *state, int queue, int *start, int *count, int ack) { - register unsigned long _ccq asm ("0") = *count; - register unsigned long _token asm ("1") = token; unsigned long _queuestart = ((unsigned long)queue << 32) | *start; unsigned long _state = (unsigned long)ack << 63; + unsigned long _ccq = *count; asm volatile( - " .insn rrf,0xB99c0000,%1,%2,0,0" - : "+d" (_ccq), "+d" (_queuestart), "+d" (_state) - : "d" (_token) - : "memory", "cc"); + " lgr 1,%[token]\n" + " .insn rrf,0xb99c0000,%[qs],%[state],%[ccq],0" + : [ccq] "+&d" (_ccq), [qs] "+&d" (_queuestart), + [state] "+&d" (_state) + : [token] "d" (token) + : "memory", "cc", "1"); *count = _ccq & 0xff; *start = _queuestart & 0xff; *state = _state & 0xff; diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index 307ce7ff5ca4..3052fab00597 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c @@ -31,38 +31,41 @@ MODULE_DESCRIPTION("QDIO base support"); MODULE_LICENSE("GPL"); static inline int do_siga_sync(unsigned long schid, - unsigned int out_mask, unsigned int in_mask, + unsigned long out_mask, unsigned long in_mask, unsigned int fc) { - register unsigned long __fc asm ("0") = fc; - register unsigned long __schid asm ("1") = schid; - register unsigned long out asm ("2") = out_mask; - register unsigned long in asm ("3") = in_mask; int cc; asm volatile( + " lgr 0,%[fc]\n" + " lgr 1,%[schid]\n" + " lgr 2,%[out]\n" + " lgr 3,%[in]\n" " siga 0\n" - " ipm %0\n" - " srl %0,28\n" - : "=d" (cc) - : "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc"); + " ipm %[cc]\n" + " srl %[cc],28\n" + : [cc] "=&d" (cc) + : [fc] "d" (fc), [schid] "d" (schid), + [out] "d" (out_mask), [in] "d" (in_mask) + : "cc", "0", "1", "2", "3"); return cc; } -static inline int do_siga_input(unsigned long schid, unsigned int mask, - unsigned int fc) +static inline int do_siga_input(unsigned long schid, unsigned long mask, + unsigned long fc) { - register unsigned long __fc asm ("0") = fc; - register unsigned long __schid asm ("1") = schid; - register unsigned long __mask asm ("2") = mask; int cc; asm volatile( + " lgr 0,%[fc]\n" + " lgr 1,%[schid]\n" + " lgr 2,%[mask]\n" " siga 0\n" - " ipm %0\n" - " srl %0,28\n" - : "=d" (cc) - : "d" (__fc), "d" 
(__schid), "d" (__mask) : "cc"); + " ipm %[cc]\n" + " srl %[cc],28\n" + : [cc] "=&d" (cc) + : [fc] "d" (fc), [schid] "d" (schid), [mask] "d" (mask) + : "cc", "0", "1", "2"); return cc; } @@ -78,23 +81,24 @@ static inline int do_siga_input(unsigned long schid, unsigned int mask, * Note: For IQDC unicast queues only the highest priority queue is processed. */ static inline int do_siga_output(unsigned long schid, unsigned long mask, - unsigned int *bb, unsigned int fc, + unsigned int *bb, unsigned long fc, unsigned long aob) { - register unsigned long __fc asm("0") = fc; - register unsigned long __schid asm("1") = schid; - register unsigned long __mask asm("2") = mask; - register unsigned long __aob asm("3") = aob; int cc; asm volatile( + " lgr 0,%[fc]\n" + " lgr 1,%[schid]\n" + " lgr 2,%[mask]\n" + " lgr 3,%[aob]\n" " siga 0\n" - " ipm %0\n" - " srl %0,28\n" - : "=d" (cc), "+d" (__fc), "+d" (__aob) - : "d" (__schid), "d" (__mask) - : "cc"); - *bb = __fc >> 31; + " lgr %[fc],0\n" + " ipm %[cc]\n" + " srl %[cc],28\n" + : [cc] "=&d" (cc), [fc] "+&d" (fc) + : [schid] "d" (schid), [mask] "d" (mask), [aob] "d" (aob) + : "cc", "0", "1", "2", "3"); + *bb = fc >> 31; return cc; } diff --git a/drivers/s390/cio/trace.h b/drivers/s390/cio/trace.h index 4803139bce14..86993de25345 100644 --- a/drivers/s390/cio/trace.h +++ b/drivers/s390/cio/trace.h @@ -168,10 +168,8 @@ TRACE_EVENT(s390_cio_tpi, memset(&__entry->tpi_info, 0, sizeof(struct tpi_info)); else if (addr) __entry->tpi_info = *addr; - else { - memcpy(&__entry->tpi_info, &S390_lowcore.subchannel_id, - sizeof(struct tpi_info)); - } + else + __entry->tpi_info = S390_lowcore.tpi_info; __entry->cssid = __entry->tpi_info.schid.cssid; __entry->ssid = __entry->tpi_info.schid.ssid; __entry->schno = __entry->tpi_info.schid.sch_no; diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c index b9febc581b1f..8d1b2771c1aa 100644 --- a/drivers/s390/cio/vfio_ccw_cp.c +++ b/drivers/s390/cio/vfio_ccw_cp.c @@ -638,6 +638,10 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb) static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 1); int ret; + /* this is an error in the caller */ + if (cp->initialized) + return -EBUSY; + /* * We only support prefetching the channel program. We assume all channel * programs executed by supported guests likewise support prefetching. diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c index 8c625b530035..9b61e9b131ad 100644 --- a/drivers/s390/cio/vfio_ccw_drv.c +++ b/drivers/s390/cio/vfio_ccw_drv.c @@ -86,6 +86,7 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work) struct vfio_ccw_private *private; struct irb *irb; bool is_final; + bool cp_is_finished = false; private = container_of(work, struct vfio_ccw_private, io_work); irb = &private->irb; @@ -94,14 +95,21 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work) (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)); if (scsw_is_solicited(&irb->scsw)) { cp_update_scsw(&private->cp, &irb->scsw); - if (is_final && private->state == VFIO_CCW_STATE_CP_PENDING) + if (is_final && private->state == VFIO_CCW_STATE_CP_PENDING) { cp_free(&private->cp); + cp_is_finished = true; + } } mutex_lock(&private->io_mutex); memcpy(private->io_region->irb_area, irb, sizeof(*irb)); mutex_unlock(&private->io_mutex); - if (private->mdev && is_final) + /* + * Reset to IDLE only if processing of a channel program + * has finished. Do not overwrite a possible processing + * state if the final interrupt was for HSCH or CSCH. 
+ */ + if (private->mdev && cp_is_finished) private->state = VFIO_CCW_STATE_IDLE; if (private->io_trigger) diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c index 23e61aa638e4..e435a9cd92da 100644 --- a/drivers/s390/cio/vfio_ccw_fsm.c +++ b/drivers/s390/cio/vfio_ccw_fsm.c @@ -318,6 +318,7 @@ static void fsm_io_request(struct vfio_ccw_private *private, } err_out: + private->state = VFIO_CCW_STATE_IDLE; trace_vfio_ccw_fsm_io_request(scsw->cmd.fctl, schid, io_region->ret_code, errstr); } diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c index 491a64c61fff..c57d2a7f0919 100644 --- a/drivers/s390/cio/vfio_ccw_ops.c +++ b/drivers/s390/cio/vfio_ccw_ops.c @@ -279,8 +279,6 @@ static ssize_t vfio_ccw_mdev_write_io_region(struct vfio_ccw_private *private, } vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_IO_REQ); - if (region->ret_code != 0) - private->state = VFIO_CCW_STATE_IDLE; ret = (region->ret_code != 0) ? region->ret_code : count; out_unlock: diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index 2758d05a802d..8d3a1d84a757 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0+ /* - * Copyright IBM Corp. 2006, 2020 + * Copyright IBM Corp. 2006, 2021 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com> * Ralph Wuerthner <rwuerthn@de.ibm.com> @@ -61,6 +61,9 @@ static char *aqm_str; module_param_named(aqmask, aqm_str, charp, 0440); MODULE_PARM_DESC(aqmask, "AP bus domain mask."); +atomic_t ap_max_msg_size = ATOMIC_INIT(AP_DEFAULT_MAX_MSG_SIZE); +EXPORT_SYMBOL(ap_max_msg_size); + static struct device *ap_root_device; /* Hashtable of all queue devices on the AP bus */ @@ -77,6 +80,9 @@ EXPORT_SYMBOL(ap_perms_mutex); /* # of bus scans since init */ static atomic64_t ap_scan_bus_count; +/* # of bindings complete since init */ +static atomic64_t ap_bindings_complete_count = ATOMIC64_INIT(0); + /* completion for initial APQN bindings complete */ static DECLARE_COMPLETION(ap_init_apqn_bindings_complete); @@ -313,11 +319,24 @@ EXPORT_SYMBOL(ap_test_config_ctrl_domain); * Returns true if TAPQ succeeded and the info is filled or * false otherwise. */ -static bool ap_queue_info(ap_qid_t qid, int *q_type, - unsigned int *q_fac, int *q_depth, bool *q_decfg) +static bool ap_queue_info(ap_qid_t qid, int *q_type, unsigned int *q_fac, + int *q_depth, int *q_ml, bool *q_decfg) { struct ap_queue_status status; - unsigned long info = 0; + union { + unsigned long value; + struct { + unsigned int fac : 32; /* facility bits */ + unsigned int at : 8; /* ap type */ + unsigned int _res1 : 8; + unsigned int _res2 : 4; + unsigned int ml : 4; /* apxl ml */ + unsigned int _res3 : 4; + unsigned int qd : 4; /* queue depth */ + } tapq_gr2; + } tapq_info; + + tapq_info.value = 0; /* make sure we don't run into a specifiation exception */ if (AP_QID_CARD(qid) > ap_max_adapter_id || @@ -325,7 +344,7 @@ static bool ap_queue_info(ap_qid_t qid, int *q_type, return false; /* call TAPQ on this APQN */ - status = ap_test_queue(qid, ap_apft_available(), &info); + status = ap_test_queue(qid, ap_apft_available(), &tapq_info.value); switch (status.response_code) { case AP_RESPONSE_NORMAL: case AP_RESPONSE_RESET_IN_PROGRESS: @@ -337,11 +356,12 @@ static bool ap_queue_info(ap_qid_t qid, int *q_type, * info should be filled. All bits 0 is not possible as * there is at least one of the mode bits set. 
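ap_queue_info() now pulls the TAPQ GR2 contents apart through a union of bitfields instead of open-coded shifts, and that is also where the new ml (message limit) value comes from. The same decode written with explicit shifts, runnable on any host; the bit offsets follow the struct in the hunk above, the register value in main() is made up:

#include <stdint.h>
#include <stdio.h>

struct tapq_gr2 {
        unsigned int fac;   /* facility bits, GR2 bits 0-31  */
        unsigned int at;    /* AP type,       GR2 bits 32-39 */
        unsigned int ml;    /* msg limit,     GR2 bits 52-55 */
        unsigned int qd;    /* queue depth,   GR2 bits 60-63 */
};

/* Hypothetical decode helper: the same fields the kernel's bitfield union
 * names, extracted with shifts so the sketch is layout independent. */
static struct tapq_gr2 decode_gr2(uint64_t gr2)
{
        struct tapq_gr2 info = {
                .fac = (unsigned int)(gr2 >> 32),
                .at  = (gr2 >> 24) & 0xff,
                .ml  = (gr2 >> 8) & 0x0f,
                .qd  = gr2 & 0x0f,
        };
        return info;
}

int main(void)
{
        /* made-up GR2 value: fac=0x92000000, type=0x0e, ml=2, depth=8 */
        struct tapq_gr2 i = decode_gr2(0x920000000e000208ULL);

        printf("fac=%#x type=%#x ml=%u depth=%u\n", i.fac, i.at, i.ml, i.qd);
        return 0;
}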
*/ - if (WARN_ON_ONCE(!info)) + if (WARN_ON_ONCE(!tapq_info.value)) return false; - *q_type = (int)((info >> 24) & 0xff); - *q_fac = (unsigned int)(info >> 32); - *q_depth = (int)(info & 0xff); + *q_type = tapq_info.tapq_gr2.at; + *q_fac = tapq_info.tapq_gr2.fac; + *q_depth = tapq_info.tapq_gr2.qd; + *q_ml = tapq_info.tapq_gr2.ml; *q_decfg = status.response_code == AP_RESPONSE_DECONFIGURED; switch (*q_type) { /* For CEX2 and CEX3 the available functions @@ -584,22 +604,47 @@ static int ap_bus_match(struct device *dev, struct device_driver *drv) */ static int ap_uevent(struct device *dev, struct kobj_uevent_env *env) { - int rc; + int rc = 0; struct ap_device *ap_dev = to_ap_dev(dev); /* Uevents from ap bus core don't need extensions to the env */ if (dev == ap_root_device) return 0; - /* Set up DEV_TYPE environment variable. */ - rc = add_uevent_var(env, "DEV_TYPE=%04X", ap_dev->device_type); - if (rc) - return rc; + if (is_card_dev(dev)) { + struct ap_card *ac = to_ap_card(&ap_dev->device); - /* Add MODALIAS= */ - rc = add_uevent_var(env, "MODALIAS=ap:t%02X", ap_dev->device_type); - if (rc) - return rc; + /* Set up DEV_TYPE environment variable. */ + rc = add_uevent_var(env, "DEV_TYPE=%04X", ap_dev->device_type); + if (rc) + return rc; + /* Add MODALIAS= */ + rc = add_uevent_var(env, "MODALIAS=ap:t%02X", ap_dev->device_type); + if (rc) + return rc; + + /* Add MODE=<accel|cca|ep11> */ + if (ap_test_bit(&ac->functions, AP_FUNC_ACCEL)) + rc = add_uevent_var(env, "MODE=accel"); + else if (ap_test_bit(&ac->functions, AP_FUNC_COPRO)) + rc = add_uevent_var(env, "MODE=cca"); + else if (ap_test_bit(&ac->functions, AP_FUNC_EP11)) + rc = add_uevent_var(env, "MODE=ep11"); + if (rc) + return rc; + } else { + struct ap_queue *aq = to_ap_queue(&ap_dev->device); + + /* Add MODE=<accel|cca|ep11> */ + if (ap_test_bit(&aq->card->functions, AP_FUNC_ACCEL)) + rc = add_uevent_var(env, "MODE=accel"); + else if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO)) + rc = add_uevent_var(env, "MODE=cca"); + else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11)) + rc = add_uevent_var(env, "MODE=ep11"); + if (rc) + return rc; + } return 0; } @@ -613,11 +658,36 @@ static void ap_send_init_scan_done_uevent(void) static void ap_send_bindings_complete_uevent(void) { - char *envp[] = { "BINDINGS=complete", NULL }; + char buf[32]; + char *envp[] = { "BINDINGS=complete", buf, NULL }; + snprintf(buf, sizeof(buf), "COMPLETECOUNT=%llu", + atomic64_inc_return(&ap_bindings_complete_count)); kobject_uevent_env(&ap_root_device->kobj, KOBJ_CHANGE, envp); } +void ap_send_config_uevent(struct ap_device *ap_dev, bool cfg) +{ + char buf[16]; + char *envp[] = { buf, NULL }; + + snprintf(buf, sizeof(buf), "CONFIG=%d", cfg ? 1 : 0); + + kobject_uevent_env(&ap_dev->device.kobj, KOBJ_CHANGE, envp); +} +EXPORT_SYMBOL(ap_send_config_uevent); + +void ap_send_online_uevent(struct ap_device *ap_dev, int online) +{ + char buf[16]; + char *envp[] = { buf, NULL }; + + snprintf(buf, sizeof(buf), "ONLINE=%d", online ? 
1 : 0); + + kobject_uevent_env(&ap_dev->device.kobj, KOBJ_CHANGE, envp); +} +EXPORT_SYMBOL(ap_send_online_uevent); + /* * calc # of bound APQNs */ @@ -885,8 +955,6 @@ int ap_driver_register(struct ap_driver *ap_drv, struct module *owner, struct device_driver *drv = &ap_drv->driver; drv->bus = &ap_bus_type; - drv->probe = ap_device_probe; - drv->remove = ap_device_remove; drv->owner = owner; drv->name = name; return driver_register(drv); @@ -1319,6 +1387,8 @@ static struct bus_type ap_bus_type = { .bus_groups = ap_bus_groups, .match = &ap_bus_match, .uevent = &ap_uevent, + .probe = ap_device_probe, + .remove = ap_device_remove, }; /** @@ -1463,7 +1533,7 @@ static inline void ap_scan_domains(struct ap_card *ac) unsigned int func; struct device *dev; struct ap_queue *aq; - int rc, dom, depth, type; + int rc, dom, depth, type, ml; /* * Go through the configuration for the domains and compare them @@ -1487,7 +1557,7 @@ static inline void ap_scan_domains(struct ap_card *ac) continue; } /* domain is valid, get info from this APQN */ - if (!ap_queue_info(qid, &type, &func, &depth, &decfg)) { + if (!ap_queue_info(qid, &type, &func, &depth, &ml, &decfg)) { if (aq) { AP_DBF_INFO( "%s(%d,%d) ap_queue_info() not successful, rm queue device\n", @@ -1540,6 +1610,7 @@ static inline void ap_scan_domains(struct ap_card *ac) spin_unlock_bh(&aq->lock); AP_DBF_INFO("%s(%d,%d) queue device config off\n", __func__, ac->id, dom); + ap_send_config_uevent(&aq->ap_dev, aq->config); /* 'receive' pending messages with -EAGAIN */ ap_flush_queue(aq); goto put_dev_and_continue; @@ -1554,6 +1625,7 @@ static inline void ap_scan_domains(struct ap_card *ac) spin_unlock_bh(&aq->lock); AP_DBF_INFO("%s(%d,%d) queue device config on\n", __func__, ac->id, dom); + ap_send_config_uevent(&aq->ap_dev, aq->config); goto put_dev_and_continue; } /* handle other error states */ @@ -1584,7 +1656,7 @@ static inline void ap_scan_adapter(int ap) unsigned int func; struct device *dev; struct ap_card *ac; - int rc, dom, depth, type, comp_type; + int rc, dom, depth, type, comp_type, ml; /* Is there currently a card device for this adapter ? 
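The new ap_send_config_uevent()/ap_send_online_uevent() helpers, the COMPLETECOUNT variable and the MODE= handling in ap_uevent() all follow one pattern: format a few KEY=value strings and hand a NULL-terminated envp array to kobject_uevent_env(). A host-side sketch of just the string assembly; the print helper merely stands in for the kernel call:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for kobject_uevent_env(): print the environment
 * strings that would accompany the KOBJ_CHANGE event. */
static void fake_uevent(char **envp)
{
        for (int i = 0; envp[i]; i++)
                printf("  %s\n", envp[i]);
}

static void send_config_uevent(const char *dev, bool cfg)
{
        char buf[16];
        char *envp[] = { buf, NULL };

        snprintf(buf, sizeof(buf), "CONFIG=%d", cfg ? 1 : 0);
        printf("%s: KOBJ_CHANGE\n", dev);
        fake_uevent(envp);
}

static void send_bindings_complete_uevent(unsigned long long count)
{
        char buf[32];
        char *envp[] = { "BINDINGS=complete", buf, NULL };

        snprintf(buf, sizeof(buf), "COMPLETECOUNT=%llu", count);
        printf("ap bus: KOBJ_CHANGE\n");
        fake_uevent(envp);
}

int main(void)
{
        send_config_uevent("card04", true);
        send_bindings_complete_uevent(1);
        return 0;
}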
*/ dev = bus_find_device(&ap_bus_type, NULL, @@ -1613,7 +1685,8 @@ static inline void ap_scan_adapter(int ap) for (dom = 0; dom <= ap_max_domain_id; dom++) if (ap_test_config_usage_domain(dom)) { qid = AP_MKQID(ap, dom); - if (ap_queue_info(qid, &type, &func, &depth, &decfg)) + if (ap_queue_info(qid, &type, &func, + &depth, &ml, &decfg)) break; } if (dom > ap_max_domain_id) { @@ -1663,12 +1736,13 @@ static inline void ap_scan_adapter(int ap) ac->config = false; AP_DBF_INFO("%s(%d) card device config off\n", __func__, ap); - + ap_send_config_uevent(&ac->ap_dev, ac->config); } if (!decfg && !ac->config) { ac->config = true; AP_DBF_INFO("%s(%d) card device config on\n", __func__, ap); + ap_send_config_uevent(&ac->ap_dev, ac->config); } } } @@ -1681,7 +1755,7 @@ static inline void ap_scan_adapter(int ap) __func__, ap, type); return; } - ac = ap_card_create(ap, depth, type, comp_type, func); + ac = ap_card_create(ap, depth, type, comp_type, func, ml); if (!ac) { AP_DBF_WARN("%s(%d) ap_card_create() failed\n", __func__, ap); @@ -1692,6 +1766,12 @@ static inline void ap_scan_adapter(int ap) dev->bus = &ap_bus_type; dev->parent = ap_root_device; dev_set_name(dev, "card%02x", ap); + /* maybe enlarge ap_max_msg_size to support this card */ + if (ac->maxmsgsize > atomic_read(&ap_max_msg_size)) { + atomic_set(&ap_max_msg_size, ac->maxmsgsize); + AP_DBF_INFO("%s(%d) ap_max_msg_size update to %d byte\n", + __func__, ap, atomic_read(&ap_max_msg_size)); + } /* Register the new card device with AP bus */ rc = device_register(dev); if (rc) { diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h index 472efd3a755c..8f18abdbbc2b 100644 --- a/drivers/s390/crypto/ap_bus.h +++ b/drivers/s390/crypto/ap_bus.h @@ -25,8 +25,11 @@ #define AP_RESET_TIMEOUT (HZ*0.7) /* Time in ticks for reset timeouts. */ #define AP_CONFIG_TIME 30 /* Time in seconds between AP bus rescans. */ #define AP_POLL_TIME 1 /* Time in ticks between receive polls. */ +#define AP_DEFAULT_MAX_MSG_SIZE (12 * 1024) +#define AP_TAPQ_ML_FIELD_CHUNK_SIZE (4096) extern int ap_domain_index; +extern atomic_t ap_max_msg_size; extern DECLARE_HASHTABLE(ap_queues, 8); extern spinlock_t ap_queues_lock; @@ -167,6 +170,7 @@ struct ap_card { unsigned int functions; /* AP device function bitfield. */ int queue_depth; /* AP queue depth.*/ int id; /* AP card number. */ + unsigned int maxmsgsize; /* AP msg limit for this card */ bool config; /* configured state */ atomic64_t total_request_count; /* # requests ever for this AP device.*/ }; @@ -228,7 +232,8 @@ struct ap_message { struct list_head list; /* Request queueing. */ unsigned long long psmid; /* Message id. */ void *msg; /* Pointer to message buffer. */ - unsigned int len; /* Message length. 
*/ + unsigned int len; /* actual msg len in msg buffer */ + unsigned int bufsize; /* allocated msg buffer size */ u16 flags; /* Flags, see AP_MSG_FLAG_xxx */ struct ap_fi fi; /* Failure Injection cmd */ int rc; /* Return code for this message */ @@ -290,8 +295,8 @@ void ap_queue_prepare_remove(struct ap_queue *aq); void ap_queue_remove(struct ap_queue *aq); void ap_queue_init_state(struct ap_queue *aq); -struct ap_card *ap_card_create(int id, int queue_depth, int raw_device_type, - int comp_device_type, unsigned int functions); +struct ap_card *ap_card_create(int id, int queue_depth, int raw_type, + int comp_type, unsigned int functions, int ml); struct ap_perms { unsigned long ioctlm[BITS_TO_LONGS(AP_IOCTLS)]; @@ -362,4 +367,7 @@ int ap_parse_mask_str(const char *str, */ int ap_wait_init_apqn_bindings_complete(unsigned long timeout); +void ap_send_config_uevent(struct ap_device *ap_dev, bool cfg); +void ap_send_online_uevent(struct ap_device *ap_dev, int online); + #endif /* _AP_BUS_H_ */ diff --git a/drivers/s390/crypto/ap_card.c b/drivers/s390/crypto/ap_card.c index d98bdd28d23e..196325a66662 100644 --- a/drivers/s390/crypto/ap_card.c +++ b/drivers/s390/crypto/ap_card.c @@ -167,11 +167,23 @@ static ssize_t config_store(struct device *dev, ac->config = cfg ? true : false; + ap_send_config_uevent(&ac->ap_dev, ac->config); + return count; } static DEVICE_ATTR_RW(config); +static ssize_t max_msg_size_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ap_card *ac = to_ap_card(dev); + + return scnprintf(buf, PAGE_SIZE, "%u\n", ac->maxmsgsize); +} + +static DEVICE_ATTR_RO(max_msg_size); + static struct attribute *ap_card_dev_attrs[] = { &dev_attr_hwtype.attr, &dev_attr_raw_hwtype.attr, @@ -182,6 +194,7 @@ static struct attribute *ap_card_dev_attrs[] = { &dev_attr_pendingq_count.attr, &dev_attr_modalias.attr, &dev_attr_config.attr, + &dev_attr_max_msg_size.attr, NULL }; @@ -207,7 +220,7 @@ static void ap_card_device_release(struct device *dev) } struct ap_card *ap_card_create(int id, int queue_depth, int raw_type, - int comp_type, unsigned int functions) + int comp_type, unsigned int functions, int ml) { struct ap_card *ac; @@ -221,5 +234,8 @@ struct ap_card *ap_card_create(int id, int queue_depth, int raw_type, ac->queue_depth = queue_depth; ac->functions = functions; ac->id = id; + ac->maxmsgsize = ml > 0 ? + ml * AP_TAPQ_ML_FIELD_CHUNK_SIZE : AP_DEFAULT_MAX_MSG_SIZE; + return ac; } diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c index ecefc25eff0c..669f96fddad6 100644 --- a/drivers/s390/crypto/ap_queue.c +++ b/drivers/s390/crypto/ap_queue.c @@ -101,7 +101,7 @@ int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length) if (msg == NULL) return -EINVAL; - status = ap_dqap(qid, psmid, msg, length); + status = ap_dqap(qid, psmid, msg, length, NULL, NULL); switch (status.response_code) { case AP_RESPONSE_NORMAL: return 0; @@ -135,12 +135,28 @@ static struct ap_queue_status ap_sm_recv(struct ap_queue *aq) { struct ap_queue_status status; struct ap_message *ap_msg; + bool found = false; + size_t reslen; + unsigned long resgr0 = 0; + int parts = 0; + + /* + * DQAP loop until response code and resgr0 indicate that + * the msg is totally received. As we use the very same buffer + * the msg is overwritten with each invocation. That's intended + * and the receiver of the msg is informed with a msg rc code + * of EMSGSIZE in such a case. 
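The comment above describes the new receive path: DQAP is repeated into the one reply buffer until the device reports the message as complete, and if that took more than one part the buffer only holds the last piece, so the waiting request gets -EMSGSIZE rather than a corrupted reply. A small simulation of that drain-and-flag loop; the mocked dqap() stands in for the real ap_dqap() call and its response-code/GR0 handshake:

#include <errno.h>
#include <stdio.h>

/* Mocked device: the reply arrives in 'total_parts' pieces; 'more' stays
 * set until the last piece has been taken (the driver checks the 0xFF
 * response code together with the returned GR0 instead). */
static int total_parts = 3;

static void mock_dqap(int *more)
{
        static int delivered;

        delivered++;
        *more = delivered < total_parts;
}

static int receive_reply(void)
{
        int parts = 0, more;

        /* Drain the queue; every part overwrites the same reply buffer. */
        do {
                mock_dqap(&more);
                parts++;
        } while (more);

        /* More than one part means the buffer only holds the tail, so the
         * waiting request is answered with -EMSGSIZE, not with bad data. */
        return parts > 1 ? -EMSGSIZE : 0;
}

int main(void)
{
        printf("receive_reply() = %d\n", receive_reply());
        return 0;
}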
+ */ + do { + status = ap_dqap(aq->qid, &aq->reply->psmid, + aq->reply->msg, aq->reply->bufsize, + &reslen, &resgr0); + parts++; + } while (status.response_code == 0xFF && resgr0 != 0); - status = ap_dqap(aq->qid, &aq->reply->psmid, - aq->reply->msg, aq->reply->len); switch (status.response_code) { case AP_RESPONSE_NORMAL: - aq->queue_count--; + aq->queue_count = max_t(int, 0, aq->queue_count - 1); if (aq->queue_count > 0) mod_timer(&aq->timeout, jiffies + aq->request_timeout); @@ -149,9 +165,20 @@ static struct ap_queue_status ap_sm_recv(struct ap_queue *aq) continue; list_del_init(&ap_msg->list); aq->pendingq_count--; - ap_msg->receive(aq, ap_msg, aq->reply); + if (parts > 1) { + ap_msg->rc = -EMSGSIZE; + ap_msg->receive(aq, ap_msg, NULL); + } else { + ap_msg->receive(aq, ap_msg, aq->reply); + } + found = true; break; } + if (!found) { + AP_DBF_WARN("%s unassociated reply psmid=0x%016llx on 0x%02x.%04x\n", + __func__, aq->reply->psmid, + AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid)); + } fallthrough; case AP_RESPONSE_NO_PENDING_REPLY: if (!status.queue_empty || aq->queue_count <= 0) @@ -232,7 +259,7 @@ static enum ap_sm_wait ap_sm_write(struct ap_queue *aq) ap_msg->flags & AP_MSG_FLAG_SPECIAL); switch (status.response_code) { case AP_RESPONSE_NORMAL: - aq->queue_count++; + aq->queue_count = max_t(int, 1, aq->queue_count + 1); if (aq->queue_count == 1) mod_timer(&aq->timeout, jiffies + aq->request_timeout); list_move_tail(&ap_msg->list, &aq->pendingq); diff --git a/drivers/s390/crypto/vfio_ap_drv.c b/drivers/s390/crypto/vfio_ap_drv.c index 7dc72cb718b0..4d2556bc7fe5 100644 --- a/drivers/s390/crypto/vfio_ap_drv.c +++ b/drivers/s390/crypto/vfio_ap_drv.c @@ -22,8 +22,6 @@ MODULE_AUTHOR("IBM Corporation"); MODULE_DESCRIPTION("VFIO AP device driver, Copyright IBM Corp. 2018"); MODULE_LICENSE("GPL v2"); -static struct ap_driver vfio_ap_drv; - struct ap_matrix_dev *matrix_dev; /* Only type 10 adapters (CEX4 and later) are supported @@ -80,6 +78,12 @@ static void vfio_ap_queue_dev_remove(struct ap_device *apdev) mutex_unlock(&matrix_dev->lock); } +static struct ap_driver vfio_ap_drv = { + .probe = vfio_ap_queue_dev_probe, + .remove = vfio_ap_queue_dev_remove, + .ids = ap_queue_ids, +}; + static void vfio_ap_matrix_dev_release(struct device *dev) { struct ap_matrix_dev *matrix_dev = dev_get_drvdata(dev); @@ -181,11 +185,6 @@ static int __init vfio_ap_init(void) if (ret) return ret; - memset(&vfio_ap_drv, 0, sizeof(vfio_ap_drv)); - vfio_ap_drv.probe = vfio_ap_queue_dev_probe; - vfio_ap_drv.remove = vfio_ap_queue_dev_remove; - vfio_ap_drv.ids = ap_queue_ids; - ret = ap_driver_register(&vfio_ap_drv, THIS_MODULE, VFIO_AP_DRV_NAME); if (ret) { vfio_ap_matrix_dev_destroy(); diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c index b2c7e10dfdcd..122c85c22469 100644 --- a/drivers/s390/crypto/vfio_ap_ops.c +++ b/drivers/s390/crypto/vfio_ap_ops.c @@ -366,16 +366,6 @@ static int vfio_ap_mdev_remove(struct mdev_device *mdev) struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev); mutex_lock(&matrix_dev->lock); - - /* - * If the KVM pointer is in flux or the guest is running, disallow - * un-assignment of control domain. 
- */ - if (matrix_mdev->kvm_busy || matrix_mdev->kvm) { - mutex_unlock(&matrix_dev->lock); - return -EBUSY; - } - vfio_ap_mdev_reset_queues(mdev); list_del(&matrix_mdev->node); kfree(matrix_mdev); diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c index 52eaf51c9bb6..529ffe26ea9d 100644 --- a/drivers/s390/crypto/zcrypt_api.c +++ b/drivers/s390/crypto/zcrypt_api.c @@ -59,7 +59,6 @@ MODULE_PARM_DESC(hwrng_seed, "Turn on/off hwrng auto seed, default is 1 (on)."); DEFINE_SPINLOCK(zcrypt_list_lock); LIST_HEAD(zcrypt_card_list); -int zcrypt_device_count; static atomic_t zcrypt_open_count = ATOMIC_INIT(0); static atomic_t zcrypt_rescan_count = ATOMIC_INIT(0); @@ -901,6 +900,9 @@ static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms, if (xcRB->user_defined != AUTOSELECT && xcRB->user_defined != zc->card->id) continue; + /* check if request size exceeds card max msg size */ + if (ap_msg.len > zc->card->maxmsgsize) + continue; /* check if device node has admission for this card */ if (!zcrypt_check_card(perms, zc->card->id)) continue; @@ -1069,6 +1071,9 @@ static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms, if (targets && !is_desired_ep11_card(zc->card->id, target_num, targets)) continue; + /* check if request size exceeds card max msg size */ + if (ap_msg.len > zc->card->maxmsgsize) + continue; /* check if device node has admission for this card */ if (!zcrypt_check_card(perms, zc->card->id)) continue; diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h index 16219efb2f61..93e77e83ad14 100644 --- a/drivers/s390/crypto/zcrypt_api.h +++ b/drivers/s390/crypto/zcrypt_api.h @@ -124,7 +124,6 @@ struct zcrypt_queue { extern atomic_t zcrypt_rescan_req; extern spinlock_t zcrypt_list_lock; -extern int zcrypt_device_count; extern struct list_head zcrypt_card_list; #define for_each_zcrypt_card(_zc) \ @@ -146,7 +145,7 @@ void zcrypt_queue_get(struct zcrypt_queue *); int zcrypt_queue_put(struct zcrypt_queue *); int zcrypt_queue_register(struct zcrypt_queue *); void zcrypt_queue_unregister(struct zcrypt_queue *); -void zcrypt_queue_force_online(struct zcrypt_queue *, int); +bool zcrypt_queue_force_online(struct zcrypt_queue *zq, int online); int zcrypt_rng_device_add(void); void zcrypt_rng_device_remove(void); diff --git a/drivers/s390/crypto/zcrypt_card.c b/drivers/s390/crypto/zcrypt_card.c index 09fe6bb8880b..40fd5d37d26a 100644 --- a/drivers/s390/crypto/zcrypt_card.c +++ b/drivers/s390/crypto/zcrypt_card.c @@ -64,7 +64,8 @@ static ssize_t online_store(struct device *dev, struct ap_card *ac = to_ap_card(dev); struct zcrypt_card *zc = ac->private; struct zcrypt_queue *zq; - int online, id; + int online, id, i = 0, maxzqs = 0; + struct zcrypt_queue **zq_uelist = NULL; if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1) return -EINVAL; @@ -77,10 +78,35 @@ static ssize_t online_store(struct device *dev, ZCRYPT_DBF(DBF_INFO, "card=%02x online=%d\n", id, online); + ap_send_online_uevent(&ac->ap_dev, online); + spin_lock(&zcrypt_list_lock); + /* + * As we are in atomic context here, directly sending uevents + * does not work. So collect the zqueues in a dynamic array + * and process them after zcrypt_list_lock release. As we get/put + * the zqueue objects, we make sure they exist after lock release. 
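The comment above (zcrypt_card's online_store) explains the two-phase approach: uevents cannot be sent while zcrypt_list_lock is held in atomic context, so the queues whose state actually changed are collected into a GFP_ATOMIC array, each with a reference taken, and the uevents are emitted after the lock is released. A user-space sketch of the same pattern with a pthread mutex in place of the spinlock; names and data are invented:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

struct queue { int id; int online; };

static struct queue queues[] = { { 4, 0 }, { 5, 0 }, { 6, 1 } };
#define NQUEUES (sizeof(queues) / sizeof(queues[0]))

static void set_online_all(int online)
{
        struct queue **changed;
        size_t i, n = 0;

        /* Phase 1: under the lock only flip the state and remember which
         * queues actually changed (GFP_ATOMIC allocation in the driver). */
        pthread_mutex_lock(&list_lock);
        changed = calloc(NQUEUES + 1, sizeof(*changed));
        for (i = 0; i < NQUEUES; i++) {
                if (!!queues[i].online != !!online) {
                        queues[i].online = online;
                        if (changed)
                                changed[n++] = &queues[i];  /* reference taken here */
                }
        }
        pthread_mutex_unlock(&list_lock);

        /* Phase 2: after the unlock do the work that may sleep (uevents). */
        for (i = 0; changed && changed[i]; i++)
                printf("queue %d: ONLINE=%d uevent\n", changed[i]->id, online);
        free(changed);
}

int main(void)
{
        set_online_all(1);
        return 0;
}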
+ */ + list_for_each_entry(zq, &zc->zqueues, list) + maxzqs++; + if (maxzqs > 0) + zq_uelist = kcalloc(maxzqs + 1, sizeof(zq), GFP_ATOMIC); list_for_each_entry(zq, &zc->zqueues, list) - zcrypt_queue_force_online(zq, online); + if (zcrypt_queue_force_online(zq, online)) + if (zq_uelist) { + zcrypt_queue_get(zq); + zq_uelist[i++] = zq; + } spin_unlock(&zcrypt_list_lock); + if (zq_uelist) { + for (i = 0; zq_uelist[i]; i++) { + zq = zq_uelist[i]; + ap_send_online_uevent(&zq->queue->ap_dev, online); + zcrypt_queue_put(zq); + } + kfree(zq_uelist); + } + return count; } diff --git a/drivers/s390/crypto/zcrypt_ccamisc.c b/drivers/s390/crypto/zcrypt_ccamisc.c index d68c0ed5e0dd..bc34bedf9db8 100644 --- a/drivers/s390/crypto/zcrypt_ccamisc.c +++ b/drivers/s390/crypto/zcrypt_ccamisc.c @@ -295,7 +295,7 @@ static inline void prep_xcrb(struct ica_xcRB *pxcrb, * Generate (random) CCA AES DATA secure key. */ int cca_genseckey(u16 cardnr, u16 domain, - u32 keybitsize, u8 seckey[SECKEYBLOBSIZE]) + u32 keybitsize, u8 *seckey) { int i, rc, keysize; int seckeysize; @@ -330,7 +330,7 @@ int cca_genseckey(u16 cardnr, u16 domain, struct { u16 toklen; u16 tokattr; - u8 tok[0]; + u8 tok[]; /* ... some more data ... */ } keyblock; } lv3; @@ -438,7 +438,7 @@ EXPORT_SYMBOL(cca_genseckey); * Generate an CCA AES DATA secure key with given key value. */ int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize, - const u8 *clrkey, u8 seckey[SECKEYBLOBSIZE]) + const u8 *clrkey, u8 *seckey) { int rc, keysize, seckeysize; u8 *mem, *ptr; @@ -471,7 +471,7 @@ int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize, struct { u16 toklen; u16 tokattr; - u8 tok[0]; + u8 tok[]; /* ... some more data ... */ } keyblock; } lv3; @@ -577,8 +577,8 @@ EXPORT_SYMBOL(cca_clr2seckey); * Derive proteced key from an CCA AES DATA secure key. */ int cca_sec2protkey(u16 cardnr, u16 domain, - const u8 seckey[SECKEYBLOBSIZE], - u8 *protkey, u32 *protkeylen, u32 *protkeytype) + const u8 *seckey, u8 *protkey, u32 *protkeylen, + u32 *protkeytype) { int rc; u8 *mem, *ptr; @@ -596,7 +596,7 @@ int cca_sec2protkey(u16 cardnr, u16 domain, u16 len; u16 attr_len; u16 attr_flags; - u8 token[0]; /* cca secure key token */ + u8 token[]; /* cca secure key token */ } lv2; } __packed * preqparm; struct uskrepparm { diff --git a/drivers/s390/crypto/zcrypt_ccamisc.h b/drivers/s390/crypto/zcrypt_ccamisc.h index e7105443d5cb..3513cd8ab9bc 100644 --- a/drivers/s390/crypto/zcrypt_ccamisc.h +++ b/drivers/s390/crypto/zcrypt_ccamisc.h @@ -171,8 +171,8 @@ int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize, * Derive proteced key from an CCA AES DATA secure key. */ int cca_sec2protkey(u16 cardnr, u16 domain, - const u8 seckey[SECKEYBLOBSIZE], - u8 *protkey, u32 *protkeylen, u32 *protkeytype); + const u8 *seckey, u8 *protkey, u32 *protkeylen, + u32 *protkeytype); /* * Generate (random) CCA AES CIPHER secure key. diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c index f4a6d3744241..f518b5fc7e5d 100644 --- a/drivers/s390/crypto/zcrypt_cex4.c +++ b/drivers/s390/crypto/zcrypt_cex4.c @@ -28,9 +28,6 @@ #define CEX4C_MIN_MOD_SIZE 16 /* 256 bits */ #define CEX4C_MAX_MOD_SIZE 512 /* 4096 bits */ -#define CEX4A_MAX_MESSAGE_SIZE MSGTYPE50_CRB3_MAX_MSG_SIZE -#define CEX4C_MAX_MESSAGE_SIZE MSGTYPE06_MAX_MSG_SIZE - /* Waiting time for requests to be processed. * Currently there are some types of request which are not deterministic. * But the maximum time limit managed by the stomper code is set to 60sec. 
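From here on the fixed per-card message sizes (CEX4A/CEX4C_MAX_MESSAGE_SIZE, and later MSGTYPE06_MAX_MSG_SIZE) are replaced by the limit ap_card_create() derives from the TAPQ ml value, which the next hunk reads back through aq->card->maxmsgsize and which the send paths compare against ap_msg.len. The sizing rule itself, with the constants copied from the ap_bus.h hunk:

#include <stdio.h>

#define AP_DEFAULT_MAX_MSG_SIZE      (12 * 1024)
#define AP_TAPQ_ML_FIELD_CHUNK_SIZE  4096

/* ml == 0 means the card reported no limit: fall back to the old 12 KiB. */
static unsigned int card_max_msg_size(int ml)
{
        return ml > 0 ? ml * AP_TAPQ_ML_FIELD_CHUNK_SIZE
                      : AP_DEFAULT_MAX_MSG_SIZE;
}

int main(void)
{
        printf("ml=0  -> %u bytes\n", card_max_msg_size(0));   /* 12288 */
        printf("ml=15 -> %u bytes\n", card_max_msg_size(15));  /* 61440 */
        return 0;
}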
@@ -605,19 +602,19 @@ static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev) int rc; if (ap_test_bit(&aq->card->functions, AP_FUNC_ACCEL)) { - zq = zcrypt_queue_alloc(CEX4A_MAX_MESSAGE_SIZE); + zq = zcrypt_queue_alloc(aq->card->maxmsgsize); if (!zq) return -ENOMEM; zq->ops = zcrypt_msgtype(MSGTYPE50_NAME, MSGTYPE50_VARIANT_DEFAULT); } else if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO)) { - zq = zcrypt_queue_alloc(CEX4C_MAX_MESSAGE_SIZE); + zq = zcrypt_queue_alloc(aq->card->maxmsgsize); if (!zq) return -ENOMEM; zq->ops = zcrypt_msgtype(MSGTYPE06_NAME, MSGTYPE06_VARIANT_DEFAULT); } else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11)) { - zq = zcrypt_queue_alloc(CEX4C_MAX_MESSAGE_SIZE); + zq = zcrypt_queue_alloc(aq->card->maxmsgsize); if (!zq) return -ENOMEM; zq->ops = zcrypt_msgtype(MSGTYPE06_NAME, diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c index bf14ee445f89..99405472824d 100644 --- a/drivers/s390/crypto/zcrypt_msgtype50.c +++ b/drivers/s390/crypto/zcrypt_msgtype50.c @@ -375,6 +375,7 @@ static int convert_type80(struct zcrypt_queue *zq, AP_QID_CARD(zq->queue->qid), AP_QID_QUEUE(zq->queue->qid), t80h->code); + ap_send_online_uevent(&zq->queue->ap_dev, zq->online); return -EAGAIN; } if (zq->zcard->user_space_type == ZCRYPT_CEX2A) @@ -412,6 +413,7 @@ static int convert_response_cex2a(struct zcrypt_queue *zq, AP_QID_CARD(zq->queue->qid), AP_QID_QUEUE(zq->queue->qid), (int) rtype); + ap_send_online_uevent(&zq->queue->ap_dev, zq->online); return -EAGAIN; } } @@ -440,11 +442,13 @@ static void zcrypt_cex2a_receive(struct ap_queue *aq, goto out; /* ap_msg->rc indicates the error */ t80h = reply->msg; if (t80h->type == TYPE80_RSP_CODE) { - if (aq->ap_dev.device_type == AP_DEVICE_TYPE_CEX2A) - len = min_t(int, CEX2A_MAX_RESPONSE_SIZE, t80h->len); - else - len = min_t(int, CEX3A_MAX_RESPONSE_SIZE, t80h->len); - memcpy(msg->msg, reply->msg, len); + len = t80h->len; + if (len > reply->bufsize || len > msg->bufsize) { + msg->rc = -EMSGSIZE; + } else { + memcpy(msg->msg, reply->msg, len); + msg->len = len; + } } else memcpy(msg->msg, reply->msg, sizeof(error_reply)); out: @@ -467,10 +471,9 @@ static long zcrypt_cex2a_modexpo(struct zcrypt_queue *zq, struct completion work; int rc; - if (zq->zcard->user_space_type == ZCRYPT_CEX2A) - ap_msg->msg = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE, GFP_KERNEL); - else - ap_msg->msg = kmalloc(MSGTYPE50_CRB3_MAX_MSG_SIZE, GFP_KERNEL); + ap_msg->bufsize = (zq->zcard->user_space_type == ZCRYPT_CEX2A) ? + MSGTYPE50_CRB2_MAX_MSG_SIZE : MSGTYPE50_CRB3_MAX_MSG_SIZE; + ap_msg->msg = kmalloc(ap_msg->bufsize, GFP_KERNEL); if (!ap_msg->msg) return -ENOMEM; ap_msg->receive = zcrypt_cex2a_receive; @@ -513,10 +516,9 @@ static long zcrypt_cex2a_modexpo_crt(struct zcrypt_queue *zq, struct completion work; int rc; - if (zq->zcard->user_space_type == ZCRYPT_CEX2A) - ap_msg->msg = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE, GFP_KERNEL); - else - ap_msg->msg = kmalloc(MSGTYPE50_CRB3_MAX_MSG_SIZE, GFP_KERNEL); + ap_msg->bufsize = (zq->zcard->user_space_type == ZCRYPT_CEX2A) ? 
+ MSGTYPE50_CRB2_MAX_MSG_SIZE : MSGTYPE50_CRB3_MAX_MSG_SIZE; + ap_msg->msg = kmalloc(ap_msg->bufsize, GFP_KERNEL); if (!ap_msg->msg) return -ENOMEM; ap_msg->receive = zcrypt_cex2a_receive; diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c index 307f90657d1d..752c6398fcd6 100644 --- a/drivers/s390/crypto/zcrypt_msgtype6.c +++ b/drivers/s390/crypto/zcrypt_msgtype6.c @@ -403,7 +403,7 @@ static int XCRB_msg_to_type6CPRB_msgX(bool userspace, struct ap_message *ap_msg, } __packed * msg = ap_msg->msg; int rcblen = CEIL4(xcRB->request_control_blk_length); - int replylen, req_sumlen, resp_sumlen; + int req_sumlen, resp_sumlen; char *req_data = ap_msg->msg + sizeof(struct type6_hdr) + rcblen; char *function_code; @@ -415,7 +415,7 @@ static int XCRB_msg_to_type6CPRB_msgX(bool userspace, struct ap_message *ap_msg, ap_msg->len = sizeof(struct type6_hdr) + CEIL4(xcRB->request_control_blk_length) + xcRB->request_data_length; - if (ap_msg->len > MSGTYPE06_MAX_MSG_SIZE) + if (ap_msg->len > ap_msg->bufsize) return -EINVAL; /* @@ -435,12 +435,6 @@ static int XCRB_msg_to_type6CPRB_msgX(bool userspace, struct ap_message *ap_msg, xcRB->reply_control_blk_length) return -EINVAL; /* overflow after alignment*/ - replylen = sizeof(struct type86_fmt2_msg) + - CEIL4(xcRB->reply_control_blk_length) + - xcRB->reply_data_length; - if (replylen > MSGTYPE06_MAX_MSG_SIZE) - return -EINVAL; - /* * Overflow check * sum must be greater (or equal) than the largest operand @@ -530,18 +524,13 @@ static int xcrb_msg_to_type6_ep11cprb_msgx(bool userspace, struct ap_message *ap return -EINVAL; /* overflow after alignment*/ /* length checks */ - ap_msg->len = sizeof(struct type6_hdr) + xcRB->req_len; - if (CEIL4(xcRB->req_len) > MSGTYPE06_MAX_MSG_SIZE - - (sizeof(struct type6_hdr))) + ap_msg->len = sizeof(struct type6_hdr) + CEIL4(xcRB->req_len); + if (ap_msg->len > ap_msg->bufsize) return -EINVAL; if (CEIL4(xcRB->resp_len) < xcRB->resp_len) return -EINVAL; /* overflow after alignment*/ - if (CEIL4(xcRB->resp_len) > MSGTYPE06_MAX_MSG_SIZE - - (sizeof(struct type86_fmt2_msg))) - return -EINVAL; - /* prepare type6 header */ msg->hdr = static_type6_ep11_hdr; msg->hdr.ToCardLen1 = xcRB->req_len; @@ -675,6 +664,7 @@ static int convert_type86_ica(struct zcrypt_queue *zq, AP_QID_CARD(zq->queue->qid), AP_QID_QUEUE(zq->queue->qid), (int) service_rc, (int) service_rs); + ap_send_online_uevent(&zq->queue->ap_dev, zq->online); return -EAGAIN; } data = msg->text; @@ -820,6 +810,7 @@ static int convert_response_ica(struct zcrypt_queue *zq, AP_QID_CARD(zq->queue->qid), AP_QID_QUEUE(zq->queue->qid), (int) msg->hdr.type); + ap_send_online_uevent(&zq->queue->ap_dev, zq->online); return -EAGAIN; } } @@ -854,6 +845,7 @@ static int convert_response_xcrb(bool userspace, struct zcrypt_queue *zq, AP_QID_CARD(zq->queue->qid), AP_QID_QUEUE(zq->queue->qid), (int) msg->hdr.type); + ap_send_online_uevent(&zq->queue->ap_dev, zq->online); return -EAGAIN; } } @@ -883,6 +875,7 @@ static int convert_response_ep11_xcrb(bool userspace, struct zcrypt_queue *zq, AP_QID_CARD(zq->queue->qid), AP_QID_QUEUE(zq->queue->qid), (int) msg->hdr.type); + ap_send_online_uevent(&zq->queue->ap_dev, zq->online); return -EAGAIN; } } @@ -913,6 +906,7 @@ static int convert_response_rng(struct zcrypt_queue *zq, AP_QID_CARD(zq->queue->qid), AP_QID_QUEUE(zq->queue->qid), (int) msg->hdr.type); + ap_send_online_uevent(&zq->queue->ap_dev, zq->online); return -EAGAIN; } } @@ -947,13 +941,21 @@ static void zcrypt_msgtype6_receive(struct ap_queue *aq, 
switch (resp_type->type) { case CEXXC_RESPONSE_TYPE_ICA: len = sizeof(struct type86x_reply) + t86r->length - 2; - len = min_t(int, CEXXC_MAX_ICA_RESPONSE_SIZE, len); - memcpy(msg->msg, reply->msg, len); + if (len > reply->bufsize || len > msg->bufsize) { + msg->rc = -EMSGSIZE; + } else { + memcpy(msg->msg, reply->msg, len); + msg->len = len; + } break; case CEXXC_RESPONSE_TYPE_XCRB: len = t86r->fmt2.offset2 + t86r->fmt2.count2; - len = min_t(int, MSGTYPE06_MAX_MSG_SIZE, len); - memcpy(msg->msg, reply->msg, len); + if (len > reply->bufsize || len > msg->bufsize) { + msg->rc = -EMSGSIZE; + } else { + memcpy(msg->msg, reply->msg, len); + msg->len = len; + } break; default: memcpy(msg->msg, &error_reply, sizeof(error_reply)); @@ -994,8 +996,12 @@ static void zcrypt_msgtype6_receive_ep11(struct ap_queue *aq, switch (resp_type->type) { case CEXXC_RESPONSE_TYPE_EP11: len = t86r->fmt2.offset1 + t86r->fmt2.count1; - len = min_t(int, MSGTYPE06_MAX_MSG_SIZE, len); - memcpy(msg->msg, reply->msg, len); + if (len > reply->bufsize || len > msg->bufsize) { + msg->rc = -EMSGSIZE; + } else { + memcpy(msg->msg, reply->msg, len); + msg->len = len; + } break; default: memcpy(msg->msg, &error_reply, sizeof(error_reply)); @@ -1028,6 +1034,7 @@ static long zcrypt_msgtype6_modexpo(struct zcrypt_queue *zq, ap_msg->msg = (void *) get_zeroed_page(GFP_KERNEL); if (!ap_msg->msg) return -ENOMEM; + ap_msg->bufsize = PAGE_SIZE; ap_msg->receive = zcrypt_msgtype6_receive; ap_msg->psmid = (((unsigned long long) current->pid) << 32) + atomic_inc_return(&zcrypt_step); @@ -1075,6 +1082,7 @@ static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_queue *zq, ap_msg->msg = (void *) get_zeroed_page(GFP_KERNEL); if (!ap_msg->msg) return -ENOMEM; + ap_msg->bufsize = PAGE_SIZE; ap_msg->receive = zcrypt_msgtype6_receive; ap_msg->psmid = (((unsigned long long) current->pid) << 32) + atomic_inc_return(&zcrypt_step); @@ -1119,7 +1127,8 @@ unsigned int get_cprb_fc(bool userspace, struct ica_xcRB *xcRB, .type = CEXXC_RESPONSE_TYPE_XCRB, }; - ap_msg->msg = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL); + ap_msg->bufsize = atomic_read(&ap_max_msg_size); + ap_msg->msg = kmalloc(ap_msg->bufsize, GFP_KERNEL); if (!ap_msg->msg) return -ENOMEM; ap_msg->receive = zcrypt_msgtype6_receive; @@ -1176,7 +1185,8 @@ unsigned int get_ep11cprb_fc(bool userspace, struct ep11_urb *xcrb, .type = CEXXC_RESPONSE_TYPE_EP11, }; - ap_msg->msg = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL); + ap_msg->bufsize = atomic_read(&ap_max_msg_size); + ap_msg->msg = kmalloc(ap_msg->bufsize, GFP_KERNEL); if (!ap_msg->msg) return -ENOMEM; ap_msg->receive = zcrypt_msgtype6_receive_ep11; @@ -1272,7 +1282,8 @@ unsigned int get_rng_fc(struct ap_message *ap_msg, int *func_code, .type = CEXXC_RESPONSE_TYPE_XCRB, }; - ap_msg->msg = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL); + ap_msg->bufsize = AP_DEFAULT_MAX_MSG_SIZE; + ap_msg->msg = kmalloc(ap_msg->bufsize, GFP_KERNEL); if (!ap_msg->msg) return -ENOMEM; ap_msg->receive = zcrypt_msgtype6_receive; diff --git a/drivers/s390/crypto/zcrypt_msgtype6.h b/drivers/s390/crypto/zcrypt_msgtype6.h index 0a0bf074206b..155c73514bac 100644 --- a/drivers/s390/crypto/zcrypt_msgtype6.h +++ b/drivers/s390/crypto/zcrypt_msgtype6.h @@ -19,8 +19,6 @@ #define MSGTYPE06_VARIANT_NORNG 1 #define MSGTYPE06_VARIANT_EP11 2 -#define MSGTYPE06_MAX_MSG_SIZE (12*1024) - /** * The type 6 message family is associated with CEXxC/CEXxP cards. 
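The receive callbacks above no longer clamp an oversized reply with min_t(); if the reply does not fit into either the queue's reply buffer or the requester's buffer, the request is failed with -EMSGSIZE instead of being silently truncated. A minimal sketch of that guarded copy; struct and function names are placeholders:

#include <errno.h>
#include <stdio.h>
#include <string.h>

struct msg_buf {
        unsigned char *msg;
        size_t bufsize;   /* allocated size */
        size_t len;       /* valid payload  */
        int rc;
};

/* Copy a device reply of 'len' bytes into 'dst', or flag -EMSGSIZE if it
 * does not fit into either the reply buffer or the caller's buffer. */
static void copy_reply(struct msg_buf *dst, const struct msg_buf *reply,
                       size_t len)
{
        if (len > reply->bufsize || len > dst->bufsize) {
                dst->rc = -EMSGSIZE;
                return;
        }
        memcpy(dst->msg, reply->msg, len);
        dst->len = len;
        dst->rc = 0;
}

int main(void)
{
        unsigned char rbuf[64] = "reply payload", dbuf[16];
        struct msg_buf reply = { rbuf, sizeof(rbuf), 0, 0 };
        struct msg_buf dst = { dbuf, sizeof(dbuf), 0, 0 };

        copy_reply(&dst, &reply, 14);   /* fits */
        printf("rc=%d len=%zu\n", dst.rc, dst.len);
        copy_reply(&dst, &reply, 40);   /* larger than dst.bufsize */
        printf("rc=%d\n", dst.rc);
        return 0;
}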
* diff --git a/drivers/s390/crypto/zcrypt_queue.c b/drivers/s390/crypto/zcrypt_queue.c index c3ffbd26b73f..20f12288a8c1 100644 --- a/drivers/s390/crypto/zcrypt_queue.c +++ b/drivers/s390/crypto/zcrypt_queue.c @@ -70,6 +70,8 @@ static ssize_t online_store(struct device *dev, AP_QID_QUEUE(zq->queue->qid), online); + ap_send_online_uevent(&aq->ap_dev, online); + if (!online) ap_flush_queue(zq->queue); return count; @@ -98,24 +100,28 @@ static const struct attribute_group zcrypt_queue_attr_group = { .attrs = zcrypt_queue_attrs, }; -void zcrypt_queue_force_online(struct zcrypt_queue *zq, int online) +bool zcrypt_queue_force_online(struct zcrypt_queue *zq, int online) { - zq->online = online; - if (!online) - ap_flush_queue(zq->queue); + if (!!zq->online != !!online) { + zq->online = online; + if (!online) + ap_flush_queue(zq->queue); + return true; + } + return false; } -struct zcrypt_queue *zcrypt_queue_alloc(size_t max_response_size) +struct zcrypt_queue *zcrypt_queue_alloc(size_t reply_buf_size) { struct zcrypt_queue *zq; zq = kzalloc(sizeof(struct zcrypt_queue), GFP_KERNEL); if (!zq) return NULL; - zq->reply.msg = kmalloc(max_response_size, GFP_KERNEL); + zq->reply.msg = kmalloc(reply_buf_size, GFP_KERNEL); if (!zq->reply.msg) goto out_free; - zq->reply.len = max_response_size; + zq->reply.bufsize = reply_buf_size; INIT_LIST_HEAD(&zq->list); kref_init(&zq->refcount); return zq; @@ -173,7 +179,6 @@ int zcrypt_queue_register(struct zcrypt_queue *zq) AP_QID_CARD(zq->queue->qid), AP_QID_QUEUE(zq->queue->qid)); list_add_tail(&zq->list, &zc->zqueues); - zcrypt_device_count++; spin_unlock(&zcrypt_list_lock); rc = sysfs_create_group(&zq->queue->ap_dev.device.kobj, @@ -216,7 +221,6 @@ void zcrypt_queue_unregister(struct zcrypt_queue *zq) zc = zq->zcard; spin_lock(&zcrypt_list_lock); list_del_init(&zq->list); - zcrypt_device_count--; spin_unlock(&zcrypt_list_lock); if (zq->ops->rng) zcrypt_rng_device_remove(); diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c index b341075397d9..377e3689d1d4 100644 --- a/drivers/s390/net/ctcm_fsms.c +++ b/drivers/s390/net/ctcm_fsms.c @@ -1454,6 +1454,7 @@ again: get_ccwdev_lock(ch->cdev), saveflags); if (rc != 0) ctcm_ccw_check_rc(ch, rc, "normal RX"); + break; default: break; } diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c index 260860cf3aa1..5a0c2f07a3a2 100644 --- a/drivers/s390/net/netiucv.c +++ b/drivers/s390/net/netiucv.c @@ -118,24 +118,6 @@ static struct device_driver netiucv_driver = { .bus = &iucv_bus, }; -static int netiucv_callback_connreq(struct iucv_path *, u8 *, u8 *); -static void netiucv_callback_connack(struct iucv_path *, u8 *); -static void netiucv_callback_connrej(struct iucv_path *, u8 *); -static void netiucv_callback_connsusp(struct iucv_path *, u8 *); -static void netiucv_callback_connres(struct iucv_path *, u8 *); -static void netiucv_callback_rx(struct iucv_path *, struct iucv_message *); -static void netiucv_callback_txdone(struct iucv_path *, struct iucv_message *); - -static struct iucv_handler netiucv_handler = { - .path_pending = netiucv_callback_connreq, - .path_complete = netiucv_callback_connack, - .path_severed = netiucv_callback_connrej, - .path_quiesced = netiucv_callback_connsusp, - .path_resumed = netiucv_callback_connres, - .message_pending = netiucv_callback_rx, - .message_complete = netiucv_callback_txdone -}; - /** * Per connection profiling data */ @@ -774,6 +756,16 @@ static void conn_action_txdone(fsm_instance *fi, int event, void *arg) } } +static struct iucv_handler 
netiucv_handler = { + .path_pending = netiucv_callback_connreq, + .path_complete = netiucv_callback_connack, + .path_severed = netiucv_callback_connrej, + .path_quiesced = netiucv_callback_connsusp, + .path_resumed = netiucv_callback_connres, + .message_pending = netiucv_callback_rx, + .message_complete = netiucv_callback_txdone, +}; + static void conn_action_connaccept(fsm_instance *fi, int event, void *arg) { struct iucv_event *ev = arg; diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index fd9b869d278e..f4d554ea0c93 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -417,13 +417,17 @@ enum qeth_qdio_out_buffer_state { QETH_QDIO_BUF_EMPTY, /* Filled by driver; owned by hardware in order to be sent. */ QETH_QDIO_BUF_PRIMED, - /* Discovered by the TX completion code: */ - QETH_QDIO_BUF_PENDING, - /* Finished by the TX completion code: */ - QETH_QDIO_BUF_NEED_QAOB, - /* Received QAOB notification on CQ: */ - QETH_QDIO_BUF_QAOB_OK, - QETH_QDIO_BUF_QAOB_ERROR, +}; + +enum qeth_qaob_state { + QETH_QAOB_ISSUED, + QETH_QAOB_PENDING, + QETH_QAOB_DONE, +}; + +struct qeth_qaob_priv1 { + unsigned int state; + u8 queue_no; }; struct qeth_qdio_out_buffer { @@ -433,9 +437,8 @@ struct qeth_qdio_out_buffer { unsigned int frames; unsigned int bytes; struct sk_buff_head skb_list; - int is_header[QDIO_MAX_ELEMENTS_PER_BUFFER]; + DECLARE_BITMAP(from_kmem_cache, QDIO_MAX_ELEMENTS_PER_BUFFER); - struct qeth_qdio_out_q *q; struct list_head list_entry; struct qaob *aob; }; @@ -483,6 +486,7 @@ struct qeth_out_q_stats { u64 stopped; u64 doorbell; u64 coal_frames; + u64 completion_irq; u64 completion_yield; u64 completion_timer; @@ -526,6 +530,7 @@ struct qeth_qdio_out_q { unsigned int coalesce_usecs; unsigned int max_coalesced_frames; + unsigned int rescan_usecs; }; #define qeth_for_each_output_queue(card, q, i) \ @@ -612,7 +617,6 @@ struct qeth_channel { struct ccw_device *ccwdev; struct qeth_cmd_buffer *active_cmd; enum qeth_channel_states state; - atomic_t irq_pending; }; struct qeth_reply { @@ -662,11 +666,6 @@ static inline struct ccw1 *__ccw_from_cmd(struct qeth_cmd_buffer *iob) return (struct ccw1 *)(iob->data + ALIGN(iob->length, 8)); } -static inline bool qeth_trylock_channel(struct qeth_channel *channel) -{ - return atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0; -} - /** * OSA card related definitions */ @@ -886,13 +885,24 @@ static inline bool qeth_card_hw_is_reachable(struct qeth_card *card) return card->state == CARD_STATE_SOFTSETUP; } +static inline bool qeth_use_tx_irqs(struct qeth_card *card) +{ + return !IS_IQD(card); +} + static inline void qeth_unlock_channel(struct qeth_card *card, struct qeth_channel *channel) { - atomic_set(&channel->irq_pending, 0); + xchg(&channel->active_cmd, NULL); wake_up(&card->wait_q); } +static inline bool qeth_trylock_channel(struct qeth_channel *channel, + struct qeth_cmd_buffer *cmd) +{ + return cmpxchg(&channel->active_cmd, NULL, cmd) == NULL; +} + struct qeth_trap_id { __u16 lparnr; char vmname[8]; diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index a1f08e9aa064..62f88ccbd03f 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -70,9 +70,6 @@ static void qeth_issue_next_read_cb(struct qeth_card *card, unsigned int data_length); static int qeth_qdio_establish(struct qeth_card *); static void qeth_free_qdio_queues(struct qeth_card *card); -static void qeth_notify_skbs(struct qeth_qdio_out_q *queue, - struct qeth_qdio_out_buffer *buf, - 
enum iucv_tx_notify notification); static void qeth_close_dev_handler(struct work_struct *work) { @@ -434,65 +431,6 @@ static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15, return n; } -static void qeth_qdio_handle_aob(struct qeth_card *card, - unsigned long phys_aob_addr) -{ - enum qeth_qdio_out_buffer_state new_state = QETH_QDIO_BUF_QAOB_OK; - struct qaob *aob; - struct qeth_qdio_out_buffer *buffer; - enum iucv_tx_notify notification; - struct qeth_qdio_out_q *queue; - unsigned int i; - - aob = (struct qaob *) phys_to_virt(phys_aob_addr); - QETH_CARD_TEXT(card, 5, "haob"); - QETH_CARD_TEXT_(card, 5, "%lx", phys_aob_addr); - buffer = (struct qeth_qdio_out_buffer *) aob->user1; - QETH_CARD_TEXT_(card, 5, "%lx", aob->user1); - - if (aob->aorc) { - QETH_CARD_TEXT_(card, 2, "aorc%02X", aob->aorc); - new_state = QETH_QDIO_BUF_QAOB_ERROR; - } - - switch (atomic_xchg(&buffer->state, new_state)) { - case QETH_QDIO_BUF_PRIMED: - /* Faster than TX completion code, let it handle the async - * completion for us. It will also recycle the QAOB. - */ - break; - case QETH_QDIO_BUF_PENDING: - /* TX completion code is active and will handle the async - * completion for us. It will also recycle the QAOB. - */ - break; - case QETH_QDIO_BUF_NEED_QAOB: - /* TX completion code is already finished. */ - notification = qeth_compute_cq_notification(aob->aorc, 1); - qeth_notify_skbs(buffer->q, buffer, notification); - - /* Free dangling allocations. The attached skbs are handled by - * qeth_tx_complete_pending_bufs(), and so is the QAOB. - */ - for (i = 0; - i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card); - i++) { - void *data = phys_to_virt(aob->sba[i]); - - if (data && buffer->is_header[i]) - kmem_cache_free(qeth_core_header_cache, data); - buffer->is_header[i] = 0; - } - - queue = buffer->q; - atomic_set(&buffer->state, QETH_QDIO_BUF_EMPTY); - napi_schedule(&queue->napi); - break; - default: - WARN_ON_ONCE(1); - } -} - static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len, void *data) { @@ -1268,7 +1206,6 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, iob = (struct qeth_cmd_buffer *) (addr_t)intparm; } - channel->active_cmd = NULL; qeth_unlock_channel(card, channel); rc = qeth_check_irb_error(card, cdev, irb); @@ -1353,10 +1290,10 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *q, } } -static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error, +static void qeth_tx_complete_buf(struct qeth_qdio_out_q *queue, + struct qeth_qdio_out_buffer *buf, bool error, int budget) { - struct qeth_qdio_out_q *queue = buf->q; struct sk_buff *skb; /* Empty buffer? */ @@ -1400,17 +1337,18 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, int i; /* is PCI flag set on buffer? 
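Together with the qeth_core.h hunks further up, the qeth_irq() change here stops clearing active_cmd by hand: channel ownership is now expressed by the active_cmd pointer itself, claimed with cmpxchg() and released with xchg(). The same try-lock idiom written with C11 atomics so it can be compiled stand-alone; the types are invented:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct cmd { int id; };

struct channel {
        _Atomic(struct cmd *) active_cmd;   /* NULL means "free" */
};

static bool trylock_channel(struct channel *ch, struct cmd *cmd)
{
        struct cmd *expected = NULL;

        /* Claim the channel only if no other command owns it. */
        return atomic_compare_exchange_strong(&ch->active_cmd, &expected, cmd);
}

static void unlock_channel(struct channel *ch)
{
        /* Release: whoever inspects active_cmd afterwards sees NULL. */
        atomic_exchange(&ch->active_cmd, NULL);
}

int main(void)
{
        struct channel ch = { NULL };
        struct cmd a = { 1 }, b = { 2 };

        printf("cmd a: %s\n", trylock_channel(&ch, &a) ? "locked" : "busy");
        printf("cmd b: %s\n", trylock_channel(&ch, &b) ? "locked" : "busy");
        unlock_channel(&ch);
        printf("cmd b: %s\n", trylock_channel(&ch, &b) ? "locked" : "busy");
        return 0;
}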
*/ - if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ) + if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ) { atomic_dec(&queue->set_pci_flags_count); + QETH_TXQ_STAT_INC(queue, completion_irq); + } - qeth_tx_complete_buf(buf, error, budget); + qeth_tx_complete_buf(queue, buf, error, budget); for (i = 0; i < queue->max_elements; ++i) { void *data = phys_to_virt(buf->buffer->element[i].addr); - if (data && buf->is_header[i]) + if (__test_and_clear_bit(i, buf->from_kmem_cache) && data) kmem_cache_free(qeth_core_header_cache, data); - buf->is_header[i] = 0; } qeth_scrub_qdio_buffer(buf->buffer, queue->max_elements); @@ -1434,14 +1372,30 @@ static void qeth_tx_complete_pending_bufs(struct qeth_card *card, struct qeth_qdio_out_buffer *buf, *tmp; list_for_each_entry_safe(buf, tmp, &queue->pending_bufs, list_entry) { - if (drain || atomic_read(&buf->state) == QETH_QDIO_BUF_EMPTY) { + struct qeth_qaob_priv1 *priv; + struct qaob *aob = buf->aob; + enum iucv_tx_notify notify; + unsigned int i; + + priv = (struct qeth_qaob_priv1 *)&aob->user1; + if (drain || READ_ONCE(priv->state) == QETH_QAOB_DONE) { QETH_CARD_TEXT(card, 5, "fp"); QETH_CARD_TEXT_(card, 5, "%lx", (long) buf); - if (drain) - qeth_notify_skbs(queue, buf, - TX_NOTIFY_GENERALERROR); - qeth_tx_complete_buf(buf, drain, budget); + notify = drain ? TX_NOTIFY_GENERALERROR : + qeth_compute_cq_notification(aob->aorc, 1); + qeth_notify_skbs(queue, buf, notify); + qeth_tx_complete_buf(queue, buf, drain, budget); + + for (i = 0; + i < aob->sb_count && i < queue->max_elements; + i++) { + void *data = phys_to_virt(aob->sba[i]); + + if (test_bit(i, buf->from_kmem_cache) && data) + kmem_cache_free(qeth_core_header_cache, + data); + } list_del(&buf->list_entry); qeth_free_out_buf(buf); @@ -1713,11 +1667,10 @@ static int qeth_stop_channel(struct qeth_channel *channel) rc = ccw_device_set_offline(cdev); spin_lock_irq(get_ccwdev_lock(cdev)); - if (channel->active_cmd) { + if (channel->active_cmd) dev_err(&cdev->dev, "Stopped channel while cmd %px was still active\n", channel->active_cmd); - channel->active_cmd = NULL; - } + cdev->handler = NULL; spin_unlock_irq(get_ccwdev_lock(cdev)); @@ -1730,7 +1683,7 @@ static int qeth_start_channel(struct qeth_channel *channel) int rc; channel->state = CH_STATE_DOWN; - atomic_set(&channel->irq_pending, 0); + xchg(&channel->active_cmd, NULL); spin_lock_irq(get_ccwdev_lock(cdev)); cdev->handler = qeth_irq; @@ -2037,7 +1990,7 @@ static int qeth_send_control_data(struct qeth_card *card, reply->param = reply_param; timeout = wait_event_interruptible_timeout(card->wait_q, - qeth_trylock_channel(channel), + qeth_trylock_channel(channel, iob), timeout); if (timeout <= 0) { qeth_put_cmd(iob); @@ -2057,8 +2010,6 @@ static int qeth_send_control_data(struct qeth_card *card, spin_lock_irq(get_ccwdev_lock(channel->ccwdev)); rc = ccw_device_start_timeout(channel->ccwdev, __ccw_from_cmd(iob), (addr_t) iob, 0, 0, timeout); - if (!rc) - channel->active_cmd = iob; spin_unlock_irq(get_ccwdev_lock(channel->ccwdev)); if (rc) { QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n", @@ -2578,7 +2529,6 @@ static int qeth_alloc_out_buf(struct qeth_qdio_out_q *q, unsigned int bidx, newbuf->buffer = q->qdio_bufs[bidx]; skb_queue_head_init(&newbuf->skb_list); lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key); - newbuf->q = q; atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY); q->bufs[bidx] = newbuf; return 0; @@ -2663,8 +2613,15 @@ static int qeth_alloc_qdio_queues(struct qeth_card 
*card) INIT_LIST_HEAD(&queue->pending_bufs); spin_lock_init(&queue->lock); timer_setup(&queue->timer, qeth_tx_completion_timer, 0); - queue->coalesce_usecs = QETH_TX_COALESCE_USECS; - queue->max_coalesced_frames = QETH_TX_MAX_COALESCED_FRAMES; + if (IS_IQD(card)) { + queue->coalesce_usecs = QETH_TX_COALESCE_USECS; + queue->max_coalesced_frames = QETH_TX_MAX_COALESCED_FRAMES; + queue->rescan_usecs = QETH_TX_TIMER_USECS; + } else { + queue->coalesce_usecs = USEC_PER_SEC; + queue->max_coalesced_frames = 0; + queue->rescan_usecs = 10 * USEC_PER_SEC; + } queue->priority = QETH_QIB_PQUE_PRIO_DEFAULT; } @@ -3601,8 +3558,8 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index, int count) { struct qeth_qdio_out_buffer *buf = queue->bufs[index]; - unsigned int qdio_flags = QDIO_FLAG_SYNC_OUTPUT; struct qeth_card *card = queue->card; + unsigned int frames, usecs; struct qaob *aob = NULL; int rc; int i; @@ -3629,8 +3586,12 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index, if (!buf->aob) buf->aob = qdio_allocate_aob(); if (buf->aob) { + struct qeth_qaob_priv1 *priv; + aob = buf->aob; - aob->user1 = (u64) buf; + priv = (struct qeth_qaob_priv1 *)&aob->user1; + priv->state = QETH_QAOB_ISSUED; + priv->queue_no = queue->queue_no; } } } else { @@ -3658,14 +3619,11 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index, buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ; } } - - if (atomic_read(&queue->set_pci_flags_count)) - qdio_flags |= QDIO_FLAG_PCI_OUT; } QETH_TXQ_STAT_INC(queue, doorbell); - rc = do_QDIO(CARD_DDEV(card), qdio_flags, queue->queue_no, index, count, - aob); + rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_OUTPUT, queue->queue_no, + index, count, aob); switch (rc) { case 0: @@ -3673,17 +3631,20 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index, /* ignore temporary SIGA errors without busy condition */ /* Fake the TX completion interrupt: */ - if (IS_IQD(card)) { - unsigned int frames = READ_ONCE(queue->max_coalesced_frames); - unsigned int usecs = READ_ONCE(queue->coalesce_usecs); + frames = READ_ONCE(queue->max_coalesced_frames); + usecs = READ_ONCE(queue->coalesce_usecs); - if (frames && queue->coalesced_frames >= frames) { - napi_schedule(&queue->napi); - queue->coalesced_frames = 0; - QETH_TXQ_STAT_INC(queue, coal_frames); - } else if (usecs) { - qeth_tx_arm_timer(queue, usecs); - } + if (frames && queue->coalesced_frames >= frames) { + napi_schedule(&queue->napi); + queue->coalesced_frames = 0; + QETH_TXQ_STAT_INC(queue, coal_frames); + } else if (qeth_use_tx_irqs(card) && + atomic_read(&queue->used_buffers) >= 32) { + /* Old behaviour carried over from the qdio layer: */ + napi_schedule(&queue->napi); + QETH_TXQ_STAT_INC(queue, coal_frames); + } else if (usecs) { + qeth_tx_arm_timer(queue, usecs); } break; @@ -3769,6 +3730,18 @@ out: } EXPORT_SYMBOL_GPL(qeth_configure_cq); +static void qeth_qdio_handle_aob(struct qeth_card *card, struct qaob *aob) +{ + struct qeth_qaob_priv1 *priv = (struct qeth_qaob_priv1 *)&aob->user1; + unsigned int queue_no = priv->queue_no; + + BUILD_BUG_ON(sizeof(*priv) > ARRAY_SIZE(aob->user1)); + + if (xchg(&priv->state, QETH_QAOB_DONE) == QETH_QAOB_PENDING && + queue_no < card->qdio.no_out_queues) + napi_schedule(&card->qdio.out_qs[queue_no]->napi); +} + static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err, unsigned int queue, int first_element, int count) @@ -3795,7 +3768,7 @@ static void qeth_qdio_cq_handler(struct qeth_card *card, 
unsigned int qdio_err, buffer->element[e].addr) { unsigned long phys_aob_addr = buffer->element[e].addr; - qeth_qdio_handle_aob(card, phys_aob_addr); + qeth_qdio_handle_aob(card, phys_to_virt(phys_aob_addr)); ++e; } qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER); @@ -3831,36 +3804,14 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev, unsigned long card_ptr) { struct qeth_card *card = (struct qeth_card *) card_ptr; - struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue]; struct net_device *dev = card->dev; - struct netdev_queue *txq; - int i; QETH_CARD_TEXT(card, 6, "qdouhdl"); if (qdio_error & QDIO_ERROR_FATAL) { QETH_CARD_TEXT(card, 2, "achkcond"); netif_tx_stop_all_queues(dev); qeth_schedule_recovery(card); - return; } - - for (i = first_element; i < (first_element + count); ++i) { - struct qeth_qdio_out_buffer *buf = queue->bufs[QDIO_BUFNR(i)]; - - qeth_handle_send_error(card, buf, qdio_error); - qeth_clear_output_buffer(queue, buf, qdio_error, 0); - } - - atomic_sub(count, &queue->used_buffers); - qeth_check_outbound_queue(queue); - - txq = netdev_get_tx_queue(dev, __queue); - /* xmit may have observed the full-condition, but not yet stopped the - * txq. In which case the code below won't trigger. So before returning, - * xmit will re-check the txq's fill level and wake it up if needed. - */ - if (netif_tx_queue_stopped(txq) && !qeth_out_queue_is_full(queue)) - netif_tx_wake_queue(txq); } /** @@ -4101,7 +4052,7 @@ static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf, /* HW header is allocated from cache: */ if ((void *)hdr != skb->data) - buf->is_header[element] = 1; + __set_bit(element, buf->from_kmem_cache); /* HW header was pushed and is contiguous with linear part: */ else if (length > 0 && !PAGE_ALIGNED(data) && (data == (char *)hdr + hd_len)) @@ -5256,7 +5207,6 @@ static int qeth_qdio_establish(struct qeth_card *card) init_data.int_parm = (unsigned long) card; init_data.input_sbal_addr_array = in_sbal_ptrs; init_data.output_sbal_addr_array = out_sbal_ptrs; - init_data.scan_threshold = IS_IQD(card) ? 
0 : 32; if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED, QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) { @@ -5956,9 +5906,10 @@ static unsigned int qeth_rx_poll(struct qeth_card *card, int budget) /* Fetch completed RX buffers: */ if (!card->rx.b_count) { card->rx.qdio_err = 0; - card->rx.b_count = qdio_get_next_buffers( - card->data.ccwdev, 0, &card->rx.b_index, - &card->rx.qdio_err); + card->rx.b_count = qdio_inspect_queue(CARD_DDEV(card), + 0, true, + &card->rx.b_index, + &card->rx.qdio_err); if (card->rx.b_count <= 0) { card->rx.b_count = 0; break; @@ -6022,6 +5973,16 @@ int qeth_poll(struct napi_struct *napi, int budget) work_done = qeth_rx_poll(card, budget); + if (qeth_use_tx_irqs(card)) { + struct qeth_qdio_out_q *queue; + unsigned int i; + + qeth_for_each_output_queue(card, queue, i) { + if (!qeth_out_queue_is_empty(queue)) + napi_schedule(&queue->napi); + } + } + if (card->options.cq == QETH_CQ_ENABLED) qeth_cq_poll(card); @@ -6055,6 +6016,8 @@ static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue, if (qdio_error == QDIO_ERROR_SLSB_PENDING) { struct qaob *aob = buffer->aob; + struct qeth_qaob_priv1 *priv; + enum iucv_tx_notify notify; if (!aob) { netdev_WARN_ONCE(card->dev, @@ -6066,60 +6029,27 @@ static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue, QETH_CARD_TEXT_(card, 5, "pel%u", bidx); - switch (atomic_cmpxchg(&buffer->state, - QETH_QDIO_BUF_PRIMED, - QETH_QDIO_BUF_PENDING)) { - case QETH_QDIO_BUF_PRIMED: - /* We have initial ownership, no QAOB (yet): */ + priv = (struct qeth_qaob_priv1 *)&aob->user1; + /* QAOB hasn't completed yet: */ + if (xchg(&priv->state, QETH_QAOB_PENDING) != QETH_QAOB_DONE) { qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING); - /* Handle race with qeth_qdio_handle_aob(): */ - switch (atomic_xchg(&buffer->state, - QETH_QDIO_BUF_NEED_QAOB)) { - case QETH_QDIO_BUF_PENDING: - /* No concurrent QAOB notification. 
*/ - - /* Prepare the queue slot for immediate re-use: */ - qeth_scrub_qdio_buffer(buffer->buffer, queue->max_elements); - if (qeth_alloc_out_buf(queue, bidx, - GFP_ATOMIC)) { - QETH_CARD_TEXT(card, 2, "outofbuf"); - qeth_schedule_recovery(card); - } - - list_add(&buffer->list_entry, - &queue->pending_bufs); - /* Skip clearing the buffer: */ - return; - case QETH_QDIO_BUF_QAOB_OK: - qeth_notify_skbs(queue, buffer, - TX_NOTIFY_DELAYED_OK); - error = false; - break; - case QETH_QDIO_BUF_QAOB_ERROR: - qeth_notify_skbs(queue, buffer, - TX_NOTIFY_DELAYED_GENERALERROR); - error = true; - break; - default: - WARN_ON_ONCE(1); + /* Prepare the queue slot for immediate re-use: */ + qeth_scrub_qdio_buffer(buffer->buffer, queue->max_elements); + if (qeth_alloc_out_buf(queue, bidx, GFP_ATOMIC)) { + QETH_CARD_TEXT(card, 2, "outofbuf"); + qeth_schedule_recovery(card); } - break; - case QETH_QDIO_BUF_QAOB_OK: - /* qeth_qdio_handle_aob() already received a QAOB: */ - qeth_notify_skbs(queue, buffer, TX_NOTIFY_OK); - error = false; - break; - case QETH_QDIO_BUF_QAOB_ERROR: - /* qeth_qdio_handle_aob() already received a QAOB: */ - qeth_notify_skbs(queue, buffer, TX_NOTIFY_GENERALERROR); - error = true; - break; - default: - WARN_ON_ONCE(1); + list_add(&buffer->list_entry, &queue->pending_bufs); + /* Skip clearing the buffer: */ + return; } + /* QAOB already completed: */ + notify = qeth_compute_cq_notification(aob->aorc, 0); + qeth_notify_skbs(queue, buffer, notify); + error = !!aob->aorc; memset(aob, 0, sizeof(*aob)); } else if (card->options.cq == QETH_CQ_ENABLED) { qeth_notify_skbs(queue, buffer, @@ -6138,7 +6068,10 @@ static int qeth_tx_poll(struct napi_struct *napi, int budget) unsigned int work_done = 0; struct netdev_queue *txq; - txq = netdev_get_tx_queue(dev, qeth_iqd_translate_txq(dev, queue_no)); + if (IS_IQD(card)) + txq = netdev_get_tx_queue(dev, qeth_iqd_translate_txq(dev, queue_no)); + else + txq = netdev_get_tx_queue(dev, queue_no); while (1) { unsigned int start, error, i; @@ -6165,8 +6098,9 @@ static int qeth_tx_poll(struct napi_struct *napi, int budget) &start, &error); if (completed <= 0) { /* Ensure we see TX completion for pending work: */ - if (napi_complete_done(napi, 0)) - qeth_tx_arm_timer(queue, QETH_TX_TIMER_USECS); + if (napi_complete_done(napi, 0) && + !atomic_read(&queue->set_pci_flags_count)) + qeth_tx_arm_timer(queue, queue->rescan_usecs); return 0; } @@ -6179,12 +6113,19 @@ static int qeth_tx_poll(struct napi_struct *napi, int budget) bytes += buffer->bytes; qeth_handle_send_error(card, buffer, error); - qeth_iqd_tx_complete(queue, bidx, error, budget); + if (IS_IQD(card)) + qeth_iqd_tx_complete(queue, bidx, error, budget); + else + qeth_clear_output_buffer(queue, buffer, error, + budget); } - netdev_tx_completed_queue(txq, packets, bytes); atomic_sub(completed, &queue->used_buffers); work_done += completed; + if (IS_IQD(card)) + netdev_tx_completed_queue(txq, packets, bytes); + else + qeth_check_outbound_queue(queue); /* xmit may have observed the full-condition, but not yet * stopped the txq. In which case the code below won't trigger. 
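The rework above replaces the old multi-state buffer->state tracking (QETH_QDIO_BUF_PENDING/NEED_QAOB/QAOB_OK/QAOB_ERROR) with a single word kept in the QAOB's user1 area (struct qeth_qaob_priv1): qeth_flush_buffers() marks it QETH_QAOB_ISSUED, qeth_iqd_tx_complete() moves it to QETH_QAOB_PENDING, and qeth_qdio_handle_aob() moves it to QETH_QAOB_DONE, each side inspecting the previous value returned by xchg() to decide who finishes the buffer (either immediately, or later via qeth_tx_complete_pending_bufs()). The userspace sketch below models only that handshake with C11 atomics; the names (fake_buf, tx_complete, cq_notify, QAOB_*) are illustrative stand-ins, and the real driver work (notifying skbs, recycling the buffer) is reduced to printf().

/*
 * Minimal sketch of the two-party xchg handshake used for buffers that
 * complete with QDIO_ERROR_SLSB_PENDING. Not driver code: the driver
 * keeps this state in struct qeth_qaob_priv1 and uses xchg(), not C11
 * atomics.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

enum { QAOB_ISSUED, QAOB_PENDING, QAOB_DONE };

struct fake_buf {
	atomic_int qaob_state;	/* QAOB_ISSUED once the buffer is flushed */
};

/* CQ side: the asynchronous completion (QAOB) arrives. */
static void *cq_notify(void *arg)
{
	struct fake_buf *buf = arg;

	if (atomic_exchange(&buf->qaob_state, QAOB_DONE) == QAOB_PENDING)
		printf("CQ: TX poll already parked the buffer; wake its NAPI to finish it\n");
	else
		printf("CQ: completed before TX poll looked; TX poll will see QAOB_DONE\n");
	return NULL;
}

/* TX completion side: the SBAL completed with the PENDING error code. */
static void *tx_complete(void *arg)
{
	struct fake_buf *buf = arg;

	if (atomic_exchange(&buf->qaob_state, QAOB_PENDING) != QAOB_DONE)
		printf("TX: QAOB still outstanding; park the buffer on pending_bufs\n");
	else
		printf("TX: QAOB already done; complete the buffer right away\n");
	return NULL;
}

int main(void)
{
	struct fake_buf buf = { .qaob_state = QAOB_ISSUED };
	pthread_t a, b;

	pthread_create(&a, NULL, tx_complete, &buf);
	pthread_create(&b, NULL, cq_notify, &buf);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}

Whichever order the two threads run in, exactly one of them ends up responsible for completing the buffer, which is the property the driver relies on to neither double-complete nor leak a pending TX buffer.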
@@ -7228,6 +7169,8 @@ EXPORT_SYMBOL_GPL(qeth_iqd_select_queue); int qeth_open(struct net_device *dev) { struct qeth_card *card = dev->ml_priv; + struct qeth_qdio_out_q *queue; + unsigned int i; QETH_CARD_TEXT(card, 4, "qethopen"); @@ -7235,16 +7178,11 @@ int qeth_open(struct net_device *dev) netif_tx_start_all_queues(dev); local_bh_disable(); - if (IS_IQD(card)) { - struct qeth_qdio_out_q *queue; - unsigned int i; - - qeth_for_each_output_queue(card, queue, i) { - netif_tx_napi_add(dev, &queue->napi, qeth_tx_poll, - QETH_NAPI_WEIGHT); - napi_enable(&queue->napi); - napi_schedule(&queue->napi); - } + qeth_for_each_output_queue(card, queue, i) { + netif_tx_napi_add(dev, &queue->napi, qeth_tx_poll, + QETH_NAPI_WEIGHT); + napi_enable(&queue->napi); + napi_schedule(&queue->napi); } napi_enable(&card->napi); @@ -7259,6 +7197,8 @@ EXPORT_SYMBOL_GPL(qeth_open); int qeth_stop(struct net_device *dev) { struct qeth_card *card = dev->ml_priv; + struct qeth_qdio_out_q *queue; + unsigned int i; QETH_CARD_TEXT(card, 4, "qethstop"); @@ -7266,24 +7206,17 @@ int qeth_stop(struct net_device *dev) cancel_delayed_work_sync(&card->buffer_reclaim_work); qdio_stop_irq(CARD_DDEV(card)); - if (IS_IQD(card)) { - struct qeth_qdio_out_q *queue; - unsigned int i; - - /* Quiesce the NAPI instances: */ - qeth_for_each_output_queue(card, queue, i) - napi_disable(&queue->napi); + /* Quiesce the NAPI instances: */ + qeth_for_each_output_queue(card, queue, i) + napi_disable(&queue->napi); - /* Stop .ndo_start_xmit, might still access queue->napi. */ - netif_tx_disable(dev); + /* Stop .ndo_start_xmit, might still access queue->napi. */ + netif_tx_disable(dev); - qeth_for_each_output_queue(card, queue, i) { - del_timer_sync(&queue->timer); - /* Queues may get re-allocated, so remove the NAPIs. */ - netif_napi_del(&queue->napi); - } - } else { - netif_tx_disable(dev); + qeth_for_each_output_queue(card, queue, i) { + del_timer_sync(&queue->timer); + /* Queues may get re-allocated, so remove the NAPIs. 
*/ + netif_napi_del(&queue->napi); } return 0; diff --git a/drivers/s390/net/qeth_ethtool.c b/drivers/s390/net/qeth_ethtool.c index 3a51bbff0ffe..2c4cb300a8fc 100644 --- a/drivers/s390/net/qeth_ethtool.c +++ b/drivers/s390/net/qeth_ethtool.c @@ -41,6 +41,7 @@ static const struct qeth_stats txq_stats[] = { QETH_TXQ_STAT("Queue stopped", stopped), QETH_TXQ_STAT("Doorbell", doorbell), QETH_TXQ_STAT("IRQ for frames", coal_frames), + QETH_TXQ_STAT("Completion IRQ", completion_irq), QETH_TXQ_STAT("Completion yield", completion_yield), QETH_TXQ_STAT("Completion timer", completion_timer), }; @@ -79,10 +80,8 @@ static void qeth_add_stat_strings(u8 **data, const char *prefix, { unsigned int i; - for (i = 0; i < size; i++) { - snprintf(*data, ETH_GSTRING_LEN, "%s%s", prefix, stats[i].name); - *data += ETH_GSTRING_LEN; - } + for (i = 0; i < size; i++) + ethtool_sprintf(data, "%s%s", prefix, stats[i].name); } static int qeth_get_sset_count(struct net_device *dev, int stringset) diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index ca44421a6d6e..2abf86c104d5 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -805,8 +805,6 @@ static int qeth_l2_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, if (!netif_device_present(dev)) return -ENODEV; - if (!(priv->brport_hw_features)) - return -EOPNOTSUPP; nlmsg_for_each_attr(attr, nlh, sizeof(struct ifinfomsg), rem1) { if (nla_type(attr) == IFLA_PROTINFO) { @@ -832,6 +830,16 @@ static int qeth_l2_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, return 0; if (!bp_tb[IFLA_BRPORT_LEARNING_SYNC]) return -EINVAL; + if (!(priv->brport_hw_features & BR_LEARNING_SYNC)) { + NL_SET_ERR_MSG_ATTR(extack, bp_tb[IFLA_BRPORT_LEARNING_SYNC], + "Operation not supported by HW"); + return -EOPNOTSUPP; + } + if (!IS_ENABLED(CONFIG_NET_SWITCHDEV)) { + NL_SET_ERR_MSG_ATTR(extack, bp_tb[IFLA_BRPORT_LEARNING_SYNC], + "Requires NET_SWITCHDEV"); + return -EOPNOTSUPP; + } enable = !!nla_get_u8(bp_tb[IFLA_BRPORT_LEARNING_SYNC]); if (enable == !!(priv->brport_features & BR_LEARNING_SYNC)) diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index d308ff744a29..f0d6f205c53c 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -434,6 +434,7 @@ static int qeth_l3_correct_routing_type(struct qeth_card *card, if (qeth_is_ipafunc_supported(card, prot, IPA_OSA_MC_ROUTER)) return 0; + goto out_inval; default: goto out_inval; } diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index d58bf79892f2..9da9b2b2a580 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -856,10 +856,7 @@ void zfcp_scsi_set_prot(struct zfcp_adapter *adapter) */ void zfcp_scsi_dif_sense_error(struct scsi_cmnd *scmd, int ascq) { - scsi_build_sense_buffer(1, scmd->sense_buffer, - ILLEGAL_REQUEST, 0x10, ascq); - set_driver_byte(scmd, DRIVER_SENSE); - scmd->result |= SAM_STAT_CHECK_CONDITION; + scsi_build_sense(scmd, 1, ILLEGAL_REQUEST, 0x10, ascq); set_host_byte(scmd, DID_SOFT_ERROR); } diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c index 544efd4c42f0..b8cd75a872ee 100644 --- a/drivers/s390/scsi/zfcp_sysfs.c +++ b/drivers/s390/scsi/zfcp_sysfs.c @@ -487,6 +487,7 @@ static ssize_t zfcp_sysfs_port_fc_security_show(struct device *dev, if (0 == (status & ZFCP_STATUS_COMMON_OPEN) || 0 == (status & ZFCP_STATUS_COMMON_UNBLOCKED) || 0 == (status & ZFCP_STATUS_PORT_PHYS_OPEN) || + 0 != (status & 
ZFCP_STATUS_PORT_LINK_TEST) || 0 != (status & ZFCP_STATUS_COMMON_ERP_FAILED) || 0 != (status & ZFCP_STATUS_COMMON_ACCESS_BOXED)) i = sprintf(buf, "unknown\n"); diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c index 54e686dca6de..d35e7a3f7067 100644 --- a/drivers/s390/virtio/virtio_ccw.c +++ b/drivers/s390/virtio/virtio_ccw.c @@ -388,31 +388,6 @@ static void virtio_ccw_drop_indicator(struct virtio_ccw_device *vcdev, ccw_device_dma_free(vcdev->cdev, thinint_area, sizeof(*thinint_area)); } -static inline long __do_kvm_notify(struct subchannel_id schid, - unsigned long queue_index, - long cookie) -{ - register unsigned long __nr asm("1") = KVM_S390_VIRTIO_CCW_NOTIFY; - register struct subchannel_id __schid asm("2") = schid; - register unsigned long __index asm("3") = queue_index; - register long __rc asm("2"); - register long __cookie asm("4") = cookie; - - asm volatile ("diag 2,4,0x500\n" - : "=d" (__rc) : "d" (__nr), "d" (__schid), "d" (__index), - "d"(__cookie) - : "memory", "cc"); - return __rc; -} - -static inline long do_kvm_notify(struct subchannel_id schid, - unsigned long queue_index, - long cookie) -{ - diag_stat_inc(DIAG_STAT_X500); - return __do_kvm_notify(schid, queue_index, cookie); -} - static bool virtio_ccw_kvm_notify(struct virtqueue *vq) { struct virtio_ccw_vq_info *info = vq->priv; @@ -421,7 +396,10 @@ static bool virtio_ccw_kvm_notify(struct virtqueue *vq) vcdev = to_vc_device(info->vq->vdev); ccw_device_get_schid(vcdev->cdev, &schid); - info->cookie = do_kvm_notify(schid, vq->index, info->cookie); + BUILD_BUG_ON(sizeof(struct subchannel_id) != sizeof(unsigned int)); + info->cookie = kvm_hypercall3(KVM_S390_VIRTIO_CCW_NOTIFY, + *((unsigned int *)&schid), + vq->index, info->cookie); if (info->cookie < 0) return false; return true; |
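The final hunk drops the open-coded "diag 2,4,0x500" assembly in favour of kvm_hypercall3(), passing struct subchannel_id punned to an unsigned int and guarding that pun with a BUILD_BUG_ON(). The userspace sketch below illustrates the same pattern under stated assumptions: fake_schid, fake_hypercall3() and FAKE_NOTIFY_NR are stand-ins rather than kernel APIs, and it uses memcpy() plus _Static_assert() where the driver uses a pointer cast plus BUILD_BUG_ON().

/*
 * Sketch only: pass a packed 32-bit ID structure through a
 * register-sized hypercall argument, with a compile-time size check.
 */
#include <stdio.h>
#include <string.h>

/* Simplified stand-in for struct subchannel_id: 32 bits of bit-fields. */
struct fake_schid {
	unsigned int cssid  : 8;
	unsigned int ssid   : 2;
	unsigned int sch_no : 16;
	unsigned int pad    : 6;
};

#define FAKE_NOTIFY_NR 0UL	/* placeholder, not the real hypercall number */

/* Stand-in for kvm_hypercall3(); just echoes its arguments. */
static long fake_hypercall3(unsigned long nr, unsigned long p1,
			    unsigned long p2, unsigned long p3)
{
	printf("hypercall %lu: schid=%#lx queue=%lu cookie=%ld\n",
	       nr, p1, p2, (long)p3);
	return 0;	/* pretend the host handed back a fresh cookie */
}

static long notify(struct fake_schid schid, unsigned long queue, long cookie)
{
	unsigned int raw;

	/* Same guard as the driver: the ID must fit in one register word. */
	_Static_assert(sizeof(struct fake_schid) == sizeof(unsigned int),
		       "subchannel id must be 32 bits");

	memcpy(&raw, &schid, sizeof(raw));
	return fake_hypercall3(FAKE_NOTIFY_NR, raw, queue, cookie);
}

int main(void)
{
	struct fake_schid schid = { .cssid = 0xfe, .ssid = 0, .sch_no = 42 };

	return notify(schid, 0, -1) < 0;
}

As in the driver, a negative return value would mean the notification failed, while a non-negative one is carried forward as the cookie for the next notify.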