author | Chris Wilson <chris@chris-wilson.co.uk> | 2010-09-19 12:53:44 -0400
committer | Chris Wilson <chris@chris-wilson.co.uk> | 2010-09-21 06:20:06 -0400
commit | ab6f8e325083f138ce5da8417baf48887d62da3c (patch)
tree | dfb99e29c0a73c1c74c392aa67fff083d82f3d3e /drivers/gpu/drm/i915/intel_ringbuffer.c
parent | a9db5c8fdd8c6e6e966897e05e2c2acd99bcdb6e (diff)
drm/i915/ringbuffer: whitespace cleanup
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r-- | drivers/gpu/drm/i915/intel_ringbuffer.c | 123
1 file changed, 64 insertions, 59 deletions
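The hunks that follow are almost entirely indentation fixes: continuation lines in wrapped parameter lists are re-aligned to sit under the first argument, a few `inline` qualifiers and one stray blank line are dropped, one over-long `intel_ring_emit()` call is wrapped, and `bsd_ring_flush()` and `intel_wrap_ring_buffer()` gain `static` linkage. A minimal before/after sketch of the alignment rule being applied — illustrative only, since the exact tab layout is not visible in this rendering, and the `_before`/`_after` names plus the `unsigned int` stand-in for `u32` are invented so the snippet is self-contained:

```c
/* Forward declarations only, so this sketch compiles on its own. */
struct drm_device;
struct intel_ring_buffer;

/* Before: continuation arguments carry a shallow, unrelated indent. */
void render_ring_flush_before(struct drm_device *dev,
	struct intel_ring_buffer *ring,
	unsigned int invalidate_domains,
	unsigned int flush_domains);

/* After: continuation arguments line up under the first argument,
 * i.e. with the column of the opening parenthesis. */
void render_ring_flush_after(struct drm_device *dev,
			     struct intel_ring_buffer *ring,
			     unsigned int invalidate_domains,
			     unsigned int flush_domains);
```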
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index c9894c2bcd65..1bcea7c85238 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -50,9 +50,9 @@ static u32 i915_gem_get_seqno(struct drm_device *dev)
50 | 50 | ||
51 | static void | 51 | static void |
52 | render_ring_flush(struct drm_device *dev, | 52 | render_ring_flush(struct drm_device *dev, |
53 | struct intel_ring_buffer *ring, | 53 | struct intel_ring_buffer *ring, |
54 | u32 invalidate_domains, | 54 | u32 invalidate_domains, |
55 | u32 flush_domains) | 55 | u32 flush_domains) |
56 | { | 56 | { |
57 | drm_i915_private_t *dev_priv = dev->dev_private; | 57 | drm_i915_private_t *dev_priv = dev->dev_private; |
58 | u32 cmd; | 58 | u32 cmd; |
@@ -128,7 +128,7 @@ static void ring_set_tail(struct drm_device *dev,
128 | } | 128 | } |
129 | 129 | ||
130 | static unsigned int render_ring_get_active_head(struct drm_device *dev, | 130 | static unsigned int render_ring_get_active_head(struct drm_device *dev, |
131 | struct intel_ring_buffer *ring) | 131 | struct intel_ring_buffer *ring) |
132 | { | 132 | { |
133 | drm_i915_private_t *dev_priv = dev->dev_private; | 133 | drm_i915_private_t *dev_priv = dev->dev_private; |
134 | u32 acthd_reg = INTEL_INFO(dev)->gen ? ACTHD_I965 : ACTHD; | 134 | u32 acthd_reg = INTEL_INFO(dev)->gen ? ACTHD_I965 : ACTHD; |
@@ -137,7 +137,7 @@ static unsigned int render_ring_get_active_head(struct drm_device *dev,
137 | } | 137 | } |
138 | 138 | ||
139 | static int init_ring_common(struct drm_device *dev, | 139 | static int init_ring_common(struct drm_device *dev, |
140 | struct intel_ring_buffer *ring) | 140 | struct intel_ring_buffer *ring) |
141 | { | 141 | { |
142 | u32 head; | 142 | u32 head; |
143 | drm_i915_private_t *dev_priv = dev->dev_private; | 143 | drm_i915_private_t *dev_priv = dev->dev_private; |
@@ -204,7 +204,7 @@ static int init_ring_common(struct drm_device *dev,
204 | } | 204 | } |
205 | 205 | ||
206 | static int init_render_ring(struct drm_device *dev, | 206 | static int init_render_ring(struct drm_device *dev, |
207 | struct intel_ring_buffer *ring) | 207 | struct intel_ring_buffer *ring) |
208 | { | 208 | { |
209 | drm_i915_private_t *dev_priv = dev->dev_private; | 209 | drm_i915_private_t *dev_priv = dev->dev_private; |
210 | int ret = init_ring_common(dev, ring); | 210 | int ret = init_ring_common(dev, ring); |
@@ -238,9 +238,9 @@ do { \
238 | */ | 238 | */ |
239 | static u32 | 239 | static u32 |
240 | render_ring_add_request(struct drm_device *dev, | 240 | render_ring_add_request(struct drm_device *dev, |
241 | struct intel_ring_buffer *ring, | 241 | struct intel_ring_buffer *ring, |
242 | struct drm_file *file_priv, | 242 | struct drm_file *file_priv, |
243 | u32 flush_domains) | 243 | u32 flush_domains) |
244 | { | 244 | { |
245 | drm_i915_private_t *dev_priv = dev->dev_private; | 245 | drm_i915_private_t *dev_priv = dev->dev_private; |
246 | u32 seqno; | 246 | u32 seqno; |
@@ -304,7 +304,7 @@ render_ring_add_request(struct drm_device *dev,
304 | 304 | ||
305 | static u32 | 305 | static u32 |
306 | render_ring_get_gem_seqno(struct drm_device *dev, | 306 | render_ring_get_gem_seqno(struct drm_device *dev, |
307 | struct intel_ring_buffer *ring) | 307 | struct intel_ring_buffer *ring) |
308 | { | 308 | { |
309 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 309 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
310 | if (HAS_PIPE_CONTROL(dev)) | 310 | if (HAS_PIPE_CONTROL(dev)) |
@@ -315,7 +315,7 @@ render_ring_get_gem_seqno(struct drm_device *dev,
315 | 315 | ||
316 | static void | 316 | static void |
317 | render_ring_get_user_irq(struct drm_device *dev, | 317 | render_ring_get_user_irq(struct drm_device *dev, |
318 | struct intel_ring_buffer *ring) | 318 | struct intel_ring_buffer *ring) |
319 | { | 319 | { |
320 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 320 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
321 | unsigned long irqflags; | 321 | unsigned long irqflags; |
@@ -332,7 +332,7 @@ render_ring_get_user_irq(struct drm_device *dev,
332 | 332 | ||
333 | static void | 333 | static void |
334 | render_ring_put_user_irq(struct drm_device *dev, | 334 | render_ring_put_user_irq(struct drm_device *dev, |
335 | struct intel_ring_buffer *ring) | 335 | struct intel_ring_buffer *ring) |
336 | { | 336 | { |
337 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 337 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
338 | unsigned long irqflags; | 338 | unsigned long irqflags; |
@@ -349,7 +349,7 @@ render_ring_put_user_irq(struct drm_device *dev,
349 | } | 349 | } |
350 | 350 | ||
351 | static void render_setup_status_page(struct drm_device *dev, | 351 | static void render_setup_status_page(struct drm_device *dev, |
352 | struct intel_ring_buffer *ring) | 352 | struct intel_ring_buffer *ring) |
353 | { | 353 | { |
354 | drm_i915_private_t *dev_priv = dev->dev_private; | 354 | drm_i915_private_t *dev_priv = dev->dev_private; |
355 | if (IS_GEN6(dev)) { | 355 | if (IS_GEN6(dev)) { |
@@ -362,7 +362,7 @@ static void render_setup_status_page(struct drm_device *dev,
362 | 362 | ||
363 | } | 363 | } |
364 | 364 | ||
365 | void | 365 | static void |
366 | bsd_ring_flush(struct drm_device *dev, | 366 | bsd_ring_flush(struct drm_device *dev, |
367 | struct intel_ring_buffer *ring, | 367 | struct intel_ring_buffer *ring, |
368 | u32 invalidate_domains, | 368 | u32 invalidate_domains, |
@@ -374,24 +374,24 @@ bsd_ring_flush(struct drm_device *dev,
374 | intel_ring_advance(dev, ring); | 374 | intel_ring_advance(dev, ring); |
375 | } | 375 | } |
376 | 376 | ||
377 | static inline unsigned int bsd_ring_get_active_head(struct drm_device *dev, | 377 | static unsigned int bsd_ring_get_active_head(struct drm_device *dev, |
378 | struct intel_ring_buffer *ring) | 378 | struct intel_ring_buffer *ring) |
379 | { | 379 | { |
380 | drm_i915_private_t *dev_priv = dev->dev_private; | 380 | drm_i915_private_t *dev_priv = dev->dev_private; |
381 | return I915_READ(BSD_RING_ACTHD); | 381 | return I915_READ(BSD_RING_ACTHD); |
382 | } | 382 | } |
383 | 383 | ||
384 | static int init_bsd_ring(struct drm_device *dev, | 384 | static int init_bsd_ring(struct drm_device *dev, |
385 | struct intel_ring_buffer *ring) | 385 | struct intel_ring_buffer *ring) |
386 | { | 386 | { |
387 | return init_ring_common(dev, ring); | 387 | return init_ring_common(dev, ring); |
388 | } | 388 | } |
389 | 389 | ||
390 | static u32 | 390 | static u32 |
391 | bsd_ring_add_request(struct drm_device *dev, | 391 | bsd_ring_add_request(struct drm_device *dev, |
392 | struct intel_ring_buffer *ring, | 392 | struct intel_ring_buffer *ring, |
393 | struct drm_file *file_priv, | 393 | struct drm_file *file_priv, |
394 | u32 flush_domains) | 394 | u32 flush_domains) |
395 | { | 395 | { |
396 | u32 seqno; | 396 | u32 seqno; |
397 | 397 | ||
@@ -411,7 +411,7 @@ bsd_ring_add_request(struct drm_device *dev,
411 | } | 411 | } |
412 | 412 | ||
413 | static void bsd_setup_status_page(struct drm_device *dev, | 413 | static void bsd_setup_status_page(struct drm_device *dev, |
414 | struct intel_ring_buffer *ring) | 414 | struct intel_ring_buffer *ring) |
415 | { | 415 | { |
416 | drm_i915_private_t *dev_priv = dev->dev_private; | 416 | drm_i915_private_t *dev_priv = dev->dev_private; |
417 | I915_WRITE(BSD_HWS_PGA, ring->status_page.gfx_addr); | 417 | I915_WRITE(BSD_HWS_PGA, ring->status_page.gfx_addr); |
@@ -420,30 +420,30 @@ static void bsd_setup_status_page(struct drm_device *dev,
420 | 420 | ||
421 | static void | 421 | static void |
422 | bsd_ring_get_user_irq(struct drm_device *dev, | 422 | bsd_ring_get_user_irq(struct drm_device *dev, |
423 | struct intel_ring_buffer *ring) | 423 | struct intel_ring_buffer *ring) |
424 | { | 424 | { |
425 | /* do nothing */ | 425 | /* do nothing */ |
426 | } | 426 | } |
427 | static void | 427 | static void |
428 | bsd_ring_put_user_irq(struct drm_device *dev, | 428 | bsd_ring_put_user_irq(struct drm_device *dev, |
429 | struct intel_ring_buffer *ring) | 429 | struct intel_ring_buffer *ring) |
430 | { | 430 | { |
431 | /* do nothing */ | 431 | /* do nothing */ |
432 | } | 432 | } |
433 | 433 | ||
434 | static u32 | 434 | static u32 |
435 | bsd_ring_get_gem_seqno(struct drm_device *dev, | 435 | bsd_ring_get_gem_seqno(struct drm_device *dev, |
436 | struct intel_ring_buffer *ring) | 436 | struct intel_ring_buffer *ring) |
437 | { | 437 | { |
438 | return intel_read_status_page(ring, I915_GEM_HWS_INDEX); | 438 | return intel_read_status_page(ring, I915_GEM_HWS_INDEX); |
439 | } | 439 | } |
440 | 440 | ||
441 | static int | 441 | static int |
442 | bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev, | 442 | bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev, |
443 | struct intel_ring_buffer *ring, | 443 | struct intel_ring_buffer *ring, |
444 | struct drm_i915_gem_execbuffer2 *exec, | 444 | struct drm_i915_gem_execbuffer2 *exec, |
445 | struct drm_clip_rect *cliprects, | 445 | struct drm_clip_rect *cliprects, |
446 | uint64_t exec_offset) | 446 | uint64_t exec_offset) |
447 | { | 447 | { |
448 | uint32_t exec_start; | 448 | uint32_t exec_start; |
449 | exec_start = (uint32_t) exec_offset + exec->batch_start_offset; | 449 | exec_start = (uint32_t) exec_offset + exec->batch_start_offset; |
@@ -458,10 +458,10 @@ bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev,
458 | 458 | ||
459 | static int | 459 | static int |
460 | render_ring_dispatch_gem_execbuffer(struct drm_device *dev, | 460 | render_ring_dispatch_gem_execbuffer(struct drm_device *dev, |
461 | struct intel_ring_buffer *ring, | 461 | struct intel_ring_buffer *ring, |
462 | struct drm_i915_gem_execbuffer2 *exec, | 462 | struct drm_i915_gem_execbuffer2 *exec, |
463 | struct drm_clip_rect *cliprects, | 463 | struct drm_clip_rect *cliprects, |
464 | uint64_t exec_offset) | 464 | uint64_t exec_offset) |
465 | { | 465 | { |
466 | drm_i915_private_t *dev_priv = dev->dev_private; | 466 | drm_i915_private_t *dev_priv = dev->dev_private; |
467 | int nbox = exec->num_cliprects; | 467 | int nbox = exec->num_cliprects; |
@@ -520,7 +520,7 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
520 | } | 520 | } |
521 | 521 | ||
522 | static void cleanup_status_page(struct drm_device *dev, | 522 | static void cleanup_status_page(struct drm_device *dev, |
523 | struct intel_ring_buffer *ring) | 523 | struct intel_ring_buffer *ring) |
524 | { | 524 | { |
525 | drm_i915_private_t *dev_priv = dev->dev_private; | 525 | drm_i915_private_t *dev_priv = dev->dev_private; |
526 | struct drm_gem_object *obj; | 526 | struct drm_gem_object *obj; |
@@ -540,7 +540,7 @@ static void cleanup_status_page(struct drm_device *dev,
540 | } | 540 | } |
541 | 541 | ||
542 | static int init_status_page(struct drm_device *dev, | 542 | static int init_status_page(struct drm_device *dev, |
543 | struct intel_ring_buffer *ring) | 543 | struct intel_ring_buffer *ring) |
544 | { | 544 | { |
545 | drm_i915_private_t *dev_priv = dev->dev_private; | 545 | drm_i915_private_t *dev_priv = dev->dev_private; |
546 | struct drm_gem_object *obj; | 546 | struct drm_gem_object *obj; |
@@ -584,9 +584,8 @@ err:
584 | return ret; | 584 | return ret; |
585 | } | 585 | } |
586 | 586 | ||
587 | | | |
588 | int intel_init_ring_buffer(struct drm_device *dev, | 587 | int intel_init_ring_buffer(struct drm_device *dev, |
589 | struct intel_ring_buffer *ring) | 588 | struct intel_ring_buffer *ring) |
590 | { | 589 | { |
591 | struct drm_i915_private *dev_priv = dev->dev_private; | 590 | struct drm_i915_private *dev_priv = dev->dev_private; |
592 | struct drm_i915_gem_object *obj_priv; | 591 | struct drm_i915_gem_object *obj_priv; |
@@ -659,7 +658,7 @@ err_hws:
659 | } | 658 | } |
660 | 659 | ||
661 | void intel_cleanup_ring_buffer(struct drm_device *dev, | 660 | void intel_cleanup_ring_buffer(struct drm_device *dev, |
662 | struct intel_ring_buffer *ring) | 661 | struct intel_ring_buffer *ring) |
663 | { | 662 | { |
664 | if (ring->gem_object == NULL) | 663 | if (ring->gem_object == NULL) |
665 | return; | 664 | return; |
@@ -672,8 +671,8 @@ void intel_cleanup_ring_buffer(struct drm_device *dev,
672 | cleanup_status_page(dev, ring); | 671 | cleanup_status_page(dev, ring); |
673 | } | 672 | } |
674 | 673 | ||
675 | int intel_wrap_ring_buffer(struct drm_device *dev, | 674 | static int intel_wrap_ring_buffer(struct drm_device *dev, |
676 | struct intel_ring_buffer *ring) | 675 | struct intel_ring_buffer *ring) |
677 | { | 676 | { |
678 | unsigned int *virt; | 677 | unsigned int *virt; |
679 | int rem; | 678 | int rem; |
@@ -699,7 +698,7 @@ int intel_wrap_ring_buffer(struct drm_device *dev,
699 | } | 698 | } |
700 | 699 | ||
701 | int intel_wait_ring_buffer(struct drm_device *dev, | 700 | int intel_wait_ring_buffer(struct drm_device *dev, |
702 | struct intel_ring_buffer *ring, int n) | 701 | struct intel_ring_buffer *ring, int n) |
703 | { | 702 | { |
704 | unsigned long end; | 703 | unsigned long end; |
705 | drm_i915_private_t *dev_priv = dev->dev_private; | 704 | drm_i915_private_t *dev_priv = dev->dev_private; |
@@ -729,7 +728,8 @@ int intel_wait_ring_buffer(struct drm_device *dev,
729 | } | 728 | } |
730 | 729 | ||
731 | void intel_ring_begin(struct drm_device *dev, | 730 | void intel_ring_begin(struct drm_device *dev, |
732 | struct intel_ring_buffer *ring, int num_dwords) | 731 | struct intel_ring_buffer *ring, |
| | 732 | int num_dwords) |
733 | { | 733 | { |
734 | int n = 4*num_dwords; | 734 | int n = 4*num_dwords; |
735 | if (unlikely(ring->tail + n > ring->size)) | 735 | if (unlikely(ring->tail + n > ring->size)) |
@@ -741,16 +741,16 @@ void intel_ring_begin(struct drm_device *dev,
741 | } | 741 | } |
742 | 742 | ||
743 | void intel_ring_advance(struct drm_device *dev, | 743 | void intel_ring_advance(struct drm_device *dev, |
744 | struct intel_ring_buffer *ring) | 744 | struct intel_ring_buffer *ring) |
745 | { | 745 | { |
746 | ring->tail &= ring->size - 1; | 746 | ring->tail &= ring->size - 1; |
747 | ring->set_tail(dev, ring, ring->tail); | 747 | ring->set_tail(dev, ring, ring->tail); |
748 | } | 748 | } |
749 | 749 | ||
750 | void intel_fill_struct(struct drm_device *dev, | 750 | void intel_fill_struct(struct drm_device *dev, |
751 | struct intel_ring_buffer *ring, | 751 | struct intel_ring_buffer *ring, |
752 | void *data, | 752 | void *data, |
753 | unsigned int len) | 753 | unsigned int len) |
754 | { | 754 | { |
755 | unsigned int *virt = ring->virtual_start + ring->tail; | 755 | unsigned int *virt = ring->virtual_start + ring->tail; |
756 | BUG_ON((len&~(4-1)) != 0); | 756 | BUG_ON((len&~(4-1)) != 0); |
@@ -800,16 +800,16 @@ static const struct intel_ring_buffer bsd_ring = {
800 | 800 | ||
801 | 801 | ||
802 | static void gen6_bsd_setup_status_page(struct drm_device *dev, | 802 | static void gen6_bsd_setup_status_page(struct drm_device *dev, |
803 | struct intel_ring_buffer *ring) | 803 | struct intel_ring_buffer *ring) |
804 | { | 804 | { |
805 | drm_i915_private_t *dev_priv = dev->dev_private; | 805 | drm_i915_private_t *dev_priv = dev->dev_private; |
806 | I915_WRITE(GEN6_BSD_HWS_PGA, ring->status_page.gfx_addr); | 806 | I915_WRITE(GEN6_BSD_HWS_PGA, ring->status_page.gfx_addr); |
807 | I915_READ(GEN6_BSD_HWS_PGA); | 807 | I915_READ(GEN6_BSD_HWS_PGA); |
808 | } | 808 | } |
809 | 809 | ||
810 | static inline void gen6_bsd_ring_set_tail(struct drm_device *dev, | 810 | static void gen6_bsd_ring_set_tail(struct drm_device *dev, |
811 | struct intel_ring_buffer *ring, | 811 | struct intel_ring_buffer *ring, |
812 | u32 value) | 812 | u32 value) |
813 | { | 813 | { |
814 | drm_i915_private_t *dev_priv = dev->dev_private; | 814 | drm_i915_private_t *dev_priv = dev->dev_private; |
815 | 815 | ||
@@ -830,17 +830,17 @@ static inline void gen6_bsd_ring_set_tail(struct drm_device *dev,
830 | GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE); | 830 | GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE); |
831 | } | 831 | } |
832 | 832 | ||
833 | static inline unsigned int gen6_bsd_ring_get_active_head(struct drm_device *dev, | 833 | static unsigned int gen6_bsd_ring_get_active_head(struct drm_device *dev, |
834 | struct intel_ring_buffer *ring) | 834 | struct intel_ring_buffer *ring) |
835 | { | 835 | { |
836 | drm_i915_private_t *dev_priv = dev->dev_private; | 836 | drm_i915_private_t *dev_priv = dev->dev_private; |
837 | return I915_READ(GEN6_BSD_RING_ACTHD); | 837 | return I915_READ(GEN6_BSD_RING_ACTHD); |
838 | } | 838 | } |
839 | 839 | ||
840 | static void gen6_bsd_ring_flush(struct drm_device *dev, | 840 | static void gen6_bsd_ring_flush(struct drm_device *dev, |
841 | struct intel_ring_buffer *ring, | 841 | struct intel_ring_buffer *ring, |
842 | u32 invalidate_domains, | 842 | u32 invalidate_domains, |
843 | u32 flush_domains) | 843 | u32 flush_domains) |
844 | { | 844 | { |
845 | intel_ring_begin(dev, ring, 4); | 845 | intel_ring_begin(dev, ring, 4); |
846 | intel_ring_emit(dev, ring, MI_FLUSH_DW); | 846 | intel_ring_emit(dev, ring, MI_FLUSH_DW); |
@@ -852,17 +852,22 @@ static void gen6_bsd_ring_flush(struct drm_device *dev,
852 | 852 | ||
853 | static int | 853 | static int |
854 | gen6_bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev, | 854 | gen6_bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev, |
855 | struct intel_ring_buffer *ring, | 855 | struct intel_ring_buffer *ring, |
856 | struct drm_i915_gem_execbuffer2 *exec, | 856 | struct drm_i915_gem_execbuffer2 *exec, |
857 | struct drm_clip_rect *cliprects, | 857 | struct drm_clip_rect *cliprects, |
858 | uint64_t exec_offset) | 858 | uint64_t exec_offset) |
859 | { | 859 | { |
860 | uint32_t exec_start; | 860 | uint32_t exec_start; |
| | 861 | |
861 | exec_start = (uint32_t) exec_offset + exec->batch_start_offset; | 862 | exec_start = (uint32_t) exec_offset + exec->batch_start_offset; |
| | 863 | |
862 | intel_ring_begin(dev, ring, 2); | 864 | intel_ring_begin(dev, ring, 2); |
863 | intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965); /* bit0-7 is the length on GEN6+ */ | 865 | intel_ring_emit(dev, ring, |
| | 866 | MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965); |
| | 867 | /* bit0-7 is the length on GEN6+ */ |
864 | intel_ring_emit(dev, ring, exec_start); | 868 | intel_ring_emit(dev, ring, exec_start); |
865 | intel_ring_advance(dev, ring); | 869 | intel_ring_advance(dev, ring); |
| | 870 | |
866 | return 0; | 871 | return 0; |
867 | } | 872 | } |
868 | 873 | ||